diff --git a/index.html b/index.html
index 45d4b6ca5df2..04f7a7bb8bec 100755
--- a/index.html
+++ b/index.html
@@ -351,7 +351,7 @@

Celo's optimism

@@ -369,18 +369,18 @@
[page summary counters updated: 24, 17603, 808, 1028 → 14732, 23821, 548, 667]

This is an overview of the changes in Celo’s optimism implementation, a fork of Optimism’s optimism monorepo.

@@ -393,7 +393,7 @@
[removed: the embedded diff for snapshots/abi/FeeCurrencyWhitelist.json, re-added further down]
diff --git OP/packages/contracts-bedrock/periphery-deploy-config/optimism-sepolia.json CELO/packages/contracts-bedrock/deploy-config-periphery/deploy/optimism-sepolia.json
rename from packages/contracts-bedrock/periphery-deploy-config/optimism-sepolia.json
rename to packages/contracts-bedrock/deploy-config-periphery/deploy/optimism-sepolia.json
[removed: the embedded diff for snapshots/abi/FeeHandler.json, re-added further down]
diff --git OP/packages/contracts-bedrock/periphery-deploy-config/sepolia.json CELO/packages/contracts-bedrock/deploy-config-periphery/deploy/sepolia.json
rename from packages/contracts-bedrock/periphery-deploy-config/sepolia.json
rename to packages/contracts-bedrock/deploy-config-periphery/deploy/sepolia.json
[file header: (new) → CELO, +0 −0]
diff --git OP/packages/contracts-bedrock/periphery-deploy-config/drippie-config/sepolia-faucet-bridges.json CELO/packages/contracts-bedrock/deploy-config-periphery/drippie/sepolia-faucet-bridges.json
rename from packages/contracts-bedrock/periphery-deploy-config/drippie-config/sepolia-faucet-bridges.json
rename to packages/contracts-bedrock/deploy-config-periphery/drippie/sepolia-faucet-bridges.json
[file header: (new) → CELO, +0 −0]
diff --git OP/packages/contracts-bedrock/periphery-deploy-config/drippie-config/sepolia-faucet-core.json CELO/packages/contracts-bedrock/deploy-config-periphery/drippie/sepolia-faucet-core.json
rename from packages/contracts-bedrock/periphery-deploy-config/drippie-config/sepolia-faucet-core.json
rename to packages/contracts-bedrock/deploy-config-periphery/drippie/sepolia-faucet-core.json
[file header: (new) → CELO, +0 −0]
diff --git OP/packages/contracts-bedrock/periphery-deploy-config/drippie-config/sepolia-ops.json CELO/packages/contracts-bedrock/deploy-config-periphery/drippie/sepolia-ops.json
rename from packages/contracts-bedrock/periphery-deploy-config/drippie-config/sepolia-ops.json
rename to packages/contracts-bedrock/deploy-config-periphery/drippie/sepolia-ops.json
[file header: OP → CELO, +2 −1]
diff --git OP/packages/contracts-bedrock/deploy-config/devnetL1-template.json CELO/packages/contracts-bedrock/deploy-config/devnetL1-template.json
index b2c991c27e657519fe95863748788adc45eaab4d..f8c2fbee4732c6ce09f991ab5aa13db83751e5a9 100644
--- OP/packages/contracts-bedrock/deploy-config/devnetL1-template.json
+++ CELO/packages/contracts-bedrock/deploy-config/devnetL1-template.json
@@ -70,5 +70,6 @@ "daCommitmentType": "KeccakCommitment",
   "daChallengeWindow": 160,
   "daResolveWindow": 160,
   "daBondSize": 1000000,
-  "daResolverRefundPercentage": 0
+  "daResolverRefundPercentage": 0,
+  "deployCeloContracts": true
 }
[file header: OP → CELO, +3 −2]
diff --git OP/packages/contracts-bedrock/foundry.toml CELO/packages/contracts-bedrock/foundry.toml
index b408087239f542f4d4b874b14dfe67a348c031c0..3e09e9129c4c9b2dea402ef7c05d80555687d9e9 100644
--- OP/packages/contracts-bedrock/foundry.toml
+++ CELO/packages/contracts-bedrock/foundry.toml
@@ -36,12 +36,13 @@ { access='read-write', path='./.resource-metering.csv' },
   { access='read-write', path='./snapshots/' },
   { access='read-write', path='./deployments/' },
   { access='read', path='./deploy-config/' },
-  { access='read', path='./periphery-deploy-config/' },
+  { access='read', path='./deploy-config-periphery/' },
   { access='read', path='./broadcast/' },
   { access='read', path = './forge-artifacts/' },
   { access='write', path='./semver-lock.json' },
   { access='read-write', path='./.testdata/' },
-  { access='read', path='./kout-deployment' }
+  { access='read', path='./kout-deployment' },
+  { access='read-write', path='../../op-chain-ops/cmd/celo-migrate/testdata/' },
 ]
 libs = ["node_modules", "lib"]
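The new `fs_permissions` entry matters because Foundry sandboxes file-system cheatcodes to explicitly allowed paths. A minimal sketch of the kind of access the entry enables; the test contract and file name are hypothetical, only the directory comes from the foundry.toml change above:

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

import { Test } from "forge-std/Test.sol";

// Sketch: without the read-write fs_permissions entry above, this write would
// revert with a "path not allowed" cheatcode error.
contract CeloMigrateTestdataSketch is Test {
    function test_canWriteTestdata() external {
        // Hypothetical file name under the newly permitted directory.
        vm.writeFile("../../op-chain-ops/cmd/celo-migrate/testdata/example.json", "{}");
    }
}
```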
[file header: OP → CELO, +2 −1]
diff --git OP/packages/contracts-bedrock/package.json CELO/packages/contracts-bedrock/package.json
index 13d1b79d933485a61553426b2bb0d6b1b171f596..adf176d3dfde8a4c77bffe607060602311a796f7 100644
--- OP/packages/contracts-bedrock/package.json
+++ CELO/packages/contracts-bedrock/package.json
@@ -12,6 +12,7 @@ ],
   "scripts": {
     "prebuild": "./scripts/checks/check-foundry-install.sh",
     "build": "forge build",
+    "build:linkedLibraries": "forge build --libraries src/celo/common/linkedlists/AddressSortedLinkedListWithMedian.sol:AddressSortedLinkedListWithMedian:0xED477A99035d0c1e11369F1D7A4e587893cc002B",
     "build:go-ffi": "(cd scripts/go-ffi && go build)",
     "autogen:invariant-docs": "npx tsx scripts/autogen/generate-invariant-docs.ts",
     "test": "pnpm build:go-ffi && forge test",
@@ -19,7 +20,7 @@ "test:kontrol": "./test/kontrol/scripts/run-kontrol.sh script",
     "genesis": "forge script scripts/L2Genesis.s.sol:L2Genesis --sig 'runWithStateDump()'",
     "coverage": "pnpm build:go-ffi && (forge coverage || (bash -c \"forge coverage 2>&1 | grep -q 'Stack too deep' && echo -e '\\033[1;33mWARNING\\033[0m: Coverage failed with stack too deep, so overriding and exiting successfully' && exit 0 || exit 1\"))",
     "coverage:lcov": "pnpm build:go-ffi && (forge coverage --report lcov || (bash -c \"forge coverage --report lcov 2>&1 | grep -q 'Stack too deep' && echo -e '\\033[1;33mWARNING\\033[0m: Coverage failed with stack too deep, so overriding and exiting successfully' && exit 0 || exit 1\"))",
-    "deploy": "./scripts/deploy.sh",
+    "deploy": "./scripts/deploy/deploy.sh",
     "gas-snapshot:no-build": "forge snapshot --match-contract GasBenchMark",
     "statediff": "./scripts/statediff.sh && git diff --exit-code",
     "gas-snapshot": "pnpm build:go-ffi && pnpm gas-snapshot:no-build",
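The `build:linkedLibraries` script exists because a Solidity library with external functions is deployed as its own contract and referenced by address; `forge build --libraries` resolves that address at build time. A toy illustration with a hypothetical `MedianLib` (not the real AddressSortedLinkedListWithMedian API):

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

// Calls to a library's external functions compile to DELEGATECALLs against a
// placeholder address that must be filled in when linking, which is exactly
// what the pinned address in build:linkedLibraries supplies.
library MedianLib {
    function median3(uint256 a, uint256 b, uint256 c) external pure returns (uint256) {
        // Sort the three values pairwise and return the middle one.
        if (a > b) (a, b) = (b, a);
        if (b > c) (b, c) = (c, b);
        return a > b ? a : b;
    }
}

contract UsesMedian {
    function mid(uint256 a, uint256 b, uint256 c) external pure returns (uint256) {
        return MedianLib.median3(a, b, c); // link-time-resolved library call
    }
}
```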
[file header: OP → CELO, +2 −2]
diff --git OP/packages/contracts-bedrock/scripts/ChainAssertions.sol CELO/packages/contracts-bedrock/scripts/ChainAssertions.sol
index a99c14e9514bce2c12a5e3e0782fa1e87f2dd945..b0245fc09fbb0e259af9f2a42ab87f7515e403ea 100644
--- OP/packages/contracts-bedrock/scripts/ChainAssertions.sol
+++ CELO/packages/contracts-bedrock/scripts/ChainAssertions.sol
@@ -3,8 +3,8 @@ pragma solidity ^0.8.0;
 
 import { ProxyAdmin } from "src/universal/ProxyAdmin.sol";
 import { ResourceMetering } from "src/L1/ResourceMetering.sol";
-import { DeployConfig } from "scripts/DeployConfig.s.sol";
-import { Deployer } from "scripts/Deployer.sol";
+import { DeployConfig } from "scripts/deploy/DeployConfig.s.sol";
+import { Deployer } from "scripts/deploy/Deployer.sol";
 import { SystemConfig } from "src/L1/SystemConfig.sol";
 import { Constants } from "src/libraries/Constants.sol";
 import { L1StandardBridge } from "src/L1/L1StandardBridge.sol";
[file header: OP → CELO, +211 −3]
diff --git OP/packages/contracts-bedrock/scripts/L2Genesis.s.sol CELO/packages/contracts-bedrock/scripts/L2Genesis.s.sol +index 44607c53465089d6095be12ac4208b344f8dc001..5fbd9b30ad5743a9b221a7ca63cd799f76a3dd41 100644 +--- OP/packages/contracts-bedrock/scripts/L2Genesis.s.sol ++++ CELO/packages/contracts-bedrock/scripts/L2Genesis.s.sol +@@ -3,11 +3,11 @@ pragma solidity 0.8.15; +  + import { Script } from "forge-std/Script.sol"; + import { console2 as console } from "forge-std/console2.sol"; +-import { Deployer } from "scripts/Deployer.sol"; ++import { Deployer } from "scripts/deploy/Deployer.sol"; +  + import { Config, OutputMode, OutputModeUtils, Fork, ForkUtils, LATEST_FORK } from "scripts/Config.sol"; + import { Artifacts } from "scripts/Artifacts.s.sol"; +-import { DeployConfig } from "scripts/DeployConfig.s.sol"; ++import { DeployConfig } from "scripts/deploy/DeployConfig.s.sol"; + import { Predeploys } from "src/libraries/Predeploys.sol"; + import { Preinstalls } from "src/libraries/Preinstalls.sol"; + import { L2CrossDomainMessenger } from "src/L2/L2CrossDomainMessenger.sol"; +@@ -26,6 +26,19 @@ import { L1StandardBridge } from "src/L1/L1StandardBridge.sol"; + import { FeeVault } from "src/universal/FeeVault.sol"; + import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; + import { Process } from "scripts/libraries/Process.sol"; ++import { GoldToken } from "src/celo/GoldToken.sol"; ++ ++import { CeloPredeploys } from "src/celo/CeloPredeploys.sol"; ++import { CeloRegistry } from "src/celo/CeloRegistry.sol"; ++import { FeeHandler } from "src/celo/FeeHandler.sol"; ++import { FeeCurrencyWhitelist } from "src/celo/FeeCurrencyWhitelist.sol"; ++import { MentoFeeHandlerSeller } from "src/celo/MentoFeeHandlerSeller.sol"; ++import { UniswapFeeHandlerSeller } from "src/celo/UniswapFeeHandlerSeller.sol"; ++import { SortedOracles } from "src/celo/stability/SortedOracles.sol"; ++import { FeeCurrencyDirectory } from "src/celo/FeeCurrencyDirectory.sol"; ++import { FeeCurrency } from "src/celo/testing/FeeCurrency.sol"; ++import { AddressSortedLinkedListWithMedian } from "src/celo/common/linkedlists/AddressSortedLinkedListWithMedian.sol"; ++import { StableTokenV2 } from "src/celo/StableTokenV2.sol"; +  + interface IInitializable { + function initialize(address _addr) external; +@@ -134,10 +147,15 @@ console.log("L2Genesis: outputMode: %s, fork: %s", _mode.toString(), _fork.toString()); + vm.startPrank(deployer); + vm.chainId(cfg.l2ChainID()); +  +- dealEthToPrecompiles(); ++ if (cfg.deployCeloContracts()) { ++ dealEthToPrecompiles(); ++ } + setPredeployProxies(); + setPredeployImplementations(_l1Dependencies); + setPreinstalls(); ++ if (cfg.deployCeloContracts()) { ++ setCeloPredeploys(); ++ } + if (cfg.fundDevAccounts()) { + fundDevAccounts(); + } +@@ -567,5 +585,195 @@ for (uint256 i; i < devAccounts.length; i++) { + console.log("Funding dev account %s with %s ETH", devAccounts[i], DEV_ACCOUNT_FUND_AMT / 1e18); + vm.deal(devAccounts[i], DEV_ACCOUNT_FUND_AMT); + } ++ } ++ ++ ///@notice Sets all proxies and implementations for Celo contracts ++ function setCeloPredeploys() internal { ++ console.log("Deploying Celo contracts"); ++ ++ setCeloRegistry(); ++ setCeloGoldToken(); ++ setCeloFeeHandler(); ++ setCeloFeeCurrencyWhitelist(); ++ setCeloMentoFeeHandlerSeller(); ++ setCeloUniswapFeeHandlerSeller(); ++ // setCeloSortedOracles(); ++ // setCeloAddressSortedLinkedListWithMedian(); ++ setCeloFeeCurrency(); ++ setFeeCurrencyDirectory(); ++ ++ address[] memory initialBalanceAddresses = new 
address[](1); ++ initialBalanceAddresses[0] = devAccounts[0]; ++ ++ uint256[] memory initialBalances = new uint256[](1); ++ initialBalances[0] = 100_000 ether; ++ //deploycUSD(initialBalanceAddresses, initialBalances, 2); ++ } ++ ++ /// @notice Sets up a proxy for the given impl address ++ function _setupProxy(address addr, address impl) internal returns (address) { ++ bytes memory code = vm.getDeployedCode("Proxy.sol:Proxy"); ++ vm.etch(addr, code); ++ EIP1967Helper.setAdmin(addr, Predeploys.PROXY_ADMIN); ++ ++ console.log("Setting proxy %s with implementation: %s", addr, impl); ++ EIP1967Helper.setImplementation(addr, impl); ++ ++ return addr; ++ } ++ ++ function setCeloRegistry() internal { ++ CeloRegistry kontract = new CeloRegistry({ test: false }); ++ ++ address precompile = CeloPredeploys.CELO_REGISTRY; ++ string memory cname = CeloPredeploys.getName(precompile); ++ console.log("Deploying %s implementation at: %s", cname, address(kontract)); ++ ++ vm.resetNonce(address(kontract)); ++ _setupProxy(precompile, address(kontract)); ++ } ++ ++ function setCeloGoldToken() internal { ++ GoldToken kontract = new GoldToken({ test: false }); ++ ++ address precompile = CeloPredeploys.GOLD_TOKEN; ++ string memory cname = CeloPredeploys.getName(precompile); ++ console.log("Deploying %s implementation at: %s", cname, address(kontract)); ++ ++ vm.resetNonce(address(kontract)); ++ _setupProxy(precompile, address(kontract)); ++ } ++ ++ function setCeloFeeHandler() internal { ++ FeeHandler kontract = new FeeHandler({ test: false }); ++ ++ address precompile = CeloPredeploys.FEE_HANDLER; ++ string memory cname = CeloPredeploys.getName(precompile); ++ console.log("Deploying %s implementation at: %s", cname, address(kontract)); ++ ++ vm.resetNonce(address(kontract)); ++ _setupProxy(precompile, address(kontract)); ++ } ++ ++ function setCeloFeeCurrencyWhitelist() internal { ++ FeeCurrencyWhitelist kontract = new FeeCurrencyWhitelist({ test: false }); ++ ++ address precompile = CeloPredeploys.FEE_CURRENCY_WHITELIST; ++ string memory cname = CeloPredeploys.getName(precompile); ++ console.log("Deploying %s implementation at: %s", cname, address(kontract)); ++ ++ vm.resetNonce(address(kontract)); ++ _setupProxy(precompile, address(kontract)); ++ } ++ ++ function setCeloMentoFeeHandlerSeller() internal { ++ MentoFeeHandlerSeller kontract = new MentoFeeHandlerSeller({ test: false }); ++ ++ address precompile = CeloPredeploys.MENTO_FEE_HANDLER_SELLER; ++ string memory cname = CeloPredeploys.getName(precompile); ++ console.log("Deploying %s implementation at: %s", cname, address(kontract)); ++ ++ vm.resetNonce(address(kontract)); ++ _setupProxy(precompile, address(kontract)); ++ } ++ ++ function setCeloUniswapFeeHandlerSeller() internal { ++ UniswapFeeHandlerSeller kontract = new UniswapFeeHandlerSeller({ test: false }); ++ ++ address precompile = CeloPredeploys.UNISWAP_FEE_HANDLER_SELLER; ++ string memory cname = CeloPredeploys.getName(precompile); ++ console.log("Deploying %s implementation at: %s", cname, address(kontract)); ++ ++ vm.resetNonce(address(kontract)); ++ _setupProxy(precompile, address(kontract)); ++ } ++ ++ function setCeloSortedOracles() internal { ++ SortedOracles kontract = new SortedOracles({ test: false }); ++ ++ address precompile = CeloPredeploys.SORTED_ORACLES; ++ string memory cname = CeloPredeploys.getName(precompile); ++ console.log("Deploying %s implementation at: %s", cname, address(kontract)); ++ ++ vm.resetNonce(address(kontract)); ++ _setupProxy(precompile, address(kontract)); ++ 
} ++ ++ function setFeeCurrencyDirectory() internal { ++ FeeCurrencyDirectory feeCurrencyDirectory = new FeeCurrencyDirectory({ test: false }); ++ ++ address precompile = CeloPredeploys.FEE_CURRENCY_DIRECTORY; ++ string memory cname = CeloPredeploys.getName(precompile); ++ console.log("Deploying %s implementation at: %s", cname, address(feeCurrencyDirectory)); ++ ++ vm.resetNonce(address(feeCurrencyDirectory)); ++ _setupProxy(precompile, address(feeCurrencyDirectory)); ++ ++ vm.startPrank(devAccounts[0]); ++ FeeCurrencyDirectory(precompile).initialize(); ++ vm.stopPrank(); ++ } ++ ++ // function setCeloAddressSortedLinkedListWithMedian() internal { ++ // AddressSortedLinkedListWithMedian kontract = new AddressSortedLinkedListWithMedian({ ++ // }); ++ // address precompile = CeloPredeploys.ADDRESS_SORTED_LINKED_LIST_WITH_MEDIAN; ++ // string memory cname = CeloPredeploys.getName(precompile); ++ // console.log("Deploying %s implementation at: %s", cname, address(kontract )); ++ // vm.resetNonce(address(kontract )); ++ // _setupProxy(precompile, address(kontract)); ++ // } ++ ++ function setCeloFeeCurrency() internal { ++ FeeCurrency kontract = new FeeCurrency({ name_: "Test", symbol_: "TST" }); ++ address precompile = CeloPredeploys.FEE_CURRENCY; ++ string memory cname = CeloPredeploys.getName(precompile); ++ console.log("Deploying %s implementation at: %s", cname, address(kontract)); ++ vm.resetNonce(address(kontract)); ++ _setupProxy(precompile, address(kontract)); ++ } ++ ++ function deploycUSD( ++ address[] memory initialBalanceAddresses, ++ uint256[] memory initialBalanceValues, ++ uint256 celoPrice ++ ) ++ public ++ { ++ StableTokenV2 kontract = new StableTokenV2({ disable: false }); ++ address cusdProxyAddress = CeloPredeploys.cUSD; ++ string memory cname = CeloPredeploys.getName(cusdProxyAddress); ++ console.log("Deploying %s implementation at: %s", cname, address(kontract)); ++ vm.resetNonce(address(kontract)); ++ ++ _setupProxy(cusdProxyAddress, address(kontract)); ++ ++ kontract.initialize("Celo Dollar", "cUSD", initialBalanceAddresses, initialBalanceValues); ++ ++ SortedOracles sortedOracles = SortedOracles(CeloPredeploys.SORTED_ORACLES); ++ ++ console.log("beofre add oracle"); ++ ++ vm.startPrank(sortedOracles.owner()); ++ sortedOracles.addOracle(cusdProxyAddress, deployer); ++ vm.stopPrank(); ++ vm.startPrank(deployer); ++ ++ if (celoPrice != 0) { ++ sortedOracles.report(cusdProxyAddress, celoPrice * 1e24, address(0), address(0)); // TODO use fixidity ++ } ++ ++ /* ++ Arbitrary intrinsic gas number take from existing `FeeCurrencyDirectory.t.sol` tests ++ Source: ++ https://github.com/celo-org/celo-monorepo/blob/2cec07d43328cf4216c62491a35eacc4960fffb6/packages/protocol/test-sol/common/FeeCurrencyDirectory.t.sol#L27 ++ */ ++ uint256 mockIntrinsicGas = 21000; ++ ++ FeeCurrencyDirectory feeCurrencyDirectory = FeeCurrencyDirectory(CeloPredeploys.FEE_CURRENCY_DIRECTORY); ++ vm.startPrank(feeCurrencyDirectory.owner()); ++ feeCurrencyDirectory.setCurrencyConfig(cusdProxyAddress, address(sortedOracles), mockIntrinsicGas); ++ vm.stopPrank(); ++ vm.startPrank(deployer); + } + }
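`_setupProxy` above wires each Celo predeploy the same way the OP predeploys are wired: `vm.etch` places the `Proxy.sol` bytecode at the fixed address, then the EIP-1967 admin and implementation slots are written directly. A minimal sketch (not the repo's tests) of reading that wiring back:

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

import { Test } from "forge-std/Test.sol";

// Sketch of verifying a predeploy set up by _setupProxy: the implementation
// address lives in the standard EIP-1967 implementation slot of the proxy.
contract PredeployWiringSketch is Test {
    bytes32 internal constant IMPL_SLOT =
        bytes32(uint256(keccak256("eip1967.proxy.implementation")) - 1);

    function implementationOf(address proxy) internal view returns (address) {
        // Read the raw storage slot and truncate it to an address.
        return address(uint160(uint256(vm.load(proxy, IMPL_SLOT))));
    }
}
```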
[file header: OP → (deleted), +0 −15]
diff --git OP/packages/contracts-bedrock/scripts/deploy.sh CELO/packages/contracts-bedrock/scripts/deploy.sh
deleted file mode 100755
index bfbd436eb5fafd0bf54b7dce76da523d28aab6be..0000000000000000000000000000000000000000
--- OP/packages/contracts-bedrock/scripts/deploy.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env bash
-set -euo pipefail
-
-verify_flag=""
-if [ -n "${DEPLOY_VERIFY:-}" ]; then
-  verify_flag="--verify"
-fi
-
-echo "> Deploying contracts"
-forge script -vvv scripts/Deploy.s.sol:Deploy --rpc-url "$DEPLOY_ETH_RPC_URL" --broadcast --private-key "$DEPLOY_PRIVATE_KEY" $verify_flag
-
-if [ -n "${DEPLOY_GENERATE_HARDHAT_ARTIFACTS:-}" ]; then
-  echo "> Generating hardhat artifacts"
-  forge script -vvv scripts/Deploy.s.sol:Deploy --sig 'sync()' --rpc-url "$DEPLOY_ETH_RPC_URL" --broadcast --private-key "$DEPLOY_PRIVATE_KEY"
-fi
[file header: (new) → CELO, +1 −1]
diff --git OP/packages/contracts-bedrock/scripts/Deploy.s.sol CELO/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol
rename from packages/contracts-bedrock/scripts/Deploy.s.sol
rename to packages/contracts-bedrock/scripts/deploy/Deploy.s.sol
index 4b24d45926bc87e0c839e8e8cf18214153965086..52f7a505e63d6b710327f7b7e95ba3f7bf0aab6a 100644
--- OP/packages/contracts-bedrock/scripts/Deploy.s.sol
+++ CELO/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol
@@ -12,7 +12,7 @@ import { OwnerManager } from "safe-contracts/base/OwnerManager.sol";
 import { GnosisSafeProxyFactory as SafeProxyFactory } from "safe-contracts/proxies/GnosisSafeProxyFactory.sol";
 import { Enum as SafeOps } from "safe-contracts/common/Enum.sol";
 
-import { Deployer } from "scripts/Deployer.sol";
+import { Deployer } from "scripts/deploy/Deployer.sol";
 
 import { ProxyAdmin } from "src/universal/ProxyAdmin.sol";
 import { AddressManager } from "src/legacy/AddressManager.sol";
[file header: (new) → CELO, +10 −0]
diff --git OP/packages/contracts-bedrock/scripts/DeployConfig.s.sol CELO/packages/contracts-bedrock/scripts/deploy/DeployConfig.s.sol
rename from packages/contracts-bedrock/scripts/DeployConfig.s.sol
rename to packages/contracts-bedrock/scripts/deploy/DeployConfig.s.sol
index 25869e97f0807563f327c6047d65cdb0b9156869..5faf1b94b3bddf08e49728392f44d7a5378731d2 100644
--- OP/packages/contracts-bedrock/scripts/DeployConfig.s.sol
+++ CELO/packages/contracts-bedrock/scripts/deploy/DeployConfig.s.sol
@@ -91,6 +91,8 @@ address public customGasTokenAddress;
 
     bool public useInterop;
 
+    bool public deployCeloContracts;
+
     function read(string memory _path) public {
         console.log("DeployConfig: reading file %s", _path);
         try vm.readFile(_path) returns (string memory data) {
@@ -174,6 +176,9 @@ useCustomGasToken = _readOr(_json, "$.useCustomGasToken", false);
         customGasTokenAddress = _readOr(_json, "$.customGasTokenAddress", address(0));
 
         useInterop = _readOr(_json, "$.useInterop", false);
+
+        // Celo specific config
+        deployCeloContracts = _readOr(_json, "$.deployCeloContracts", false);
     }
 
     function fork() public view returns (Fork fork_) {
@@ -234,6 +239,11 @@
     /// @notice Allow the `fundDevAccounts` config to be overridden.
     function setFundDevAccounts(bool _fundDevAccounts) public {
         fundDevAccounts = _fundDevAccounts;
+    }
+
+    /// @notice Allow the `deployCeloContracts` config to be overridden.
+    function setDeployCeloContracts(bool _deployCeloContracts) public {
+        deployCeloContracts = _deployCeloContracts;
     }
 
     /// @notice Allow the `useCustomGasToken` config to be overridden in testing environments
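Because `deployCeloContracts` is read with `_readOr`, deploy-config JSON files that omit the key keep working and the flag defaults to `false`; the setter lets tests flip it without editing JSON. A small usage sketch (the test contract is hypothetical):

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

import { Test } from "forge-std/Test.sol";
import { DeployConfig } from "scripts/deploy/DeployConfig.s.sol";

// Sketch: override the flag in memory instead of adding it to a JSON file.
contract DeployCeloFlagSketch is Test {
    function test_overrideDeployCeloContracts() external {
        DeployConfig cfg = new DeployConfig();
        cfg.setDeployCeloContracts(true);
        assertTrue(cfg.deployCeloContracts());
    }
}
```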
[file header: (new) → CELO, +1 −1]
diff --git OP/packages/contracts-bedrock/scripts/DeployOwnership.s.sol CELO/packages/contracts-bedrock/scripts/deploy/DeployOwnership.s.sol
rename from packages/contracts-bedrock/scripts/DeployOwnership.s.sol
rename to packages/contracts-bedrock/scripts/deploy/DeployOwnership.s.sol
index bb436d69b15b37374db542726fd2e9e6d099f801..05fbfd54df93711c6bed69461abda1ca4d5513a5 100644
--- OP/packages/contracts-bedrock/scripts/DeployOwnership.s.sol
+++ CELO/packages/contracts-bedrock/scripts/deploy/DeployOwnership.s.sol
@@ -9,7 +9,7 @@ import { OwnerManager } from "safe-contracts/base/OwnerManager.sol";
 import { ModuleManager } from "safe-contracts/base/ModuleManager.sol";
 import { GuardManager } from "safe-contracts/base/GuardManager.sol";
 
-import { Deployer } from "scripts/Deployer.sol";
+import { Deployer } from "scripts/deploy/Deployer.sol";
 
 import { LivenessGuard } from "src/Safe/LivenessGuard.sol";
 import { LivenessModule } from "src/Safe/LivenessModule.sol";
[file header: (new) → CELO, +1 −1]
diff --git OP/packages/contracts-bedrock/scripts/Deployer.sol CELO/packages/contracts-bedrock/scripts/deploy/Deployer.sol
rename from packages/contracts-bedrock/scripts/Deployer.sol
rename to packages/contracts-bedrock/scripts/deploy/Deployer.sol
index aac3f5ac8ec2535c310fa3763971bbd6efbc0288..2a861ba34608416e7d1ea78d3958a37ca818c6a1 100644
--- OP/packages/contracts-bedrock/scripts/Deployer.sol
+++ CELO/packages/contracts-bedrock/scripts/deploy/Deployer.sol
@@ -4,7 +4,7 @@
 import { Script } from "forge-std/Script.sol";
 import { Artifacts } from "scripts/Artifacts.s.sol";
 import { Config } from "scripts/Config.sol";
-import { DeployConfig } from "scripts/DeployConfig.s.sol";
+import { DeployConfig } from "scripts/deploy/DeployConfig.s.sol";
 import { Executables } from "scripts/Executables.sol";
 import { console } from "forge-std/console.sol";
[file header: (new) → CELO, +15 −0]
diff --git OP/packages/contracts-bedrock/scripts/deploy/deploy.sh CELO/packages/contracts-bedrock/scripts/deploy/deploy.sh
new file mode 100755
index 0000000000000000000000000000000000000000..bc497e0b8568a11a5b6166b64915ff42049df8f1
--- /dev/null
+++ CELO/packages/contracts-bedrock/scripts/deploy/deploy.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+verify_flag=""
+if [ -n "${DEPLOY_VERIFY:-}" ]; then
+  verify_flag="--verify"
+fi
+
+echo "> Deploying contracts"
+forge script -vvv scripts/deploy/Deploy.s.sol:Deploy --rpc-url "$DEPLOY_ETH_RPC_URL" --broadcast --private-key "$DEPLOY_PRIVATE_KEY" $verify_flag
+
+if [ -n "${DEPLOY_GENERATE_HARDHAT_ARTIFACTS:-}" ]; then
+  echo "> Generating hardhat artifacts"
+  forge script -vvv scripts/deploy/Deploy.s.sol:Deploy --sig 'sync()' --rpc-url "$DEPLOY_ETH_RPC_URL" --broadcast --private-key "$DEPLOY_PRIVATE_KEY"
+fi
[file header: OP → CELO, +1 −1]
diff --git OP/packages/contracts-bedrock/scripts/fpac/FPACOPS.s.sol CELO/packages/contracts-bedrock/scripts/fpac/FPACOPS.s.sol
index 434b5274d9290e9494b8f7fcadd869cdb11193a9..72011adfc896a784659d7c8638fba6325b7467c4 100644
--- OP/packages/contracts-bedrock/scripts/fpac/FPACOPS.s.sol
+++ CELO/packages/contracts-bedrock/scripts/fpac/FPACOPS.s.sol
@@ -7,7 +7,7 @@ import { AnchorStateRegistry, IAnchorStateRegistry } from "src/dispute/AnchorStateRegistry.sol";
 import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol";
 import { StdAssertions } from "forge-std/StdAssertions.sol";
 import "src/dispute/lib/Types.sol";
-import "scripts/Deploy.s.sol";
+import "scripts/deploy/Deploy.s.sol";
 
 /// @notice Deploys the Fault Proof Alpha Chad contracts.
 contract FPACOPS is Deploy, StdAssertions {
[file header: (new) → CELO, +135 −0]
diff --git OP/packages/contracts-bedrock/scripts/getting-started/config-vars-celo.sh CELO/packages/contracts-bedrock/scripts/getting-started/config-vars-celo.sh +new file mode 100755 +index 0000000000000000000000000000000000000000..4de42fc4c1d601daba8d9fb7e89840d641268149 +--- /dev/null ++++ CELO/packages/contracts-bedrock/scripts/getting-started/config-vars-celo.sh +@@ -0,0 +1,135 @@ ++#!/usr/bin/env bash ++ ++# This script is used to generate the getting-started.json configuration file ++# used in the Getting Started quickstart guide on the docs site. Avoids the ++# need to have the getting-started.json committed to the repo since it's an ++# invalid JSON file when not filled in, which is annoying. ++ ++reqenv() { ++ if [ -z "${!1}" ]; then ++ echo "Error: environment variable '$1' is undefined" ++ exit 1 ++ fi ++} ++ ++# Check required environment variables ++reqenv "DEPLOYMENT_CONTEXT" ++reqenv "GS_ADMIN_ADDRESS" ++reqenv "GS_BATCHER_ADDRESS" ++reqenv "GS_PROPOSER_ADDRESS" ++reqenv "GS_SEQUENCER_ADDRESS" ++reqenv "L1_RPC_URL" ++reqenv "L1_CHAIN_ID" ++reqenv "L2_CHAIN_ID" ++reqenv "L1_BLOCK_TIME" ++reqenv "L2_BLOCK_TIME" ++reqenv "FUNDS_DEV_ACCOUNTS" ++reqenv "USE_PLASMA" ++reqenv "DEPLOY_CELO_CONTRACTS" ++ ++# Get the finalized block timestamp and hash ++block=$(cast block finalized --rpc-url "$L1_RPC_URL") ++timestamp=$(echo "$block" | awk '/timestamp/ { print $2 }') ++blockhash=$(echo "$block" | awk '/hash/ { print $2 }') ++batchInboxAddressSuffix=$(printf "%0$(expr 38 - ${#L2_CHAIN_ID})d" 0)$L2_CHAIN_ID ++batchInboxAddress=0xff$batchInboxAddressSuffix ++ ++# Generate the config file ++config=$(cat << EOL ++{ ++ "l1StartingBlockTag": "$blockhash", ++ ++ "l1ChainID": $L1_CHAIN_ID, ++ "l2ChainID": $L2_CHAIN_ID, ++ "l2BlockTime": $L2_BLOCK_TIME, ++ "l1BlockTime": $L1_BLOCK_TIME, ++ ++ "maxSequencerDrift": 600, ++ "sequencerWindowSize": 3600, ++ "channelTimeout": 300, ++ ++ "p2pSequencerAddress": "$GS_SEQUENCER_ADDRESS", ++ "batchInboxAddress": "$batchInboxAddress", ++ "batchSenderAddress": "$GS_BATCHER_ADDRESS", ++ ++ "l2OutputOracleSubmissionInterval": 120, ++ "l2OutputOracleStartingBlockNumber": 0, ++ "l2OutputOracleStartingTimestamp": $timestamp, ++ ++ "l2OutputOracleProposer": "$GS_PROPOSER_ADDRESS", ++ "l2OutputOracleChallenger": "$GS_ADMIN_ADDRESS", ++ ++ "finalizationPeriodSeconds": 12, ++ ++ "proxyAdminOwner": "$GS_ADMIN_ADDRESS", ++ "baseFeeVaultRecipient": "$GS_ADMIN_ADDRESS", ++ "l1FeeVaultRecipient": "$GS_ADMIN_ADDRESS", ++ "sequencerFeeVaultRecipient": "$GS_ADMIN_ADDRESS", ++ "finalSystemOwner": "$GS_ADMIN_ADDRESS", ++ "superchainConfigGuardian": "$GS_ADMIN_ADDRESS", ++ ++ "baseFeeVaultMinimumWithdrawalAmount": "0x8ac7230489e80000", ++ "l1FeeVaultMinimumWithdrawalAmount": "0x8ac7230489e80000", ++ "sequencerFeeVaultMinimumWithdrawalAmount": "0x8ac7230489e80000", ++ "baseFeeVaultWithdrawalNetwork": 0, ++ "l1FeeVaultWithdrawalNetwork": 0, ++ "sequencerFeeVaultWithdrawalNetwork": 0, ++ ++ "gasPriceOracleOverhead": 0, ++ "gasPriceOracleScalar": 1000000, ++ ++ "deployCeloContracts": $DEPLOY_CELO_CONTRACTS, ++ ++ "enableGovernance": $ENABLE_GOVERNANCE, ++ "governanceTokenSymbol": "OP", ++ "governanceTokenName": "Optimism", ++ "governanceTokenOwner": "$GS_ADMIN_ADDRESS", ++ ++ "l2GenesisBlockGasLimit": "0x1c9c380", ++ "l2GenesisBlockBaseFeePerGas": "0x3b9aca00", ++ "l2GenesisRegolithTimeOffset": "0x0", ++ ++ "eip1559Denominator": 50, ++ "eip1559DenominatorCanyon": 250, ++ "eip1559Elasticity": 6, ++ ++ "l2GenesisFjordTimeOffset": "0x0", ++ "l2GenesisEcotoneTimeOffset": "0x0", ++ 
"l2GenesisDeltaTimeOffset": "0x0", ++ "l2GenesisCanyonTimeOffset": "0x0", ++ ++ "systemConfigStartBlock": 0, ++ ++ "requiredProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000", ++ "recommendedProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000", ++ ++ "faultGameAbsolutePrestate": "0x03c7ae758795765c6664a5d39bf63841c71ff191e9189522bad8ebff5d4eca98", ++ "faultGameMaxDepth": 44, ++ "faultGameClockExtension": 0, ++ "faultGameMaxClockDuration": 600, ++ "faultGameGenesisBlock": 0, ++ "faultGameGenesisOutputRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", ++ "faultGameSplitDepth": 14, ++ "faultGameWithdrawalDelay": 604800, ++ ++ "preimageOracleMinProposalSize": 1800000, ++ "preimageOracleChallengePeriod": 86400, ++ ++ "fundDevAccounts": $FUNDS_DEV_ACCOUNTS, ++ "useFaultProofs": false, ++ "proofMaturityDelaySeconds": 604800, ++ "disputeGameFinalityDelaySeconds": 302400, ++ "respectedGameType": 0, ++ ++ "usePlasma": $USE_PLASMA, ++ "daCommitmentType": "KeccakCommitment", ++ "daChallengeWindow": 160, ++ "daResolveWindow": 160, ++ "daBondSize": 1000000, ++ "daResolverRefundPercentage": 0 ++} ++EOL ++) ++ ++# Write the config file ++echo "$config" > deploy-config/$DEPLOYMENT_CONTEXT.json
[file header: (new) → CELO, +106 −0]
diff --git OP/packages/contracts-bedrock/scripts/getting-started/config-vars-op-stack.sh CELO/packages/contracts-bedrock/scripts/getting-started/config-vars-op-stack.sh +new file mode 100755 +index 0000000000000000000000000000000000000000..6bd06486a139ad18fae06c5163be662ec5b89c46 +--- /dev/null ++++ CELO/packages/contracts-bedrock/scripts/getting-started/config-vars-op-stack.sh +@@ -0,0 +1,106 @@ ++#!/usr/bin/env bash ++ ++# This script is used to generate the getting-started.json configuration file ++# used in the Getting Started quickstart guide on the docs site. Avoids the ++# need to have the getting-started.json committed to the repo since it's an ++# invalid JSON file when not filled in, which is annoying. ++ ++reqenv() { ++ if [ -z "${!1}" ]; then ++ echo "Error: environment variable '$1' is undefined" ++ exit 1 ++ fi ++} ++ ++# Check required environment variables ++reqenv "GS_ADMIN_ADDRESS" ++reqenv "GS_BATCHER_ADDRESS" ++reqenv "GS_PROPOSER_ADDRESS" ++reqenv "GS_SEQUENCER_ADDRESS" ++reqenv "L1_RPC_URL" ++ ++# Get the finalized block timestamp and hash ++block=$(cast block finalized --rpc-url "$L1_RPC_URL") ++timestamp=$(echo "$block" | awk '/timestamp/ { print $2 }') ++blockhash=$(echo "$block" | awk '/hash/ { print $2 }') ++ ++# Generate the config file ++config=$(cat << EOL ++{ ++ "l1StartingBlockTag": "$blockhash", ++ ++ "l1ChainID": $L1_CHAIN_ID, ++ "l2ChainID": $L2_CHAIN_ID, ++ "l2BlockTime": 2, ++ "l1BlockTime": 12, ++ ++ "maxSequencerDrift": 600, ++ "sequencerWindowSize": 3600, ++ "channelTimeout": 300, ++ ++ "p2pSequencerAddress": "$GS_SEQUENCER_ADDRESS", ++ "batchInboxAddress": "0xff00000000000000000000000000000000042069", ++ "batchSenderAddress": "$GS_BATCHER_ADDRESS", ++ ++ "l2OutputOracleSubmissionInterval": 120, ++ "l2OutputOracleStartingBlockNumber": 0, ++ "l2OutputOracleStartingTimestamp": $timestamp, ++ ++ "l2OutputOracleProposer": "$GS_PROPOSER_ADDRESS", ++ "l2OutputOracleChallenger": "$GS_ADMIN_ADDRESS", ++ ++ "finalizationPeriodSeconds": 12, ++ ++ "proxyAdminOwner": "$GS_ADMIN_ADDRESS", ++ "baseFeeVaultRecipient": "$GS_ADMIN_ADDRESS", ++ "l1FeeVaultRecipient": "$GS_ADMIN_ADDRESS", ++ "sequencerFeeVaultRecipient": "$GS_ADMIN_ADDRESS", ++ "finalSystemOwner": "$GS_ADMIN_ADDRESS", ++ "superchainConfigGuardian": "$GS_ADMIN_ADDRESS", ++ ++ "baseFeeVaultMinimumWithdrawalAmount": "0x8ac7230489e80000", ++ "l1FeeVaultMinimumWithdrawalAmount": "0x8ac7230489e80000", ++ "sequencerFeeVaultMinimumWithdrawalAmount": "0x8ac7230489e80000", ++ "baseFeeVaultWithdrawalNetwork": 0, ++ "l1FeeVaultWithdrawalNetwork": 0, ++ "sequencerFeeVaultWithdrawalNetwork": 0, ++ ++ "gasPriceOracleOverhead": 2100, ++ "gasPriceOracleScalar": 1000000, ++ ++ "enableGovernance": true, ++ "governanceTokenSymbol": "OP", ++ "governanceTokenName": "Optimism", ++ "governanceTokenOwner": "$GS_ADMIN_ADDRESS", ++ ++ "l2GenesisBlockGasLimit": "0x1c9c380", ++ "l2GenesisBlockBaseFeePerGas": "0x3b9aca00", ++ "l2GenesisRegolithTimeOffset": "0x0", ++ ++ "eip1559Denominator": 50, ++ "eip1559DenominatorCanyon": 250, ++ "eip1559Elasticity": 6, ++ ++ "l2GenesisDeltaTimeOffset": null, ++ "l2GenesisCanyonTimeOffset": "0x0", ++ ++ "systemConfigStartBlock": 0, ++ ++ "requiredProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000", ++ "recommendedProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000", ++ ++ "faultGameAbsolutePrestate": "0x03c7ae758795765c6664a5d39bf63841c71ff191e9189522bad8ebff5d4eca98", ++ "faultGameMaxDepth": 44, ++ "faultGameMaxDuration": 
1200, ++ "faultGameGenesisBlock": 0, ++ "faultGameGenesisOutputRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", ++ "faultGameSplitDepth": 14, ++ ++ "preimageOracleMinProposalSize": 1800000, ++ "preimageOracleChallengePeriod": 86400 ++} ++EOL ++) ++ ++# Write the config file ++echo "$config" > deploy-config/$DEPLOYMENT_CONTEXT.json
[file header: OP → CELO, +19 −6]
diff --git OP/packages/contracts-bedrock/scripts/libraries/Process.sol CELO/packages/contracts-bedrock/scripts/libraries/Process.sol
index c95a95d76c24fbbba01aafbf6204ef173a6e6f4d..d2cf5c3af4aa04bb69fb6801e63e3ce3fe569d16 100644
--- OP/packages/contracts-bedrock/scripts/libraries/Process.sol
+++ CELO/packages/contracts-bedrock/scripts/libraries/Process.sol
@@ -10,14 +10,27 @@
     /// @notice Foundry cheatcode VM.
     Vm private constant vm = Vm(address(uint160(uint256(keccak256("hevm cheat code")))));
 
-    function run(string[] memory cmd) internal returns (bytes memory stdout_) {
-        Vm.FfiResult memory result = vm.tryFfi(cmd);
+    /// @notice Run a command in a subprocess. Fails if no output is returned.
+    /// @param _command Command to run.
+    function run(string[] memory _command) internal returns (bytes memory stdout_) {
+        stdout_ = run({ _command: _command, _allowEmpty: false });
+    }
+
+    /// @notice Run a command in a subprocess.
+    /// @param _command Command to run.
+    /// @param _allowEmpty Allow empty output.
+    function run(string[] memory _command, bool _allowEmpty) internal returns (bytes memory stdout_) {
+        Vm.FfiResult memory result = vm.tryFfi(_command);
+        string memory command;
+        for (uint256 i = 0; i < _command.length; i++) {
+            command = string.concat(command, _command[i], " ");
+        }
         if (result.exitCode != 0) {
-            string memory command;
-            for (uint256 i = 0; i < cmd.length; i++) {
-                command = string.concat(command, cmd[i], " ");
-            }
             revert FfiFailed(string.concat("Command: ", command, "\nError: ", string(result.stderr)));
+        }
+        // If the output is empty, result.stdout is "[]".
+        if (!_allowEmpty && keccak256(result.stdout) == keccak256(bytes("[]"))) {
+            revert FfiFailed(string.concat("No output from Command: ", command));
         }
         stdout_ = result.stdout;
     }
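The `_allowEmpty` overload changes `Process.run`'s default contract: empty FFI output (which `vm.tryFfi` reports as `[]`) now reverts unless the caller opts out. A usage sketch; the command and contract are arbitrary examples, not repo code:

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

import { Script } from "forge-std/Script.sol";
import { Process } from "scripts/libraries/Process.sol";

// Sketch: a command that legitimately prints nothing must pass
// _allowEmpty = true, or the new empty-output check reverts with FfiFailed.
contract ProcessRunSketch is Script {
    function runSilently() internal {
        string[] memory cmd = new string[](3);
        cmd[0] = "mkdir";
        cmd[1] = "-p";
        cmd[2] = ".testdata"; // mkdir -p prints nothing on success
        Process.run({ _command: cmd, _allowEmpty: true });
    }
}
```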
[file header: OP → CELO, +1 −1]
diff --git OP/packages/contracts-bedrock/scripts/statediff.sh CELO/packages/contracts-bedrock/scripts/statediff.sh
index fa1aef11c88455cd79a2a6d59a2bdb093d812ee4..cce1138c962b66b347d037fa29f2045ac0aa65b1 100755
--- OP/packages/contracts-bedrock/scripts/statediff.sh
+++ CELO/packages/contracts-bedrock/scripts/statediff.sh
@@ -2,4 +2,4 @@ #!/usr/bin/env bash
 set -euo pipefail
 
 echo "> Deploying contracts to generate state diff (non-broadcast)"
-forge script -vvv scripts/Deploy.s.sol:Deploy --sig 'runWithStateDiff()'
+forge script -vvv scripts/deploy/Deploy.s.sol:Deploy --sig 'runWithStateDiff()'
[file header: OP → CELO, +9 −9]
diff --git OP/packages/contracts-bedrock/semver-lock.json CELO/packages/contracts-bedrock/semver-lock.json +index a98510e2dfcd51a02a525b2d148a973cde7ed1b6..3457655acfeb815bb6aef26ee113355656f9ff1f 100644 +--- OP/packages/contracts-bedrock/semver-lock.json ++++ CELO/packages/contracts-bedrock/semver-lock.json +@@ -64,8 +64,8 @@ "initCodeHash": "0x623bf6892f0bdb536f2916bc9eb45e52012ad2c80893ff87d750757966b9be68", + "sourceCodeHash": "0x3a725791a0f5ed84dc46dcdae26f6170a759b2fe3dc360d704356d088b76cfd6" + }, + "src/L2/CrossL2Inbox.sol": { +- "initCodeHash": "0x46e15ac5de81ea415061d049730da25acf31040d6d5d70fe3a9bf4cac100c282", +- "sourceCodeHash": "0xc3d38bfa73fc33369891a2e8c987baf64b1e94c53d6104676fd4c93e1f5c8011" ++ "initCodeHash": "0x074af4b17cfdd1d1dafaaccb79d68ab4ceef50d35dc205aeeedc265e11ae2a92", ++ "sourceCodeHash": "0x5b4355b060e8e5ab81047e5f3d093869c2be7bae14a48a0e5ddf6872a219faf2" + }, + "src/L2/GasPriceOracle.sol": { + "initCodeHash": "0xb16f1e370e58c7693fd113a21a1b1e7ccebc03d4f1e5a76786fc27847ef51ead", +@@ -100,8 +100,8 @@ "initCodeHash": "0x08bbede75cd6dfd076903b8f04d24f82fa7881576c135825098778632e37eebc", + "sourceCodeHash": "0x8388b9b8075f31d580fed815b66b45394e40fb1a63cd8cda2272d2c390fc908c" + }, + "src/L2/L2ToL2CrossDomainMessenger.sol": { +- "initCodeHash": "0x975a4b620e71a1cacd5078972c5e042d010b01e52d0ccd17934cbc7c9890f23b", +- "sourceCodeHash": "0x249218d69909750f5245a42d247a789f1837c24863bded94dc577fcbec914175" ++ "initCodeHash": "0x15fbb6175eb98a7d7c6b99862de49e8c3f8ac768c656e82ad7c41c0d1739bd66", ++ "sourceCodeHash": "0x1f14aafab2cb15970cccedb461b72218fca8afa6ffd0ac696a9e28ff1415a068" + }, + "src/L2/SequencerFeeVault.sol": { + "initCodeHash": "0xb94145f571e92ee615c6fe903b6568e8aac5fe760b6b65148ffc45d2fb0f5433", +@@ -124,8 +124,8 @@ "initCodeHash": "0xde144889fe7d98dbf300a98f5331edd535086a4af8ae6d88ca190c7f4c754a2d", + "sourceCodeHash": "0x3ff4a3f21202478935412d47fd5ef7f94a170402ddc50e5c062013ce5544c83f" + }, + "src/cannon/MIPS.sol": { +- "initCodeHash": "0x1c5dbe83af31e70feb906e2bda2bb1d78d3d15012ec6b11ba5643785657af2a6", +- "sourceCodeHash": "0x9bdc97ff4e51fdec7c3e2113d5b60cd64eeb121a51122bea972789d4a5ac3dfa" ++ "initCodeHash": "0xe9183ee3b69d9ec9594d6b3923d78c86c996cd738ccbc09675bb281284c060af", ++ "sourceCodeHash": "0x7c2eab73da8b2eeadba30eadb39f20e91307bc29218938fadfc5f73fadcf13bc" + }, + "src/cannon/PreimageOracle.sol": { + "initCodeHash": "0xe5db668fe41436f53995e910488c7c140766ba8745e19743773ebab508efd090", +@@ -176,11 +176,11 @@ "initCodeHash": "0xefc67e1be541adfc92f9a5bef36746477299f5e76a4601c12f802af52fb02253", + "sourceCodeHash": "0x323f707d4cebc38f59f9241098a1d7e5e790ffcaf1719065edabf4cb794ac745" + }, + "src/universal/OptimismMintableERC20.sol": { +- "initCodeHash": "0x7c6e1cf86cf8622d8beceafa3610ff88eceb3b0fafff0491bfa26a7b876c4d9a", +- "sourceCodeHash": "0x52737b23e99bf79dd2c23196b3298e80aa41f740efc6adc7916e696833eb546a" ++ "initCodeHash": "0x9b18a1ae827de2c28d3b4f92d9fc718889f23f37fd973cf07ea31b93b8f71d87", ++ "sourceCodeHash": "0x9d5be3fd300151aae4722eb2f76c3ab0256adad0f4b8b2dec80bbeb2cd142ca1" + }, + "src/universal/OptimismMintableERC20Factory.sol": { +- "initCodeHash": "0xf6f522681e7ae940cb778db68004f122b25194296a65bba7ad1d792bd593c4a6", ++ "initCodeHash": "0xf433cfb2b9a65b29c1dd9b6a724bddd3c7bb64e49273492d5bd52e7cb424c4e2", + "sourceCodeHash": "0x9b8c73ea139f13028008eedef53a6b07576cd6b08979574e6dde3192656e9268" + }, + "src/universal/OptimismMintableERC721.sol": {
[file header: (new) → CELO, +0 −0]
diff --git OP/packages/contracts-bedrock/snapshots/abi/CalledByVm.json CELO/packages/contracts-bedrock/snapshots/abi/CalledByVm.json
new file mode 100644
index 0000000000000000000000000000000000000000..0637a088a01e8ddab3bf3fa98dbe804cbde1a0dc
--- /dev/null
+++ CELO/packages/contracts-bedrock/snapshots/abi/CalledByVm.json
@@ -0,0 +1 @@
+[]
\ No newline at end of file
[file header: (new) → CELO, +246 −0]
diff --git OP/packages/contracts-bedrock/snapshots/abi/CeloRegistry.json CELO/packages/contracts-bedrock/snapshots/abi/CeloRegistry.json +new file mode 100644 +index 0000000000000000000000000000000000000000..1f095b33d3bb0171ba35dfdc18e2e5361b4f3e5a +--- /dev/null ++++ CELO/packages/contracts-bedrock/snapshots/abi/CeloRegistry.json +@@ -0,0 +1,247 @@ ++[ ++ { ++ "inputs": [ ++ { ++ "internalType": "bool", ++ "name": "test", ++ "type": "bool" ++ } ++ ], ++ "stateMutability": "nonpayable", ++ "type": "constructor" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "bytes32", ++ "name": "identifierHash", ++ "type": "bytes32" ++ } ++ ], ++ "name": "getAddressFor", ++ "outputs": [ ++ { ++ "internalType": "address", ++ "name": "", ++ "type": "address" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "bytes32", ++ "name": "identifierHash", ++ "type": "bytes32" ++ } ++ ], ++ "name": "getAddressForOrDie", ++ "outputs": [ ++ { ++ "internalType": "address", ++ "name": "", ++ "type": "address" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "string", ++ "name": "identifier", ++ "type": "string" ++ } ++ ], ++ "name": "getAddressForString", ++ "outputs": [ ++ { ++ "internalType": "address", ++ "name": "", ++ "type": "address" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "string", ++ "name": "identifier", ++ "type": "string" ++ } ++ ], ++ "name": "getAddressForStringOrDie", ++ "outputs": [ ++ { ++ "internalType": "address", ++ "name": "", ++ "type": "address" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "initialize", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "initialized", ++ "outputs": [ ++ { ++ "internalType": "bool", ++ "name": "", ++ "type": "bool" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "bytes32[]", ++ "name": "identifierHashes", ++ "type": "bytes32[]" ++ }, ++ { ++ "internalType": "address", ++ "name": "sender", ++ "type": "address" ++ } ++ ], ++ "name": "isOneOf", ++ "outputs": [ ++ { ++ "internalType": "bool", ++ "name": "", ++ "type": "bool" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "owner", ++ "outputs": [ ++ { ++ "internalType": "address", ++ "name": "", ++ "type": "address" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "bytes32", ++ "name": "", ++ "type": "bytes32" ++ } ++ ], ++ "name": "registry", ++ "outputs": [ ++ { ++ "internalType": "address", ++ "name": "", ++ "type": "address" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "renounceOwnership", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "string", ++ "name": "identifier", ++ "type": "string" ++ }, ++ { ++ "internalType": "address", ++ "name": "addr", ++ "type": "address" ++ } ++ ], ++ "name": "setAddressFor", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "newOwner", ++ "type": "address" ++ } ++ ], ++ "name": "transferOwnership", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ 
"type": "function" ++ }, ++ { ++ "anonymous": false, ++ "inputs": [ ++ { ++ "indexed": true, ++ "internalType": "address", ++ "name": "previousOwner", ++ "type": "address" ++ }, ++ { ++ "indexed": true, ++ "internalType": "address", ++ "name": "newOwner", ++ "type": "address" ++ } ++ ], ++ "name": "OwnershipTransferred", ++ "type": "event" ++ }, ++ { ++ "anonymous": false, ++ "inputs": [ ++ { ++ "indexed": false, ++ "internalType": "string", ++ "name": "identifier", ++ "type": "string" ++ }, ++ { ++ "indexed": true, ++ "internalType": "bytes32", ++ "name": "identifierHash", ++ "type": "bytes32" ++ }, ++ { ++ "indexed": true, ++ "internalType": "address", ++ "name": "addr", ++ "type": "address" ++ } ++ ], ++ "name": "RegistryUpdated", ++ "type": "event" ++ } ++] +\ No newline at end of file
[file header: (new) → CELO, +245 −0]
diff --git OP/packages/contracts-bedrock/snapshots/abi/FeeCurrencyDirectory.json CELO/packages/contracts-bedrock/snapshots/abi/FeeCurrencyDirectory.json +new file mode 100644 +index 0000000000000000000000000000000000000000..4c4ccb64968e89d61bc9081c7cfedc3c8bc471f4 +--- /dev/null ++++ CELO/packages/contracts-bedrock/snapshots/abi/FeeCurrencyDirectory.json +@@ -0,0 +1,246 @@ ++[ ++ { ++ "inputs": [ ++ { ++ "internalType": "bool", ++ "name": "test", ++ "type": "bool" ++ } ++ ], ++ "stateMutability": "nonpayable", ++ "type": "constructor" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "", ++ "type": "address" ++ } ++ ], ++ "name": "currencies", ++ "outputs": [ ++ { ++ "internalType": "address", ++ "name": "oracle", ++ "type": "address" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "intrinsicGas", ++ "type": "uint256" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "getCurrencies", ++ "outputs": [ ++ { ++ "internalType": "address[]", ++ "name": "", ++ "type": "address[]" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "token", ++ "type": "address" ++ } ++ ], ++ "name": "getCurrencyConfig", ++ "outputs": [ ++ { ++ "components": [ ++ { ++ "internalType": "address", ++ "name": "oracle", ++ "type": "address" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "intrinsicGas", ++ "type": "uint256" ++ } ++ ], ++ "internalType": "struct IFeeCurrencyDirectory.CurrencyConfig", ++ "name": "", ++ "type": "tuple" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "token", ++ "type": "address" ++ } ++ ], ++ "name": "getExchangeRate", ++ "outputs": [ ++ { ++ "internalType": "uint256", ++ "name": "numerator", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "denominator", ++ "type": "uint256" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "getVersionNumber", ++ "outputs": [ ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ } ++ ], ++ "stateMutability": "pure", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "initialize", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "initialized", ++ "outputs": [ ++ { ++ "internalType": "bool", ++ "name": "", ++ "type": "bool" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "owner", ++ "outputs": [ ++ { ++ "internalType": "address", ++ "name": "", ++ "type": "address" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "token", ++ "type": "address" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "index", ++ "type": "uint256" ++ } ++ ], ++ "name": "removeCurrencies", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "renounceOwnership", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "token", ++ "type": "address" ++ }, 
++ { ++ "internalType": "address", ++ "name": "oracle", ++ "type": "address" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "intrinsicGas", ++ "type": "uint256" ++ } ++ ], ++ "name": "setCurrencyConfig", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "newOwner", ++ "type": "address" ++ } ++ ], ++ "name": "transferOwnership", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "anonymous": false, ++ "inputs": [ ++ { ++ "indexed": true, ++ "internalType": "address", ++ "name": "previousOwner", ++ "type": "address" ++ }, ++ { ++ "indexed": true, ++ "internalType": "address", ++ "name": "newOwner", ++ "type": "address" ++ } ++ ], ++ "name": "OwnershipTransferred", ++ "type": "event" ++ } ++] +\ No newline at end of file
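FeeCurrencyDirectory is the lookup used when paying gas in ERC-20 fee currencies: each registered token maps to an oracle and an intrinsic-gas overhead. An interface sketch derived from the ABI snapshot above (not a repo file):

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

// Interface sketch matching the FeeCurrencyDirectory ABI snapshot above. A
// client quoting gas in a fee currency reads the token's oracle rate and
// intrinsic-gas overhead from one place.
interface IFeeCurrencyDirectory {
    struct CurrencyConfig {
        address oracle;
        uint256 intrinsicGas;
    }

    function getCurrencies() external view returns (address[] memory);
    function getCurrencyConfig(address token) external view returns (CurrencyConfig memory);
    function getExchangeRate(address token) external view returns (uint256 numerator, uint256 denominator);
}
```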
[file header: (new) → CELO, +201 −0]
diff --git OP/packages/contracts-bedrock/snapshots/abi/FeeCurrencyWhitelist.json CELO/packages/contracts-bedrock/snapshots/abi/FeeCurrencyWhitelist.json +new file mode 100644 +index 0000000000000000000000000000000000000000..47fa034f515f46dd4709444ea364a52248c9993c +--- /dev/null ++++ CELO/packages/contracts-bedrock/snapshots/abi/FeeCurrencyWhitelist.json +@@ -0,0 +1,202 @@ ++[ ++ { ++ "inputs": [ ++ { ++ "internalType": "bool", ++ "name": "test", ++ "type": "bool" ++ } ++ ], ++ "stateMutability": "nonpayable", ++ "type": "constructor" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "tokenAddress", ++ "type": "address" ++ } ++ ], ++ "name": "addToken", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "getVersionNumber", ++ "outputs": [ ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ } ++ ], ++ "stateMutability": "pure", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "getWhitelist", ++ "outputs": [ ++ { ++ "internalType": "address[]", ++ "name": "", ++ "type": "address[]" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "initialize", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "initialized", ++ "outputs": [ ++ { ++ "internalType": "bool", ++ "name": "", ++ "type": "bool" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "owner", ++ "outputs": [ ++ { ++ "internalType": "address", ++ "name": "", ++ "type": "address" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "tokenAddress", ++ "type": "address" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "index", ++ "type": "uint256" ++ } ++ ], ++ "name": "removeToken", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "renounceOwnership", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "newOwner", ++ "type": "address" ++ } ++ ], ++ "name": "transferOwnership", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ } ++ ], ++ "name": "whitelist", ++ "outputs": [ ++ { ++ "internalType": "address", ++ "name": "", ++ "type": "address" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "anonymous": false, ++ "inputs": [ ++ { ++ "indexed": false, ++ "internalType": "address", ++ "name": "token", ++ "type": "address" ++ } ++ ], ++ "name": "FeeCurrencyWhitelistRemoved", ++ "type": "event" ++ }, ++ { ++ "anonymous": false, ++ "inputs": [ ++ { ++ "indexed": false, ++ "internalType": "address", ++ "name": "token", ++ "type": "address" ++ } ++ ], ++ "name": "FeeCurrencyWhitelisted", ++ "type": "event" ++ }, ++ { ++ "anonymous": false, ++ "inputs": [ ++ { ++ "indexed": true, ++ "internalType": "address", ++ "name": "previousOwner", ++ "type": "address" ++ }, ++ { ++ "indexed": true, ++ "internalType": "address", ++ "name": "newOwner", ++ 
"type": "address" ++ } ++ ], ++ "name": "OwnershipTransferred", ++ "type": "event" ++ } ++] +\ No newline at end of file
+ (new) CELO +812 -0
diff --git OP/packages/contracts-bedrock/snapshots/abi/FeeHandler.json CELO/packages/contracts-bedrock/snapshots/abi/FeeHandler.json +new file mode 100644 +index 0000000000000000000000000000000000000000..a584a53f686d0c6681b11a0bdbe857415bdc3774 +--- /dev/null ++++ CELO/packages/contracts-bedrock/snapshots/abi/FeeHandler.json +@@ -0,0 +1,813 @@ ++[ ++ { ++ "inputs": [ ++ { ++ "internalType": "bool", ++ "name": "test", ++ "type": "bool" ++ } ++ ], ++ "stateMutability": "nonpayable", ++ "type": "constructor" ++ }, ++ { ++ "stateMutability": "payable", ++ "type": "receive" ++ }, ++ { ++ "inputs": [], ++ "name": "FIXED1_UINT", ++ "outputs": [ ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "MIN_BURN", ++ "outputs": [ ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "tokenAddress", ++ "type": "address" ++ } ++ ], ++ "name": "activateToken", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "tokenAddress", ++ "type": "address" ++ }, ++ { ++ "internalType": "address", ++ "name": "handlerAddress", ++ "type": "address" ++ } ++ ], ++ "name": "addToken", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "burnCelo", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "burnFraction", ++ "outputs": [ ++ { ++ "internalType": "uint256", ++ "name": "value", ++ "type": "uint256" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "celoToBeBurned", ++ "outputs": [ ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "token", ++ "type": "address" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "amountToBurn", ++ "type": "uint256" ++ } ++ ], ++ "name": "dailySellLimitHit", ++ "outputs": [ ++ { ++ "internalType": "bool", ++ "name": "", ++ "type": "bool" ++ } ++ ], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "tokenAddress", ++ "type": "address" ++ } ++ ], ++ "name": "deactivateToken", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "tokenAddress", ++ "type": "address" ++ } ++ ], ++ "name": "distribute", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [], + "name": "distributeAll", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { -+ "inputs": [], -+ "name": "feeBeneficiary", -+ "outputs": [ ++ "inputs": [], ++ "name": "feeBeneficiary", ++ "outputs": [ ++ { ++ "internalType": "address", ++ "name": "", ++ "type": "address" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "getActiveTokens", ++ "outputs": [ ++ { ++ "internalType": "address[]", ++ "name": "", ++ "type": "address[]" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": 
"address", ++ "name": "token", ++ "type": "address" ++ } ++ ], ++ "name": "getPastBurnForToken", ++ "outputs": [ ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "tokenAddress", ++ "type": "address" ++ } ++ ], ++ "name": "getTokenActive", ++ "outputs": [ ++ { ++ "internalType": "bool", ++ "name": "", ++ "type": "bool" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "tokenAddress", ++ "type": "address" ++ } ++ ], ++ "name": "getTokenCurrentDaySellLimit", ++ "outputs": [ ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "tokenAddress", ++ "type": "address" ++ } ++ ], ++ "name": "getTokenDailySellLimit", ++ "outputs": [ ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "tokenAddress", ++ "type": "address" ++ } ++ ], ++ "name": "getTokenHandler", ++ "outputs": [ ++ { ++ "internalType": "address", ++ "name": "", ++ "type": "address" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "tokenAddress", ++ "type": "address" ++ } ++ ], ++ "name": "getTokenMaxSlippage", ++ "outputs": [ ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "tokenAddress", ++ "type": "address" ++ } ++ ], ++ "name": "getTokenToDistribute", ++ "outputs": [ ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "getVersionNumber", ++ "outputs": [ ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ } ++ ], ++ "stateMutability": "pure", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "tokenAddress", ++ "type": "address" ++ } ++ ], ++ "name": "handle", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "handleAll", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "_registryAddress", ++ "type": "address" ++ }, ++ { ++ "internalType": "address", ++ "name": "newFeeBeneficiary", ++ "type": "address" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "newBurnFraction", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "address[]", ++ "name": "tokens", ++ "type": "address[]" ++ }, ++ { ++ "internalType": "address[]", ++ "name": "handlers", ++ "type": "address[]" ++ }, ++ { ++ "internalType": "uint256[]", ++ "name": "newLimits", ++ "type": "uint256[]" ++ }, ++ { ++ "internalType": "uint256[]", ++ "name": "newMaxSlippages", ++ "type": "uint256[]" ++ } ++ ], ++ "name": "initialize", ++ 
"outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "initialized", ++ "outputs": [ ++ { ++ "internalType": "bool", ++ "name": "", ++ "type": "bool" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "lastLimitDay", ++ "outputs": [ ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "owner", ++ "outputs": [ ++ { ++ "internalType": "address", ++ "name": "", ++ "type": "address" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "registry", ++ "outputs": [ ++ { ++ "internalType": "contract ICeloRegistry", ++ "name": "", ++ "type": "address" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "tokenAddress", ++ "type": "address" ++ } ++ ], ++ "name": "removeToken", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "renounceOwnership", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "tokenAddress", ++ "type": "address" ++ } ++ ], ++ "name": "sell", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "uint256", ++ "name": "fraction", ++ "type": "uint256" ++ } ++ ], ++ "name": "setBurnFraction", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "token", ++ "type": "address" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "newLimit", ++ "type": "uint256" ++ } ++ ], ++ "name": "setDailySellLimit", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "beneficiary", ++ "type": "address" ++ } ++ ], ++ "name": "setFeeBeneficiary", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "tokenAddress", ++ "type": "address" ++ }, ++ { ++ "internalType": "address", ++ "name": "handlerAddress", ++ "type": "address" ++ } ++ ], ++ "name": "setHandler", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "token", ++ "type": "address" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "newMax", ++ "type": "uint256" ++ } ++ ], ++ "name": "setMaxSplippage", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "registryAddress", ++ "type": "address" ++ } ++ ], ++ "name": "setRegistry", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "token", ++ "type": "address" ++ }, ++ { ++ "internalType": "address", ++ "name": "recipient", ++ "type": "address" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "value", ++ "type": "uint256" ++ } ++ ], ++ "name": "transfer", ++ "outputs": [ ++ { ++ "internalType": "bool", ++ "name": "", ++ "type": "bool" ++ } ++ ], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ 
"internalType": "address", ++ "name": "newOwner", ++ "type": "address" ++ } ++ ], ++ "name": "transferOwnership", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "anonymous": false, ++ "inputs": [ ++ { ++ "indexed": false, ++ "internalType": "uint256", ++ "name": "fraction", ++ "type": "uint256" ++ } ++ ], ++ "name": "BurnFractionSet", ++ "type": "event" ++ }, ++ { ++ "anonymous": false, ++ "inputs": [ ++ { ++ "indexed": false, ++ "internalType": "address", ++ "name": "token", ++ "type": "address" ++ }, ++ { ++ "indexed": false, ++ "internalType": "uint256", ++ "name": "burning", ++ "type": "uint256" ++ } ++ ], ++ "name": "DailyLimitHit", ++ "type": "event" ++ }, ++ { ++ "anonymous": false, ++ "inputs": [ ++ { ++ "indexed": false, ++ "internalType": "address", ++ "name": "tokenAddress", ++ "type": "address" ++ }, ++ { ++ "indexed": false, ++ "internalType": "uint256", ++ "name": "newLimit", ++ "type": "uint256" ++ } ++ ], ++ "name": "DailyLimitSet", ++ "type": "event" ++ }, ++ { ++ "anonymous": false, ++ "inputs": [ ++ { ++ "indexed": false, ++ "internalType": "uint256", ++ "name": "amount", ++ "type": "uint256" ++ } ++ ], ++ "name": "DailySellLimitUpdated", ++ "type": "event" ++ }, ++ { ++ "anonymous": false, ++ "inputs": [ ++ { ++ "indexed": false, ++ "internalType": "address", ++ "name": "newBeneficiary", ++ "type": "address" ++ } ++ ], ++ "name": "FeeBeneficiarySet", ++ "type": "event" ++ }, ++ { ++ "anonymous": false, ++ "inputs": [ ++ { ++ "indexed": false, ++ "internalType": "address", ++ "name": "token", ++ "type": "address" ++ }, ++ { ++ "indexed": false, ++ "internalType": "uint256", ++ "name": "maxSlippage", ++ "type": "uint256" ++ } ++ ], ++ "name": "MaxSlippageSet", ++ "type": "event" ++ }, ++ { ++ "anonymous": false, ++ "inputs": [ ++ { ++ "indexed": true, ++ "internalType": "address", ++ "name": "previousOwner", ++ "type": "address" ++ }, ++ { ++ "indexed": true, ++ "internalType": "address", ++ "name": "newOwner", ++ "type": "address" ++ } ++ ], ++ "name": "OwnershipTransferred", ++ "type": "event" ++ }, ++ { ++ "anonymous": false, ++ "inputs": [ ++ { ++ "indexed": true, ++ "internalType": "address", ++ "name": "registryAddress", ++ "type": "address" ++ } ++ ], ++ "name": "RegistrySet", ++ "type": "event" ++ }, ++ { ++ "anonymous": false, ++ "inputs": [ ++ { ++ "indexed": false, ++ "internalType": "address", ++ "name": "token", ++ "type": "address" ++ }, ++ { ++ "indexed": false, ++ "internalType": "uint256", ++ "name": "value", ++ "type": "uint256" ++ } ++ ], ++ "name": "SoldAndBurnedToken", ++ "type": "event" ++ }, ++ { ++ "anonymous": false, ++ "inputs": [ ++ { ++ "indexed": false, ++ "internalType": "address", ++ "name": "tokenAddress", ++ "type": "address" ++ }, ++ { ++ "indexed": false, ++ "internalType": "address", ++ "name": "handlerAddress", ++ "type": "address" ++ } ++ ], ++ "name": "TokenAdded", ++ "type": "event" ++ }, ++ { ++ "anonymous": false, ++ "inputs": [ ++ { ++ "indexed": false, ++ "internalType": "address", ++ "name": "tokenAddress", ++ "type": "address" ++ } ++ ], ++ "name": "TokenRemoved", ++ "type": "event" ++ } ++] +\ No newline at end of file
+ (new) CELO +92 -0
diff --git OP/packages/contracts-bedrock/snapshots/abi/Freezable.json CELO/packages/contracts-bedrock/snapshots/abi/Freezable.json +new file mode 100644 +index 0000000000000000000000000000000000000000..dc8fa7e0f21ca37add36efa01627337c9521293c +--- /dev/null ++++ CELO/packages/contracts-bedrock/snapshots/abi/Freezable.json +@@ -0,0 +1,93 @@ ++[ ++ { ++ "inputs": [], ++ "name": "owner", ++ "outputs": [ ++ { ++ "internalType": "address", ++ "name": "", ++ "type": "address" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "registry", ++ "outputs": [ ++ { ++ "internalType": "contract ICeloRegistry", ++ "name": "", ++ "type": "address" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "renounceOwnership", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "registryAddress", ++ "type": "address" ++ } ++ ], ++ "name": "setRegistry", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "newOwner", ++ "type": "address" ++ } ++ ], ++ "name": "transferOwnership", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "anonymous": false, ++ "inputs": [ ++ { ++ "indexed": true, ++ "internalType": "address", ++ "name": "previousOwner", ++ "type": "address" ++ }, ++ { ++ "indexed": true, ++ "internalType": "address", ++ "name": "newOwner", ++ "type": "address" ++ } ++ ], ++ "name": "OwnershipTransferred", ++ "type": "event" ++ }, ++ { ++ "anonymous": false, ++ "inputs": [ ++ { ++ "indexed": true, ++ "internalType": "address", ++ "name": "registryAddress", ++ "type": "address" ++ } ++ ], ++ "name": "RegistrySet", ++ "type": "event" ++ } ++] +\ No newline at end of file
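
Freezable is an abstract base contract, so its snapshot only exposes the shared owner/registry surface. One thing the ABI does make queryable is the `RegistrySet` event; a small sketch (the deployment address is hypothetical):

```typescript
import { ethers } from "ethers";

const abi = ["event RegistrySet(address indexed registryAddress)"];

// Returns the most recent registry re-pointing for any contract that
// inherits this base, or null if it was never re-pointed.
async function lastRegistrySet(addr: string, provider: ethers.Provider) {
  const c = new ethers.Contract(addr, abi, provider);
  const events = await c.queryFilter(c.filters.RegistrySet(), 0, "latest");
  return events.at(-1) ?? null;
}
```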
+ (new) CELO +551 -0
diff --git OP/packages/contracts-bedrock/snapshots/abi/GoldToken.json CELO/packages/contracts-bedrock/snapshots/abi/GoldToken.json +new file mode 100644 +index 0000000000000000000000000000000000000000..a52ef10b6a528ba8b47235bebe82741ce20d5640 +--- /dev/null ++++ CELO/packages/contracts-bedrock/snapshots/abi/GoldToken.json +@@ -0,0 +1,552 @@ ++[ ++ { ++ "inputs": [ ++ { ++ "internalType": "bool", ++ "name": "test", ++ "type": "bool" ++ } ++ ], ++ "stateMutability": "nonpayable", ++ "type": "constructor" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "owner", ++ "type": "address" ++ }, ++ { ++ "internalType": "address", ++ "name": "spender", ++ "type": "address" ++ } ++ ], ++ "name": "allowance", ++ "outputs": [ ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "spender", ++ "type": "address" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "value", ++ "type": "uint256" ++ } ++ ], ++ "name": "approve", ++ "outputs": [ ++ { ++ "internalType": "bool", ++ "name": "", ++ "type": "bool" ++ } ++ ], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "owner", ++ "type": "address" ++ } ++ ], ++ "name": "balanceOf", ++ "outputs": [ ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "uint256", ++ "name": "value", ++ "type": "uint256" ++ } ++ ], ++ "name": "burn", ++ "outputs": [ ++ { ++ "internalType": "bool", ++ "name": "", ++ "type": "bool" ++ } ++ ], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "circulatingSupply", ++ "outputs": [ ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "decimals", ++ "outputs": [ ++ { ++ "internalType": "uint8", ++ "name": "", ++ "type": "uint8" ++ } ++ ], ++ "stateMutability": "pure", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "spender", ++ "type": "address" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "value", ++ "type": "uint256" ++ } ++ ], ++ "name": "decreaseAllowance", ++ "outputs": [ ++ { ++ "internalType": "bool", ++ "name": "", ++ "type": "bool" ++ } ++ ], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "getBurnedAmount", ++ "outputs": [ ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "getVersionNumber", ++ "outputs": [ ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ } ++ ], ++ "stateMutability": "pure", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "spender", ++ "type": "address" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "value", ++ "type": "uint256" ++ } ++ ], ++ "name": "increaseAllowance", ++ "outputs": [ ++ { ++ "internalType": "bool", ++ "name": "", ++ "type": 
"bool" ++ } ++ ], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "uint256", ++ "name": "amount", ++ "type": "uint256" ++ } ++ ], ++ "name": "increaseSupply", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "registryAddress", ++ "type": "address" ++ } ++ ], ++ "name": "initialize", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "initialized", ++ "outputs": [ ++ { ++ "internalType": "bool", ++ "name": "", ++ "type": "bool" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "to", ++ "type": "address" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "value", ++ "type": "uint256" ++ } ++ ], ++ "name": "mint", ++ "outputs": [ ++ { ++ "internalType": "bool", ++ "name": "", ++ "type": "bool" ++ } ++ ], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "name", ++ "outputs": [ ++ { ++ "internalType": "string", ++ "name": "", ++ "type": "string" ++ } ++ ], ++ "stateMutability": "pure", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "owner", ++ "outputs": [ ++ { ++ "internalType": "address", ++ "name": "", ++ "type": "address" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "registry", ++ "outputs": [ ++ { ++ "internalType": "contract ICeloRegistry", ++ "name": "", ++ "type": "address" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "renounceOwnership", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "registryAddress", ++ "type": "address" ++ } ++ ], ++ "name": "setRegistry", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "symbol", ++ "outputs": [ ++ { ++ "internalType": "string", ++ "name": "", ++ "type": "string" ++ } ++ ], ++ "stateMutability": "pure", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "totalSupply", ++ "outputs": [ ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "to", ++ "type": "address" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "value", ++ "type": "uint256" ++ } ++ ], ++ "name": "transfer", ++ "outputs": [ ++ { ++ "internalType": "bool", ++ "name": "", ++ "type": "bool" ++ } ++ ], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "from", ++ "type": "address" ++ }, ++ { ++ "internalType": "address", ++ "name": "to", ++ "type": "address" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "value", ++ "type": "uint256" ++ } ++ ], ++ "name": "transferFrom", ++ "outputs": [ ++ { ++ "internalType": "bool", ++ "name": "", ++ "type": "bool" ++ } ++ ], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "newOwner", ++ "type": "address" ++ } ++ ], ++ "name": "transferOwnership", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ 
"name": "to", ++ "type": "address" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "value", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "string", ++ "name": "comment", ++ "type": "string" ++ } ++ ], ++ "name": "transferWithComment", ++ "outputs": [ ++ { ++ "internalType": "bool", ++ "name": "", ++ "type": "bool" ++ } ++ ], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "anonymous": false, ++ "inputs": [ ++ { ++ "indexed": true, ++ "internalType": "address", ++ "name": "owner", ++ "type": "address" ++ }, ++ { ++ "indexed": true, ++ "internalType": "address", ++ "name": "spender", ++ "type": "address" ++ }, ++ { ++ "indexed": false, ++ "internalType": "uint256", ++ "name": "value", ++ "type": "uint256" ++ } ++ ], ++ "name": "Approval", ++ "type": "event" ++ }, ++ { ++ "anonymous": false, ++ "inputs": [ + { ++ "indexed": true, + "internalType": "address", -+ "name": "", ++ "name": "previousOwner", ++ "type": "address" ++ }, ++ { ++ "indexed": true, ++ "internalType": "address", ++ "name": "newOwner", + "type": "address" + } + ], -+ "stateMutability": "view", -+ "type": "function" ++ "name": "OwnershipTransferred", ++ "type": "event" + }, + { -+ "inputs": [], -+ "name": "getActiveTokens", -+ "outputs": [ ++ "anonymous": false, ++ "inputs": [ + { -+ "internalType": "address[]", -+ "name": "", -+ "type": "address[]" ++ "indexed": true, ++ "internalType": "address", ++ "name": "registryAddress", ++ "type": "address" + } + ], -+ "stateMutability": "view", -+ "type": "function" ++ "name": "RegistrySet", ++ "type": "event" + }, + { ++ "anonymous": false, + "inputs": [ + { ++ "indexed": true, + "internalType": "address", -+ "name": "token", ++ "name": "from", + "type": "address" -+ } -+ ], -+ "name": "getPastBurnForToken", -+ "outputs": [ ++ }, ++ { ++ "indexed": true, ++ "internalType": "address", ++ "name": "to", ++ "type": "address" ++ }, + { ++ "indexed": false, + "internalType": "uint256", -+ "name": "", ++ "name": "value", + "type": "uint256" + } + ], -+ "stateMutability": "view", -+ "type": "function" ++ "name": "Transfer", ++ "type": "event" + }, + { ++ "anonymous": false, + "inputs": [ + { -+ "internalType": "address", -+ "name": "tokenAddress", -+ "type": "address" ++ "indexed": false, ++ "internalType": "string", ++ "name": "comment", ++ "type": "string" + } + ], -+ "name": "getTokenActive", ++ "name": "TransferComment", ++ "type": "event" ++ } ++] +\ No newline at end of file
+ (new) CELO +25 -0
diff --git OP/packages/contracts-bedrock/snapshots/abi/Initializable.json CELO/packages/contracts-bedrock/snapshots/abi/Initializable.json +new file mode 100644 +index 0000000000000000000000000000000000000000..aeef476ab67fdf303022548658b887d36bf6f042 +--- /dev/null ++++ CELO/packages/contracts-bedrock/snapshots/abi/Initializable.json +@@ -0,0 +1,26 @@ ++[ ++ { ++ "inputs": [ ++ { ++ "internalType": "bool", ++ "name": "testingDeployment", ++ "type": "bool" ++ } ++ ], ++ "stateMutability": "nonpayable", ++ "type": "constructor" ++ }, ++ { ++ "inputs": [], ++ "name": "initialized", + "outputs": [ + { + "internalType": "bool", @@ -2537,16 +5397,95 @@ + ], + "stateMutability": "view", + "type": "function" ++ } ++] +\ No newline at end of file
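
Initializable here is Celo's own initializer base, which predates the OpenZeppelin pattern: the constructor's `testingDeployment` flag marks the contract initialized immediately (so tests can skip `initialize`), and the `initialize(...)` functions in the surrounding snapshots are guarded by the `initialized` flag. A small helper sketch:

```typescript
import { ethers } from "ethers";

const abi = ["function initialized() view returns (bool)"];

// True if `addr` (any contract inheriting this Initializable) still needs
// its one-shot initialize(...) call; a second call reverts on-chain.
async function needsInit(addr: string, provider: ethers.Provider): Promise<boolean> {
  const c = new ethers.Contract(addr, abi, provider);
  return !(await c.initialized());
}
```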
+ (new) CELO +349 -0
diff --git OP/packages/contracts-bedrock/snapshots/abi/MentoFeeHandlerSeller.json CELO/packages/contracts-bedrock/snapshots/abi/MentoFeeHandlerSeller.json +new file mode 100644 +index 0000000000000000000000000000000000000000..7190d528858e5ac8ec5feae77b968b6afaca1d0e +--- /dev/null ++++ CELO/packages/contracts-bedrock/snapshots/abi/MentoFeeHandlerSeller.json +@@ -0,0 +1,350 @@ ++[ ++ { ++ "inputs": [ ++ { ++ "internalType": "bool", ++ "name": "test", ++ "type": "bool" ++ } ++ ], ++ "stateMutability": "nonpayable", ++ "type": "constructor" ++ }, ++ { ++ "stateMutability": "payable", ++ "type": "receive" + }, + { + "inputs": [ + { -+ "internalType": "address", -+ "name": "tokenAddress", -+ "type": "address" ++ "internalType": "uint256", ++ "name": "midPriceNumerator", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "midPriceDenominator", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "amount", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "maxSlippage", ++ "type": "uint256" + } + ], -+ "name": "getTokenCurrentDaySellLimit", ++ "name": "calculateMinAmount", + "outputs": [ + { + "internalType": "uint256", @@ -2554,42 +5493,68 @@ + "type": "uint256" + } + ], -+ "stateMutability": "view", ++ "stateMutability": "pure", + "type": "function" + }, + { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "tokenAddress", -+ "type": "address" -+ } -+ ], -+ "name": "getTokenDailySellLimit", ++ "inputs": [], ++ "name": "getVersionNumber", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" + } + ], -+ "stateMutability": "view", ++ "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", -+ "name": "tokenAddress", ++ "name": "_registryAddress", + "type": "address" ++ }, ++ { ++ "internalType": "address[]", ++ "name": "tokenAddresses", ++ "type": "address[]" ++ }, ++ { ++ "internalType": "uint256[]", ++ "name": "newMininumReports", ++ "type": "uint256[]" + } + ], -+ "name": "getTokenHandler", ++ "name": "initialize", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "initialized", + "outputs": [ + { -+ "internalType": "address", ++ "internalType": "bool", + "name": "", -+ "type": "address" ++ "type": "bool" + } + ], + "stateMutability": "view", @@ -2599,11 +5564,11 @@ + "inputs": [ + { + "internalType": "address", -+ "name": "tokenAddress", ++ "name": "", + "type": "address" + } + ], -+ "name": "getTokenMaxSlippage", ++ "name": "minimumReports", + "outputs": [ + { + "internalType": "uint256", @@ -2615,19 +5580,26 @@ + "type": "function" + }, + { -+ "inputs": [ ++ "inputs": [], ++ "name": "owner", ++ "outputs": [ + { + "internalType": "address", -+ "name": "tokenAddress", ++ "name": "", + "type": "address" + } + ], -+ "name": "getTokenToDistribute", ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "registry", + "outputs": [ + { -+ "internalType": "uint256", ++ "internalType": "contract ICeloRegistry", + "name": "", -+ "type": "uint256" ++ "type": "address" + } + ], + "stateMutability": "view", @@ -2635,30 +5607,43 @@ + }, + { + "inputs": [], -+ "name": "getVersionNumber", -+ "outputs": [ ++ "name": 
"renounceOwnership", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ + { -+ "internalType": "uint256", -+ "name": "", -+ "type": "uint256" ++ "internalType": "address", ++ "name": "sellTokenAddress", ++ "type": "address" ++ }, ++ { ++ "internalType": "address", ++ "name": "buyTokenAddress", ++ "type": "address" + }, + { + "internalType": "uint256", -+ "name": "", ++ "name": "amount", + "type": "uint256" + }, + { + "internalType": "uint256", -+ "name": "", ++ "name": "maxSlippage", + "type": "uint256" -+ }, ++ } ++ ], ++ "name": "sell", ++ "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], -+ "stateMutability": "pure", ++ "stateMutability": "nonpayable", + "type": "function" + }, + { @@ -2667,16 +5652,27 @@ + "internalType": "address", + "name": "tokenAddress", + "type": "address" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "newMininumReports", ++ "type": "uint256" + } + ], -+ "name": "handle", ++ "name": "setMinimumReports", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { -+ "inputs": [], -+ "name": "handleAll", ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "registryAddress", ++ "type": "address" ++ } ++ ], ++ "name": "setRegistry", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" @@ -2685,141 +5681,199 @@ + "inputs": [ + { + "internalType": "address", -+ "name": "_registryAddress", -+ "type": "address" -+ }, -+ { -+ "internalType": "address", -+ "name": "newFeeBeneficiary", ++ "name": "token", + "type": "address" + }, + { + "internalType": "uint256", -+ "name": "newBurnFraction", ++ "name": "amount", + "type": "uint256" + }, + { -+ "internalType": "address[]", -+ "name": "tokens", -+ "type": "address[]" -+ }, -+ { -+ "internalType": "address[]", -+ "name": "handlers", -+ "type": "address[]" -+ }, -+ { -+ "internalType": "uint256[]", -+ "name": "newLimits", -+ "type": "uint256[]" -+ }, ++ "internalType": "address", ++ "name": "to", ++ "type": "address" ++ } ++ ], ++ "name": "transfer", ++ "outputs": [ + { -+ "internalType": "uint256[]", -+ "name": "newMaxSlippages", -+ "type": "uint256[]" ++ "internalType": "bool", ++ "name": "", ++ "type": "bool" + } + ], -+ "name": "initialize", -+ "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { -+ "inputs": [], -+ "name": "initialized", -+ "outputs": [ ++ "inputs": [ + { -+ "internalType": "bool", -+ "name": "", -+ "type": "bool" ++ "internalType": "address", ++ "name": "newOwner", ++ "type": "address" + } + ], -+ "stateMutability": "view", ++ "name": "transferOwnership", ++ "outputs": [], ++ "stateMutability": "nonpayable", + "type": "function" + }, + { -+ "inputs": [], -+ "name": "lastLimitDay", -+ "outputs": [ ++ "anonymous": false, ++ "inputs": [ ++ { ++ "indexed": false, ++ "internalType": "address", ++ "name": "tokenAddress", ++ "type": "address" ++ }, + { ++ "indexed": false, + "internalType": "uint256", -+ "name": "", ++ "name": "minimumReports", + "type": "uint256" + } + ], -+ "stateMutability": "view", -+ "type": "function" ++ "name": "MinimumReportsSet", ++ "type": "event" + }, + { -+ "inputs": [], -+ "name": "owner", -+ "outputs": [ ++ "anonymous": false, ++ "inputs": [ + { ++ "indexed": true, + "internalType": "address", -+ "name": "", ++ "name": "previousOwner", ++ "type": "address" ++ }, ++ { ++ "indexed": true, ++ "internalType": "address", ++ "name": "newOwner", + "type": "address" + } + ], -+ "stateMutability": "view", -+ "type": 
"function" ++ "name": "OwnershipTransferred", ++ "type": "event" + }, + { -+ "inputs": [], -+ "name": "registry", -+ "outputs": [ ++ "anonymous": false, ++ "inputs": [ + { -+ "internalType": "contract ICeloRegistry", -+ "name": "", ++ "indexed": true, ++ "internalType": "address", ++ "name": "registryAddress", + "type": "address" + } + ], -+ "stateMutability": "view", -+ "type": "function" ++ "name": "RegistrySet", ++ "type": "event" + }, + { ++ "anonymous": false, + "inputs": [ + { ++ "indexed": false, + "internalType": "address", -+ "name": "tokenAddress", ++ "name": "soldTokenAddress", ++ "type": "address" ++ }, ++ { ++ "indexed": false, ++ "internalType": "address", ++ "name": "boughtTokenAddress", + "type": "address" ++ }, ++ { ++ "indexed": false, ++ "internalType": "uint256", ++ "name": "amount", ++ "type": "uint256" + } + ], -+ "name": "removeToken", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" -+ }, ++ "name": "TokenSold", ++ "type": "event" ++ } ++] +\ No newline at end of file
+ (new) CELO +248 -0
diff --git OP/packages/contracts-bedrock/snapshots/abi/MockSortedOracles.json CELO/packages/contracts-bedrock/snapshots/abi/MockSortedOracles.json +new file mode 100644 +index 0000000000000000000000000000000000000000..f56f9b579aa578b992b3f8960a77db6c768cbac9 +--- /dev/null ++++ CELO/packages/contracts-bedrock/snapshots/abi/MockSortedOracles.json +@@ -0,0 +1,249 @@ ++[ + { + "inputs": [], -+ "name": "renounceOwnership", -+ "outputs": [], -+ "stateMutability": "nonpayable", ++ "name": "DENOMINATOR", ++ "outputs": [ ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ } ++ ], ++ "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", -+ "name": "tokenAddress", ++ "name": "", + "type": "address" + } + ], -+ "name": "sell", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" -+ }, -+ { -+ "inputs": [ ++ "name": "expired", ++ "outputs": [ + { -+ "internalType": "uint256", -+ "name": "fraction", -+ "type": "uint256" ++ "internalType": "bool", ++ "name": "", ++ "type": "bool" + } + ], -+ "name": "setBurnFraction", -+ "outputs": [], -+ "stateMutability": "nonpayable", ++ "stateMutability": "view", + "type": "function" + }, + { @@ -2828,47 +5882,46 @@ + "internalType": "address", + "name": "token", + "type": "address" ++ } ++ ], ++ "name": "getExchangeRate", ++ "outputs": [ ++ { ++ "internalType": "uint256", ++ "name": "numerator", ++ "type": "uint256" + }, + { + "internalType": "uint256", -+ "name": "newLimit", ++ "name": "denominator", + "type": "uint256" + } + ], -+ "name": "setDailySellLimit", -+ "outputs": [], -+ "stateMutability": "nonpayable", ++ "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", -+ "name": "beneficiary", ++ "name": "token", + "type": "address" + } + ], -+ "name": "setFeeBeneficiary", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" -+ }, -+ { -+ "inputs": [ ++ "name": "isOldestReportExpired", ++ "outputs": [ + { -+ "internalType": "address", -+ "name": "tokenAddress", -+ "type": "address" ++ "internalType": "bool", ++ "name": "", ++ "type": "bool" + }, + { + "internalType": "address", -+ "name": "handlerAddress", ++ "name": "", + "type": "address" + } + ], -+ "name": "setHandler", -+ "outputs": [], -+ "stateMutability": "nonpayable", ++ "stateMutability": "view", + "type": "function" + }, + { @@ -2877,263 +5930,313 @@ + "internalType": "address", + "name": "token", + "type": "address" ++ } ++ ], ++ "name": "medianRate", ++ "outputs": [ ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" + }, + { + "internalType": "uint256", -+ "name": "newMax", ++ "name": "", + "type": "uint256" + } + ], -+ "name": "setMaxSplippage", -+ "outputs": [], -+ "stateMutability": "nonpayable", ++ "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", -+ "name": "registryAddress", ++ "name": "", + "type": "address" + } + ], -+ "name": "setRegistry", -+ "outputs": [], -+ "stateMutability": "nonpayable", ++ "name": "medianTimestamp", ++ "outputs": [ ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ } ++ ], ++ "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", -+ "name": "token", -+ "type": "address" -+ }, -+ { -+ "internalType": "address", -+ "name": "recipient", ++ "name": "", + "type": "address" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "value", -+ "type": "uint256" + } + ], -+ 
"name": "transfer", ++ "name": "numRates", + "outputs": [ + { -+ "internalType": "bool", ++ "internalType": "uint256", + "name": "", -+ "type": "bool" ++ "type": "uint256" + } + ], -+ "stateMutability": "nonpayable", ++ "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", -+ "name": "newOwner", ++ "name": "", + "type": "address" + } + ], -+ "name": "transferOwnership", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" -+ }, -+ { -+ "anonymous": false, -+ "inputs": [ ++ "name": "numerators", ++ "outputs": [ + { -+ "indexed": false, + "internalType": "uint256", -+ "name": "fraction", ++ "name": "", + "type": "uint256" + } + ], -+ "name": "BurnFractionSet", -+ "type": "event" ++ "stateMutability": "view", ++ "type": "function" + }, + { -+ "anonymous": false, + "inputs": [ + { -+ "indexed": false, + "internalType": "address", + "name": "token", + "type": "address" + }, + { -+ "indexed": false, + "internalType": "uint256", -+ "name": "burning", ++ "name": "numerator", + "type": "uint256" + } + ], -+ "name": "DailyLimitHit", -+ "type": "event" ++ "name": "setMedianRate", ++ "outputs": [ ++ { ++ "internalType": "bool", ++ "name": "", ++ "type": "bool" ++ } ++ ], ++ "stateMutability": "nonpayable", ++ "type": "function" + }, + { -+ "anonymous": false, + "inputs": [ + { -+ "indexed": false, + "internalType": "address", -+ "name": "tokenAddress", ++ "name": "token", + "type": "address" + }, + { -+ "indexed": false, + "internalType": "uint256", -+ "name": "newLimit", ++ "name": "timestamp", + "type": "uint256" + } + ], -+ "name": "DailyLimitSet", -+ "type": "event" ++ "name": "setMedianTimestamp", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" + }, + { -+ "anonymous": false, + "inputs": [ + { -+ "indexed": false, -+ "internalType": "uint256", -+ "name": "amount", -+ "type": "uint256" ++ "internalType": "address", ++ "name": "token", ++ "type": "address" + } + ], -+ "name": "DailySellLimitUpdated", -+ "type": "event" ++ "name": "setMedianTimestampToNow", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" + }, + { -+ "anonymous": false, + "inputs": [ + { -+ "indexed": false, + "internalType": "address", -+ "name": "newBeneficiary", ++ "name": "token", + "type": "address" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "rate", ++ "type": "uint256" + } + ], -+ "name": "FeeBeneficiarySet", -+ "type": "event" ++ "name": "setNumRates", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" + }, + { -+ "anonymous": false, + "inputs": [ + { -+ "indexed": false, + "internalType": "address", + "name": "token", + "type": "address" ++ } ++ ], ++ "name": "setOldestReportExpired", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ } ++] +\ No newline at end of file
+ OP CELO +84 -0
diff --git OP/packages/contracts-bedrock/snapshots/abi/OptimismMintableERC20.json CELO/packages/contracts-bedrock/snapshots/abi/OptimismMintableERC20.json +index 3c6f5e9ab34802a95d672b4b5a34ca3f431645f9..8ced7535ac1dea765bb9709a1be28c9e3824547f 100644 +--- OP/packages/contracts-bedrock/snapshots/abi/OptimismMintableERC20.json ++++ CELO/packages/contracts-bedrock/snapshots/abi/OptimismMintableERC20.json +@@ -155,6 +155,90 @@ "stateMutability": "nonpayable", + "type": "function" + }, + { ++ "inputs": [ ++ { ++ "internalType": "address[]", ++ "name": "recipients", ++ "type": "address[]" + }, + { -+ "indexed": false, -+ "internalType": "uint256", -+ "name": "maxSlippage", -+ "type": "uint256" ++ "internalType": "uint256[]", ++ "name": "amounts", ++ "type": "uint256[]" + } + ], -+ "name": "MaxSlippageSet", -+ "type": "event" ++ "name": "creditGasFees", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" + }, + { -+ "anonymous": false, + "inputs": [ + { -+ "indexed": true, + "internalType": "address", -+ "name": "previousOwner", ++ "name": "from", + "type": "address" + }, + { -+ "indexed": true, + "internalType": "address", -+ "name": "newOwner", ++ "name": "feeRecipient", + "type": "address" -+ } -+ ], -+ "name": "OwnershipTransferred", -+ "type": "event" -+ }, -+ { -+ "anonymous": false, -+ "inputs": [ ++ }, + { -+ "indexed": true, + "internalType": "address", -+ "name": "registryAddress", ++ "name": "", + "type": "address" -+ } -+ ], -+ "name": "RegistrySet", -+ "type": "event" -+ }, -+ { -+ "anonymous": false, -+ "inputs": [ ++ }, + { -+ "indexed": false, + "internalType": "address", -+ "name": "token", ++ "name": "communityFund", + "type": "address" + }, + { -+ "indexed": false, + "internalType": "uint256", -+ "name": "value", ++ "name": "refund", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "tipTxFee", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "baseTxFee", + "type": "uint256" + } + ], -+ "name": "SoldAndBurnedToken", -+ "type": "event" ++ "name": "creditGasFees", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" + }, + { -+ "anonymous": false, + "inputs": [ + { -+ "indexed": false, + "internalType": "address", -+ "name": "tokenAddress", ++ "name": "from", + "type": "address" + }, + { -+ "indexed": false, -+ "internalType": "address", -+ "name": "handlerAddress", -+ "type": "address" ++ "internalType": "uint256", ++ "name": "value", ++ "type": "uint256" + } + ], -+ "name": "TokenAdded", -+ "type": "event" ++ "name": "debitGasFees", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" + }, + { -+ "anonymous": false, -+ "inputs": [ -+ { -+ "indexed": false, -+ "internalType": "address", -+ "name": "tokenAddress", -+ "type": "address" -+ } -+ ], -+ "name": "TokenRemoved", -+ "type": "event" -+ } -+] -\ No newline at end of file
+ "inputs": [], + "name": "decimals", + "outputs": [
-diff --git OP/packages/contracts-bedrock/snapshots/abi/Freezable.json CELO/packages/contracts-bedrock/snapshots/abi/Freezable.json
diff --git OP/packages/contracts-bedrock/snapshots/abi/SortedOracles.json CELO/packages/contracts-bedrock/snapshots/abi/SortedOracles.json new file mode 100644 -index 0000000000000000000000000000000000000000..dc8fa7e0f21ca37add36efa01627337c9521293c +index 0000000000000000000000000000000000000000..12a253c5c08be2f7b7d727d99d56c5499b5ed43a --- /dev/null -+++ CELO/packages/contracts-bedrock/snapshots/abi/Freezable.json -@@ -0,0 +1,93 @@ ++++ CELO/packages/contracts-bedrock/snapshots/abi/SortedOracles.json +@@ -0,0 +1,832 @@ +[ + { -+ "inputs": [], -+ "name": "owner", -+ "outputs": [ ++ "inputs": [ ++ { ++ "internalType": "bool", ++ "name": "test", ++ "type": "bool" ++ } ++ ], ++ "stateMutability": "nonpayable", ++ "type": "constructor" ++ }, ++ { ++ "inputs": [ + { + "internalType": "address", -+ "name": "", ++ "name": "token", ++ "type": "address" ++ }, ++ { ++ "internalType": "address", ++ "name": "oracleAddress", + "type": "address" + } + ], -+ "stateMutability": "view", ++ "name": "addOracle", ++ "outputs": [], ++ "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], -+ "name": "registry", ++ "name": "breakerBox", + "outputs": [ + { -+ "internalType": "contract ICeloRegistry", ++ "internalType": "contract IBreakerBox", + "name": "", + "type": "address" + } @@ -3199,21 +6318,14 @@ + "type": "function" + }, + { -+ "inputs": [], -+ "name": "renounceOwnership", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" -+ }, -+ { + "inputs": [ + { + "internalType": "address", -+ "name": "registryAddress", ++ "name": "token", + "type": "address" + } + ], -+ "name": "setRegistry", ++ "name": "deleteEquivalentToken", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" @@ -3222,126 +6334,107 @@ + "inputs": [ + { + "internalType": "address", -+ "name": "newOwner", ++ "name": "", + "type": "address" + } + ], -+ "name": "transferOwnership", -+ "outputs": [], -+ "stateMutability": "nonpayable", ++ "name": "equivalentTokens", ++ "outputs": [ ++ { ++ "internalType": "address", ++ "name": "token", ++ "type": "address" ++ } ++ ], ++ "stateMutability": "view", + "type": "function" + }, + { -+ "anonymous": false, + "inputs": [ + { -+ "indexed": true, + "internalType": "address", -+ "name": "previousOwner", ++ "name": "token", + "type": "address" -+ }, ++ } ++ ], ++ "name": "getEquivalentToken", ++ "outputs": [ + { -+ "indexed": true, + "internalType": "address", -+ "name": "newOwner", ++ "name": "", + "type": "address" + } + ], -+ "name": "OwnershipTransferred", -+ "type": "event" ++ "stateMutability": "view", ++ "type": "function" + }, + { -+ "anonymous": false, + "inputs": [ + { -+ "indexed": true, + "internalType": "address", -+ "name": "registryAddress", ++ "name": "token", + "type": "address" + } + ], -+ "name": "RegistrySet", -+ "type": "event" -+ } -+] -\ No newline at end of file
-
- - -
- - -
-
-
- - (new) - -
- -
- - CELO - -
-
-
- -
- -
- -
+551
-
-0
- -
- -
-
-
diff --git OP/packages/contracts-bedrock/snapshots/abi/GoldToken.json CELO/packages/contracts-bedrock/snapshots/abi/GoldToken.json -new file mode 100644 -index 0000000000000000000000000000000000000000..a52ef10b6a528ba8b47235bebe82741ce20d5640 ---- /dev/null -+++ CELO/packages/contracts-bedrock/snapshots/abi/GoldToken.json -@@ -0,0 +1,552 @@ -+[ ++ "name": "getExchangeRate", ++ "outputs": [ ++ { ++ "internalType": "uint256", ++ "name": "numerator", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "denominator", ++ "type": "uint256" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, + { + "inputs": [ + { -+ "internalType": "bool", -+ "name": "test", -+ "type": "bool" ++ "internalType": "address", ++ "name": "token", ++ "type": "address" ++ } ++ ], ++ "name": "getOracles", ++ "outputs": [ ++ { ++ "internalType": "address[]", ++ "name": "", ++ "type": "address[]" + } + ], -+ "stateMutability": "nonpayable", -+ "type": "constructor" ++ "stateMutability": "view", ++ "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", -+ "name": "owner", -+ "type": "address" -+ }, -+ { -+ "internalType": "address", -+ "name": "spender", ++ "name": "token", + "type": "address" + } + ], -+ "name": "allowance", ++ "name": "getRates", + "outputs": [ + { -+ "internalType": "uint256", ++ "internalType": "address[]", + "name": "", -+ "type": "uint256" ++ "type": "address[]" ++ }, ++ { ++ "internalType": "uint256[]", ++ "name": "", ++ "type": "uint256[]" ++ }, ++ { ++ "internalType": "enum SortedLinkedListWithMedian.MedianRelation[]", ++ "name": "", ++ "type": "uint8[]" + } + ], + "stateMutability": "view", @@ -3351,35 +6444,40 @@ + "inputs": [ + { + "internalType": "address", -+ "name": "spender", ++ "name": "token", + "type": "address" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "value", -+ "type": "uint256" + } + ], -+ "name": "approve", ++ "name": "getTimestamps", + "outputs": [ + { -+ "internalType": "bool", ++ "internalType": "address[]", + "name": "", -+ "type": "bool" ++ "type": "address[]" ++ }, ++ { ++ "internalType": "uint256[]", ++ "name": "", ++ "type": "uint256[]" ++ }, ++ { ++ "internalType": "enum SortedLinkedListWithMedian.MedianRelation[]", ++ "name": "", ++ "type": "uint8[]" + } + ], -+ "stateMutability": "nonpayable", ++ "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", -+ "name": "owner", ++ "name": "token", + "type": "address" + } + ], -+ "name": "balanceOf", ++ "name": "getTokenReportExpirySeconds", + "outputs": [ + { + "internalType": "uint256", @@ -3391,90 +6489,116 @@ + "type": "function" + }, + { -+ "inputs": [ ++ "inputs": [], ++ "name": "getVersionNumber", ++ "outputs": [ + { + "internalType": "uint256", -+ "name": "value", ++ "name": "", + "type": "uint256" -+ } -+ ], -+ "name": "burn", -+ "outputs": [ ++ }, + { -+ "internalType": "bool", ++ "internalType": "uint256", + "name": "", -+ "type": "bool" ++ "type": "uint256" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" + } + ], -+ "stateMutability": "nonpayable", ++ "stateMutability": "pure", + "type": "function" + }, + { -+ "inputs": [], -+ "name": "circulatingSupply", -+ "outputs": [ ++ "inputs": [ + { + "internalType": "uint256", -+ "name": "", ++ "name": "_reportExpirySeconds", + "type": "uint256" + } + ], -+ "stateMutability": "view", ++ "name": "initialize", ++ "outputs": [], ++ "stateMutability": 
"nonpayable", + "type": "function" + }, + { + "inputs": [], -+ "name": "decimals", ++ "name": "initialized", + "outputs": [ + { -+ "internalType": "uint8", ++ "internalType": "bool", + "name": "", -+ "type": "uint8" ++ "type": "bool" + } + ], -+ "stateMutability": "pure", ++ "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", -+ "name": "spender", ++ "name": "token", + "type": "address" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "value", -+ "type": "uint256" + } + ], -+ "name": "decreaseAllowance", ++ "name": "isOldestReportExpired", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" ++ }, ++ { ++ "internalType": "address", ++ "name": "", ++ "type": "address" + } + ], -+ "stateMutability": "nonpayable", ++ "stateMutability": "view", + "type": "function" + }, + { -+ "inputs": [], -+ "name": "getBurnedAmount", ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "", ++ "type": "address" ++ }, ++ { ++ "internalType": "address", ++ "name": "", ++ "type": "address" ++ } ++ ], ++ "name": "isOracle", + "outputs": [ + { -+ "internalType": "uint256", ++ "internalType": "bool", + "name": "", -+ "type": "uint256" ++ "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { -+ "inputs": [], -+ "name": "getVersionNumber", ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "token", ++ "type": "address" ++ } ++ ], ++ "name": "medianRate", + "outputs": [ + { + "internalType": "uint256", @@ -3485,7 +6609,21 @@ + "internalType": "uint256", + "name": "", + "type": "uint256" -+ }, ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "token", ++ "type": "address" ++ } ++ ], ++ "name": "medianRateWithoutEquivalentMapping", ++ "outputs": [ + { + "internalType": "uint256", + "name": "", @@ -3497,67 +6635,61 @@ + "type": "uint256" + } + ], -+ "stateMutability": "pure", ++ "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", -+ "name": "spender", ++ "name": "token", + "type": "address" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "value", -+ "type": "uint256" + } + ], -+ "name": "increaseAllowance", ++ "name": "medianTimestamp", + "outputs": [ + { -+ "internalType": "bool", ++ "internalType": "uint256", + "name": "", -+ "type": "bool" ++ "type": "uint256" + } + ], -+ "stateMutability": "nonpayable", ++ "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { ++ "internalType": "address", ++ "name": "token", ++ "type": "address" ++ } ++ ], ++ "name": "numRates", ++ "outputs": [ ++ { + "internalType": "uint256", -+ "name": "amount", ++ "name": "", + "type": "uint256" + } + ], -+ "name": "increaseSupply", -+ "outputs": [], -+ "stateMutability": "nonpayable", ++ "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", -+ "name": "registryAddress", ++ "name": "token", + "type": "address" + } + ], -+ "name": "initialize", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" -+ }, -+ { -+ "inputs": [], -+ "name": "initialized", ++ "name": "numTimestamps", + "outputs": [ + { -+ "internalType": "bool", ++ "internalType": "uint256", + "name": "", -+ "type": "bool" ++ "type": "uint256" + } + ], + "stateMutability": "view", @@ -3567,63 +6699,78 @@ + "inputs": [ + { + "internalType": "address", -+ "name": "to", ++ "name": "", + "type": "address" + }, + { + "internalType": 
"uint256", -+ "name": "value", ++ "name": "", + "type": "uint256" + } + ], -+ "name": "mint", ++ "name": "oracles", + "outputs": [ + { -+ "internalType": "bool", ++ "internalType": "address", + "name": "", -+ "type": "bool" ++ "type": "address" + } + ], -+ "stateMutability": "nonpayable", ++ "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], -+ "name": "name", ++ "name": "owner", + "outputs": [ + { -+ "internalType": "string", ++ "internalType": "address", + "name": "", -+ "type": "string" ++ "type": "address" + } + ], -+ "stateMutability": "pure", ++ "stateMutability": "view", + "type": "function" + }, + { -+ "inputs": [], -+ "name": "owner", -+ "outputs": [ ++ "inputs": [ + { + "internalType": "address", -+ "name": "", ++ "name": "token", + "type": "address" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "n", ++ "type": "uint256" + } + ], -+ "stateMutability": "view", ++ "name": "removeExpiredReports", ++ "outputs": [], ++ "stateMutability": "nonpayable", + "type": "function" + }, + { -+ "inputs": [], -+ "name": "registry", -+ "outputs": [ ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "token", ++ "type": "address" ++ }, + { -+ "internalType": "contract ICeloRegistry", -+ "name": "", ++ "internalType": "address", ++ "name": "oracleAddress", + "type": "address" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "index", ++ "type": "uint256" + } + ], -+ "stateMutability": "view", ++ "name": "removeOracle", ++ "outputs": [], ++ "stateMutability": "nonpayable", + "type": "function" + }, + { @@ -3637,62 +6784,84 @@ + "inputs": [ + { + "internalType": "address", -+ "name": "registryAddress", ++ "name": "token", ++ "type": "address" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "value", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "address", ++ "name": "lesserKey", ++ "type": "address" ++ }, ++ { ++ "internalType": "address", ++ "name": "greaterKey", + "type": "address" + } + ], -+ "name": "setRegistry", ++ "name": "report", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], -+ "name": "symbol", ++ "name": "reportExpirySeconds", + "outputs": [ + { -+ "internalType": "string", ++ "internalType": "uint256", + "name": "", -+ "type": "string" ++ "type": "uint256" + } + ], -+ "stateMutability": "pure", ++ "stateMutability": "view", + "type": "function" + }, + { -+ "inputs": [], -+ "name": "totalSupply", -+ "outputs": [ ++ "inputs": [ + { -+ "internalType": "uint256", -+ "name": "", -+ "type": "uint256" ++ "internalType": "contract IBreakerBox", ++ "name": "newBreakerBox", ++ "type": "address" + } + ], -+ "stateMutability": "view", ++ "name": "setBreakerBox", ++ "outputs": [], ++ "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", -+ "name": "to", ++ "name": "token", + "type": "address" + }, + { -+ "internalType": "uint256", -+ "name": "value", -+ "type": "uint256" ++ "internalType": "address", ++ "name": "equivalentToken", ++ "type": "address" + } + ], -+ "name": "transfer", -+ "outputs": [ ++ "name": "setEquivalentToken", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ + { -+ "internalType": "bool", -+ "name": "", -+ "type": "bool" ++ "internalType": "uint256", ++ "name": "_reportExpirySeconds", ++ "type": "uint256" + } + ], ++ "name": "setReportExpiry", ++ "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, @@ -3700,29 +6869,37 @@ + "inputs": [ + { + 
"internalType": "address", -+ "name": "from", -+ "type": "address" -+ }, -+ { -+ "internalType": "address", -+ "name": "to", ++ "name": "_token", + "type": "address" + }, + { + "internalType": "uint256", -+ "name": "value", ++ "name": "_reportExpirySeconds", + "type": "uint256" + } + ], -+ "name": "transferFrom", ++ "name": "setTokenReportExpiry", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "", ++ "type": "address" ++ } ++ ], ++ "name": "tokenReportExpirySeconds", + "outputs": [ + { -+ "internalType": "bool", ++ "internalType": "uint256", + "name": "", -+ "type": "bool" ++ "type": "uint256" + } + ], -+ "stateMutability": "nonpayable", ++ "stateMutability": "view", + "type": "function" + }, + { @@ -3739,33 +6916,17 @@ + "type": "function" + }, + { ++ "anonymous": false, + "inputs": [ + { ++ "indexed": true, + "internalType": "address", -+ "name": "to", ++ "name": "newBreakerBox", + "type": "address" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "value", -+ "type": "uint256" -+ }, -+ { -+ "internalType": "string", -+ "name": "comment", -+ "type": "string" -+ } -+ ], -+ "name": "transferWithComment", -+ "outputs": [ -+ { -+ "internalType": "bool", -+ "name": "", -+ "type": "bool" + } + ], -+ "stateMutability": "nonpayable", -+ "type": "function" ++ "name": "BreakerBoxUpdated", ++ "type": "event" + }, + { + "anonymous": false, @@ -3773,13 +6934,26 @@ + { + "indexed": true, + "internalType": "address", -+ "name": "owner", ++ "name": "token", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", -+ "name": "spender", ++ "name": "equivalentToken", ++ "type": "address" ++ } ++ ], ++ "name": "EquivalentTokenSet", ++ "type": "event" ++ }, ++ { ++ "anonymous": false, ++ "inputs": [ ++ { ++ "indexed": true, ++ "internalType": "address", ++ "name": "token", + "type": "address" + }, + { @@ -3789,7 +6963,7 @@ + "type": "uint256" + } + ], -+ "name": "Approval", ++ "name": "MedianUpdated", + "type": "event" + }, + { @@ -3798,17 +6972,17 @@ + { + "indexed": true, + "internalType": "address", -+ "name": "previousOwner", ++ "name": "token", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", -+ "name": "newOwner", ++ "name": "oracleAddress", + "type": "address" + } + ], -+ "name": "OwnershipTransferred", ++ "name": "OracleAdded", + "type": "event" + }, + { @@ -3817,11 +6991,17 @@ + { + "indexed": true, + "internalType": "address", -+ "name": "registryAddress", ++ "name": "token", ++ "type": "address" ++ }, ++ { ++ "indexed": true, ++ "internalType": "address", ++ "name": "oracleAddress", + "type": "address" + } + ], -+ "name": "RegistrySet", ++ "name": "OracleRemoved", + "type": "event" + }, + { @@ -3830,110 +7010,100 @@ + { + "indexed": true, + "internalType": "address", -+ "name": "from", ++ "name": "token", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", -+ "name": "to", ++ "name": "oracle", + "type": "address" -+ }, -+ { -+ "indexed": false, -+ "internalType": "uint256", -+ "name": "value", -+ "type": "uint256" + } + ], -+ "name": "Transfer", ++ "name": "OracleReportRemoved", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { ++ "indexed": true, ++ "internalType": "address", ++ "name": "token", ++ "type": "address" ++ }, ++ { ++ "indexed": true, ++ "internalType": "address", ++ "name": "oracle", ++ "type": "address" ++ }, ++ { + "indexed": false, -+ "internalType": "string", -+ "name": "comment", -+ 
"type": "string" -+ } -+ ], -+ "name": "TransferComment", -+ "type": "event" -+ } -+] -\ No newline at end of file
[diff header: new file in CELO, +25 / -0]
diff --git OP/packages/contracts-bedrock/snapshots/abi/Initializable.json CELO/packages/contracts-bedrock/snapshots/abi/Initializable.json -new file mode 100644 -index 0000000000000000000000000000000000000000..aeef476ab67fdf303022548658b887d36bf6f042 ---- /dev/null -+++ CELO/packages/contracts-bedrock/snapshots/abi/Initializable.json -@@ -0,0 +1,26 @@ -+[ ++ "internalType": "uint256", ++ "name": "timestamp", ++ "type": "uint256" ++ }, ++ { ++ "indexed": false, ++ "internalType": "uint256", ++ "name": "value", ++ "type": "uint256" ++ } ++ ], ++ "name": "OracleReported", ++ "type": "event" ++ }, + { ++ "anonymous": false, + "inputs": [ + { -+ "internalType": "bool", -+ "name": "testingDeployment", -+ "type": "bool" ++ "indexed": true, ++ "internalType": "address", ++ "name": "previousOwner", ++ "type": "address" ++ }, ++ { ++ "indexed": true, ++ "internalType": "address", ++ "name": "newOwner", ++ "type": "address" + } + ], -+ "stateMutability": "nonpayable", -+ "type": "constructor" ++ "name": "OwnershipTransferred", ++ "type": "event" + }, + { -+ "inputs": [], -+ "name": "initialized", -+ "outputs": [ ++ "anonymous": false, ++ "inputs": [ + { -+ "internalType": "bool", -+ "name": "", -+ "type": "bool" ++ "indexed": false, ++ "internalType": "uint256", ++ "name": "reportExpiry", ++ "type": "uint256" + } + ], -+ "stateMutability": "view", -+ "type": "function" ++ "name": "ReportExpirySet", ++ "type": "event" ++ }, ++ { ++ "anonymous": false, ++ "inputs": [ ++ { ++ "indexed": false, ++ "internalType": "address", ++ "name": "token", ++ "type": "address" ++ }, ++ { ++ "indexed": false, ++ "internalType": "uint256", ++ "name": "reportExpiry", ++ "type": "uint256" ++ } ++ ], ++ "name": "TokenReportExpirySet", ++ "type": "event" + } +] \ No newline at end of file
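Initializable here is Celo's proxy-initialization base rather than OpenZeppelin's: the constructor takes a testingDeployment flag and the contract exposes a single initialized boolean. A sketch of the pattern this ABI implies; the initializer modifier name and the constructor polarity follow Celo monorepo convention and are not visible in this diff:

pragma solidity ^0.8.0;

contract Initializable {
    bool public initialized;

    constructor(bool testingDeployment) {
        // Production implementations sit behind a proxy and are initialized
        // through it; locking the implementation itself prevents anyone from
        // initializing it directly. Test deployments stay open.
        if (!testingDeployment) {
            initialized = true;
        }
    }

    // Guards each inheriting contract's initialize(...) entry point.
    modifier initializer() {
        require(!initialized, "contract already initialized");
        initialized = true;
        _;
    }
}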
@@ -3943,9 +7113,9 @@
-
diff --git OP/packages/contracts-bedrock/snapshots/abi/MentoFeeHandlerSeller.json CELO/packages/contracts-bedrock/snapshots/abi/MentoFeeHandlerSeller.json +
diff --git OP/packages/contracts-bedrock/snapshots/abi/StableTokenV2.json CELO/packages/contracts-bedrock/snapshots/abi/StableTokenV2.json new file mode 100644 -index 0000000000000000000000000000000000000000..7190d528858e5ac8ec5feae77b968b6afaca1d0e +index 0000000000000000000000000000000000000000..693b960cea99c0c7b6d9ab45341f3f56eda21853 --- /dev/null -+++ CELO/packages/contracts-bedrock/snapshots/abi/MentoFeeHandlerSeller.json -@@ -0,0 +1,350 @@ ++++ CELO/packages/contracts-bedrock/snapshots/abi/StableTokenV2.json +@@ -0,0 +1,742 @@ +[ + { + "inputs": [ + { + "internalType": "bool", -+ "name": "test", ++ "name": "disable", + "type": "bool" + } + ], @@ -3996,116 +7166,32 @@ + "type": "constructor" + }, + { -+ "stateMutability": "payable", -+ "type": "receive" -+ }, -+ { -+ "inputs": [ -+ { -+ "internalType": "uint256", -+ "name": "midPriceNumerator", -+ "type": "uint256" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "midPriceDenominator", -+ "type": "uint256" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "amount", -+ "type": "uint256" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "maxSlippage", -+ "type": "uint256" -+ } -+ ], -+ "name": "calculateMinAmount", -+ "outputs": [ -+ { -+ "internalType": "uint256", -+ "name": "", -+ "type": "uint256" -+ } -+ ], -+ "stateMutability": "pure", -+ "type": "function" -+ }, -+ { + "inputs": [], -+ "name": "getVersionNumber", ++ "name": "DOMAIN_SEPARATOR", + "outputs": [ + { -+ "internalType": "uint256", -+ "name": "", -+ "type": "uint256" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "", -+ "type": "uint256" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "", -+ "type": "uint256" -+ }, -+ { -+ "internalType": "uint256", ++ "internalType": "bytes32", + "name": "", -+ "type": "uint256" ++ "type": "bytes32" + } + ], -+ "stateMutability": "pure", ++ "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", -+ "name": "_registryAddress", ++ "name": "owner", + "type": "address" + }, + { -+ "internalType": "address[]", -+ "name": "tokenAddresses", -+ "type": "address[]" -+ }, -+ { -+ "internalType": "uint256[]", -+ "name": "newMininumReports", -+ "type": "uint256[]" -+ } -+ ], -+ "name": "initialize", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" -+ }, -+ { -+ "inputs": [], -+ "name": "initialized", -+ "outputs": [ -+ { -+ "internalType": "bool", -+ "name": "", -+ "type": "bool" -+ } -+ ], -+ "stateMutability": "view", -+ "type": "function" -+ }, -+ { -+ "inputs": [ -+ { + "internalType": "address", -+ "name": "", ++ "name": "spender", + "type": "address" + } + ], -+ "name": "minimumReports", ++ "name": "allowance", + "outputs": [ + { + "internalType": "uint256", @@ -4117,67 +7203,24 @@ + "type": "function" + }, + { -+ "inputs": [], -+ "name": "owner", -+ "outputs": [ -+ { -+ "internalType": "address", -+ "name": "", -+ "type": "address" -+ } -+ ], -+ "stateMutability": "view", -+ "type": "function" -+ }, -+ { -+ "inputs": [], -+ "name": "registry", -+ "outputs": [ -+ { -+ "internalType": "contract ICeloRegistry", -+ "name": "", -+ "type": "address" -+ } -+ ], -+ "stateMutability": "view", -+ "type": "function" -+ }, -+ { -+ "inputs": [], -+ "name": "renounceOwnership", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" -+ }, -+ { + "inputs": [ + { + "internalType": "address", -+ "name": "sellTokenAddress", -+ "type": "address" -+ }, -+ { -+ "internalType": "address", -+ "name": "buyTokenAddress", ++ "name": "spender", + "type": 
"address" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "maxSlippage", -+ "type": "uint256" + } + ], -+ "name": "sell", ++ "name": "approve", + "outputs": [ + { -+ "internalType": "uint256", ++ "internalType": "bool", + "name": "", -+ "type": "uint256" ++ "type": "bool" + } + ], + "stateMutability": "nonpayable", @@ -4187,52 +7230,43 @@ + "inputs": [ + { + "internalType": "address", -+ "name": "tokenAddress", ++ "name": "account", + "type": "address" -+ }, ++ } ++ ], ++ "name": "balanceOf", ++ "outputs": [ + { + "internalType": "uint256", -+ "name": "newMininumReports", ++ "name": "", + "type": "uint256" + } + ], -+ "name": "setMinimumReports", -+ "outputs": [], -+ "stateMutability": "nonpayable", ++ "stateMutability": "view", + "type": "function" + }, + { -+ "inputs": [ ++ "inputs": [], ++ "name": "broker", ++ "outputs": [ + { + "internalType": "address", -+ "name": "registryAddress", ++ "name": "", + "type": "address" + } + ], -+ "name": "setRegistry", -+ "outputs": [], -+ "stateMutability": "nonpayable", ++ "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { -+ "internalType": "address", -+ "name": "token", -+ "type": "address" -+ }, -+ { + "internalType": "uint256", -+ "name": "amount", ++ "name": "value", + "type": "uint256" -+ }, -+ { -+ "internalType": "address", -+ "name": "to", -+ "type": "address" + } + ], -+ "name": "transfer", ++ "name": "burn", + "outputs": [ + { + "internalType": "bool", @@ -4247,148 +7281,113 @@ + "inputs": [ + { + "internalType": "address", -+ "name": "newOwner", ++ "name": "from", + "type": "address" -+ } -+ ], -+ "name": "transferOwnership", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" -+ }, -+ { -+ "anonymous": false, -+ "inputs": [ ++ }, + { -+ "indexed": false, + "internalType": "address", -+ "name": "tokenAddress", ++ "name": "feeRecipient", ++ "type": "address" ++ }, ++ { ++ "internalType": "address", ++ "name": "gatewayFeeRecipient", ++ "type": "address" ++ }, ++ { ++ "internalType": "address", ++ "name": "communityFund", + "type": "address" + }, + { -+ "indexed": false, + "internalType": "uint256", -+ "name": "minimumReports", ++ "name": "refund", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "tipTxFee", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "gatewayFee", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "baseTxFee", + "type": "uint256" + } + ], -+ "name": "MinimumReportsSet", -+ "type": "event" ++ "name": "creditGasFees", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" + }, + { -+ "anonymous": false, + "inputs": [ + { -+ "indexed": true, + "internalType": "address", -+ "name": "previousOwner", ++ "name": "from", + "type": "address" + }, + { -+ "indexed": true, -+ "internalType": "address", -+ "name": "newOwner", -+ "type": "address" ++ "internalType": "uint256", ++ "name": "value", ++ "type": "uint256" + } + ], -+ "name": "OwnershipTransferred", -+ "type": "event" ++ "name": "debitGasFees", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" + }, + { -+ "anonymous": false, -+ "inputs": [ ++ "inputs": [], ++ "name": "decimals", ++ "outputs": [ + { -+ "indexed": true, -+ "internalType": "address", -+ "name": "registryAddress", -+ "type": "address" ++ "internalType": "uint8", ++ "name": "", ++ "type": "uint8" + } + ], -+ "name": "RegistrySet", -+ "type": "event" ++ 
"stateMutability": "view", ++ "type": "function" + }, + { -+ "anonymous": false, + "inputs": [ + { -+ "indexed": false, -+ "internalType": "address", -+ "name": "soldTokenAddress", -+ "type": "address" -+ }, -+ { -+ "indexed": false, + "internalType": "address", -+ "name": "boughtTokenAddress", ++ "name": "spender", + "type": "address" + }, + { -+ "indexed": false, + "internalType": "uint256", -+ "name": "amount", ++ "name": "subtractedValue", + "type": "uint256" + } + ], -+ "name": "TokenSold", -+ "type": "event" -+ } -+] -\ No newline at end of file
[diff header: new file in CELO, +248 / -0]
diff --git OP/packages/contracts-bedrock/snapshots/abi/MockSortedOracles.json CELO/packages/contracts-bedrock/snapshots/abi/MockSortedOracles.json -new file mode 100644 -index 0000000000000000000000000000000000000000..f56f9b579aa578b992b3f8960a77db6c768cbac9 ---- /dev/null -+++ CELO/packages/contracts-bedrock/snapshots/abi/MockSortedOracles.json -@@ -0,0 +1,249 @@ -+[ ++ "name": "decreaseAllowance", ++ "outputs": [ ++ { ++ "internalType": "bool", ++ "name": "", ++ "type": "bool" ++ } ++ ], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, + { + "inputs": [], -+ "name": "DENOMINATOR", ++ "name": "exchange", + "outputs": [ + { -+ "internalType": "uint256", ++ "internalType": "address", + "name": "", -+ "type": "uint256" ++ "type": "address" + } + ], + "stateMutability": "view", @@ -4398,11 +7397,16 @@ + "inputs": [ + { + "internalType": "address", -+ "name": "", ++ "name": "spender", + "type": "address" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "addedValue", ++ "type": "uint256" + } + ], -+ "name": "expired", ++ "name": "increaseAllowance", + "outputs": [ + { + "internalType": "bool", @@ -4410,95 +7414,92 @@ + "type": "bool" + } + ], -+ "stateMutability": "view", ++ "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { -+ "internalType": "address", -+ "name": "token", -+ "type": "address" -+ } -+ ], -+ "name": "getExchangeRate", -+ "outputs": [ ++ "internalType": "string", ++ "name": "_name", ++ "type": "string" ++ }, + { -+ "internalType": "uint256", -+ "name": "numerator", -+ "type": "uint256" ++ "internalType": "string", ++ "name": "_symbol", ++ "type": "string" + }, + { -+ "internalType": "uint256", -+ "name": "denominator", -+ "type": "uint256" ++ "internalType": "address[]", ++ "name": "initialBalanceAddresses", ++ "type": "address[]" ++ }, ++ { ++ "internalType": "uint256[]", ++ "name": "initialBalanceValues", ++ "type": "uint256[]" + } + ], -+ "stateMutability": "view", ++ "name": "initialize", ++ "outputs": [], ++ "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", -+ "name": "token", ++ "name": "_broker", + "type": "address" -+ } -+ ], -+ "name": "isOldestReportExpired", -+ "outputs": [ ++ }, + { -+ "internalType": "bool", -+ "name": "", -+ "type": "bool" ++ "internalType": "address", ++ "name": "_validators", ++ "type": "address" + }, + { + "internalType": "address", -+ "name": "", ++ "name": "_exchange", + "type": "address" + } + ], -+ "stateMutability": "view", ++ "name": "initializeV2", ++ "outputs": [], ++ "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", -+ "name": "token", ++ "name": "to", + "type": "address" -+ } -+ ], -+ "name": "medianRate", -+ "outputs": [ -+ { -+ "internalType": "uint256", -+ "name": "", -+ "type": "uint256" + }, + { + "internalType": "uint256", -+ "name": "", ++ "name": "value", + "type": "uint256" + } + ], -+ "stateMutability": "view", -+ "type": "function" -+ }, -+ { -+ "inputs": [ ++ "name": "mint", ++ "outputs": [ + { -+ "internalType": "address", ++ "internalType": "bool", + "name": "", -+ "type": "address" ++ "type": "bool" + } + ], -+ "name": "medianTimestamp", ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "name", + "outputs": [ + { -+ "internalType": "uint256", ++ "internalType": "string", + "name": "", -+ "type": "uint256" ++ "type": "string" + } + ], + "stateMutability": "view", @@ -4508,11 +7509,11 @@ + "inputs": [ + { + 
"internalType": "address", -+ "name": "", ++ "name": "owner", + "type": "address" + } + ], -+ "name": "numRates", ++ "name": "nonces", + "outputs": [ + { + "internalType": "uint256", @@ -4524,21 +7525,15 @@ + "type": "function" + }, + { -+ "inputs": [ ++ "inputs": [], ++ "name": "owner", ++ "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], -+ "name": "numerators", -+ "outputs": [ -+ { -+ "internalType": "uint256", -+ "name": "", -+ "type": "uint256" -+ } -+ ], + "stateMutability": "view", + "type": "function" + }, @@ -4546,53 +7541,48 @@ + "inputs": [ + { + "internalType": "address", -+ "name": "token", ++ "name": "owner", + "type": "address" + }, + { -+ "internalType": "uint256", -+ "name": "numerator", -+ "type": "uint256" -+ } -+ ], -+ "name": "setMedianRate", -+ "outputs": [ -+ { -+ "internalType": "bool", -+ "name": "", -+ "type": "bool" -+ } -+ ], -+ "stateMutability": "nonpayable", -+ "type": "function" -+ }, -+ { -+ "inputs": [ -+ { + "internalType": "address", -+ "name": "token", ++ "name": "spender", + "type": "address" + }, + { + "internalType": "uint256", -+ "name": "timestamp", ++ "name": "value", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "deadline", + "type": "uint256" ++ }, ++ { ++ "internalType": "uint8", ++ "name": "v", ++ "type": "uint8" ++ }, ++ { ++ "internalType": "bytes32", ++ "name": "r", ++ "type": "bytes32" ++ }, ++ { ++ "internalType": "bytes32", ++ "name": "s", ++ "type": "bytes32" + } + ], -+ "name": "setMedianTimestamp", ++ "name": "permit", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "token", -+ "type": "address" -+ } -+ ], -+ "name": "setMedianTimestampToNow", ++ "inputs": [], ++ "name": "renounceOwnership", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" @@ -4601,16 +7591,11 @@ + "inputs": [ + { + "internalType": "address", -+ "name": "token", ++ "name": "_broker", + "type": "address" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "rate", -+ "type": "uint256" + } + ], -+ "name": "setNumRates", ++ "name": "setBroker", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" @@ -4619,78 +7604,11 @@ + "inputs": [ + { + "internalType": "address", -+ "name": "token", ++ "name": "_exchange", + "type": "address" + } + ], -+ "name": "setOldestReportExpired", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" -+ } -+] -\ No newline at end of file
[diff header: OP → CELO, +84 / -0]
diff --git OP/packages/contracts-bedrock/snapshots/abi/OptimismMintableERC20.json CELO/packages/contracts-bedrock/snapshots/abi/OptimismMintableERC20.json -index 3c6f5e9ab34802a95d672b4b5a34ca3f431645f9..8ced7535ac1dea765bb9709a1be28c9e3824547f 100644 ---- OP/packages/contracts-bedrock/snapshots/abi/OptimismMintableERC20.json -+++ CELO/packages/contracts-bedrock/snapshots/abi/OptimismMintableERC20.json -@@ -155,6 +155,90 @@ "stateMutability": "nonpayable", - "type": "function" - }, - { -+ "inputs": [ -+ { -+ "internalType": "address[]", -+ "name": "recipients", -+ "type": "address[]" -+ }, -+ { -+ "internalType": "uint256[]", -+ "name": "amounts", -+ "type": "uint256[]" -+ } -+ ], -+ "name": "creditGasFees", ++ "name": "setExchange", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" @@ -4699,170 +7617,103 @@ + "inputs": [ + { + "internalType": "address", -+ "name": "from", -+ "type": "address" -+ }, -+ { -+ "internalType": "address", -+ "name": "feeRecipient", -+ "type": "address" -+ }, -+ { -+ "internalType": "address", -+ "name": "", -+ "type": "address" -+ }, -+ { -+ "internalType": "address", -+ "name": "communityFund", ++ "name": "_validators", + "type": "address" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "refund", -+ "type": "uint256" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "tipTxFee", -+ "type": "uint256" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "", -+ "type": "uint256" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "baseTxFee", -+ "type": "uint256" + } + ], -+ "name": "creditGasFees", ++ "name": "setValidators", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "from", -+ "type": "address" -+ }, ++ "inputs": [], ++ "name": "symbol", ++ "outputs": [ + { -+ "internalType": "uint256", -+ "name": "value", -+ "type": "uint256" ++ "internalType": "string", ++ "name": "", ++ "type": "string" + } + ], -+ "name": "debitGasFees", -+ "outputs": [], -+ "stateMutability": "nonpayable", ++ "stateMutability": "view", + "type": "function" + }, + { - "inputs": [], - "name": "decimals", - "outputs": [
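The creditGasFees/debitGasFees additions to OptimismMintableERC20 are the hooks Celo's fee abstraction needs so gas can be paid in an ERC-20: the node debits the maximum fee from the payer before execution and credits the refund and fee shares afterwards. Reconstructed as a Solidity interface; the caller restriction noted in the comments is Celo convention, not visible in this ABI:

pragma solidity ^0.8.0;

// Hooks a fee-currency token must expose for gas settlement. In Celo's
// convention only the protocol (zero-address caller) may invoke these.
interface IFeeCurrency {
    // Debit the maximum possible fee from `from` before execution.
    function debitGasFees(address from, uint256 value) external;

    // Newer form: credit refund and fee shares to a list of recipients.
    function creditGasFees(address[] calldata recipients, uint256[] calldata amounts) external;

    // Older Celo form; the unnamed slots are the removed gateway-fee fields.
    function creditGasFees(
        address from,
        address feeRecipient,
        address, // gatewayFeeRecipient (unused)
        address communityFund,
        uint256 refund,
        uint256 tipTxFee,
        uint256, // gatewayFee (unused)
        uint256 baseTxFee
    ) external;
}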
[diff header: new file in CELO, +831 / -0]
diff --git OP/packages/contracts-bedrock/snapshots/abi/SortedOracles.json CELO/packages/contracts-bedrock/snapshots/abi/SortedOracles.json -new file mode 100644 -index 0000000000000000000000000000000000000000..12a253c5c08be2f7b7d727d99d56c5499b5ed43a ---- /dev/null -+++ CELO/packages/contracts-bedrock/snapshots/abi/SortedOracles.json -@@ -0,0 +1,832 @@ -+[ ++ "inputs": [], ++ "name": "totalSupply", ++ "outputs": [ ++ { ++ "internalType": "uint256", ++ "name": "", ++ "type": "uint256" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, + { + "inputs": [ + { ++ "internalType": "address", ++ "name": "to", ++ "type": "address" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "amount", ++ "type": "uint256" ++ } ++ ], ++ "name": "transfer", ++ "outputs": [ ++ { + "internalType": "bool", -+ "name": "test", ++ "name": "", + "type": "bool" + } + ], + "stateMutability": "nonpayable", -+ "type": "constructor" ++ "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", -+ "name": "token", ++ "name": "from", + "type": "address" + }, + { + "internalType": "address", -+ "name": "oracleAddress", ++ "name": "to", + "type": "address" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "amount", ++ "type": "uint256" + } + ], -+ "name": "addOracle", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" -+ }, -+ { -+ "inputs": [], -+ "name": "breakerBox", ++ "name": "transferFrom", + "outputs": [ + { -+ "internalType": "contract IBreakerBox", ++ "internalType": "bool", + "name": "", -+ "type": "address" ++ "type": "bool" + } + ], -+ "stateMutability": "view", ++ "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", -+ "name": "token", ++ "name": "newOwner", + "type": "address" + } + ], -+ "name": "deleteEquivalentToken", ++ "name": "transferOwnership", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" @@ -4871,30 +7722,34 @@ + "inputs": [ + { + "internalType": "address", -+ "name": "", ++ "name": "to", + "type": "address" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "value", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "string", ++ "name": "comment", ++ "type": "string" + } + ], -+ "name": "equivalentTokens", ++ "name": "transferWithComment", + "outputs": [ + { -+ "internalType": "address", -+ "name": "token", -+ "type": "address" ++ "internalType": "bool", ++ "name": "", ++ "type": "bool" + } + ], -+ "stateMutability": "view", ++ "stateMutability": "nonpayable", + "type": "function" + }, + { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "token", -+ "type": "address" -+ } -+ ], -+ "name": "getEquivalentToken", ++ "inputs": [], ++ "name": "validators", + "outputs": [ + { + "internalType": "address", @@ -4906,177 +7761,235 @@ + "type": "function" + }, + { ++ "anonymous": false, + "inputs": [ + { ++ "indexed": true, + "internalType": "address", -+ "name": "token", ++ "name": "owner", + "type": "address" -+ } -+ ], -+ "name": "getExchangeRate", -+ "outputs": [ ++ }, + { -+ "internalType": "uint256", -+ "name": "numerator", -+ "type": "uint256" ++ "indexed": true, ++ "internalType": "address", ++ "name": "spender", ++ "type": "address" + }, + { ++ "indexed": false, + "internalType": "uint256", -+ "name": "denominator", ++ "name": "value", + "type": "uint256" + } + ], -+ "stateMutability": "view", -+ "type": "function" ++ "name": "Approval", ++ "type": "event" + }, + { ++ "anonymous": false, + "inputs": [ + { ++ "indexed": false, + "internalType": 
"address", -+ "name": "token", ++ "name": "broker", + "type": "address" + } + ], -+ "name": "getOracles", -+ "outputs": [ ++ "name": "BrokerUpdated", ++ "type": "event" ++ }, ++ { ++ "anonymous": false, ++ "inputs": [ + { -+ "internalType": "address[]", -+ "name": "", -+ "type": "address[]" ++ "indexed": false, ++ "internalType": "address", ++ "name": "exchange", ++ "type": "address" + } + ], -+ "stateMutability": "view", -+ "type": "function" ++ "name": "ExchangeUpdated", ++ "type": "event" ++ }, ++ { ++ "anonymous": false, ++ "inputs": [ ++ { ++ "indexed": false, ++ "internalType": "uint8", ++ "name": "version", ++ "type": "uint8" ++ } ++ ], ++ "name": "Initialized", ++ "type": "event" + }, + { ++ "anonymous": false, + "inputs": [ + { ++ "indexed": true, + "internalType": "address", -+ "name": "token", ++ "name": "previousOwner", ++ "type": "address" ++ }, ++ { ++ "indexed": true, ++ "internalType": "address", ++ "name": "newOwner", + "type": "address" + } + ], -+ "name": "getRates", -+ "outputs": [ ++ "name": "OwnershipTransferred", ++ "type": "event" ++ }, ++ { ++ "anonymous": false, ++ "inputs": [ + { -+ "internalType": "address[]", -+ "name": "", -+ "type": "address[]" ++ "indexed": true, ++ "internalType": "address", ++ "name": "from", ++ "type": "address" + }, + { -+ "internalType": "uint256[]", -+ "name": "", -+ "type": "uint256[]" ++ "indexed": true, ++ "internalType": "address", ++ "name": "to", ++ "type": "address" + }, + { -+ "internalType": "enum SortedLinkedListWithMedian.MedianRelation[]", -+ "name": "", -+ "type": "uint8[]" ++ "indexed": false, ++ "internalType": "uint256", ++ "name": "value", ++ "type": "uint256" + } + ], -+ "stateMutability": "view", -+ "type": "function" ++ "name": "Transfer", ++ "type": "event" ++ }, ++ { ++ "anonymous": false, ++ "inputs": [ ++ { ++ "indexed": false, ++ "internalType": "string", ++ "name": "comment", ++ "type": "string" ++ } ++ ], ++ "name": "TransferComment", ++ "type": "event" + }, + { ++ "anonymous": false, ++ "inputs": [ ++ { ++ "indexed": false, ++ "internalType": "address", ++ "name": "validators", ++ "type": "address" ++ } ++ ], ++ "name": "ValidatorsUpdated", ++ "type": "event" ++ } ++] +\ No newline at end of file
[diff header: new file in CELO, +480 / -0]
diff --git OP/packages/contracts-bedrock/snapshots/abi/UniswapFeeHandlerSeller.json CELO/packages/contracts-bedrock/snapshots/abi/UniswapFeeHandlerSeller.json +new file mode 100644 +index 0000000000000000000000000000000000000000..19c31c979af28ecd9ffcfcda9095f993e30b3ed1 +--- /dev/null ++++ CELO/packages/contracts-bedrock/snapshots/abi/UniswapFeeHandlerSeller.json +@@ -0,0 +1,481 @@ ++[ ++ { + "inputs": [ + { -+ "internalType": "address", -+ "name": "token", -+ "type": "address" -+ } -+ ], -+ "name": "getTimestamps", -+ "outputs": [ -+ { -+ "internalType": "address[]", -+ "name": "", -+ "type": "address[]" -+ }, -+ { -+ "internalType": "uint256[]", -+ "name": "", -+ "type": "uint256[]" -+ }, -+ { -+ "internalType": "enum SortedLinkedListWithMedian.MedianRelation[]", -+ "name": "", -+ "type": "uint8[]" ++ "internalType": "bool", ++ "name": "test", ++ "type": "bool" + } + ], -+ "stateMutability": "view", -+ "type": "function" ++ "stateMutability": "nonpayable", ++ "type": "constructor" + }, + { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "token", -+ "type": "address" -+ } -+ ], -+ "name": "getTokenReportExpirySeconds", -+ "outputs": [ -+ { -+ "internalType": "uint256", -+ "name": "", -+ "type": "uint256" -+ } -+ ], -+ "stateMutability": "view", -+ "type": "function" ++ "stateMutability": "payable", ++ "type": "receive" + }, + { -+ "inputs": [], -+ "name": "getVersionNumber", -+ "outputs": [ ++ "inputs": [ + { + "internalType": "uint256", -+ "name": "", ++ "name": "midPriceNumerator", + "type": "uint256" + }, + { + "internalType": "uint256", -+ "name": "", ++ "name": "midPriceDenominator", + "type": "uint256" + }, + { + "internalType": "uint256", -+ "name": "", ++ "name": "amount", + "type": "uint256" + }, + { + "internalType": "uint256", -+ "name": "", -+ "type": "uint256" -+ } -+ ], -+ "stateMutability": "pure", -+ "type": "function" -+ }, -+ { -+ "inputs": [ -+ { -+ "internalType": "uint256", -+ "name": "_reportExpirySeconds", ++ "name": "maxSlippage", + "type": "uint256" + } + ], -+ "name": "initialize", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" -+ }, -+ { -+ "inputs": [], -+ "name": "initialized", ++ "name": "calculateMinAmount", + "outputs": [ + { -+ "internalType": "bool", ++ "internalType": "uint256", + "name": "", -+ "type": "bool" ++ "type": "uint256" + } + ], -+ "stateMutability": "view", ++ "stateMutability": "pure", + "type": "function" + }, + { @@ -5087,55 +8000,20 @@ + "type": "address" + } + ], -+ "name": "isOldestReportExpired", -+ "outputs": [ -+ { -+ "internalType": "bool", -+ "name": "", -+ "type": "bool" -+ }, -+ { -+ "internalType": "address", -+ "name": "", -+ "type": "address" -+ } -+ ], -+ "stateMutability": "view", -+ "type": "function" -+ }, -+ { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "", -+ "type": "address" -+ }, -+ { -+ "internalType": "address", -+ "name": "", -+ "type": "address" -+ } -+ ], -+ "name": "isOracle", ++ "name": "getRoutersForToken", + "outputs": [ + { -+ "internalType": "bool", ++ "internalType": "address[]", + "name": "", -+ "type": "bool" ++ "type": "address[]" + } + ], + "stateMutability": "view", + "type": "function" + }, + { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "token", -+ "type": "address" -+ } -+ ], -+ "name": "medianRate", ++ "inputs": [], ++ "name": "getVersionNumber", + "outputs": [ + { + "internalType": "uint256", @@ -5146,21 +8024,7 @@ + "internalType": "uint256", + "name": "", + "type": "uint256" -+ } -+ ], -+ "stateMutability": "view", -+ 
"type": "function" -+ }, -+ { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "token", -+ "type": "address" -+ } -+ ], -+ "name": "medianRateWithoutEquivalentMapping", -+ "outputs": [ ++ }, + { + "internalType": "uint256", + "name": "", @@ -5172,42 +8036,40 @@ + "type": "uint256" + } + ], -+ "stateMutability": "view", ++ "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", -+ "name": "token", ++ "name": "_registryAddress", + "type": "address" -+ } -+ ], -+ "name": "medianTimestamp", -+ "outputs": [ ++ }, + { -+ "internalType": "uint256", -+ "name": "", -+ "type": "uint256" ++ "internalType": "address[]", ++ "name": "tokenAddresses", ++ "type": "address[]" ++ }, ++ { ++ "internalType": "uint256[]", ++ "name": "newMininumReports", ++ "type": "uint256[]" + } + ], -+ "stateMutability": "view", ++ "name": "initialize", ++ "outputs": [], ++ "stateMutability": "nonpayable", + "type": "function" + }, + { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "token", -+ "type": "address" -+ } -+ ], -+ "name": "numRates", ++ "inputs": [], ++ "name": "initialized", + "outputs": [ + { -+ "internalType": "uint256", ++ "internalType": "bool", + "name": "", -+ "type": "uint256" ++ "type": "bool" + } + ], + "stateMutability": "view", @@ -5217,11 +8079,11 @@ + "inputs": [ + { + "internalType": "address", -+ "name": "token", ++ "name": "", + "type": "address" + } + ], -+ "name": "numTimestamps", ++ "name": "minimumReports", + "outputs": [ + { + "internalType": "uint256", @@ -5233,19 +8095,8 @@ + "type": "function" + }, + { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "", -+ "type": "address" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "", -+ "type": "uint256" -+ } -+ ], -+ "name": "oracles", ++ "inputs": [], ++ "name": "owner", + "outputs": [ + { + "internalType": "address", @@ -5258,10 +8109,10 @@ + }, + { + "inputs": [], -+ "name": "owner", ++ "name": "registry", + "outputs": [ + { -+ "internalType": "address", ++ "internalType": "contract ICeloRegistry", + "name": "", + "type": "address" + } @@ -5277,35 +8128,12 @@ + "type": "address" + }, + { -+ "internalType": "uint256", -+ "name": "n", -+ "type": "uint256" -+ } -+ ], -+ "name": "removeExpiredReports", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" -+ }, -+ { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "token", -+ "type": "address" -+ }, -+ { + "internalType": "address", -+ "name": "oracleAddress", ++ "name": "router", + "type": "address" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "index", -+ "type": "uint256" + } + ], -+ "name": "removeOracle", ++ "name": "removeRouter", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" @@ -5321,33 +8149,26 @@ + "inputs": [ + { + "internalType": "address", -+ "name": "token", ++ "name": "sellTokenAddress", + "type": "address" + }, + { -+ "internalType": "uint256", -+ "name": "value", -+ "type": "uint256" -+ }, -+ { + "internalType": "address", -+ "name": "lesserKey", ++ "name": "buyTokenAddress", + "type": "address" + }, + { -+ "internalType": "address", -+ "name": "greaterKey", -+ "type": "address" ++ "internalType": "uint256", ++ "name": "amount", ++ "type": "uint256" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "maxSlippage", ++ "type": "uint256" + } + ], -+ "name": "report", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" -+ }, -+ { -+ "inputs": [], -+ "name": "reportExpirySeconds", ++ "name": "sell", + 
"outputs": [ + { + "internalType": "uint256", @@ -5355,18 +8176,23 @@ + "type": "uint256" + } + ], -+ "stateMutability": "view", ++ "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { -+ "internalType": "contract IBreakerBox", -+ "name": "newBreakerBox", ++ "internalType": "address", ++ "name": "tokenAddress", + "type": "address" ++ }, ++ { ++ "internalType": "uint256", ++ "name": "newMininumReports", ++ "type": "uint256" + } + ], -+ "name": "setBreakerBox", ++ "name": "setMinimumReports", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" @@ -5375,16 +8201,11 @@ + "inputs": [ + { + "internalType": "address", -+ "name": "token", -+ "type": "address" -+ }, -+ { -+ "internalType": "address", -+ "name": "equivalentToken", ++ "name": "registryAddress", + "type": "address" + } + ], -+ "name": "setEquivalentToken", ++ "name": "setRegistry", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" @@ -5392,12 +8213,17 @@ + { + "inputs": [ + { -+ "internalType": "uint256", -+ "name": "_reportExpirySeconds", -+ "type": "uint256" ++ "internalType": "address", ++ "name": "token", ++ "type": "address" ++ }, ++ { ++ "internalType": "address", ++ "name": "router", ++ "type": "address" + } + ], -+ "name": "setReportExpiry", ++ "name": "setRouter", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" @@ -5406,37 +8232,29 @@ + "inputs": [ + { + "internalType": "address", -+ "name": "_token", ++ "name": "token", + "type": "address" + }, + { + "internalType": "uint256", -+ "name": "_reportExpirySeconds", ++ "name": "amount", + "type": "uint256" -+ } -+ ], -+ "name": "setTokenReportExpiry", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" -+ }, -+ { -+ "inputs": [ ++ }, + { + "internalType": "address", -+ "name": "", ++ "name": "to", + "type": "address" + } + ], -+ "name": "tokenReportExpirySeconds", ++ "name": "transfer", + "outputs": [ + { -+ "internalType": "uint256", ++ "internalType": "bool", + "name": "", -+ "type": "uint256" ++ "type": "bool" + } + ], -+ "stateMutability": "view", ++ "stateMutability": "nonpayable", + "type": "function" + }, + { @@ -5456,13 +8274,19 @@ + "anonymous": false, + "inputs": [ + { -+ "indexed": true, ++ "indexed": false, + "internalType": "address", -+ "name": "newBreakerBox", ++ "name": "tokenAddress", + "type": "address" ++ }, ++ { ++ "indexed": false, ++ "internalType": "uint256", ++ "name": "minimumReports", ++ "type": "uint256" + } + ], -+ "name": "BreakerBoxUpdated", ++ "name": "MinimumReportsSet", + "type": "event" + }, + { @@ -5471,17 +8295,17 @@ + { + "indexed": true, + "internalType": "address", -+ "name": "token", ++ "name": "previousOwner", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", -+ "name": "equivalentToken", ++ "name": "newOwner", + "type": "address" + } + ], -+ "name": "EquivalentTokenSet", ++ "name": "OwnershipTransferred", + "type": "event" + }, + { @@ -5490,17 +8314,23 @@ + { + "indexed": true, + "internalType": "address", -+ "name": "token", ++ "name": "tokneAddress", ++ "type": "address" ++ }, ++ { ++ "indexed": true, ++ "internalType": "address", ++ "name": "router", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", -+ "name": "value", ++ "name": "quote", + "type": "uint256" + } + ], -+ "name": "MedianUpdated", ++ "name": "ReceivedQuote", + "type": "event" + }, + { @@ -5509,56 +8339,196 @@ + { + "indexed": true, + "internalType": "address", ++ "name": "registryAddress", ++ 
"type": "address" ++ } ++ ], ++ "name": "RegistrySet", ++ "type": "event" ++ }, ++ { ++ "anonymous": false, ++ "inputs": [ ++ { ++ "indexed": false, ++ "internalType": "address", + "name": "token", + "type": "address" + }, + { -+ "indexed": true, ++ "indexed": false, + "internalType": "address", -+ "name": "oracleAddress", ++ "name": "router", + "type": "address" + } + ], -+ "name": "OracleAdded", ++ "name": "RouterAddressRemoved", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { -+ "indexed": true, ++ "indexed": false, + "internalType": "address", + "name": "token", + "type": "address" + }, + { -+ "indexed": true, ++ "indexed": false, + "internalType": "address", -+ "name": "oracleAddress", ++ "name": "router", + "type": "address" + } + ], -+ "name": "OracleRemoved", ++ "name": "RouterAddressSet", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { -+ "indexed": true, ++ "indexed": false, + "internalType": "address", -+ "name": "token", ++ "name": "router", ++ "type": "address" ++ } ++ ], ++ "name": "RouterUsed", ++ "type": "event" ++ }, ++ { ++ "anonymous": false, ++ "inputs": [ ++ { ++ "indexed": false, ++ "internalType": "address", ++ "name": "soldTokenAddress", + "type": "address" + }, + { -+ "indexed": true, ++ "indexed": false, + "internalType": "address", -+ "name": "oracle", ++ "name": "boughtTokenAddress", ++ "type": "address" ++ }, ++ { ++ "indexed": false, ++ "internalType": "uint256", ++ "name": "amount", ++ "type": "uint256" ++ } ++ ], ++ "name": "TokenSold", ++ "type": "event" ++ } ++] +\ No newline at end of file
[diff header: new file in CELO, +92 / -0]
diff --git OP/packages/contracts-bedrock/snapshots/abi/UsingRegistry.json CELO/packages/contracts-bedrock/snapshots/abi/UsingRegistry.json +new file mode 100644 +index 0000000000000000000000000000000000000000..dc8fa7e0f21ca37add36efa01627337c9521293c +--- /dev/null ++++ CELO/packages/contracts-bedrock/snapshots/abi/UsingRegistry.json +@@ -0,0 +1,93 @@ ++[ ++ { ++ "inputs": [], ++ "name": "owner", ++ "outputs": [ ++ { ++ "internalType": "address", ++ "name": "", ++ "type": "address" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "registry", ++ "outputs": [ ++ { ++ "internalType": "contract ICeloRegistry", ++ "name": "", ++ "type": "address" ++ } ++ ], ++ "stateMutability": "view", ++ "type": "function" ++ }, ++ { ++ "inputs": [], ++ "name": "renounceOwnership", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "registryAddress", ++ "type": "address" ++ } ++ ], ++ "name": "setRegistry", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" ++ }, ++ { ++ "inputs": [ ++ { ++ "internalType": "address", ++ "name": "newOwner", + "type": "address" + } + ], -+ "name": "OracleReportRemoved", -+ "type": "event" ++ "name": "transferOwnership", ++ "outputs": [], ++ "stateMutability": "nonpayable", ++ "type": "function" + }, + { + "anonymous": false, @@ -5566,29 +8536,17 @@ + { + "indexed": true, + "internalType": "address", -+ "name": "token", ++ "name": "previousOwner", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", -+ "name": "oracle", ++ "name": "newOwner", + "type": "address" -+ }, -+ { -+ "indexed": false, -+ "internalType": "uint256", -+ "name": "timestamp", -+ "type": "uint256" -+ }, -+ { -+ "indexed": false, -+ "internalType": "uint256", -+ "name": "value", -+ "type": "uint256" + } + ], -+ "name": "OracleReported", ++ "name": "OwnershipTransferred", + "type": "event" + }, + { @@ -5597,50 +8555,277 @@ + { + "indexed": true, + "internalType": "address", -+ "name": "previousOwner", -+ "type": "address" -+ }, -+ { -+ "indexed": true, -+ "internalType": "address", -+ "name": "newOwner", ++ "name": "registryAddress", + "type": "address" + } + ], -+ "name": "OwnershipTransferred", ++ "name": "RegistrySet", + "type": "event" ++ } ++] +\ No newline at end of file
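UsingRegistry is the shared base that gives contracts a pointer into the CeloRegistry, through which peer contracts are resolved by the hash of their registered name. A sketch of the lookup pattern; getAddressFor and the keccak256-of-name identifier are Celo monorepo conventions, while only registry()/setRegistry() appear in this ABI:

pragma solidity ^0.8.0;

interface ICeloRegistry {
    function getAddressFor(bytes32 identifier) external view returns (address);
}

contract UsesRegistry {
    ICeloRegistry public registry;

    event RegistrySet(address indexed registryAddress);

    // The real contract restricts this with onlyOwner.
    function setRegistry(address registryAddress) public {
        registry = ICeloRegistry(registryAddress);
        emit RegistrySet(registryAddress);
    }

    // Resolve a peer contract by the hash of its registered name.
    function sortedOracles() internal view returns (address) {
        return registry.getAddressFor(keccak256(abi.encodePacked("SortedOracles")));
    }
}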
[diff header: new file in CELO, +0 / -0]
diff --git OP/packages/contracts-bedrock/snapshots/storageLayout/CalledByVm.json CELO/packages/contracts-bedrock/snapshots/storageLayout/CalledByVm.json +new file mode 100644 +index 0000000000000000000000000000000000000000..0637a088a01e8ddab3bf3fa98dbe804cbde1a0dc +--- /dev/null ++++ CELO/packages/contracts-bedrock/snapshots/storageLayout/CalledByVm.json +@@ -0,0 +1 @@ ++[] +\ No newline at end of file
[diff header: new file in CELO, +22 / -0]
diff --git OP/packages/contracts-bedrock/snapshots/storageLayout/CeloRegistry.json CELO/packages/contracts-bedrock/snapshots/storageLayout/CeloRegistry.json +new file mode 100644 +index 0000000000000000000000000000000000000000..17b0df2bd7f9e8254e7ac4730d34917b70b3063b +--- /dev/null ++++ CELO/packages/contracts-bedrock/snapshots/storageLayout/CeloRegistry.json +@@ -0,0 +1,23 @@ ++[ ++ { ++ "bytes": "20", ++ "label": "_owner", ++ "offset": 0, ++ "slot": "0", ++ "type": "address" ++ }, ++ { ++ "bytes": "1", ++ "label": "initialized", ++ "offset": 20, ++ "slot": "0", ++ "type": "bool" ++ }, ++ { ++ "bytes": "32", ++ "label": "registry", ++ "offset": 0, ++ "slot": "1", ++ "type": "mapping(bytes32 => address)" ++ } ++] +\ No newline at end of file
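These storageLayout snapshots pin each contract's slot assignments so upgrades cannot silently shift state. Reading the CeloRegistry table above: the 20-byte _owner and the 1-byte initialized flag pack together into slot 0 (offsets 0 and 20), and the registry mapping takes slot 1. The equivalent declaration order, as a sketch:

pragma solidity ^0.8.0;

// Declaration order reproducing the CeloRegistry layout above; Solidity
// packs adjacent variables into one slot when they fit within 32 bytes.
contract CeloRegistryStorageShape {
    address internal _owner;                      // slot 0, offset 0, 20 bytes
    bool public initialized;                      // slot 0, offset 20, 1 byte
    mapping(bytes32 => address) public registry;  // slot 1
}

Note that GoldToken below declares the pair in the opposite order (initialized at offset 0, _owner at offset 1); both orderings still pack into a single slot.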
[diff header: new file in CELO, +29 / -0]
diff --git OP/packages/contracts-bedrock/snapshots/storageLayout/FeeCurrencyDirectory.json CELO/packages/contracts-bedrock/snapshots/storageLayout/FeeCurrencyDirectory.json +new file mode 100644 +index 0000000000000000000000000000000000000000..61ccdc5fb15116df778992284198adbb9aeaa26b +--- /dev/null ++++ CELO/packages/contracts-bedrock/snapshots/storageLayout/FeeCurrencyDirectory.json +@@ -0,0 +1,30 @@ ++[ ++ { ++ "bytes": "1", ++ "label": "initialized", ++ "offset": 0, ++ "slot": "0", ++ "type": "bool" ++ }, ++ { ++ "bytes": "20", ++ "label": "_owner", ++ "offset": 1, ++ "slot": "0", ++ "type": "address" ++ }, ++ { ++ "bytes": "32", ++ "label": "currencies", ++ "offset": 0, ++ "slot": "1", ++ "type": "mapping(address => struct IFeeCurrencyDirectory.CurrencyConfig)" ++ }, ++ { ++ "bytes": "32", ++ "label": "currencyList", ++ "offset": 0, ++ "slot": "2", ++ "type": "address[]" ++ } ++] +\ No newline at end of file
[diff header: new file in CELO, +22 / -0]
diff --git OP/packages/contracts-bedrock/snapshots/storageLayout/FeeCurrencyWhitelist.json CELO/packages/contracts-bedrock/snapshots/storageLayout/FeeCurrencyWhitelist.json +new file mode 100644 +index 0000000000000000000000000000000000000000..fed27094a71bd0794f119b2f272d22847d0e6b2d +--- /dev/null ++++ CELO/packages/contracts-bedrock/snapshots/storageLayout/FeeCurrencyWhitelist.json +@@ -0,0 +1,23 @@ ++[ ++ { ++ "bytes": "20", ++ "label": "_owner", ++ "offset": 0, ++ "slot": "0", ++ "type": "address" + }, + { -+ "anonymous": false, -+ "inputs": [ -+ { -+ "indexed": false, -+ "internalType": "uint256", -+ "name": "reportExpiry", -+ "type": "uint256" -+ } -+ ], -+ "name": "ReportExpirySet", -+ "type": "event" ++ "bytes": "1", ++ "label": "initialized", ++ "offset": 20, ++ "slot": "0", ++ "type": "bool" + }, + { -+ "anonymous": false, -+ "inputs": [ -+ { -+ "indexed": false, -+ "internalType": "address", -+ "name": "token", -+ "type": "address" -+ }, -+ { -+ "indexed": false, -+ "internalType": "uint256", -+ "name": "reportExpiry", -+ "type": "uint256" -+ } -+ ], -+ "name": "TokenReportExpirySet", -+ "type": "event" ++ "bytes": "32", ++ "label": "whitelist", ++ "offset": 0, ++ "slot": "1", ++ "type": "address[]" + } +] \ No newline at end of file
@@ -5650,9 +8835,9 @@
-
diff --git OP/packages/contracts-bedrock/snapshots/abi/StableTokenV2.json CELO/packages/contracts-bedrock/snapshots/abi/StableTokenV2.json +
diff --git OP/packages/contracts-bedrock/snapshots/storageLayout/FeeHandler.json CELO/packages/contracts-bedrock/snapshots/storageLayout/FeeHandler.json new file mode 100644 -index 0000000000000000000000000000000000000000..693b960cea99c0c7b6d9ab45341f3f56eda21853 +index 0000000000000000000000000000000000000000..468bb7dc389218cc2a62ad57d94c340b31fa5a30 --- /dev/null -+++ CELO/packages/contracts-bedrock/snapshots/abi/StableTokenV2.json -@@ -0,0 +1,742 @@ ++++ CELO/packages/contracts-bedrock/snapshots/storageLayout/FeeHandler.json +@@ -0,0 +1,72 @@ +[ + { -+ "inputs": [ -+ { -+ "internalType": "bool", -+ "name": "disable", -+ "type": "bool" -+ } -+ ], -+ "stateMutability": "nonpayable", -+ "type": "constructor" -+ }, -+ { -+ "inputs": [], -+ "name": "DOMAIN_SEPARATOR", -+ "outputs": [ -+ { -+ "internalType": "bytes32", -+ "name": "", -+ "type": "bytes32" -+ } -+ ], -+ "stateMutability": "view", -+ "type": "function" -+ }, -+ { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "owner", -+ "type": "address" -+ }, -+ { -+ "internalType": "address", -+ "name": "spender", -+ "type": "address" -+ } -+ ], -+ "name": "allowance", -+ "outputs": [ -+ { -+ "internalType": "uint256", -+ "name": "", -+ "type": "uint256" -+ } -+ ], -+ "stateMutability": "view", -+ "type": "function" -+ }, -+ { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "spender", -+ "type": "address" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "amount", -+ "type": "uint256" -+ } -+ ], -+ "name": "approve", -+ "outputs": [ -+ { -+ "internalType": "bool", -+ "name": "", -+ "type": "bool" -+ } -+ ], -+ "stateMutability": "nonpayable", -+ "type": "function" -+ }, -+ { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "account", -+ "type": "address" -+ } -+ ], -+ "name": "balanceOf", -+ "outputs": [ -+ { -+ "internalType": "uint256", -+ "name": "", -+ "type": "uint256" -+ } -+ ], -+ "stateMutability": "view", -+ "type": "function" -+ }, -+ { -+ "inputs": [], -+ "name": "broker", -+ "outputs": [ -+ { -+ "internalType": "address", -+ "name": "", -+ "type": "address" -+ } -+ ], -+ "stateMutability": "view", -+ "type": "function" -+ }, -+ { -+ "inputs": [ -+ { -+ "internalType": "uint256", -+ "name": "value", -+ "type": "uint256" -+ } -+ ], -+ "name": "burn", -+ "outputs": [ -+ { -+ "internalType": "bool", -+ "name": "", -+ "type": "bool" -+ } -+ ], -+ "stateMutability": "nonpayable", -+ "type": "function" ++ "bytes": "20", ++ "label": "_owner", ++ "offset": 0, ++ "slot": "0", ++ "type": "address" + }, + { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "from", -+ "type": "address" -+ }, -+ { -+ "internalType": "address", -+ "name": "feeRecipient", -+ "type": "address" -+ }, -+ { -+ "internalType": "address", -+ "name": "gatewayFeeRecipient", -+ "type": "address" -+ }, -+ { -+ "internalType": "address", -+ "name": "communityFund", -+ "type": "address" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "refund", -+ "type": "uint256" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "tipTxFee", -+ "type": "uint256" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "gatewayFee", -+ "type": "uint256" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "baseTxFee", -+ "type": "uint256" -+ } -+ ], -+ "name": "creditGasFees", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" ++ "bytes": "1", ++ "label": "initialized", ++ "offset": 20, ++ "slot": "0", ++ "type": "bool" + }, -+ { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "from", -+ "type": 
"address" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "value", -+ "type": "uint256" -+ } -+ ], -+ "name": "debitGasFees", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" ++ { ++ "bytes": "20", ++ "label": "registry", ++ "offset": 0, ++ "slot": "1", ++ "type": "contract ICeloRegistry" + }, + { -+ "inputs": [], -+ "name": "decimals", -+ "outputs": [ -+ { -+ "internalType": "uint8", -+ "name": "", -+ "type": "uint8" -+ } -+ ], -+ "stateMutability": "view", -+ "type": "function" ++ "bytes": "32", ++ "label": "_status", ++ "offset": 0, ++ "slot": "2", ++ "type": "uint256" + }, + { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "spender", -+ "type": "address" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "subtractedValue", -+ "type": "uint256" -+ } -+ ], -+ "name": "decreaseAllowance", -+ "outputs": [ -+ { -+ "internalType": "bool", -+ "name": "", -+ "type": "bool" -+ } -+ ], -+ "stateMutability": "nonpayable", -+ "type": "function" ++ "bytes": "32", ++ "label": "lastLimitDay", ++ "offset": 0, ++ "slot": "3", ++ "type": "uint256" + }, + { -+ "inputs": [], -+ "name": "exchange", -+ "outputs": [ -+ { -+ "internalType": "address", -+ "name": "", -+ "type": "address" -+ } -+ ], -+ "stateMutability": "view", -+ "type": "function" ++ "bytes": "32", ++ "label": "burnFraction", ++ "offset": 0, ++ "slot": "4", ++ "type": "struct FixidityLib.Fraction" + }, + { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "spender", -+ "type": "address" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "addedValue", -+ "type": "uint256" -+ } -+ ], -+ "name": "increaseAllowance", -+ "outputs": [ -+ { -+ "internalType": "bool", -+ "name": "", -+ "type": "bool" -+ } -+ ], -+ "stateMutability": "nonpayable", -+ "type": "function" ++ "bytes": "20", ++ "label": "feeBeneficiary", ++ "offset": 0, ++ "slot": "5", ++ "type": "address" + }, + { -+ "inputs": [ -+ { -+ "internalType": "string", -+ "name": "_name", -+ "type": "string" -+ }, -+ { -+ "internalType": "string", -+ "name": "_symbol", -+ "type": "string" -+ }, -+ { -+ "internalType": "address[]", -+ "name": "initialBalanceAddresses", -+ "type": "address[]" -+ }, -+ { -+ "internalType": "uint256[]", -+ "name": "initialBalanceValues", -+ "type": "uint256[]" -+ } -+ ], -+ "name": "initialize", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" ++ "bytes": "32", ++ "label": "celoToBeBurned", ++ "offset": 0, ++ "slot": "6", ++ "type": "uint256" + }, + { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "_broker", -+ "type": "address" -+ }, -+ { -+ "internalType": "address", -+ "name": "_validators", -+ "type": "address" -+ }, -+ { -+ "internalType": "address", -+ "name": "_exchange", -+ "type": "address" -+ } -+ ], -+ "name": "initializeV2", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" ++ "bytes": "32", ++ "label": "tokenStates", ++ "offset": 0, ++ "slot": "7", ++ "type": "mapping(address => struct FeeHandler.TokenState)" + }, + { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "to", -+ "type": "address" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "value", -+ "type": "uint256" -+ } -+ ], -+ "name": "mint", -+ "outputs": [ -+ { -+ "internalType": "bool", -+ "name": "", -+ "type": "bool" -+ } -+ ], -+ "stateMutability": "nonpayable", -+ "type": "function" ++ "bytes": "64", ++ "label": "activeTokens", ++ "offset": 0, ++ "slot": "8", ++ "type": "struct EnumerableSet.AddressSet" ++ } ++] +\ No newline at end of file
[diff header: new file in CELO, +15 / -0]
diff --git OP/packages/contracts-bedrock/snapshots/storageLayout/Freezable.json CELO/packages/contracts-bedrock/snapshots/storageLayout/Freezable.json +new file mode 100644 +index 0000000000000000000000000000000000000000..fb89bbc7e1ab3904137e39358de306a828c60dac +--- /dev/null ++++ CELO/packages/contracts-bedrock/snapshots/storageLayout/Freezable.json +@@ -0,0 +1,16 @@ ++[ ++ { ++ "bytes": "20", ++ "label": "_owner", ++ "offset": 0, ++ "slot": "0", ++ "type": "address" + }, + { -+ "inputs": [], -+ "name": "name", -+ "outputs": [ -+ { -+ "internalType": "string", -+ "name": "", -+ "type": "string" -+ } -+ ], -+ "stateMutability": "view", -+ "type": "function" ++ "bytes": "20", ++ "label": "registry", ++ "offset": 0, ++ "slot": "1", ++ "type": "contract ICeloRegistry" ++ } ++] +\ No newline at end of file
[diff header: new file in CELO, +36 / -0]
diff --git OP/packages/contracts-bedrock/snapshots/storageLayout/GoldToken.json CELO/packages/contracts-bedrock/snapshots/storageLayout/GoldToken.json +new file mode 100644 +index 0000000000000000000000000000000000000000..67b349856d86cdaab5dd67f9e9e413210d44ce63 +--- /dev/null ++++ CELO/packages/contracts-bedrock/snapshots/storageLayout/GoldToken.json +@@ -0,0 +1,37 @@ ++[ ++ { ++ "bytes": "1", ++ "label": "initialized", ++ "offset": 0, ++ "slot": "0", ++ "type": "bool" + }, + { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "owner", -+ "type": "address" -+ } -+ ], -+ "name": "nonces", -+ "outputs": [ -+ { -+ "internalType": "uint256", -+ "name": "", -+ "type": "uint256" -+ } -+ ], -+ "stateMutability": "view", -+ "type": "function" ++ "bytes": "20", ++ "label": "_owner", ++ "offset": 1, ++ "slot": "0", ++ "type": "address" + }, + { -+ "inputs": [], -+ "name": "owner", -+ "outputs": [ -+ { -+ "internalType": "address", -+ "name": "", -+ "type": "address" -+ } -+ ], -+ "stateMutability": "view", -+ "type": "function" ++ "bytes": "20", ++ "label": "registry", ++ "offset": 0, ++ "slot": "1", ++ "type": "contract ICeloRegistry" + }, + { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "owner", -+ "type": "address" -+ }, -+ { -+ "internalType": "address", -+ "name": "spender", -+ "type": "address" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "value", -+ "type": "uint256" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "deadline", -+ "type": "uint256" -+ }, -+ { -+ "internalType": "uint8", -+ "name": "v", -+ "type": "uint8" -+ }, -+ { -+ "internalType": "bytes32", -+ "name": "r", -+ "type": "bytes32" -+ }, -+ { -+ "internalType": "bytes32", -+ "name": "s", -+ "type": "bytes32" -+ } -+ ], -+ "name": "permit", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" ++ "bytes": "32", ++ "label": "totalSupply_", ++ "offset": 0, ++ "slot": "2", ++ "type": "uint256" + }, + { -+ "inputs": [], -+ "name": "renounceOwnership", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" -+ }, ++ "bytes": "32", ++ "label": "allowed", ++ "offset": 0, ++ "slot": "3", ++ "type": "mapping(address => mapping(address => uint256))" ++ } ++] +\ No newline at end of file
[diff header: new file in CELO, +8 / -0]
diff --git OP/packages/contracts-bedrock/snapshots/storageLayout/Initializable.json CELO/packages/contracts-bedrock/snapshots/storageLayout/Initializable.json +new file mode 100644 +index 0000000000000000000000000000000000000000..b29972a4de8eb134c79b8e19e36619de89bfeb4b +--- /dev/null ++++ CELO/packages/contracts-bedrock/snapshots/storageLayout/Initializable.json +@@ -0,0 +1,9 @@ ++[ + { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "_broker", -+ "type": "address" -+ } -+ ], -+ "name": "setBroker", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" -+ }, ++ "bytes": "1", ++ "label": "initialized", ++ "offset": 0, ++ "slot": "0", ++ "type": "bool" ++ } ++] +\ No newline at end of file
[(new) CELO: +29 / -0]
diff --git OP/packages/contracts-bedrock/snapshots/storageLayout/MentoFeeHandlerSeller.json CELO/packages/contracts-bedrock/snapshots/storageLayout/MentoFeeHandlerSeller.json +new file mode 100644 +index 0000000000000000000000000000000000000000..a66c44056e6d0350f83d4ee520bafeda4d5c2a58 +--- /dev/null ++++ CELO/packages/contracts-bedrock/snapshots/storageLayout/MentoFeeHandlerSeller.json +@@ -0,0 +1,30 @@ ++[ + { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "_exchange", -+ "type": "address" -+ } -+ ], -+ "name": "setExchange", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" ++ "bytes": "20", ++ "label": "_owner", ++ "offset": 0, ++ "slot": "0", ++ "type": "address" + }, + { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "_validators", -+ "type": "address" -+ } -+ ], -+ "name": "setValidators", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" ++ "bytes": "1", ++ "label": "initialized", ++ "offset": 20, ++ "slot": "0", ++ "type": "bool" + }, + { -+ "inputs": [], -+ "name": "symbol", -+ "outputs": [ -+ { -+ "internalType": "string", -+ "name": "", -+ "type": "string" -+ } -+ ], -+ "stateMutability": "view", -+ "type": "function" ++ "bytes": "20", ++ "label": "registry", ++ "offset": 0, ++ "slot": "1", ++ "type": "contract ICeloRegistry" + }, + { -+ "inputs": [], -+ "name": "totalSupply", -+ "outputs": [ -+ { -+ "internalType": "uint256", -+ "name": "", -+ "type": "uint256" -+ } -+ ], -+ "stateMutability": "view", -+ "type": "function" -+ }, ++ "bytes": "32", ++ "label": "minimumReports", ++ "offset": 0, ++ "slot": "2", ++ "type": "mapping(address => uint256)" ++ } ++] +\ No newline at end of file
[(new) CELO: +29 / -0]
diff --git OP/packages/contracts-bedrock/snapshots/storageLayout/MockSortedOracles.json CELO/packages/contracts-bedrock/snapshots/storageLayout/MockSortedOracles.json +new file mode 100644 +index 0000000000000000000000000000000000000000..c44ef116af9505417a194688daf746a4c58cdcff +--- /dev/null ++++ CELO/packages/contracts-bedrock/snapshots/storageLayout/MockSortedOracles.json +@@ -0,0 +1,30 @@ ++[ + { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "to", -+ "type": "address" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "amount", -+ "type": "uint256" -+ } -+ ], -+ "name": "transfer", -+ "outputs": [ -+ { -+ "internalType": "bool", -+ "name": "", -+ "type": "bool" -+ } -+ ], -+ "stateMutability": "nonpayable", -+ "type": "function" ++ "bytes": "32", ++ "label": "numerators", ++ "offset": 0, ++ "slot": "0", ++ "type": "mapping(address => uint256)" + }, + { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "from", -+ "type": "address" -+ }, -+ { -+ "internalType": "address", -+ "name": "to", -+ "type": "address" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "amount", -+ "type": "uint256" -+ } -+ ], -+ "name": "transferFrom", -+ "outputs": [ -+ { -+ "internalType": "bool", -+ "name": "", -+ "type": "bool" -+ } -+ ], -+ "stateMutability": "nonpayable", -+ "type": "function" ++ "bytes": "32", ++ "label": "medianTimestamp", ++ "offset": 0, ++ "slot": "1", ++ "type": "mapping(address => uint256)" + }, + { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "newOwner", -+ "type": "address" -+ } -+ ], -+ "name": "transferOwnership", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" ++ "bytes": "32", ++ "label": "numRates", ++ "offset": 0, ++ "slot": "2", ++ "type": "mapping(address => uint256)" + }, + { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "to", -+ "type": "address" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "value", -+ "type": "uint256" -+ }, -+ { -+ "internalType": "string", -+ "name": "comment", -+ "type": "string" -+ } -+ ], -+ "name": "transferWithComment", -+ "outputs": [ -+ { -+ "internalType": "bool", -+ "name": "", -+ "type": "bool" -+ } -+ ], -+ "stateMutability": "nonpayable", -+ "type": "function" ++ "bytes": "32", ++ "label": "expired", ++ "offset": 0, ++ "slot": "3", ++ "type": "mapping(address => bool)" ++ } ++] +\ No newline at end of file
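All four variables in this layout are mappings, and each consumes exactly one "marker" slot no matter how many entries it holds; an entry's real location is derived from the key and that marker slot. A small sketch of the standard Solidity layout rule (general EVM convention, not fork-specific code):

// SPDX-License-Identifier: MIT
pragma solidity ^0.8.15;

// For a mapping occupying marker slot `s`, the value for `key` lives at
// keccak256(abi.encode(key, s)).
library MappingSlotSketch {
    // e.g. numerators[token] from the MockSortedOracles layout (marker slot 0)
    function numeratorSlot(address token) internal pure returns (bytes32) {
        return keccak256(abi.encode(token, uint256(0)));
    }
}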
[(new) CELO: +71 / -0]
diff --git OP/packages/contracts-bedrock/snapshots/storageLayout/SortedOracles.json CELO/packages/contracts-bedrock/snapshots/storageLayout/SortedOracles.json +new file mode 100644 +index 0000000000000000000000000000000000000000..e1e5e1736aff6530fc2e9dbeeecc4a4c9a316365 +--- /dev/null ++++ CELO/packages/contracts-bedrock/snapshots/storageLayout/SortedOracles.json +@@ -0,0 +1,72 @@ ++[ ++ { ++ "bytes": "20", ++ "label": "_owner", ++ "offset": 0, ++ "slot": "0", ++ "type": "address" + }, + { -+ "inputs": [], -+ "name": "validators", -+ "outputs": [ -+ { -+ "internalType": "address", -+ "name": "", -+ "type": "address" -+ } -+ ], -+ "stateMutability": "view", -+ "type": "function" ++ "bytes": "1", ++ "label": "initialized", ++ "offset": 20, ++ "slot": "0", ++ "type": "bool" + }, + { -+ "anonymous": false, -+ "inputs": [ -+ { -+ "indexed": true, -+ "internalType": "address", -+ "name": "owner", -+ "type": "address" -+ }, -+ { -+ "indexed": true, -+ "internalType": "address", -+ "name": "spender", -+ "type": "address" -+ }, -+ { -+ "indexed": false, -+ "internalType": "uint256", -+ "name": "value", -+ "type": "uint256" -+ } -+ ], -+ "name": "Approval", -+ "type": "event" ++ "bytes": "32", ++ "label": "rates", ++ "offset": 0, ++ "slot": "1", ++ "type": "mapping(address => struct SortedLinkedListWithMedian.List)" + }, + { -+ "anonymous": false, -+ "inputs": [ -+ { -+ "indexed": false, -+ "internalType": "address", -+ "name": "broker", -+ "type": "address" -+ } -+ ], -+ "name": "BrokerUpdated", -+ "type": "event" ++ "bytes": "32", ++ "label": "timestamps", ++ "offset": 0, ++ "slot": "2", ++ "type": "mapping(address => struct SortedLinkedListWithMedian.List)" + }, + { -+ "anonymous": false, -+ "inputs": [ -+ { -+ "indexed": false, -+ "internalType": "address", -+ "name": "exchange", -+ "type": "address" -+ } -+ ], -+ "name": "ExchangeUpdated", -+ "type": "event" ++ "bytes": "32", ++ "label": "isOracle", ++ "offset": 0, ++ "slot": "3", ++ "type": "mapping(address => mapping(address => bool))" + }, + { -+ "anonymous": false, -+ "inputs": [ -+ { -+ "indexed": false, -+ "internalType": "uint8", -+ "name": "version", -+ "type": "uint8" -+ } -+ ], -+ "name": "Initialized", -+ "type": "event" ++ "bytes": "32", ++ "label": "oracles", ++ "offset": 0, ++ "slot": "4", ++ "type": "mapping(address => address[])" + }, + { -+ "anonymous": false, -+ "inputs": [ -+ { -+ "indexed": true, -+ "internalType": "address", -+ "name": "previousOwner", -+ "type": "address" -+ }, -+ { -+ "indexed": true, -+ "internalType": "address", -+ "name": "newOwner", -+ "type": "address" -+ } -+ ], -+ "name": "OwnershipTransferred", -+ "type": "event" ++ "bytes": "32", ++ "label": "reportExpirySeconds", ++ "offset": 0, ++ "slot": "5", ++ "type": "uint256" + }, + { -+ "anonymous": false, -+ "inputs": [ -+ { -+ "indexed": true, -+ "internalType": "address", -+ "name": "from", -+ "type": "address" -+ }, -+ { -+ "indexed": true, -+ "internalType": "address", -+ "name": "to", -+ "type": "address" -+ }, -+ { -+ "indexed": false, -+ "internalType": "uint256", -+ "name": "value", -+ "type": "uint256" -+ } -+ ], -+ "name": "Transfer", -+ "type": "event" ++ "bytes": "32", ++ "label": "tokenReportExpirySeconds", ++ "offset": 0, ++ "slot": "6", ++ "type": "mapping(address => uint256)" + }, + { -+ "anonymous": false, -+ "inputs": [ -+ { -+ "indexed": false, -+ "internalType": "string", -+ "name": "comment", -+ "type": "string" -+ } -+ ], -+ "name": "TransferComment", -+ "type": "event" ++ "bytes": "20", ++ "label": "breakerBox", ++ "offset": 0, ++ 
"slot": "7", ++ "type": "contract IBreakerBox" + }, + { -+ "anonymous": false, -+ "inputs": [ -+ { -+ "indexed": false, -+ "internalType": "address", -+ "name": "validators", -+ "type": "address" -+ } -+ ], -+ "name": "ValidatorsUpdated", -+ "type": "event" ++ "bytes": "32", ++ "label": "equivalentTokens", ++ "offset": 0, ++ "slot": "8", ++ "type": "mapping(address => struct SortedOracles.EquivalentToken)" + } +] \ No newline at end of file
@@ -6439,9 +9430,9 @@
-
diff --git OP/packages/contracts-bedrock/snapshots/abi/UniswapFeeHandlerSeller.json CELO/packages/contracts-bedrock/snapshots/abi/UniswapFeeHandlerSeller.json +
diff --git OP/packages/contracts-bedrock/snapshots/storageLayout/StableTokenV2.json CELO/packages/contracts-bedrock/snapshots/storageLayout/StableTokenV2.json new file mode 100644 -index 0000000000000000000000000000000000000000..19c31c979af28ecd9ffcfcda9095f993e30b3ed1 +index 0000000000000000000000000000000000000000..eea3cafe6e9025cb532486b1e9ff84f4246310ec --- /dev/null -+++ CELO/packages/contracts-bedrock/snapshots/abi/UniswapFeeHandlerSeller.json -@@ -0,0 +1,481 @@ ++++ CELO/packages/contracts-bedrock/snapshots/storageLayout/StableTokenV2.json +@@ -0,0 +1,142 @@ +[ + { -+ "inputs": [ -+ { -+ "internalType": "bool", -+ "name": "test", -+ "type": "bool" -+ } -+ ], -+ "stateMutability": "nonpayable", -+ "type": "constructor" -+ }, -+ { -+ "stateMutability": "payable", -+ "type": "receive" -+ }, -+ { -+ "inputs": [ -+ { -+ "internalType": "uint256", -+ "name": "midPriceNumerator", -+ "type": "uint256" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "midPriceDenominator", -+ "type": "uint256" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "amount", -+ "type": "uint256" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "maxSlippage", -+ "type": "uint256" -+ } -+ ], -+ "name": "calculateMinAmount", -+ "outputs": [ -+ { -+ "internalType": "uint256", -+ "name": "", -+ "type": "uint256" -+ } -+ ], -+ "stateMutability": "pure", -+ "type": "function" ++ "bytes": "1", ++ "label": "_initialized", ++ "offset": 0, ++ "slot": "0", ++ "type": "uint8" + }, -+ { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "token", -+ "type": "address" -+ } -+ ], -+ "name": "getRoutersForToken", -+ "outputs": [ -+ { -+ "internalType": "address[]", -+ "name": "", -+ "type": "address[]" -+ } -+ ], -+ "stateMutability": "view", -+ "type": "function" ++ { ++ "bytes": "1", ++ "label": "_initializing", ++ "offset": 1, ++ "slot": "0", ++ "type": "bool" + }, + { -+ "inputs": [], -+ "name": "getVersionNumber", -+ "outputs": [ -+ { -+ "internalType": "uint256", -+ "name": "", -+ "type": "uint256" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "", -+ "type": "uint256" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "", -+ "type": "uint256" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "", -+ "type": "uint256" -+ } -+ ], -+ "stateMutability": "pure", -+ "type": "function" ++ "bytes": "1600", ++ "label": "__gap", ++ "offset": 0, ++ "slot": "1", ++ "type": "uint256[50]" + }, + { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "_registryAddress", -+ "type": "address" -+ }, -+ { -+ "internalType": "address[]", -+ "name": "tokenAddresses", -+ "type": "address[]" -+ }, -+ { -+ "internalType": "uint256[]", -+ "name": "newMininumReports", -+ "type": "uint256[]" -+ } -+ ], -+ "name": "initialize", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" ++ "bytes": "32", ++ "label": "_balances", ++ "offset": 0, ++ "slot": "51", ++ "type": "mapping(address => uint256)" + }, + { -+ "inputs": [], -+ "name": "initialized", -+ "outputs": [ -+ { -+ "internalType": "bool", -+ "name": "", -+ "type": "bool" -+ } -+ ], -+ "stateMutability": "view", -+ "type": "function" ++ "bytes": "32", ++ "label": "_allowances", ++ "offset": 0, ++ "slot": "52", ++ "type": "mapping(address => mapping(address => uint256))" + }, + { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "", -+ "type": "address" -+ } -+ ], -+ "name": "minimumReports", -+ "outputs": [ -+ { -+ "internalType": "uint256", -+ "name": "", -+ "type": "uint256" -+ } -+ ], -+ "stateMutability": "view", -+ "type": 
"function" ++ "bytes": "32", ++ "label": "_totalSupply", ++ "offset": 0, ++ "slot": "53", ++ "type": "uint256" + }, + { -+ "inputs": [], -+ "name": "owner", -+ "outputs": [ -+ { -+ "internalType": "address", -+ "name": "", -+ "type": "address" -+ } -+ ], -+ "stateMutability": "view", -+ "type": "function" ++ "bytes": "32", ++ "label": "_name", ++ "offset": 0, ++ "slot": "54", ++ "type": "string" + }, + { -+ "inputs": [], -+ "name": "registry", -+ "outputs": [ -+ { -+ "internalType": "contract ICeloRegistry", -+ "name": "", -+ "type": "address" -+ } -+ ], -+ "stateMutability": "view", -+ "type": "function" ++ "bytes": "32", ++ "label": "_symbol", ++ "offset": 0, ++ "slot": "55", ++ "type": "string" + }, + { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "token", -+ "type": "address" -+ }, -+ { -+ "internalType": "address", -+ "name": "router", -+ "type": "address" -+ } -+ ], -+ "name": "removeRouter", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" ++ "bytes": "1440", ++ "label": "__gap", ++ "offset": 0, ++ "slot": "56", ++ "type": "uint256[45]" + }, + { -+ "inputs": [], -+ "name": "renounceOwnership", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" ++ "bytes": "32", ++ "label": "_HASHED_NAME", ++ "offset": 0, ++ "slot": "101", ++ "type": "bytes32" + }, + { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "sellTokenAddress", -+ "type": "address" -+ }, -+ { -+ "internalType": "address", -+ "name": "buyTokenAddress", -+ "type": "address" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "amount", -+ "type": "uint256" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "maxSlippage", -+ "type": "uint256" -+ } -+ ], -+ "name": "sell", -+ "outputs": [ -+ { -+ "internalType": "uint256", -+ "name": "", -+ "type": "uint256" -+ } -+ ], -+ "stateMutability": "nonpayable", -+ "type": "function" ++ "bytes": "32", ++ "label": "_HASHED_VERSION", ++ "offset": 0, ++ "slot": "102", ++ "type": "bytes32" + }, + { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "tokenAddress", -+ "type": "address" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "newMininumReports", -+ "type": "uint256" -+ } -+ ], -+ "name": "setMinimumReports", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" ++ "bytes": "1600", ++ "label": "__gap", ++ "offset": 0, ++ "slot": "103", ++ "type": "uint256[50]" + }, + { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "registryAddress", -+ "type": "address" -+ } -+ ], -+ "name": "setRegistry", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" ++ "bytes": "32", ++ "label": "_nonces", ++ "offset": 0, ++ "slot": "153", ++ "type": "mapping(address => struct CountersUpgradeable.Counter)" + }, + { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "token", -+ "type": "address" -+ }, -+ { -+ "internalType": "address", -+ "name": "router", -+ "type": "address" -+ } -+ ], -+ "name": "setRouter", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" ++ "bytes": "32", ++ "label": "_PERMIT_TYPEHASH_DEPRECATED_SLOT", ++ "offset": 0, ++ "slot": "154", ++ "type": "bytes32" + }, + { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "token", -+ "type": "address" -+ }, -+ { -+ "internalType": "uint256", -+ "name": "amount", -+ "type": "uint256" -+ }, -+ { -+ "internalType": "address", -+ "name": "to", -+ "type": "address" -+ } -+ ], -+ "name": "transfer", -+ "outputs": [ -+ { -+ "internalType": "bool", -+ 
"name": "", -+ "type": "bool" -+ } -+ ], -+ "stateMutability": "nonpayable", -+ "type": "function" ++ "bytes": "1568", ++ "label": "__gap", ++ "offset": 0, ++ "slot": "155", ++ "type": "uint256[49]" + }, + { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "newOwner", -+ "type": "address" -+ } -+ ], -+ "name": "transferOwnership", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" ++ "bytes": "20", ++ "label": "_owner", ++ "offset": 0, ++ "slot": "204", ++ "type": "address" + }, + { -+ "anonymous": false, -+ "inputs": [ -+ { -+ "indexed": false, -+ "internalType": "address", -+ "name": "tokenAddress", -+ "type": "address" -+ }, -+ { -+ "indexed": false, -+ "internalType": "uint256", -+ "name": "minimumReports", -+ "type": "uint256" -+ } -+ ], -+ "name": "MinimumReportsSet", -+ "type": "event" ++ "bytes": "1568", ++ "label": "__gap", ++ "offset": 0, ++ "slot": "205", ++ "type": "uint256[49]" + }, + { -+ "anonymous": false, -+ "inputs": [ -+ { -+ "indexed": true, -+ "internalType": "address", -+ "name": "previousOwner", -+ "type": "address" -+ }, -+ { -+ "indexed": true, -+ "internalType": "address", -+ "name": "newOwner", -+ "type": "address" -+ } -+ ], -+ "name": "OwnershipTransferred", -+ "type": "event" ++ "bytes": "20", ++ "label": "validators", ++ "offset": 0, ++ "slot": "254", ++ "type": "address" + }, + { -+ "anonymous": false, -+ "inputs": [ -+ { -+ "indexed": true, -+ "internalType": "address", -+ "name": "tokneAddress", -+ "type": "address" -+ }, -+ { -+ "indexed": true, -+ "internalType": "address", -+ "name": "router", -+ "type": "address" -+ }, -+ { -+ "indexed": false, -+ "internalType": "uint256", -+ "name": "quote", -+ "type": "uint256" -+ } -+ ], -+ "name": "ReceivedQuote", -+ "type": "event" ++ "bytes": "20", ++ "label": "broker", ++ "offset": 0, ++ "slot": "255", ++ "type": "address" + }, + { -+ "anonymous": false, -+ "inputs": [ -+ { -+ "indexed": true, -+ "internalType": "address", -+ "name": "registryAddress", -+ "type": "address" -+ } -+ ], -+ "name": "RegistrySet", -+ "type": "event" ++ "bytes": "20", ++ "label": "exchange", ++ "offset": 0, ++ "slot": "256", ++ "type": "address" ++ } ++] +\ No newline at end of file
[(new) CELO: +36 / -0]
diff --git OP/packages/contracts-bedrock/snapshots/storageLayout/UniswapFeeHandlerSeller.json CELO/packages/contracts-bedrock/snapshots/storageLayout/UniswapFeeHandlerSeller.json +new file mode 100644 +index 0000000000000000000000000000000000000000..3688a3204dec12dbace7b35435f8d85cb1c9acb3 +--- /dev/null ++++ CELO/packages/contracts-bedrock/snapshots/storageLayout/UniswapFeeHandlerSeller.json +@@ -0,0 +1,37 @@ ++[ ++ { ++ "bytes": "20", ++ "label": "_owner", ++ "offset": 0, ++ "slot": "0", ++ "type": "address" + }, + { -+ "anonymous": false, -+ "inputs": [ -+ { -+ "indexed": false, -+ "internalType": "address", -+ "name": "token", -+ "type": "address" -+ }, -+ { -+ "indexed": false, -+ "internalType": "address", -+ "name": "router", -+ "type": "address" -+ } -+ ], -+ "name": "RouterAddressRemoved", -+ "type": "event" ++ "bytes": "1", ++ "label": "initialized", ++ "offset": 20, ++ "slot": "0", ++ "type": "bool" + }, + { -+ "anonymous": false, -+ "inputs": [ -+ { -+ "indexed": false, -+ "internalType": "address", -+ "name": "token", -+ "type": "address" -+ }, -+ { -+ "indexed": false, -+ "internalType": "address", -+ "name": "router", -+ "type": "address" -+ } -+ ], -+ "name": "RouterAddressSet", -+ "type": "event" ++ "bytes": "20", ++ "label": "registry", ++ "offset": 0, ++ "slot": "1", ++ "type": "contract ICeloRegistry" ++ }, ++ { ++ "bytes": "32", ++ "label": "minimumReports", ++ "offset": 0, ++ "slot": "2", ++ "type": "mapping(address => uint256)" + }, + { -+ "anonymous": false, -+ "inputs": [ -+ { -+ "indexed": false, -+ "internalType": "address", -+ "name": "router", -+ "type": "address" -+ } -+ ], -+ "name": "RouterUsed", -+ "type": "event" ++ "bytes": "32", ++ "label": "routerAddresses", ++ "offset": 0, ++ "slot": "3", ++ "type": "mapping(address => struct EnumerableSet.AddressSet)" ++ } ++] +\ No newline at end of file
[(new) CELO: +15 / -0]
diff --git OP/packages/contracts-bedrock/snapshots/storageLayout/UsingRegistry.json CELO/packages/contracts-bedrock/snapshots/storageLayout/UsingRegistry.json +new file mode 100644 +index 0000000000000000000000000000000000000000..fb89bbc7e1ab3904137e39358de306a828c60dac +--- /dev/null ++++ CELO/packages/contracts-bedrock/snapshots/storageLayout/UsingRegistry.json +@@ -0,0 +1,16 @@ ++[ ++ { ++ "bytes": "20", ++ "label": "_owner", ++ "offset": 0, ++ "slot": "0", ++ "type": "address" + }, + { -+ "anonymous": false, -+ "inputs": [ -+ { -+ "indexed": false, -+ "internalType": "address", -+ "name": "soldTokenAddress", -+ "type": "address" -+ }, -+ { -+ "indexed": false, -+ "internalType": "address", -+ "name": "boughtTokenAddress", -+ "type": "address" -+ }, -+ { -+ "indexed": false, -+ "internalType": "uint256", -+ "name": "amount", -+ "type": "uint256" -+ } -+ ], -+ "name": "TokenSold", -+ "type": "event" ++ "bytes": "20", ++ "label": "registry", ++ "offset": 0, ++ "slot": "1", ++ "type": "contract ICeloRegistry" ++ } ++] +\ No newline at end of file
[OP → CELO: +4 / -22]
diff --git OP/packages/contracts-bedrock/src/L2/CrossL2Inbox.sol CELO/packages/contracts-bedrock/src/L2/CrossL2Inbox.sol +index 211fe76729c479b01477041d710ebbbc04e1c2d0..476c3e12e4b3bebc7d8da7ef888fbfc49b7b252c 100644 +--- OP/packages/contracts-bedrock/src/L2/CrossL2Inbox.sol ++++ CELO/packages/contracts-bedrock/src/L2/CrossL2Inbox.sol +@@ -5,6 +5,7 @@ import { Predeploys } from "src/libraries/Predeploys.sol"; + import { TransientContext, TransientReentrancyAware } from "src/libraries/TransientContext.sol"; + import { ISemver } from "src/universal/ISemver.sol"; + import { ICrossL2Inbox } from "src/L2/ICrossL2Inbox.sol"; ++import { SafeCall } from "src/libraries/SafeCall.sol"; +  + /// @title IDependencySet + /// @notice Interface for L1Block with only `isInDependencySet(uint256)` method. +@@ -55,8 +56,8 @@ /// Equal to bytes32(uint256(keccak256("crossl2inbox.identifier.chainid")) - 1) + bytes32 internal constant CHAINID_SLOT = 0x6e0446e8b5098b8c8193f964f1b567ec3a2bdaeba33d36acb85c1f1d3f92d313; +  + /// @notice Semantic version. +- /// @custom:semver 0.1.0 +- string public constant version = "0.1.0"; ++ /// @custom:semver 1.0.0-beta.1 ++ string public constant version = "1.0.0-beta.1"; +  + /// @notice Emitted when a cross chain message is being executed. + /// @param encodedId Encoded Identifier of the message. +@@ -122,7 +123,7 @@ // Store the Identifier in transient storage. + _storeIdentifier(_id); +  + // Call the target account with the message payload. +- bool success = _callWithAllGas(_target, _message); ++ bool success = SafeCall.call(_target, msg.value, _message); +  + // Revert if the target call failed. + if (!success) revert TargetCallFailed(); +@@ -138,24 +139,5 @@ TransientContext.set(BLOCK_NUMBER_SLOT, _id.blockNumber); + TransientContext.set(LOG_INDEX_SLOT, _id.logIndex); + TransientContext.set(TIMESTAMP_SLOT, _id.timestamp); + TransientContext.set(CHAINID_SLOT, _id.chainId); +- } +- +- /// @notice Calls the target address with the message payload and all available gas. +- /// @param _target Target address to call. +- /// @param _message Message payload to call target with. +- /// @return _success True if the call was successful, and false otherwise. +- function _callWithAllGas(address _target, bytes memory _message) internal returns (bool _success) { +- assembly { +- _success := +- call( +- gas(), // gas +- _target, // recipient +- callvalue(), // ether value +- add(_message, 32), // inloc +- mload(_message), // inlen +- 0, // outloc +- 0 // outlen +- ) +- } + } + }
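The substantive change here swaps the contract's private `_callWithAllGas` assembly helper for the shared SafeCall library, passing `msg.value` explicitly instead of reading `callvalue()` inside assembly; the observable behavior (forward all remaining gas and value, ignore returndata) stays the same. A minimal sketch of those call semantics, assumed to mirror SafeCall's three-argument form:

// SPDX-License-Identifier: MIT
pragma solidity ^0.8.15;

// Sketch of "call with all gas, ignore returndata" semantics; assumption:
// SafeCall.call(target, value, data) behaves equivalently.
library SafeCallSketch {
    function call(address _target, uint256 _value, bytes memory _data) internal returns (bool success_) {
        assembly {
            success_ :=
                call(
                    gas(),          // forward all remaining gas
                    _target,        // recipient
                    _value,         // ether value (msg.value at the call site)
                    add(_data, 32), // input start (skip the length word)
                    mload(_data),   // input length
                    0,              // no output copied
                    0
                )
        }
    }
}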
[OP → CELO: +4 / -22]
diff --git OP/packages/contracts-bedrock/src/L2/L2ToL2CrossDomainMessenger.sol CELO/packages/contracts-bedrock/src/L2/L2ToL2CrossDomainMessenger.sol +index fe851c5523c857551288af95861a1c2de7693e1c..35ccf60ac6d37dbcd812141b7958fdaf148aee91 100644 +--- OP/packages/contracts-bedrock/src/L2/L2ToL2CrossDomainMessenger.sol ++++ CELO/packages/contracts-bedrock/src/L2/L2ToL2CrossDomainMessenger.sol +@@ -6,6 +6,7 @@ import { Predeploys } from "src/libraries/Predeploys.sol"; + import { CrossL2Inbox } from "src/L2/CrossL2Inbox.sol"; + import { IL2ToL2CrossDomainMessenger } from "src/L2/IL2ToL2CrossDomainMessenger.sol"; + import { ISemver } from "src/universal/ISemver.sol"; ++import { SafeCall } from "src/libraries/SafeCall.sol"; +  + /// @notice Thrown when a non-written slot in transient storage is attempted to be read from. + error NotEntered(); +@@ -59,8 +60,8 @@ /// @notice Current message version identifier. + uint16 public constant messageVersion = uint16(0); +  + /// @notice Semantic version. +- /// @custom:semver 0.1.0 +- string public constant version = "0.1.0"; ++ /// @custom:semver 1.0.0-beta.1 ++ string public constant version = "1.0.0-beta.1"; +  + /// @notice Mapping of message hashes to boolean receipt values. Note that a message will only be present in this + /// mapping if it has successfully been relayed on this chain, and can therefore not be relayed again. +@@ -175,7 +176,7 @@ } +  + _storeMessageMetadata(_source, _sender); +  +- bool success = _callWithAllGas(_target, _message); ++ bool success = SafeCall.call(_target, msg.value, _message); +  + if (success) { + successfulMessages[messageHash] = true; +@@ -211,25 +212,6 @@ function _storeMessageMetadata(uint256 _source, address _sender) internal { + assembly { + tstore(CROSS_DOMAIN_MESSAGE_SENDER_SLOT, _sender) + tstore(CROSS_DOMAIN_MESSAGE_SOURCE_SLOT, _source) +- } +- } +- +- /// @notice Calls the target address with the message payload and all available gas. +- /// @param _target Target address to call. +- /// @param _message Message payload to call target with. +- /// @return _success True if the call was successful, and false otherwise. +- function _callWithAllGas(address _target, bytes memory _message) internal returns (bool _success) { +- assembly { +- _success := +- call( +- gas(), // gas +- _target, // recipient +- callvalue(), // ether value +- add(_message, 32), // inloc +- mload(_message), // inlen +- 0, // outloc +- 0 // outlen +- ) + } + } + }
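The retained `_storeMessageMetadata` writes the cross-domain source and sender with `tstore`, i.e. EIP-1153 transient storage: the values stay readable across internal calls within the transaction but cost no persistent SSTORE and vanish when the transaction ends. A small illustrative pair (requires solc >= 0.8.24 targeting Cancun; not fork code):

// SPDX-License-Identifier: MIT
pragma solidity ^0.8.24; // tstore/tload need the Cancun EVM target

contract TransientSketch {
    function set(uint256 v) external {
        assembly { tstore(0, v) } // visible to later calls in the same tx only
    }

    function get() external view returns (uint256 v) {
        assembly { v := tload(0) } // reads 0 again once the transaction ends
    }
}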
[OP → CELO: +134 / -484]
diff --git OP/packages/contracts-bedrock/src/cannon/MIPS.sol CELO/packages/contracts-bedrock/src/cannon/MIPS.sol +index 78064149bfaf706bab7a3eca7900093e290af2c5..f53dede0286f3edb8abd41b81d0a7a660962e492 100644 +--- OP/packages/contracts-bedrock/src/cannon/MIPS.sol ++++ CELO/packages/contracts-bedrock/src/cannon/MIPS.sol +@@ -4,7 +4,10 @@ + import { ISemver } from "src/universal/ISemver.sol"; + import { IPreimageOracle } from "./interfaces/IPreimageOracle.sol"; + import { PreimageKeyLib } from "./PreimageKeyLib.sol"; +-import "src/cannon/libraries/MIPSInstructions.sol" as ins; ++import { MIPSInstructions as ins } from "src/cannon/libraries/MIPSInstructions.sol"; ++import { MIPSSyscalls as sys } from "src/cannon/libraries/MIPSSyscalls.sol"; ++import { MIPSState as st } from "src/cannon/libraries/MIPSState.sol"; ++import { MIPSMemory } from "src/cannon/libraries/MIPSMemory.sol"; +  + /// @title MIPS + /// @notice The MIPS contract emulates a single MIPS instruction. +@@ -45,21 +48,13 @@ uint32 public constant BRK_START = 0x40000000; +  + /// @notice The semantic version of the MIPS contract. + /// @custom:semver 1.0.1 +- string public constant version = "1.1.0-beta.1"; +- +- uint32 internal constant FD_STDIN = 0; +- uint32 internal constant FD_STDOUT = 1; +- uint32 internal constant FD_STDERR = 2; +- uint32 internal constant FD_HINT_READ = 3; +- uint32 internal constant FD_HINT_WRITE = 4; +- uint32 internal constant FD_PREIMAGE_READ = 5; +- uint32 internal constant FD_PREIMAGE_WRITE = 6; +- +- uint32 internal constant EBADF = 0x9; +- uint32 internal constant EINVAL = 0x16; ++ string public constant version = "1.1.0-beta.4"; +  + /// @notice The preimage oracle contract. + IPreimageOracle internal immutable ORACLE; ++ ++ // The offset of the start of proof calldata (_proof.offset) in the step() function ++ uint256 internal constant STEP_PROOF_OFFSET = 420; +  + /// @param _oracle The address of the preimage oracle contract. + constructor(IPreimageOracle _oracle) { +@@ -147,481 +142,59 @@ assembly { + state := 0x80 + } +  +- // Load the syscall number from the registers +- uint32 syscall_no = state.registers[2]; ++ // Load the syscall numbers and args from the registers ++ (uint32 syscall_no, uint32 a0, uint32 a1, uint32 a2) = sys.getSyscallArgs(state.registers); ++ + uint32 v0 = 0; + uint32 v1 = 0; +  +- // Load the syscall arguments from the registers +- uint32 a0 = state.registers[4]; +- uint32 a1 = state.registers[5]; +- uint32 a2 = state.registers[6]; +- +- // mmap: Allocates a page from the heap. +- if (syscall_no == 4090) { +- uint32 sz = a1; +- if (sz & 4095 != 0) { +- // adjust size to align with page size +- sz += 4096 - (sz & 4095); +- } +- if (a0 == 0) { +- v0 = state.heap; +- state.heap += sz; +- } else { +- v0 = a0; +- } +- } +- // brk: Returns a fixed address for the program break at 0x40000000 +- else if (syscall_no == 4045) { ++ if (syscall_no == sys.SYS_MMAP) { ++ (v0, v1, state.heap) = sys.handleSysMmap(a0, a1, state.heap); ++ } else if (syscall_no == sys.SYS_BRK) { ++ // brk: Returns a fixed address for the program break at 0x40000000 + v0 = BRK_START; +- } +- // clone (not supported) returns 1 +- else if (syscall_no == 4120) { ++ } else if (syscall_no == sys.SYS_CLONE) { ++ // clone (not supported) returns 1 + v0 = 1; +- } +- // exit group: Sets the Exited and ExitCode states to true and argument 0. +- else if (syscall_no == 4246) { ++ } else if (syscall_no == sys.SYS_EXIT_GROUP) { ++ // exit group: Sets the Exited and ExitCode states to true and argument 0. 
+ state.exited = true; + state.exitCode = uint8(a0); + return outputState(); +- } +- // read: Like Linux read syscall. Splits unaligned reads into aligned reads. +- else if (syscall_no == 4003) { +- // args: a0 = fd, a1 = addr, a2 = count +- // returns: v0 = read, v1 = err code +- if (a0 == FD_STDIN) { +- // Leave v0 and v1 zero: read nothing, no error +- } +- // pre-image oracle read +- else if (a0 == FD_PREIMAGE_READ) { +- // verify proof 1 is correct, and get the existing memory. +- uint32 mem = readMem(a1 & 0xFFffFFfc, 1); // mask the addr to align it to 4 bytes +- bytes32 preimageKey = state.preimageKey; +- // If the preimage key is a local key, localize it in the context of the caller. +- if (uint8(preimageKey[0]) == 1) { +- preimageKey = PreimageKeyLib.localize(preimageKey, _localContext); +- } +- (bytes32 dat, uint256 datLen) = ORACLE.readPreimage(preimageKey, state.preimageOffset); +- +- // Transform data for writing to memory +- // We use assembly for more precise ops, and no var count limit +- assembly { +- let alignment := and(a1, 3) // the read might not start at an aligned address +- let space := sub(4, alignment) // remaining space in memory word +- if lt(space, datLen) { datLen := space } // if less space than data, shorten data +- if lt(a2, datLen) { datLen := a2 } // if requested to read less, read less +- dat := shr(sub(256, mul(datLen, 8)), dat) // right-align data +- dat := shl(mul(sub(sub(4, datLen), alignment), 8), dat) // position data to insert into memory +- // word +- let mask := sub(shl(mul(sub(4, alignment), 8), 1), 1) // mask all bytes after start +- let suffixMask := sub(shl(mul(sub(sub(4, alignment), datLen), 8), 1), 1) // mask of all bytes +- // starting from end, maybe none +- mask := and(mask, not(suffixMask)) // reduce mask to just cover the data we insert +- mem := or(and(mem, not(mask)), dat) // clear masked part of original memory, and insert data +- } +- +- // Write memory back +- writeMem(a1 & 0xFFffFFfc, 1, mem); +- state.preimageOffset += uint32(datLen); +- v0 = uint32(datLen); +- } +- // hint response +- else if (a0 == FD_HINT_READ) { +- // Don't read into memory, just say we read it all +- // The result is ignored anyway +- v0 = a2; +- } else { +- v0 = 0xFFffFFff; +- v1 = EBADF; +- } +- } +- // write: like Linux write syscall. Splits unaligned writes into aligned writes. 
+- else if (syscall_no == 4004) { +- // args: a0 = fd, a1 = addr, a2 = count +- // returns: v0 = written, v1 = err code +- if (a0 == FD_STDOUT || a0 == FD_STDERR || a0 == FD_HINT_WRITE) { +- v0 = a2; // tell program we have written everything +- } +- // pre-image oracle +- else if (a0 == FD_PREIMAGE_WRITE) { +- uint32 mem = readMem(a1 & 0xFFffFFfc, 1); // mask the addr to align it to 4 bytes +- bytes32 key = state.preimageKey; +- +- // Construct pre-image key from memory +- // We use assembly for more precise ops, and no var count limit +- assembly { +- let alignment := and(a1, 3) // the read might not start at an aligned address +- let space := sub(4, alignment) // remaining space in memory word +- if lt(space, a2) { a2 := space } // if less space than data, shorten data +- key := shl(mul(a2, 8), key) // shift key, make space for new info +- let mask := sub(shl(mul(a2, 8), 1), 1) // mask for extracting value from memory +- mem := and(shr(mul(sub(space, a2), 8), mem), mask) // align value to right, mask it +- key := or(key, mem) // insert into key +- } +- +- // Write pre-image key to oracle +- state.preimageKey = key; +- state.preimageOffset = 0; // reset offset, to read new pre-image data from the start +- v0 = a2; +- } else { +- v0 = 0xFFffFFff; +- v1 = EBADF; +- } +- } +- // fcntl: Like linux fcntl syscall, but only supports minimal file-descriptor control commands, +- // to retrieve the file-descriptor R/W flags. +- else if (syscall_no == 4055) { +- // fcntl +- // args: a0 = fd, a1 = cmd +- if (a1 == 3) { +- // F_GETFL: get file descriptor flags +- if (a0 == FD_STDIN || a0 == FD_PREIMAGE_READ || a0 == FD_HINT_READ) { +- v0 = 0; // O_RDONLY +- } else if (a0 == FD_STDOUT || a0 == FD_STDERR || a0 == FD_PREIMAGE_WRITE || a0 == FD_HINT_WRITE) { +- v0 = 1; // O_WRONLY +- } else { +- v0 = 0xFFffFFff; +- v1 = EBADF; +- } +- } else { +- v0 = 0xFFffFFff; +- v1 = EINVAL; // cmd not recognized by this kernel +- } +- } +- +- // Write the results back to the state registers +- state.registers[2] = v0; +- state.registers[7] = v1; +- +- // Update the PC and nextPC +- state.pc = state.nextPC; +- state.nextPC = state.nextPC + 4; +- +- out_ = outputState(); +- } +- } +- +- /// @notice Handles a branch instruction, updating the MIPS state PC where needed. +- /// @param _opcode The opcode of the branch instruction. +- /// @param _insn The instruction to be executed. +- /// @param _rtReg The register to be used for the branch. +- /// @param _rs The register to be compared with the branch register. +- /// @return out_ The hashed MIPS state. 
+- function handleBranch(uint32 _opcode, uint32 _insn, uint32 _rtReg, uint32 _rs) internal returns (bytes32 out_) { +- unchecked { +- // Load state from memory +- State memory state; +- assembly { +- state := 0x80 +- } +- +- bool shouldBranch = false; +- +- if (state.nextPC != state.pc + 4) { +- revert("branch in delay slot"); +- } +- +- // beq/bne: Branch on equal / not equal +- if (_opcode == 4 || _opcode == 5) { +- uint32 rt = state.registers[_rtReg]; +- shouldBranch = (_rs == rt && _opcode == 4) || (_rs != rt && _opcode == 5); +- } +- // blez: Branches if instruction is less than or equal to zero +- else if (_opcode == 6) { +- shouldBranch = int32(_rs) <= 0; +- } +- // bgtz: Branches if instruction is greater than zero +- else if (_opcode == 7) { +- shouldBranch = int32(_rs) > 0; +- } +- // bltz/bgez: Branch on less than zero / greater than or equal to zero +- else if (_opcode == 1) { +- // regimm +- uint32 rtv = ((_insn >> 16) & 0x1F); +- if (rtv == 0) { +- shouldBranch = int32(_rs) < 0; +- } +- if (rtv == 1) { +- shouldBranch = int32(_rs) >= 0; +- } +- } +- +- // Update the state's previous PC +- uint32 prevPC = state.pc; +- +- // Execute the delay slot first +- state.pc = state.nextPC; +- +- // If we should branch, update the PC to the branch target +- // Otherwise, proceed to the next instruction +- if (shouldBranch) { +- state.nextPC = prevPC + 4 + (ins.signExtend(_insn & 0xFFFF, 16) << 2); +- } else { +- state.nextPC = state.nextPC + 4; +- } +- +- // Return the hash of the resulting state +- out_ = outputState(); +- } +- } +- +- /// @notice Handles HI and LO register instructions. +- /// @param _func The function code of the instruction. +- /// @param _rs The value of the RS register. +- /// @param _rt The value of the RT register. +- /// @param _storeReg The register to store the result in. +- /// @return out_ The hashed MIPS state. +- function handleHiLo(uint32 _func, uint32 _rs, uint32 _rt, uint32 _storeReg) internal returns (bytes32 out_) { +- unchecked { +- // Load state from memory +- State memory state; +- assembly { +- state := 0x80 +- } +- +- uint32 val; +- +- // mfhi: Move the contents of the HI register into the destination +- if (_func == 0x10) { +- val = state.hi; +- } +- // mthi: Move the contents of the source into the HI register +- else if (_func == 0x11) { +- state.hi = _rs; +- } +- // mflo: Move the contents of the LO register into the destination +- else if (_func == 0x12) { +- val = state.lo; +- } +- // mtlo: Move the contents of the source into the LO register +- else if (_func == 0x13) { +- state.lo = _rs; +- } +- // mult: Multiplies `rs` by `rt` and stores the result in HI and LO registers +- else if (_func == 0x18) { +- uint64 acc = uint64(int64(int32(_rs)) * int64(int32(_rt))); +- state.hi = uint32(acc >> 32); +- state.lo = uint32(acc); +- } +- // multu: Unsigned multiplies `rs` by `rt` and stores the result in HI and LO registers +- else if (_func == 0x19) { +- uint64 acc = uint64(uint64(_rs) * uint64(_rt)); +- state.hi = uint32(acc >> 32); +- state.lo = uint32(acc); +- } +- // div: Divides `rs` by `rt`. +- // Stores the quotient in LO +- // And the remainder in HI +- else if (_func == 0x1a) { +- if (int32(_rt) == 0) { +- revert("MIPS: division by zero"); +- } +- state.hi = uint32(int32(_rs) % int32(_rt)); +- state.lo = uint32(int32(_rs) / int32(_rt)); +- } +- // divu: Unsigned divides `rs` by `rt`. 
+- // Stores the quotient in LO +- // And the remainder in HI +- else if (_func == 0x1b) { +- if (_rt == 0) { +- revert("MIPS: division by zero"); +- } +- state.hi = _rs % _rt; +- state.lo = _rs / _rt; +- } +- +- // Store the result in the destination register, if applicable +- if (_storeReg != 0) { +- state.registers[_storeReg] = val; +- } +- +- // Update the PC +- state.pc = state.nextPC; +- state.nextPC = state.nextPC + 4; +- +- // Return the hash of the resulting state +- out_ = outputState(); +- } +- } +- +- /// @notice Handles a jump instruction, updating the MIPS state PC where needed. +- /// @param _linkReg The register to store the link to the instruction after the delay slot instruction. +- /// @param _dest The destination to jump to. +- /// @return out_ The hashed MIPS state. +- function handleJump(uint32 _linkReg, uint32 _dest) internal returns (bytes32 out_) { +- unchecked { +- // Load state from memory. +- State memory state; +- assembly { +- state := 0x80 +- } +- +- if (state.nextPC != state.pc + 4) { +- revert("jump in delay slot"); ++ } else if (syscall_no == sys.SYS_READ) { ++ (v0, v1, state.preimageOffset, state.memRoot) = sys.handleSysRead({ ++ _a0: a0, ++ _a1: a1, ++ _a2: a2, ++ _preimageKey: state.preimageKey, ++ _preimageOffset: state.preimageOffset, ++ _localContext: _localContext, ++ _oracle: ORACLE, ++ _proofOffset: MIPSMemory.memoryProofOffset(STEP_PROOF_OFFSET, 1), ++ _memRoot: state.memRoot ++ }); ++ } else if (syscall_no == sys.SYS_WRITE) { ++ (v0, v1, state.preimageKey, state.preimageOffset) = sys.handleSysWrite({ ++ _a0: a0, ++ _a1: a1, ++ _a2: a2, ++ _preimageKey: state.preimageKey, ++ _preimageOffset: state.preimageOffset, ++ _proofOffset: MIPSMemory.memoryProofOffset(STEP_PROOF_OFFSET, 1), ++ _memRoot: state.memRoot ++ }); ++ } else if (syscall_no == sys.SYS_FCNTL) { ++ (v0, v1) = sys.handleSysFcntl(a0, a1); + } +  +- // Update the next PC to the jump destination. +- uint32 prevPC = state.pc; +- state.pc = state.nextPC; +- state.nextPC = _dest; ++ st.CpuScalars memory cpu = getCpuScalars(state); ++ sys.handleSyscallUpdates(cpu, state.registers, v0, v1); ++ setStateCpuScalars(state, cpu); +  +- // Update the link-register to the instruction after the delay slot instruction. +- if (_linkReg != 0) { +- state.registers[_linkReg] = prevPC + 8; +- } +- +- // Return the hash of the resulting state. + out_ = outputState(); + } + } +  +- /// @notice Handles a storing a value into a register. +- /// @param _storeReg The register to store the value into. +- /// @param _val The value to store. +- /// @param _conditional Whether or not the store is conditional. +- /// @return out_ The hashed MIPS state. +- function handleRd(uint32 _storeReg, uint32 _val, bool _conditional) internal returns (bytes32 out_) { +- unchecked { +- // Load state from memory. +- State memory state; +- assembly { +- state := 0x80 +- } +- +- // The destination register must be valid. +- require(_storeReg < 32, "valid register"); +- +- // Never write to reg 0, and it can be conditional (movz, movn). +- if (_storeReg != 0 && _conditional) { +- state.registers[_storeReg] = _val; +- } +- +- // Update the PC. +- state.pc = state.nextPC; +- state.nextPC = state.nextPC + 4; +- +- // Return the hash of the resulting state. +- out_ = outputState(); +- } +- } +- +- /// @notice Computes the offset of the proof in the calldata. +- /// @param _proofIndex The index of the proof in the calldata. +- /// @return offset_ The offset of the proof in the calldata. 
+- function proofOffset(uint8 _proofIndex) internal pure returns (uint256 offset_) { +- unchecked { +- // A proof of 32 bit memory, with 32-byte leaf values, is (32-5)=27 bytes32 entries. +- // And the leaf value itself needs to be encoded as well. And proof.offset == 420 +- offset_ = 420 + (uint256(_proofIndex) * (28 * 32)); +- uint256 s = 0; +- assembly { +- s := calldatasize() +- } +- require(s >= (offset_ + 28 * 32), "check that there is enough calldata"); +- return offset_; +- } +- } +- +- /// @notice Reads a 32-bit value from memory. +- /// @param _addr The address to read from. +- /// @param _proofIndex The index of the proof in the calldata. +- /// @return out_ The hashed MIPS state. +- function readMem(uint32 _addr, uint8 _proofIndex) internal pure returns (uint32 out_) { +- unchecked { +- // Compute the offset of the proof in the calldata. +- uint256 offset = proofOffset(_proofIndex); +- +- assembly { +- // Validate the address alignement. +- if and(_addr, 3) { revert(0, 0) } +- +- // Load the leaf value. +- let leaf := calldataload(offset) +- offset := add(offset, 32) +- +- // Convenience function to hash two nodes together in scratch space. +- function hashPair(a, b) -> h { +- mstore(0, a) +- mstore(32, b) +- h := keccak256(0, 64) +- } +- +- // Start with the leaf node. +- // Work back up by combining with siblings, to reconstruct the root. +- let path := shr(5, _addr) +- let node := leaf +- for { let i := 0 } lt(i, 27) { i := add(i, 1) } { +- let sibling := calldataload(offset) +- offset := add(offset, 32) +- switch and(shr(i, path), 1) +- case 0 { node := hashPair(node, sibling) } +- case 1 { node := hashPair(sibling, node) } +- } +- +- // Load the memory root from the first field of state. +- let memRoot := mload(0x80) +- +- // Verify the root matches. +- if iszero(eq(node, memRoot)) { +- mstore(0, 0x0badf00d) +- revert(0, 32) +- } +- +- // Bits to shift = (32 - 4 - (addr % 32)) * 8 +- let shamt := shl(3, sub(sub(32, 4), and(_addr, 31))) +- out_ := and(shr(shamt, leaf), 0xFFffFFff) +- } +- } +- } +- +- /// @notice Writes a 32-bit value to memory. +- /// This function first overwrites the part of the leaf. +- /// Then it recomputes the memory merkle root. +- /// @param _addr The address to write to. +- /// @param _proofIndex The index of the proof in the calldata. +- /// @param _val The value to write. +- function writeMem(uint32 _addr, uint8 _proofIndex, uint32 _val) internal pure { +- unchecked { +- // Compute the offset of the proof in the calldata. +- uint256 offset = proofOffset(_proofIndex); +- +- assembly { +- // Validate the address alignement. +- if and(_addr, 3) { revert(0, 0) } +- +- // Load the leaf value. +- let leaf := calldataload(offset) +- let shamt := shl(3, sub(sub(32, 4), and(_addr, 31))) +- +- // Mask out 4 bytes, and OR in the value +- leaf := or(and(leaf, not(shl(shamt, 0xFFffFFff))), shl(shamt, _val)) +- offset := add(offset, 32) +- +- // Convenience function to hash two nodes together in scratch space. +- function hashPair(a, b) -> h { +- mstore(0, a) +- mstore(32, b) +- h := keccak256(0, 64) +- } +- +- // Start with the leaf node. +- // Work back up by combining with siblings, to reconstruct the root. 
+- let path := shr(5, _addr) +- let node := leaf +- for { let i := 0 } lt(i, 27) { i := add(i, 1) } { +- let sibling := calldataload(offset) +- offset := add(offset, 32) +- switch and(shr(i, path), 1) +- case 0 { node := hashPair(node, sibling) } +- case 1 { node := hashPair(sibling, node) } +- } +- +- // Store the new memory root in the first field of state. +- mstore(0x80, node) +- } +- } +- } +- + /// @notice Executes a single step of the vm. + /// Will revert if any required input state is missing. + /// @param _stateData The encoded state witness data. +@@ -646,7 +219,7 @@ if iszero(eq(_stateData.offset, 132)) { + // 32*4+4=132 expected state data offset + revert(0, 0) + } +- if iszero(eq(_proof.offset, 420)) { ++ if iszero(eq(_proof.offset, STEP_PROOF_OFFSET)) { + // 132+32+256=420 expected proof offset + revert(0, 0) + } +@@ -688,14 +261,15 @@ + state.step += 1; +  + // instruction fetch +- uint32 insn = readMem(state.pc, 0); ++ uint256 insnProofOffset = MIPSMemory.memoryProofOffset(STEP_PROOF_OFFSET, 0); ++ uint32 insn = MIPSMemory.readMem(state.memRoot, state.pc, insnProofOffset); + uint32 opcode = insn >> 26; // 6-bits +  + // j-type j/jal + if (opcode == 2 || opcode == 3) { + // Take top 4 bits of the next PC (its 256 MB region), and concatenate with the 26-bit offset + uint32 target = (state.nextPC & 0xF0000000) | (insn & 0x03FFFFFF) << 2; +- return handleJump(opcode == 2 ? 0 : 31, target); ++ return handleJumpAndReturnOutput(state, opcode == 2 ? 0 : 31, target); + } +  + // register fetch +@@ -730,7 +304,19 @@ rdReg = rtReg; + } +  + if ((opcode >= 4 && opcode < 8) || opcode == 1) { +- return handleBranch(opcode, insn, rtReg, rs); ++ st.CpuScalars memory cpu = getCpuScalars(state); ++ ++ ins.handleBranch({ ++ _cpu: cpu, ++ _registers: state.registers, ++ _opcode: opcode, ++ _insn: insn, ++ _rtReg: rtReg, ++ _rs: rs ++ }); ++ setStateCpuScalars(state, cpu); ++ ++ return outputState(); + } +  + uint32 storeAddr = 0xFF_FF_FF_FF; +@@ -741,7 +327,8 @@ if (opcode >= 0x20) { + // M[R[rs]+SignExtImm] + rs += ins.signExtend(insn & 0xFFFF, 16); + uint32 addr = rs & 0xFFFFFFFC; +- mem = readMem(addr, 1); ++ uint256 memProofOffset = MIPSMemory.memoryProofOffset(STEP_PROOF_OFFSET, 1); ++ mem = MIPSMemory.readMem(state.memRoot, addr, memProofOffset); + if (opcode >= 0x28 && opcode != 0x30) { + // store + storeAddr = addr; +@@ -758,16 +345,16 @@ uint32 func = insn & 0x3f; // 6-bits + if (opcode == 0 && func >= 8 && func < 0x1c) { + if (func == 8 || func == 9) { + // jr/jalr +- return handleJump(func == 8 ? 0 : rdReg, rs); ++ return handleJumpAndReturnOutput(state, func == 8 ? 
0 : rdReg, rs); + } +  + if (func == 0xa) { + // movz +- return handleRd(rdReg, rs, rt == 0); ++ return handleRdAndReturnOutput(state, rdReg, rs, rt == 0); + } + if (func == 0xb) { + // movn +- return handleRd(rdReg, rs, rt != 0); ++ return handleRdAndReturnOutput(state, rdReg, rs, rt != 0); + } +  + // syscall (can read and write) +@@ -778,7 +365,19 @@ + // lo and hi registers + // can write back + if (func >= 0x10 && func < 0x1c) { +- return handleHiLo(func, rs, rt, rdReg); ++ st.CpuScalars memory cpu = getCpuScalars(state); ++ ++ ins.handleHiLo({ ++ _cpu: cpu, ++ _registers: state.registers, ++ _func: func, ++ _rs: rs, ++ _rt: rt, ++ _storeReg: rdReg ++ }); ++ ++ setStateCpuScalars(state, cpu); ++ return outputState(); + } + } +  +@@ -789,11 +388,62 @@ } +  + // write memory + if (storeAddr != 0xFF_FF_FF_FF) { +- writeMem(storeAddr, 1, val); ++ uint256 memProofOffset = MIPSMemory.memoryProofOffset(STEP_PROOF_OFFSET, 1); ++ state.memRoot = MIPSMemory.writeMem(storeAddr, memProofOffset, val); + } +  + // write back the value to destination register +- return handleRd(rdReg, val, true); ++ return handleRdAndReturnOutput(state, rdReg, val, true); + } ++ } ++ ++ function handleJumpAndReturnOutput( ++ State memory _state, ++ uint32 _linkReg, ++ uint32 _dest ++ ) ++ internal ++ returns (bytes32 out_) ++ { ++ st.CpuScalars memory cpu = getCpuScalars(_state); ++ ++ ins.handleJump({ _cpu: cpu, _registers: _state.registers, _linkReg: _linkReg, _dest: _dest }); ++ ++ setStateCpuScalars(_state, cpu); ++ return outputState(); ++ } ++ ++ function handleRdAndReturnOutput( ++ State memory _state, ++ uint32 _storeReg, ++ uint32 _val, ++ bool _conditional ++ ) ++ internal ++ returns (bytes32 out_) ++ { ++ st.CpuScalars memory cpu = getCpuScalars(_state); ++ ++ ins.handleRd({ ++ _cpu: cpu, ++ _registers: _state.registers, ++ _storeReg: _storeReg, ++ _val: _val, ++ _conditional: _conditional ++ }); ++ ++ setStateCpuScalars(_state, cpu); ++ return outputState(); ++ } ++ ++ function getCpuScalars(State memory _state) internal pure returns (st.CpuScalars memory) { ++ return st.CpuScalars({ pc: _state.pc, nextPC: _state.nextPC, lo: _state.lo, hi: _state.hi }); ++ } ++ ++ function setStateCpuScalars(State memory _state, st.CpuScalars memory _cpu) internal pure { ++ _state.pc = _cpu.pc; ++ _state.nextPC = _cpu.nextPC; ++ _state.lo = _cpu.lo; ++ _state.hi = _cpu.hi; + } + }
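A pattern worth calling out in this refactor: rather than each handler hashing and returning state itself, `step` copies `pc`/`nextPC`/`lo`/`hi` into a `CpuScalars` memory struct, hands it to the extracted instruction library, and writes the fields back before hashing. That round-trip works because memory structs are passed by reference into internal library calls, so the library's writes are visible to the caller. A toy sketch (struct shape taken from the diff; the library body is hypothetical):

// SPDX-License-Identifier: MIT
pragma solidity ^0.8.15;

library InsSketch {
    struct CpuScalars { uint32 pc; uint32 nextPC; uint32 lo; uint32 hi; }

    // Hypothetical handler: advance past the delay slot.
    function step(CpuScalars memory cpu) internal pure {
        unchecked {
            cpu.pc = cpu.nextPC;     // mutation is visible to the caller
            cpu.nextPC = cpu.nextPC + 4;
        }
    }
}

contract VmSketch {
    uint32 pc; uint32 nextPC; uint32 lo; uint32 hi;

    function run() external {
        InsSketch.CpuScalars memory cpu = InsSketch.CpuScalars(pc, nextPC, lo, hi);
        InsSketch.step(cpu); // library mutates the memory struct in place
        (pc, nextPC, lo, hi) = (cpu.pc, cpu.nextPC, cpu.lo, cpu.hi); // copy back
    }
}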
[(new) CELO: +9 / -0]
diff --git OP/packages/contracts-bedrock/src/celo/CalledByVm.sol CELO/packages/contracts-bedrock/src/celo/CalledByVm.sol +new file mode 100644 +index 0000000000000000000000000000000000000000..c3f6efe12072ef8c87e213f9c29b0789c26cff0f +--- /dev/null ++++ CELO/packages/contracts-bedrock/src/celo/CalledByVm.sol +@@ -0,0 +1,9 @@ ++// SPDX-License-Identifier: LGPL-3.0-only ++pragma solidity ^0.8.15; ++ ++contract CalledByVm { ++ modifier onlyVm() { ++ require(msg.sender == address(0), "Only VM can call"); ++ _; ++ } ++}
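Since ordinary EOAs and contracts can never produce `msg.sender == address(0)`, `onlyVm` effectively restricts a function to calls synthesized by the protocol itself. A hypothetical consumer (import path assumed to follow the repo's src/ convention):

// SPDX-License-Identifier: LGPL-3.0-only
pragma solidity ^0.8.15;

import { CalledByVm } from "src/celo/CalledByVm.sol";

// Hypothetical mix-in usage: only protocol-synthesized calls reach the hook;
// every other caller reverts with "Only VM can call".
contract VmOnlyCounter is CalledByVm {
    uint256 public ticks;

    function tick() external onlyVm {
        ticks += 1;
    }
}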
[(new) CELO: +41 / -0]
diff --git OP/packages/contracts-bedrock/src/celo/CeloPredeploys.sol CELO/packages/contracts-bedrock/src/celo/CeloPredeploys.sol +new file mode 100644 +index 0000000000000000000000000000000000000000..fc36601693c6d535c429e8fceef1a82349dc8eb4 +--- /dev/null ++++ CELO/packages/contracts-bedrock/src/celo/CeloPredeploys.sol +@@ -0,0 +1,41 @@ ++// SPDX-License-Identifier: MIT ++pragma solidity ^0.8.0; ++ ++import { console2 as console } from "forge-std/console2.sol"; ++ ++/// @title CeloPredeploys ++/// @notice Contains constant addresses for protocol contracts that are pre-deployed to the L2 system. ++library CeloPredeploys { ++ address internal constant CELO_REGISTRY = 0x000000000000000000000000000000000000ce10; ++ address internal constant GOLD_TOKEN = 0x471EcE3750Da237f93B8E339c536989b8978a438; ++ address internal constant FEE_HANDLER = 0xcD437749E43A154C07F3553504c68fBfD56B8778; ++ address internal constant FEE_CURRENCY_WHITELIST = 0xBB024E9cdCB2f9E34d893630D19611B8A5381b3c; ++ address internal constant MENTO_FEE_HANDLER_SELLER = 0x4eFa274B7e33476C961065000D58ee09F7921A74; ++ address internal constant UNISWAP_FEE_HANDLER_SELLER = 0xD3aeE28548Dbb65DF03981f0dC0713BfCBd10a97; ++ address internal constant SORTED_ORACLES = 0xefB84935239dAcdecF7c5bA76d8dE40b077B7b33; ++ address internal constant ADDRESS_SORTED_LINKED_LIST_WITH_MEDIAN = 0xED477A99035d0c1e11369F1D7A4e587893cc002B; ++ address internal constant FEE_CURRENCY = 0x4200000000000000000000000000000000001022; ++ address internal constant BRIDGED_ETH = 0x4200000000000000000000000000000000001023; ++ address internal constant FEE_CURRENCY_DIRECTORY = 0x71FFbD48E34bdD5a87c3c683E866dc63b8B2a685; ++ address internal constant cUSD = 0x765DE816845861e75A25fCA122bb6898B8B1282a; ++ ++ /// @notice Returns the name of the predeploy at the given address. ++ function getName(address _addr) internal pure returns (string memory out_) { ++ // require(isPredeployNamespace(_addr), "Predeploys: address must be a predeploy"); ++ ++ if (_addr == CELO_REGISTRY) return "CeloRegistry"; ++ if (_addr == GOLD_TOKEN) return "GoldToken"; ++ if (_addr == FEE_HANDLER) return "FeeHandler"; ++ if (_addr == FEE_CURRENCY_WHITELIST) return "FeeCurrencyWhitelist"; ++ if (_addr == MENTO_FEE_HANDLER_SELLER) return "MentoFeeHandlerSeller"; ++ if (_addr == UNISWAP_FEE_HANDLER_SELLER) return "UniswapFeeHandlerSeller"; ++ if (_addr == SORTED_ORACLES) return "SortedOracles"; ++ if (_addr == ADDRESS_SORTED_LINKED_LIST_WITH_MEDIAN) return "AddressSortedLinkedListWithMedian"; ++ if (_addr == FEE_CURRENCY) return "FeeCurrency"; ++ if (_addr == BRIDGED_ETH) return "BridgedEth"; ++ if (_addr == FEE_CURRENCY_DIRECTORY) return "FeeCurrencyDirectory"; ++ if (_addr == cUSD) return "cUSD"; ++ ++ revert("Predeploys: unnamed predeploy"); ++ } ++}
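Usage mirrors Optimism's own Predeploys helper: constants for addressing, plus `getName` for labeling in scripts and tests. A small hypothetical example:

// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

import { CeloPredeploys } from "src/celo/CeloPredeploys.sol";

contract PredeployNameSketch {
    // Returns "GoldToken"; getName reverts for addresses outside the table.
    function goldTokenName() external pure returns (string memory) {
        return CeloPredeploys.getName(CeloPredeploys.GOLD_TOKEN);
    }
}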
[(new) CELO: +95 / -0]
diff --git OP/packages/contracts-bedrock/src/celo/CeloRegistry.sol CELO/packages/contracts-bedrock/src/celo/CeloRegistry.sol +new file mode 100644 +index 0000000000000000000000000000000000000000..7da4cfb35ddfef5c49183c7c3523f658e071aa33 +--- /dev/null ++++ CELO/packages/contracts-bedrock/src/celo/CeloRegistry.sol +@@ -0,0 +1,95 @@ ++// SPDX-License-Identifier: MIT ++pragma solidity ^0.8.15; ++ ++import "../../lib/openzeppelin-contracts/contracts/access/Ownable.sol"; ++ ++import "./interfaces/ICeloRegistry.sol"; ++import "./Initializable.sol"; ++ ++/** ++ * @title Routes identifiers to addresses. ++ */ ++contract CeloRegistry is ICeloRegistry, Ownable, Initializable { ++ mapping(bytes32 => address) public registry; ++ ++ event RegistryUpdated(string identifier, bytes32 indexed identifierHash, address indexed addr); ++ ++ /** ++ * @notice Sets initialized == true on implementation contracts ++ * @param test Set to true to skip implementation initialization ++ */ ++ constructor(bool test) Initializable(test) { } ++ ++ /** ++ * @notice Used in place of the constructor to allow the contract to be upgradable via proxy. ++ */ ++ function initialize() external initializer { ++ _transferOwnership(msg.sender); ++ } ++ ++ /** ++ * @notice Associates the given address with the given identifier. ++ * @param identifier Identifier of contract whose address we want to set. ++ * @param addr Address of contract. ++ */ ++ function setAddressFor(string calldata identifier, address addr) external onlyOwner { ++ bytes32 identifierHash = keccak256(abi.encodePacked(identifier)); ++ registry[identifierHash] = addr; ++ emit RegistryUpdated(identifier, identifierHash, addr); ++ } ++ ++ /** ++ * @notice Gets address associated with the given identifierHash. ++ * @param identifierHash Identifier hash of contract whose address we want to look up. ++ * @dev Throws if address not set. ++ */ ++ function getAddressForOrDie(bytes32 identifierHash) external view returns (address) { ++ require(registry[identifierHash] != address(0), "identifier has no registry entry"); ++ return registry[identifierHash]; ++ } ++ ++ /** ++ * @notice Gets address associated with the given identifierHash. ++ * @param identifierHash Identifier hash of contract whose address we want to look up. ++ */ ++ function getAddressFor(bytes32 identifierHash) external view returns (address) { ++ return registry[identifierHash]; ++ } ++ ++ /** ++ * @notice Gets address associated with the given identifier. ++ * @param identifier Identifier of contract whose address we want to look up. ++ * @dev Throws if address not set. ++ */ ++ function getAddressForStringOrDie(string calldata identifier) external view returns (address) { ++ bytes32 identifierHash = keccak256(abi.encodePacked(identifier)); ++ require(registry[identifierHash] != address(0), "identifier has no registry entry"); ++ return registry[identifierHash]; ++ } ++ ++ /** ++ * @notice Gets address associated with the given identifier. ++ * @param identifier Identifier of contract whose address we want to look up. ++ */ ++ function getAddressForString(string calldata identifier) external view returns (address) { ++ bytes32 identifierHash = keccak256(abi.encodePacked(identifier)); ++ return registry[identifierHash]; ++ } ++ ++ /** ++ * @notice Iterates over provided array of identifiers, getting the address for each. ++ * Returns true if `sender` matches the address of one of the provided identifiers. ++ * @param identifierHashes Array of hashes of approved identifiers. 
++ * @param sender Address in question to verify membership. ++ * @return True if `sender` corresponds to the address of any of `identifiers` ++ * registry entries. ++ */ ++ function isOneOf(bytes32[] calldata identifierHashes, address sender) external view returns (bool) { ++ for (uint256 i = 0; i < identifierHashes.length; i++) { ++ if (registry[identifierHashes[i]] == sender) { ++ return true; ++ } ++ } ++ return false; ++ } ++}
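The read path mirrors the write path: the owner registers an address under a string identifier, and consumers resolve it by the identifier's keccak256 hash (the `OrDie` variants revert on unset entries). A hedged usage sketch against a trimmed interface (registry address taken from the CELO_REGISTRY predeploy constant above):

// SPDX-License-Identifier: MIT
pragma solidity ^0.8.15;

// Trimmed interface; the full ICeloRegistry lives under src/celo/interfaces.
interface ICeloRegistryTrimmed {
    function setAddressFor(string calldata identifier, address addr) external;
    function getAddressForOrDie(bytes32 identifierHash) external view returns (address);
}

contract RegistryUsageSketch {
    ICeloRegistryTrimmed internal constant REGISTRY =
        ICeloRegistryTrimmed(0x000000000000000000000000000000000000ce10);

    function resolveGoldToken() external view returns (address) {
        // Reverts with "identifier has no registry entry" if unset.
        return REGISTRY.getAddressForOrDie(keccak256(abi.encodePacked("GoldToken")));
    }
}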
diff --git OP/packages/contracts-bedrock/src/celo/FeeCurrency.sol CELO/packages/contracts-bedrock/src/celo/FeeCurrency.sol +new file mode 100644 +index 0000000000000000000000000000000000000000..59516e3d9e485002357a392e322e85ea30c3b327 +--- /dev/null ++++ CELO/packages/contracts-bedrock/src/celo/FeeCurrency.sol +@@ -0,0 +1,45 @@ ++// SPDX-License-Identifier: MIT ++pragma solidity ^0.8.15; ++ ++import { ERC20 } from "@openzeppelin/contracts/token/ERC20/ERC20.sol"; ++ ++abstract contract FeeCurrency is ERC20 { ++ modifier onlyVm() { ++ require(msg.sender == address(0), "Only VM can call"); ++ _; ++ } ++ ++ function debitGasFees(address from, uint256 value) external onlyVm { ++ _burn(from, value); ++ } ++ ++ // New function signature, will be used when all fee currencies have migrated ++ function creditGasFees(address[] calldata recipients, uint256[] calldata amounts) public onlyVm { ++ require(recipients.length == amounts.length, "Recipients and amounts must be the same length."); ++ ++ for (uint256 i = 0; i < recipients.length; i++) { ++ _mint(recipients[i], amounts[i]); ++ } ++ } ++ ++ // Old function signature for backwards compatibility ++ function creditGasFees( ++ address from, ++ address feeRecipient, ++ address, // gatewayFeeRecipient, unused ++ address communityFund, ++ uint256 refund, ++ uint256 tipTxFee, ++ uint256, // gatewayFee, unused ++ uint256 baseTxFee ++ ) ++ public ++ onlyVm ++ { ++ // Calling the new creditGasFees would make sense here, but that is not ++ // possible due to its calldata arguments. ++ _mint(from, refund); ++ _mint(feeRecipient, tipTxFee); ++ _mint(communityFund, baseTxFee); ++ } ++}
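Since `FeeCurrency` is abstract, a concrete ERC20 has to inherit it to gain the `debitGasFees`/`creditGasFees` hooks. A minimal sketch; the token name, symbol, and initial mint are illustrative assumptions:

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.15;

import { ERC20 } from "@openzeppelin/contracts/token/ERC20/ERC20.sol";
import { FeeCurrency } from "./FeeCurrency.sol";

// Hypothetical concrete fee currency. Note that debitGasFees/creditGasFees
// are onlyVm (msg.sender == address(0)), so only the protocol itself can
// invoke them; regular accounts interact with this token as a plain ERC20.
contract ExampleFeeCurrency is FeeCurrency {
    constructor(uint256 initialSupply) ERC20("Example Fee Currency", "EFC") {
        _mint(msg.sender, initialSupply);
    }
}
```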
diff --git OP/packages/contracts-bedrock/src/celo/FeeCurrencyDirectory.sol CELO/packages/contracts-bedrock/src/celo/FeeCurrencyDirectory.sol +new file mode 100644 +index 0000000000000000000000000000000000000000..21fc7ff3181a15e8d87b7f3ab89f713870197d48 +--- /dev/null ++++ CELO/packages/contracts-bedrock/src/celo/FeeCurrencyDirectory.sol +@@ -0,0 +1,91 @@ ++// SPDX-License-Identifier: MIT ++pragma solidity ^0.8.0; ++ ++import "./Initializable.sol"; ++import "./interfaces/IOracle.sol"; ++import "./interfaces/IFeeCurrencyDirectory.sol"; ++import { Ownable } from "@openzeppelin/contracts/access/Ownable.sol"; ++ ++contract FeeCurrencyDirectory is IFeeCurrencyDirectory, Initializable, Ownable { ++ mapping(address => CurrencyConfig) public currencies; ++ address[] private currencyList; ++ ++ constructor(bool test) Initializable(test) { } ++ ++ /** ++ * @notice Initializes the contract with the owner set. ++ */ ++ function initialize() public initializer { ++ _transferOwnership(msg.sender); ++ } ++ ++ /** ++ * @notice Sets the currency configuration for a token. ++ * @dev This action can only be performed by the contract owner. ++ * @param token The token address. ++ * @param oracle The oracle address for price fetching. ++ * @param intrinsicGas The intrinsic gas value for transactions. ++ */ ++ function setCurrencyConfig(address token, address oracle, uint256 intrinsicGas) external onlyOwner { ++ require(oracle != address(0), "Oracle address cannot be zero"); ++ require(intrinsicGas > 0, "Intrinsic gas cannot be zero"); ++ require(currencies[token].oracle == address(0), "Currency already in the directory"); ++ ++ currencies[token] = CurrencyConfig({ oracle: oracle, intrinsicGas: intrinsicGas }); ++ currencyList.push(token); ++ } ++ ++ /** ++ * @notice Removes a token from the directory. ++ * @dev This action can only be performed by the contract owner. ++ * @param token The token address to remove. ++ * @param index The index in the list of directory currencies. ++ */ ++ function removeCurrencies(address token, uint256 index) external onlyOwner { ++ require(index < currencyList.length, "Index out of bounds"); ++ require(currencyList[index] == token, "Index does not match token"); ++ ++ delete currencies[token]; ++ currencyList[index] = currencyList[currencyList.length - 1]; ++ currencyList.pop(); ++ } ++ ++ /** ++ * @notice Returns the list of all currency addresses. ++ * @return An array of addresses. ++ */ ++ function getCurrencies() public view returns (address[] memory) { ++ return currencyList; ++ } ++ ++ /** ++ * @notice Returns the configuration for a currency. ++ * @param token The address of the token. ++ * @return Currency configuration of the token. ++ */ ++ function getCurrencyConfig(address token) public view returns (CurrencyConfig memory) { ++ return currencies[token]; ++ } ++ ++ /** ++ * @notice Retrieves exchange rate between token and CELO. ++ * @param token The token address whose price is to be fetched. ++ * @return numerator The exchange rate numerator. ++ * @return denominator The exchange rate denominator. ++ */ ++ function getExchangeRate(address token) public view returns (uint256 numerator, uint256 denominator) { ++ require(currencies[token].oracle != address(0), "Currency not in the directory"); ++ (numerator, denominator) = IOracle(currencies[token].oracle).getExchangeRate(token); ++ } ++ ++ /** ++ * @notice Returns the storage, major, minor, and patch version of the contract. ++ * @return Storage version of the contract. ++ * @return Major version of the contract. 
++ * @return Minor version of the contract. ++ * @return Patch version of the contract. ++ */ ++ function getVersionNumber() external pure returns (uint256, uint256, uint256, uint256) { ++ return (1, 1, 0, 0); ++ } ++}
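A wiring sketch for the directory, assuming a direct (non-proxy) deployment with `test = true` so the `Initializable` lock is skipped; the token and oracle addresses and the 50,000 intrinsic gas figure are placeholder values, not recommendations:

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.15;

import { FeeCurrencyDirectory } from "./FeeCurrencyDirectory.sol";

// Hypothetical setup: deploy, initialize (this contract becomes owner),
// and register one fee currency with its price oracle.
contract DirectorySetupSketch {
    function setUp(address token, address oracle) external returns (FeeCurrencyDirectory) {
        FeeCurrencyDirectory directory = new FeeCurrencyDirectory(true);
        directory.initialize();
        directory.setCurrencyConfig(token, oracle, 50_000);
        // Anyone can then read the rate the directory resolves via the oracle:
        (uint256 num, uint256 den) = directory.getExchangeRate(token);
        num; den; // silence unused-variable warnings in this sketch
        return directory;
    }
}
```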
diff --git OP/packages/contracts-bedrock/src/celo/FeeCurrencyWhitelist.sol CELO/packages/contracts-bedrock/src/celo/FeeCurrencyWhitelist.sol +new file mode 100644 +index 0000000000000000000000000000000000000000..d52d4a155ea6536ad6559646881fd417278deb88 +--- /dev/null ++++ CELO/packages/contracts-bedrock/src/celo/FeeCurrencyWhitelist.sol +@@ -0,0 +1,77 @@ ++// SPDX-License-Identifier: MIT ++pragma solidity ^0.8.15; ++ ++import "../../lib/openzeppelin-contracts/contracts/access/Ownable.sol"; ++ ++import "./interfaces/IFeeCurrencyWhitelist.sol"; ++ ++import "./common/Initializable.sol"; ++ ++import "./common/interfaces/ICeloVersionedContract.sol"; ++ ++/** ++ * @title Holds a whitelist of the ERC20+ tokens that can be used to pay for gas ++ * Not including the native Celo token ++ */ ++contract FeeCurrencyWhitelist is IFeeCurrencyWhitelist, Ownable, Initializable, ICeloVersionedContract { ++ // Array of all the tokens enabled ++ address[] public whitelist; ++ ++ event FeeCurrencyWhitelisted(address token); ++ ++ event FeeCurrencyWhitelistRemoved(address token); ++ ++ /** ++ * @notice Sets initialized == true on implementation contracts ++ * @param test Set to true to skip implementation initialization ++ */ ++ constructor(bool test) Initializable(test) { } ++ ++ /** ++ * @notice Used in place of the constructor to allow the contract to be upgradable via proxy. ++ */ ++ function initialize() external initializer { ++ _transferOwnership(msg.sender); ++ } ++ ++ /** ++ * @notice Returns the storage, major, minor, and patch version of the contract. ++ * @return Storage version of the contract. ++ * @return Major version of the contract. ++ * @return Minor version of the contract. ++ * @return Patch version of the contract. ++ */ ++ function getVersionNumber() external pure returns (uint256, uint256, uint256, uint256) { ++ return (1, 1, 1, 0); ++ } ++ ++ /** ++ * @notice Removes a Mento token as enabled fee token. Tokens added with addToken should be ++ * removed with this function. ++ * @param tokenAddress The address of the token to remove. ++ * @param index The index of the token in the whitelist array. ++ */ ++ function removeToken(address tokenAddress, uint256 index) public onlyOwner { ++ require(whitelist[index] == tokenAddress, "Index does not match"); ++ uint256 length = whitelist.length; ++ whitelist[index] = whitelist[length - 1]; ++ whitelist.pop(); ++ emit FeeCurrencyWhitelistRemoved(tokenAddress); ++ } ++ ++ /** ++ * @dev Add a token to the whitelist ++ * @param tokenAddress The address of the token to add. ++ */ ++ function addToken(address tokenAddress) external onlyOwner { ++ whitelist.push(tokenAddress); ++ emit FeeCurrencyWhitelisted(tokenAddress); ++ } ++ ++ /** ++ * @return a list of all tokens enabled as gas fee currency. ++ */ ++ function getWhitelist() external view returns (address[] memory) { ++ return whitelist; ++ } ++}
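Note the swap-and-pop semantics of `removeToken`: the caller supplies the token's index, and removal moves the last array element into that slot, so indices of remaining tokens can shift after each removal. A sketch with placeholder token addresses:

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.15;

import { FeeCurrencyWhitelist } from "./FeeCurrencyWhitelist.sol";

// Hypothetical owner demo of the index-based removal.
contract WhitelistAdminSketch {
    function demo(address tokenA, address tokenB) external returns (address[] memory) {
        FeeCurrencyWhitelist list = new FeeCurrencyWhitelist(true);
        list.initialize();           // this contract becomes the owner
        list.addToken(tokenA);       // index 0
        list.addToken(tokenB);       // index 1
        list.removeToken(tokenA, 0); // swap-and-pop: tokenB moves to index 0
        return list.getWhitelist();  // [tokenB]
    }
}
```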
diff --git OP/packages/contracts-bedrock/src/celo/FeeHandler.sol CELO/packages/contracts-bedrock/src/celo/FeeHandler.sol +new file mode 100644 +index 0000000000000000000000000000000000000000..00a1b0bde4fcb4af98c1cd85c71b2d45802d950c +--- /dev/null ++++ CELO/packages/contracts-bedrock/src/celo/FeeHandler.sol +@@ -0,0 +1,543 @@ ++// SPDX-License-Identifier: MIT ++pragma solidity ^0.8.15; ++ ++import "../../lib/openzeppelin-contracts/contracts/utils/math/Math.sol"; ++import "../../lib/openzeppelin-contracts/contracts/access/Ownable.sol"; ++import "../../lib/openzeppelin-contracts/contracts/utils/structs/EnumerableSet.sol"; ++import "../../lib/openzeppelin-contracts/contracts/token/ERC20/IERC20.sol"; ++ ++import "./UsingRegistry.sol"; ++import "./common/Freezable.sol"; ++import "./common/FixidityLib.sol"; ++import "./common/Initializable.sol"; ++ ++import "./common/interfaces/IFeeHandler.sol"; ++import "./common/interfaces/IFeeHandlerSeller.sol"; ++ ++// TODO move to IStableToken when it adds method getExchangeRegistryId ++import "./interfaces/IStableTokenMento.sol"; ++import "./common/interfaces/ICeloVersionedContract.sol"; ++import "./common/interfaces/ICeloToken.sol"; ++import "./stability/interfaces/ISortedOracles.sol"; ++ ++// Using the minimal required signatures in the interfaces so more contracts could be compatible ++import { ReentrancyGuard } from "@openzeppelin/contracts/security/ReentrancyGuard.sol"; ++ ++// An implementation of FeeHandler as described in CIP-52 ++// See https://github.com/celo-org/celo-proposals/blob/master/CIPs/cip-0052.md ++contract FeeHandler is ++ Ownable, ++ Initializable, ++ UsingRegistry, ++ ICeloVersionedContract, ++ Freezable, ++ IFeeHandler, ++ ReentrancyGuard ++{ ++ using FixidityLib for FixidityLib.Fraction; ++ using EnumerableSet for EnumerableSet.AddressSet; ++ ++ uint256 public constant FIXED1_UINT = 1000000000000000000000000; // TODO move to FIX and add check ++ ++ // Min units that can be burned ++ uint256 public constant MIN_BURN = 200; ++ ++ // last day the daily limits were updated ++ uint256 public lastLimitDay; ++ ++ FixidityLib.Fraction public burnFraction; // 80% ++ ++ address public feeBeneficiary; ++ ++ uint256 public celoToBeBurned; ++ ++ // This mapping can not be public because it contains a FixidityLib.Fraction ++ // and that'd be only supported with experimental features in this ++ // compiler version ++ mapping(address => TokenState) private tokenStates; ++ ++ struct TokenState { ++ address handler; ++ FixidityLib.Fraction maxSlippage; ++ // Max amounts that can be burned in a day for a token ++ uint256 dailySellLimit; ++ // Max amounts that can be burned today for a token ++ uint256 currentDaySellLimit; ++ uint256 toDistribute; ++ // Historical amounts burned by this contract ++ uint256 pastBurn; ++ } ++ ++ EnumerableSet.AddressSet private activeTokens; ++ ++ event SoldAndBurnedToken(address token, uint256 value); ++ event DailyLimitSet(address tokenAddress, uint256 newLimit); ++ event DailyLimitHit(address token, uint256 burning); ++ event MaxSlippageSet(address token, uint256 maxSlippage); ++ event DailySellLimitUpdated(uint256 amount); ++ event FeeBeneficiarySet(address newBeneficiary); ++ event BurnFractionSet(uint256 fraction); ++ event TokenAdded(address tokenAddress, address handlerAddress); ++ event TokenRemoved(address tokenAddress); ++ ++ /** ++ * @notice Sets initialized == true on implementation contracts. ++ * @param test Set to true to skip implementation initialisation. 
++ */ ++ constructor(bool test) Initializable(test) { } ++ ++ /** ++ * @notice Used in place of the constructor to allow the contract to be upgradable via proxy. ++ */ ++ function initialize( ++ address _registryAddress, ++ address newFeeBeneficiary, ++ uint256 newBurnFraction, ++ address[] calldata tokens, ++ address[] calldata handlers, ++ uint256[] calldata newLimits, ++ uint256[] calldata newMaxSlippages ++ ) ++ external ++ initializer ++ { ++ require(tokens.length == handlers.length, "handlers length should match tokens length"); ++ require(tokens.length == newLimits.length, "limits length should match tokens length"); ++ require(tokens.length == newMaxSlippages.length, "maxSlippage length should match tokens length"); ++ ++ _transferOwnership(msg.sender); ++ setRegistry(_registryAddress); ++ _setFeeBeneficiary(newFeeBeneficiary); ++ _setBurnFraction(newBurnFraction); ++ ++ for (uint256 i = 0; i < tokens.length; i++) { ++ _addToken(tokens[i], handlers[i]); ++ _setDailySellLimit(tokens[i], newLimits[i]); ++ _setMaxSplippage(tokens[i], newMaxSlippages[i]); ++ } ++ } ++ ++ // Without this the contract cant receive Celo as native transfer ++ receive() external payable { } ++ ++ /** ++ * @dev Returns the handler address for the specified token. ++ * @param tokenAddress The address of the token for which to return the handler. ++ * @return The address of the handler contract for the specified token. ++ */ ++ function getTokenHandler(address tokenAddress) external view returns (address) { ++ return tokenStates[tokenAddress].handler; ++ } ++ ++ /** ++ * @dev Returns a boolean indicating whether the specified token is active or not. ++ * @param tokenAddress The address of the token for which to retrieve the active status. ++ * @return A boolean representing the active status of the specified token. ++ */ ++ function getTokenActive(address tokenAddress) external view returns (bool) { ++ return activeTokens.contains(tokenAddress); ++ } ++ ++ /** ++ * @dev Returns the maximum slippage percentage for the specified token. ++ * @param tokenAddress The address of the token for which to retrieve the maximum ++ * slippage percentage. ++ * @return The maximum slippage percentage as a uint256 value. ++ */ ++ function getTokenMaxSlippage(address tokenAddress) external view returns (uint256) { ++ return FixidityLib.unwrap(tokenStates[tokenAddress].maxSlippage); ++ } ++ ++ /** ++ * @dev Returns the daily burn limit for the specified token. ++ * @param tokenAddress The address of the token for which to retrieve the daily burn limit. ++ * @return The daily burn limit as a uint256 value. ++ */ ++ function getTokenDailySellLimit(address tokenAddress) external view returns (uint256) { ++ return tokenStates[tokenAddress].dailySellLimit; ++ } ++ ++ /** ++ * @dev Returns the current daily sell limit for the specified token. ++ * @param tokenAddress The address of the token for which to retrieve the current daily limit. ++ * @return The current daily limit as a uint256 value. ++ */ ++ function getTokenCurrentDaySellLimit(address tokenAddress) external view returns (uint256) { ++ return tokenStates[tokenAddress].currentDaySellLimit; ++ } ++ ++ /** ++ * @dev Returns the amount of tokens available to distribute for the specified token. ++ * @param tokenAddress The address of the token for which to retrieve the amount of ++ * tokens available to distribute. ++ * @return The amount of tokens available to distribute as a uint256 value. 
++ */ ++ function getTokenToDistribute(address tokenAddress) external view returns (uint256) { ++ return tokenStates[tokenAddress].toDistribute; ++ } ++ ++ function getActiveTokens() public view returns (address[] memory) { ++ return activeTokens.values(); ++ } ++ ++ /** ++ * @dev Sets the fee beneficiary address to the specified address. ++ * @param beneficiary The address to set as the fee beneficiary. ++ */ ++ function setFeeBeneficiary(address beneficiary) external onlyOwner { ++ return _setFeeBeneficiary(beneficiary); ++ } ++ ++ function _setFeeBeneficiary(address beneficiary) private { ++ feeBeneficiary = beneficiary; ++ emit FeeBeneficiarySet(beneficiary); ++ } ++ ++ /** ++ * @dev Sets the burn fraction to the specified value. ++ * @param fraction The value to set as the burn fraction. ++ */ ++ function setBurnFraction(uint256 fraction) external onlyOwner { ++ return _setBurnFraction(fraction); ++ } ++ ++ function _setBurnFraction(uint256 newFraction) private { ++ FixidityLib.Fraction memory fraction = FixidityLib.wrap(newFraction); ++ require(FixidityLib.lte(fraction, FixidityLib.fixed1()), "Burn fraction must be less than or equal to 1"); ++ burnFraction = fraction; ++ emit BurnFractionSet(newFraction); ++ } ++ ++ /** ++ * @dev Sets the burn fraction to the specified value. Token has to have a handler set. ++ * @param tokenAddress The address of the token to sell ++ */ ++ function sell(address tokenAddress) external { ++ return _sell(tokenAddress); ++ } ++ ++ /** ++ * @dev Adds a new token to the contract with the specified token and handler addresses. ++ * @param tokenAddress The address of the token to add. ++ * @param handlerAddress The address of the handler contract for the specified token. ++ */ ++ function addToken(address tokenAddress, address handlerAddress) external onlyOwner { ++ _addToken(tokenAddress, handlerAddress); ++ } ++ ++ function _addToken(address tokenAddress, address handlerAddress) private { ++ require(handlerAddress != address(0), "Can't set handler to zero"); ++ TokenState storage tokenState = tokenStates[tokenAddress]; ++ tokenState.handler = handlerAddress; ++ ++ activeTokens.add(tokenAddress); ++ emit TokenAdded(tokenAddress, handlerAddress); ++ } ++ ++ /** ++ * @notice Allows the owner to activate a specified token. ++ * @param tokenAddress The address of the token to be activated. ++ */ ++ function activateToken(address tokenAddress) external onlyOwner { ++ _activateToken(tokenAddress); ++ } ++ ++ function _activateToken(address tokenAddress) private { ++ TokenState storage tokenState = tokenStates[tokenAddress]; ++ require( ++ tokenState.handler != address(0) || tokenAddress == registry.getAddressForOrDie(GOLD_TOKEN_REGISTRY_ID), ++ "Handler has to be set to activate token" ++ ); ++ activeTokens.add(tokenAddress); ++ } ++ ++ /** ++ * @dev Deactivates the specified token by marking it as inactive. ++ * @param tokenAddress The address of the token to deactivate. ++ */ ++ function deactivateToken(address tokenAddress) external onlyOwner { ++ _deactivateToken(tokenAddress); ++ } ++ ++ function _deactivateToken(address tokenAddress) private { ++ activeTokens.remove(tokenAddress); ++ } ++ ++ /** ++ * @notice Allows the owner to set a handler contract for a specified token. ++ * @param tokenAddress The address of the token to set the handler for. ++ * @param handlerAddress The address of the handler contract to be set. 
++ */ ++ function setHandler(address tokenAddress, address handlerAddress) external onlyOwner { ++ _setHandler(tokenAddress, handlerAddress); ++ } ++ ++ function _setHandler(address tokenAddress, address handlerAddress) private { ++ require(handlerAddress != address(0), "Can't set handler to zero, use deactivateToken"); ++ TokenState storage tokenState = tokenStates[tokenAddress]; ++ tokenState.handler = handlerAddress; ++ } ++ ++ function removeToken(address tokenAddress) external onlyOwner { ++ _removeToken(tokenAddress); ++ } ++ ++ function _removeToken(address tokenAddress) private { ++ _deactivateToken(tokenAddress); ++ TokenState storage tokenState = tokenStates[tokenAddress]; ++ tokenState.handler = address(0); ++ emit TokenRemoved(tokenAddress); ++ } ++ ++ function _sell(address tokenAddress) private onlyWhenNotFrozen nonReentrant { ++ IERC20 token = IERC20(tokenAddress); ++ ++ TokenState storage tokenState = tokenStates[tokenAddress]; ++ require(tokenState.handler != address(0), "Handler has to be set to sell token"); ++ require(FixidityLib.unwrap(tokenState.maxSlippage) != 0, "Max slippage has to be set to sell token"); ++ FixidityLib.Fraction memory balanceToProcess = ++ FixidityLib.newFixed(token.balanceOf(address(this)) - tokenState.toDistribute); ++ ++ uint256 balanceToBurn = (burnFraction.multiply(balanceToProcess).fromFixed()); ++ ++ tokenState.toDistribute = tokenState.toDistribute + balanceToProcess.fromFixed() - balanceToBurn; ++ ++ // small numbers cause rounding errors and zero case should be skipped ++ if (balanceToBurn < MIN_BURN) { ++ return; ++ } ++ ++ if (dailySellLimitHit(tokenAddress, balanceToBurn)) { ++ // in case the limit is hit, burn the max possible ++ balanceToBurn = tokenState.currentDaySellLimit; ++ emit DailyLimitHit(tokenAddress, balanceToBurn); ++ } ++ ++ token.transfer(tokenState.handler, balanceToBurn); ++ IFeeHandlerSeller handler = IFeeHandlerSeller(tokenState.handler); ++ ++ uint256 celoReceived = handler.sell( ++ tokenAddress, ++ registry.getAddressForOrDie(GOLD_TOKEN_REGISTRY_ID), ++ balanceToBurn, ++ FixidityLib.unwrap(tokenState.maxSlippage) ++ ); ++ ++ celoToBeBurned = celoToBeBurned + celoReceived; ++ tokenState.pastBurn = tokenState.pastBurn + balanceToBurn; ++ updateLimits(tokenAddress, balanceToBurn); ++ ++ emit SoldAndBurnedToken(tokenAddress, balanceToBurn); ++ } ++ ++ /** ++ * @dev Distributes the available tokens for the specified token address to the fee beneficiary. ++ * @param tokenAddress The address of the token for which to distribute the available tokens. 
++ */ ++ function distribute(address tokenAddress) external { ++ return _distribute(tokenAddress); ++ } ++ ++ function _distribute(address tokenAddress) private onlyWhenNotFrozen nonReentrant { ++ require(feeBeneficiary != address(0), "Can't distribute to the zero address"); ++ IERC20 token = IERC20(tokenAddress); ++ uint256 tokenBalance = token.balanceOf(address(this)); ++ ++ TokenState storage tokenState = tokenStates[tokenAddress]; ++ require( ++ tokenState.handler != address(0) || tokenAddress == registry.getAddressForOrDie(GOLD_TOKEN_REGISTRY_ID), ++ "Handler has to be set to sell token" ++ ); ++ ++ // safty check to avoid a revert due balance ++ uint256 balanceToDistribute = Math.min(tokenBalance, tokenState.toDistribute); ++ ++ if (balanceToDistribute == 0) { ++ // don't distribute with zero balance ++ return; ++ } ++ ++ token.transfer(feeBeneficiary, balanceToDistribute); ++ tokenState.toDistribute = tokenState.toDistribute - balanceToDistribute; ++ } ++ ++ /** ++ * @notice Returns the storage, major, minor, and patch version of the contract. ++ * @return Storage version of the contract. ++ * @return Major version of the contract. ++ * @return Minor version of the contract. ++ * @return Patch version of the contract. ++ */ ++ function getVersionNumber() external pure returns (uint256, uint256, uint256, uint256) { ++ return (1, 1, 0, 0); ++ } ++ ++ /** ++ * @notice Allows owner to set max slippage for a token. ++ * @param token Address of the token to set. ++ * @param newMax New sllipage to set, as Fixidity fraction. ++ */ ++ function setMaxSplippage(address token, uint256 newMax) external onlyOwner { ++ _setMaxSplippage(token, newMax); ++ } ++ ++ function _setMaxSplippage(address token, uint256 newMax) private { ++ TokenState storage tokenState = tokenStates[token]; ++ require(newMax != 0, "Cannot set max slippage to zero"); ++ tokenState.maxSlippage = FixidityLib.wrap(newMax); ++ require( ++ FixidityLib.lte(tokenState.maxSlippage, FixidityLib.fixed1()), "Splippage must be less than or equal to 1" ++ ); ++ emit MaxSlippageSet(token, newMax); ++ } ++ ++ /** ++ * @notice Allows owner to set the daily burn limit for a token. ++ * @param token Address of the token to set. ++ * @param newLimit The new limit to set, in the token units. ++ */ ++ function setDailySellLimit(address token, uint256 newLimit) external onlyOwner { ++ _setDailySellLimit(token, newLimit); ++ } ++ ++ function _setDailySellLimit(address token, uint256 newLimit) private { ++ TokenState storage tokenState = tokenStates[token]; ++ tokenState.dailySellLimit = newLimit; ++ emit DailyLimitSet(token, newLimit); ++ } ++ ++ /** ++ * @dev Burns CELO tokens according to burnFraction. ++ */ ++ function burnCelo() external { ++ return _burnCelo(); ++ } ++ ++ /** ++ * @dev Distributes the available tokens for all registered tokens to the feeBeneficiary. ++ */ ++ function distributeAll() external { ++ return _distributeAll(); ++ } ++ ++ function _distributeAll() private { ++ for (uint256 i = 0; i < EnumerableSet.length(activeTokens); i++) { ++ address token = activeTokens.at(i); ++ _distribute(token); ++ } ++ // distribute Celo ++ _distribute(registry.getAddressForOrDie(GOLD_TOKEN_REGISTRY_ID)); ++ } ++ ++ /** ++ * @dev Distributes the available tokens for all registered tokens to the feeBeneficiary. 
++ */ ++ function handleAll() external { ++ return _handleAll(); ++ } ++ ++ function _handleAll() private { ++ for (uint256 i = 0; i < EnumerableSet.length(activeTokens); i++) { ++ // calling _handle would trigger may burn Celo and distributions ++ // that can be just batched at the end ++ address token = activeTokens.at(i); ++ _sell(token); ++ } ++ _distributeAll(); // distributes Celo as well ++ _burnCelo(); ++ } ++ ++ /** ++ * @dev Distributes the the token for to the feeBeneficiary. ++ */ ++ function handle(address tokenAddress) external { ++ return _handle(tokenAddress); ++ } ++ ++ function _handle(address tokenAddress) private { ++ // Celo doesn't have to be exchanged for anything ++ if (tokenAddress != registry.getAddressForOrDie(GOLD_TOKEN_REGISTRY_ID)) { ++ _sell(tokenAddress); ++ } ++ _burnCelo(); ++ _distribute(tokenAddress); ++ _distribute(registry.getAddressForOrDie(GOLD_TOKEN_REGISTRY_ID)); ++ } ++ ++ /** ++ * @notice Burns all the Celo balance of this contract. ++ */ ++ function _burnCelo() private { ++ TokenState storage tokenState = tokenStates[registry.getAddressForOrDie(GOLD_TOKEN_REGISTRY_ID)]; ++ ICeloToken celo = ICeloToken(registry.getAddressForOrDie(GOLD_TOKEN_REGISTRY_ID)); ++ ++ uint256 balanceOfCelo = address(this).balance; ++ ++ uint256 balanceToProcess = balanceOfCelo - tokenState.toDistribute - celoToBeBurned; ++ uint256 currentBalanceToBurn = FixidityLib.newFixed(balanceToProcess).multiply(burnFraction).fromFixed(); ++ uint256 totalBalanceToBurn = currentBalanceToBurn + celoToBeBurned; ++ celo.burn(totalBalanceToBurn); ++ ++ celoToBeBurned = 0; ++ tokenState.toDistribute = tokenState.toDistribute + balanceToProcess - currentBalanceToBurn; ++ } ++ ++ /** ++ * @param token The address of the token to query. ++ * @return The amount burned for a token. ++ */ ++ function getPastBurnForToken(address token) external view returns (uint256) { ++ return tokenStates[token].pastBurn; ++ } ++ ++ /** ++ * @param token The address of the token to query. ++ * @param amountToBurn The amount of the token to burn. ++ * @return Returns true if burning amountToBurn would exceed the daily limit. ++ */ ++ function dailySellLimitHit(address token, uint256 amountToBurn) public returns (bool) { ++ TokenState storage tokenState = tokenStates[token]; ++ ++ if (tokenState.dailySellLimit == 0) { ++ // if no limit set, assume uncapped ++ return false; ++ } ++ ++ uint256 currentDay = block.timestamp / 1 days; ++ // Pattern borrowed from Reserve.sol ++ if (currentDay > lastLimitDay) { ++ lastLimitDay = currentDay; ++ tokenState.currentDaySellLimit = tokenState.dailySellLimit; ++ } ++ ++ return amountToBurn >= tokenState.currentDaySellLimit; ++ } ++ ++ /** ++ * @notice Updates the current day limit for a token. ++ * @param token The address of the token to query. ++ * @param amountBurned the amount of the token that was burned. ++ */ ++ function updateLimits(address token, uint256 amountBurned) private { ++ TokenState storage tokenState = tokenStates[token]; ++ ++ if (tokenState.dailySellLimit == 0) { ++ // if no limit set, assume uncapped ++ return; ++ } ++ tokenState.currentDaySellLimit = tokenState.currentDaySellLimit - amountBurned; ++ emit DailySellLimitUpdated(amountBurned); ++ } ++ ++ /** ++ * @notice Allows owner to transfer tokens of this contract. It's meant for governance to ++ * trigger use cases not contemplated in this contract. ++ * @param token The address of the token to transfer. ++ * @param recipient The address of the recipient to transfer the tokens to. 
++ * @param value The amount of tokens to transfer. ++ * @return A boolean indicating whether the transfer was successful or not. ++ */ ++ function transfer(address token, address recipient, uint256 value) external onlyOwner returns (bool) { ++ return IERC20(token).transfer(recipient, value); ++ } ++}
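Because `sell()`, `distribute()`, `handle()`, `handleAll()`, and `burnCelo()` are all external and unpermissioned, any account can drive the CIP-52 sell-burn-distribute cycle. A keeper sketch; the handler address is a placeholder:

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.15;

import { FeeHandler } from "./FeeHandler.sol";

// Hypothetical keeper that pokes a deployed FeeHandler.
contract FeeHandlerKeeperSketch {
    FeeHandler public immutable handler;

    constructor(address payable handlerAddress) {
        handler = FeeHandler(handlerAddress);
    }

    // Process one fee currency: sell into CELO, burn the burnFraction,
    // and queue the remainder for the fee beneficiary.
    function poke(address token) external {
        handler.handle(token);
    }

    // Same cycle over every active token in one call.
    function pokeAll() external {
        handler.handleAll();
    }
}
```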
diff --git OP/packages/contracts-bedrock/src/celo/FeeHandlerSeller.sol CELO/packages/contracts-bedrock/src/celo/FeeHandlerSeller.sol +new file mode 100644 +index 0000000000000000000000000000000000000000..4d22125af4d647021d77e0ed4b59d09049dd6bac +--- /dev/null ++++ CELO/packages/contracts-bedrock/src/celo/FeeHandlerSeller.sol +@@ -0,0 +1,92 @@ ++// SPDX-License-Identifier: LGPL-3.0-only ++pragma solidity ^0.8.15; ++ ++import "./common/FixidityLib.sol"; ++import "../../lib/openzeppelin-contracts/contracts/access/Ownable.sol"; ++import "../../lib/openzeppelin-contracts/contracts/token/ERC20/IERC20.sol"; ++import "./UsingRegistry.sol"; ++import "./common/Initializable.sol"; ++ ++// Abstract class for a FeeHandlerSeller, as defined in CIP-52 ++// https://github.com/celo-org/celo-proposals/blob/master/CIPs/cip-0052.md ++abstract contract FeeHandlerSeller is Ownable, Initializable, UsingRegistry { ++ using FixidityLib for FixidityLib.Fraction; ++ ++ // Address of the token ++ // Minimal number of reports in SortedOracles contract ++ mapping(address => uint256) public minimumReports; ++ ++ event MinimumReportsSet(address tokenAddress, uint256 minimumReports); ++ event TokenSold(address soldTokenAddress, address boughtTokenAddress, uint256 amount); ++ ++ constructor(bool testingDeployment) Initializable(testingDeployment) { } ++ ++ function initialize( ++ address _registryAddress, ++ address[] calldata tokenAddresses, ++ uint256[] calldata newMininumReports ++ ) ++ external ++ initializer ++ { ++ _transferOwnership(msg.sender); ++ setRegistry(_registryAddress); ++ ++ for (uint256 i = 0; i < tokenAddresses.length; i++) { ++ _setMinimumReports(tokenAddresses[i], newMininumReports[i]); ++ } ++ } ++ ++ /** ++ * @notice Allows owner to set the minimum number of reports required. ++ * @param newMininumReports The new update minimum number of reports required. ++ */ ++ function setMinimumReports(address tokenAddress, uint256 newMininumReports) public onlyOwner { ++ _setMinimumReports(tokenAddress, newMininumReports); ++ } ++ ++ function _setMinimumReports(address tokenAddress, uint256 newMininumReports) internal { ++ minimumReports[tokenAddress] = newMininumReports; ++ emit MinimumReportsSet(tokenAddress, newMininumReports); ++ } ++ ++ /** ++ * @dev Calculates the minimum amount of tokens that should be received for the specified ++ * amount with the given mid-price and maximum slippage. ++ * @param midPriceNumerator The numerator of the mid-price for the token pair. ++ * @param midPriceDenominator The denominator of the mid-price for the token pair. ++ * @param amount The amount of tokens to be exchanged. ++ * @param maxSlippage The maximum slippage percentage as a fraction of the mid-price. ++ * @return The minimum amount of tokens that should be received as a uint256 value. 
++ */ ++ function calculateMinAmount( ++ uint256 midPriceNumerator, ++ uint256 midPriceDenominator, ++ uint256 amount, ++ uint256 maxSlippage // as fraction ++ ) ++ public ++ pure ++ returns (uint256) ++ { ++ FixidityLib.Fraction memory maxSlippageFraction = FixidityLib.wrap(maxSlippage); ++ ++ FixidityLib.Fraction memory price = FixidityLib.newFixedFraction(midPriceNumerator, midPriceDenominator); ++ FixidityLib.Fraction memory amountFraction = FixidityLib.newFixed(amount); ++ FixidityLib.Fraction memory totalAmount = price.multiply(amountFraction); ++ ++ return totalAmount.subtract(price.multiply(maxSlippageFraction).multiply(amountFraction)).fromFixed(); ++ } ++ ++ /** ++ * @notice Allows owner to transfer tokens of this contract. It's meant for governance to ++ * trigger use cases not contemplated in this contract. ++ * @param token The address of the token to transfer. ++ * @param amount The amount of tokens to transfer. ++ * @param to The address of the recipient to transfer the tokens to. ++ * @return A boolean indicating whether the transfer was successful or not. ++ */ ++ function transfer(address token, uint256 amount, address to) external onlyOwner returns (bool) { ++ return IERC20(token).transfer(to, amount); ++ } ++}
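Since `calculateMinAmount` is `public pure`, its slippage math can be checked directly: it returns `price * amount * (1 - maxSlippage)` in Fixidity arithmetic, where fractions carry 24 decimals (matching the `FIXED1_UINT = 1e24` constant in `FeeHandler` above). A worked-example sketch using `MentoFeeHandlerSeller` (defined later in this diff) as the concrete instance; all values are illustrative:

```solidity
// SPDX-License-Identifier: LGPL-3.0-only
pragma solidity ^0.8.15;

import { MentoFeeHandlerSeller } from "./MentoFeeHandlerSeller.sol";

contract MinAmountExample {
    function example() external returns (uint256) {
        MentoFeeHandlerSeller seller = new MentoFeeHandlerSeller(true);
        // mid-price 2/1, amount 100 units, max slippage 5% = 0.05 * 1e24
        // expected: 2 * 100 * (1 - 0.05) = 190
        return seller.calculateMinAmount(2, 1, 100, 5 * 1e22);
    }
}
```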
diff --git OP/packages/contracts-bedrock/src/celo/GoldToken.sol CELO/packages/contracts-bedrock/src/celo/GoldToken.sol +new file mode 100644 +index 0000000000000000000000000000000000000000..e7236678670a7bedf86f7769ef74888dc5f2488c +--- /dev/null ++++ CELO/packages/contracts-bedrock/src/celo/GoldToken.sol +@@ -0,0 +1,272 @@ ++// SPDX-License-Identifier: MIT ++pragma solidity ^0.8.15; ++ ++import "../../lib/openzeppelin-contracts/contracts/access/Ownable.sol"; ++import "../../lib/openzeppelin-contracts/contracts/token/ERC20/IERC20.sol"; ++ ++import "./UsingRegistry.sol"; ++import "./CalledByVm.sol"; ++import "./Initializable.sol"; ++import "./interfaces/ICeloToken.sol"; ++import "./common/interfaces/ICeloVersionedContract.sol"; ++ ++contract GoldToken is Initializable, CalledByVm, UsingRegistry, IERC20, ICeloToken, ICeloVersionedContract { ++ // Address of the TRANSFER precompiled contract. ++ // solhint-disable state-visibility ++ address constant TRANSFER = address(0xff - 2); ++ string constant NAME = "Celo native asset"; ++ string constant SYMBOL = "CELO"; ++ uint8 constant DECIMALS = 18; ++ uint256 internal totalSupply_; ++ // solhint-enable state-visibility ++ ++ mapping(address => mapping(address => uint256)) internal allowed; ++ ++ // Burn address is 0xdEaD because truffle is having buggy behaviour with the zero address ++ address constant BURN_ADDRESS = address(0x000000000000000000000000000000000000dEaD); ++ ++ event TransferComment(string comment); ++ ++ /** ++ * @notice Sets initialized == true on implementation contracts ++ * @param test Set to true to skip implementation initialization ++ */ ++ constructor(bool test) Initializable(test) { } ++ ++ /** ++ * @notice Returns the storage, major, minor, and patch version of the contract. ++ * @return Storage version of the contract. ++ * @return Major version of the contract. ++ * @return Minor version of the contract. ++ * @return Patch version of the contract. ++ */ ++ function getVersionNumber() external pure returns (uint256, uint256, uint256, uint256) { ++ return (1, 1, 2, 0); ++ } ++ ++ /** ++ * @notice Used in place of the constructor to allow the contract to be upgradable via proxy. ++ * @param registryAddress Address of the Registry contract. ++ */ ++ function initialize(address registryAddress) external initializer { ++ totalSupply_ = 0; ++ _transferOwnership(msg.sender); ++ setRegistry(registryAddress); ++ } ++ ++ /** ++ * @notice Transfers CELO from one address to another. ++ * @param to The address to transfer CELO to. ++ * @param value The amount of CELO to transfer. ++ * @return True if the transaction succeeds. ++ */ ++ // solhint-disable-next-line no-simple-event-func-name ++ function transfer(address to, uint256 value) external returns (bool) { ++ return _transferWithCheck(to, value); ++ } ++ ++ /** ++ * @notice Transfers CELO from one address to another with a comment. ++ * @param to The address to transfer CELO to. ++ * @param value The amount of CELO to transfer. ++ * @param comment The transfer comment ++ * @return True if the transaction succeeds. ++ */ ++ function transferWithComment(address to, uint256 value, string calldata comment) external returns (bool) { ++ bool succeeded = _transferWithCheck(to, value); ++ emit TransferComment(comment); ++ return succeeded; ++ } ++ ++ /** ++ * @notice This function allows a user to burn a specific amount of tokens. ++ * Burning is implemented by sending tokens to the burn address. ++ * @param value: The amount of CELO to burn. ++ * @return True if burn was successful. 
++ */ ++ function burn(uint256 value) external returns (bool) { ++ // not using transferWithCheck as the burn address can potentially be the zero address ++ return _transfer(BURN_ADDRESS, value); ++ } ++ ++ /** ++ * @notice Approve a user to transfer CELO on behalf of another user. ++ * @param spender The address which is being approved to spend CELO. ++ * @param value The amount of CELO approved to the spender. ++ * @return True if the transaction succeeds. ++ */ ++ function approve(address spender, uint256 value) external returns (bool) { ++ require(spender != address(0), "cannot set allowance for 0"); ++ allowed[msg.sender][spender] = value; ++ emit Approval(msg.sender, spender, value); ++ return true; ++ } ++ ++ /** ++ * @notice Increases the allowance of another user. ++ * @param spender The address which is being approved to spend CELO. ++ * @param value The increment of the amount of CELO approved to the spender. ++ * @return True if the transaction succeeds. ++ */ ++ function increaseAllowance(address spender, uint256 value) external returns (bool) { ++ require(spender != address(0), "cannot set allowance for 0"); ++ uint256 oldValue = allowed[msg.sender][spender]; ++ uint256 newValue = oldValue + value; ++ allowed[msg.sender][spender] = newValue; ++ emit Approval(msg.sender, spender, newValue); ++ return true; ++ } ++ ++ /** ++ * @notice Decreases the allowance of another user. ++ * @param spender The address which is being approved to spend CELO. ++ * @param value The decrement of the amount of CELO approved to the spender. ++ * @return True if the transaction succeeds. ++ */ ++ function decreaseAllowance(address spender, uint256 value) external returns (bool) { ++ uint256 oldValue = allowed[msg.sender][spender]; ++ uint256 newValue = oldValue - value; ++ allowed[msg.sender][spender] = newValue; ++ emit Approval(msg.sender, spender, newValue); ++ return true; ++ } ++ ++ /** ++ * @notice Transfers CELO from one address to another on behalf of a user. ++ * @param from The address to transfer CELO from. ++ * @param to The address to transfer CELO to. ++ * @param value The amount of CELO to transfer. ++ * @return True if the transaction succeeds. ++ */ ++ function transferFrom(address from, address to, uint256 value) external returns (bool) { ++ require(to != address(0), "transfer attempted to reserved address 0x0"); ++ require(value <= balanceOf(from), "transfer value exceeded balance of sender"); ++ require(value <= allowed[from][msg.sender], "transfer value exceeded sender's allowance for spender"); ++ ++ bool success; ++ (success,) = TRANSFER.call{ value: 0, gas: gasleft() }(abi.encode(from, to, value)); ++ require(success, "CELO transfer failed"); ++ ++ allowed[from][msg.sender] = allowed[from][msg.sender] - value; ++ emit Transfer(from, to, value); ++ return true; ++ } ++ ++ /** ++ * @notice Mints new CELO and gives it to 'to'. ++ * @param to The account for which to mint tokens. ++ * @param value The amount of CELO to mint. ++ */ ++ function mint(address to, uint256 value) external onlyVm returns (bool) { ++ if (value == 0) { ++ return true; ++ } ++ ++ require(to != address(0), "mint attempted to reserved address 0x0"); ++ totalSupply_ = totalSupply_ + value; ++ ++ bool success; ++ (success,) = TRANSFER.call{ value: 0, gas: gasleft() }(abi.encode(address(0), to, value)); ++ require(success, "CELO transfer failed"); ++ ++ emit Transfer(address(0), to, value); ++ return true; ++ } ++ ++ /** ++ * @return The name of the CELO token. 
++ */ ++ function name() external pure returns (string memory) { ++ return NAME; ++ } ++ ++ /** ++ * @return The symbol of the CELO token. ++ */ ++ function symbol() external pure returns (string memory) { ++ return SYMBOL; ++ } ++ ++ /** ++ * @return The number of decimal places to which CELO is divisible. ++ */ ++ function decimals() external pure returns (uint8) { ++ return DECIMALS; ++ } ++ ++ /** ++ * @return The total amount of CELO in existence, including what the burn address holds. ++ */ ++ function totalSupply() external view returns (uint256) { ++ return totalSupply_; ++ } ++ ++ /** ++ * @return The total amount of CELO in existence, not including what the burn address holds. ++ */ ++ function circulatingSupply() external view returns (uint256) { ++ return totalSupply_ - getBurnedAmount() - balanceOf(address(0)); ++ } ++ ++ /** ++ * @notice Gets the amount of owner's CELO allowed to be spent by spender. ++ * @param owner The owner of the CELO. ++ * @param spender The spender of the CELO. ++ * @return The amount of CELO owner is allowing spender to spend. ++ */ ++ function allowance(address owner, address spender) external view returns (uint256) { ++ return allowed[owner][spender]; ++ } ++ ++ /** ++ * @notice Increases the variable for total amount of CELO in existence. ++ * @param amount The amount to increase counter by ++ */ ++ function increaseSupply(uint256 amount) external onlyVm { ++ totalSupply_ = totalSupply_ + amount; ++ } ++ ++ /** ++ * @notice Gets the amount of CELO that has been burned. ++ * @return The total amount of Celo that has been sent to the burn address. ++ */ ++ function getBurnedAmount() public view returns (uint256) { ++ return balanceOf(BURN_ADDRESS); ++ } ++ ++ /** ++ * @notice Gets the balance of the specified address. ++ * @param owner The address to query the balance of. ++ * @return The balance of the specified address. ++ */ ++ function balanceOf(address owner) public view returns (uint256) { ++ return owner.balance; ++ } ++ ++ /** ++ * @notice internal CELO transfer from one address to another. ++ * @param to The address to transfer CELO to. ++ * @param value The amount of CELO to transfer. ++ * @return True if the transaction succeeds. ++ */ ++ function _transfer(address to, uint256 value) internal returns (bool) { ++ require(value <= balanceOf(msg.sender), "transfer value exceeded balance of sender"); ++ ++ bool success; ++ (success,) = TRANSFER.call{ value: 0, gas: gasleft() }(abi.encode(msg.sender, to, value)); ++ require(success, "CELO transfer failed"); ++ emit Transfer(msg.sender, to, value); ++ return true; ++ } ++ ++ /** ++ * @notice Internal CELO transfer from one address to another. ++ * @param to The address to transfer CELO to. Zero address will revert. ++ * @param value The amount of CELO to transfer. ++ * @return True if the transaction succeeds. ++ */ ++ function _transferWithCheck(address to, uint256 value) internal returns (bool) { ++ require(to != address(0), "transfer attempted to reserved address 0x0"); ++ return _transfer(to, value); ++ } ++}
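`GoldToken` is effectively an ERC20 view over native CELO: `balanceOf(owner)` is just `owner.balance`, and transfers route through the TRANSFER precompile at `address(0xfd)`. A reader sketch showing how the supply views above compose; the token address is a placeholder:

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.15;

import { GoldToken } from "./GoldToken.sol";

// Hypothetical supply reader. circulatingSupply() above is
// totalSupply - burned (0xdEaD balance) - address(0) balance.
contract CeloSupplySketch {
    GoldToken public immutable celo;

    constructor(address celoToken) {
        celo = GoldToken(celoToken);
    }

    function supplyBreakdown() external view returns (uint256 burned, uint256 circulating) {
        burned = celo.getBurnedAmount();
        circulating = celo.circulatingSupply();
    }
}
```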
diff --git OP/packages/contracts-bedrock/src/celo/Initializable.sol CELO/packages/contracts-bedrock/src/celo/Initializable.sol +new file mode 100644 +index 0000000000000000000000000000000000000000..7929728eef4ed9063c81aea6f2a0a1758d4ef728 +--- /dev/null ++++ CELO/packages/contracts-bedrock/src/celo/Initializable.sol +@@ -0,0 +1,18 @@ ++// SPDX-License-Identifier: LGPL-3.0-only ++pragma solidity ^0.8.15; ++ ++contract Initializable { ++ bool public initialized; ++ ++ modifier initializer() { ++ require(!initialized, "contract already initialized"); ++ initialized = true; ++ _; ++ } ++ ++ constructor(bool testingDeployment) { ++ if (!testingDeployment) { ++ initialized = true; ++ } ++ } ++}
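The pattern: with `testingDeployment = false` (proxy deployments) the constructor permanently locks the implementation by setting `initialized = true`, so `initialize()` can only ever run against proxy storage; with `true`, the contract can be initialized directly for tests. A minimal usage sketch:

```solidity
// SPDX-License-Identifier: LGPL-3.0-only
pragma solidity ^0.8.15;

import { Initializable } from "./Initializable.sol";

// Hypothetical contract using the lock: initialize() replaces the constructor
// and can run exactly once per storage context.
contract InitializableExample is Initializable {
    address public owner;

    constructor(bool test) Initializable(test) { }

    function initialize(address newOwner) external initializer {
        owner = newOwner;
    }
}
```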
diff --git OP/packages/contracts-bedrock/src/celo/MentoFeeHandlerSeller.sol CELO/packages/contracts-bedrock/src/celo/MentoFeeHandlerSeller.sol +new file mode 100644 +index 0000000000000000000000000000000000000000..e5a9ff455f391f797bbc2ace5101c0ef58c3c192 +--- /dev/null ++++ CELO/packages/contracts-bedrock/src/celo/MentoFeeHandlerSeller.sol +@@ -0,0 +1,85 @@ ++// SPDX-License-Identifier: MIT ++pragma solidity ^0.8.15; ++ ++import "../../lib/openzeppelin-contracts/contracts/access/Ownable.sol"; ++import "../../lib/openzeppelin-contracts/contracts/token/ERC20/IERC20.sol"; ++ ++import "./interfaces/IStableTokenMento.sol"; ++ ++import "./common/interfaces/IFeeHandlerSeller.sol"; ++import "./stability/interfaces/ISortedOracles.sol"; ++import "./common/FixidityLib.sol"; ++import "./common/Initializable.sol"; ++ ++import "./FeeHandlerSeller.sol"; ++ ++// An implementation of FeeHandlerSeller supporting interfaces compatible with ++// Mento ++// See https://github.com/celo-org/celo-proposals/blob/master/CIPs/cip-0052.md ++contract MentoFeeHandlerSeller is FeeHandlerSeller { ++ using FixidityLib for FixidityLib.Fraction; ++ ++ /** ++ * @notice Sets initialized == true on implementation contracts. ++ * @param test Set to true to skip implementation initialisation. ++ */ ++ constructor(bool test) FeeHandlerSeller(test) { } ++ ++ // without this line the contract can't receive native Celo transfers ++ receive() external payable { } ++ ++ /** ++ * @notice Returns the storage, major, minor, and patch version of the contract. ++ * @return Storage version of the contract. ++ * @return Major version of the contract. ++ * @return Minor version of the contract. ++ * @return Patch version of the contract. ++ */ ++ function getVersionNumber() external pure returns (uint256, uint256, uint256, uint256) { ++ return (1, 1, 0, 0); ++ } ++ ++ function sell( ++ address sellTokenAddress, ++ address buyTokenAddress, ++ uint256 amount, ++ uint256 maxSlippage // as fraction, ++ ) ++ external ++ returns (uint256) ++ { ++ require( ++ buyTokenAddress == registry.getAddressForOrDie(GOLD_TOKEN_REGISTRY_ID), "Buy token can only be gold token" ++ ); ++ ++ IStableTokenMento stableToken = IStableTokenMento(sellTokenAddress); ++ require(amount <= stableToken.balanceOf(address(this)), "Balance of token to burn not enough"); ++ ++ address exchangeAddress = registry.getAddressForOrDie(stableToken.getExchangeRegistryId()); ++ ++ IExchange exchange = IExchange(exchangeAddress); ++ ++ uint256 minAmount = 0; ++ ++ ISortedOracles sortedOracles = getSortedOracles(); ++ ++ require( ++ sortedOracles.numRates(sellTokenAddress) >= minimumReports[sellTokenAddress], ++ "Number of reports for token not enough" ++ ); ++ ++ (uint256 rateNumerator, uint256 rateDenominator) = sortedOracles.medianRate(sellTokenAddress); ++ minAmount = calculateMinAmount(rateNumerator, rateDenominator, amount, maxSlippage); ++ ++ // TODO an upgrade would be to compare using routers as well ++ stableToken.approve(exchangeAddress, amount); ++ exchange.sell(amount, minAmount, false); ++ ++ IERC20 goldToken = getGoldToken(); ++ uint256 celoAmount = goldToken.balanceOf(address(this)); ++ goldToken.transfer(msg.sender, celoAmount); ++ ++ emit TokenSold(sellTokenAddress, buyTokenAddress, amount); ++ return celoAmount; ++ } ++}
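How the two contracts fit together: governance registers the Mento seller as a token's handler in `FeeHandler`, then a sell routes through `MentoFeeHandlerSeller.sell`. A sketch assuming this contract owns the FeeHandler; the 2% slippage cap and daily limit are placeholder values (note `setMaxSplippage` keeps the upstream spelling):

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.15;

import { FeeHandler } from "./FeeHandler.sol";
import { MentoFeeHandlerSeller } from "./MentoFeeHandlerSeller.sol";

// Hypothetical governance wiring for one Mento stable token.
contract MentoWiringSketch {
    function wire(FeeHandler handler, MentoFeeHandlerSeller seller, address stableToken) external {
        handler.addToken(stableToken, address(seller));
        handler.setMaxSplippage(stableToken, 2e22);        // 0.02 * 1e24 in Fixidity
        handler.setDailySellLimit(stableToken, 10_000e18); // illustrative cap, token units
        handler.sell(stableToken);                         // exchanges via the Mento seller
    }
}
```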
diff --git OP/packages/contracts-bedrock/src/celo/StableTokenV2.sol CELO/packages/contracts-bedrock/src/celo/StableTokenV2.sol +new file mode 100644 +index 0000000000000000000000000000000000000000..68632df65abc9d352de50b7f273afc491ff8a1b2 +--- /dev/null ++++ CELO/packages/contracts-bedrock/src/celo/StableTokenV2.sol +@@ -0,0 +1,336 @@ ++// SPDX-License-Identifier: MIT ++pragma solidity ^0.8.15; ++ ++import { ERC20PermitUpgradeable } from ++ "@openzeppelin/contracts-upgradeable/token/ERC20/extensions/draft-ERC20PermitUpgradeable.sol"; ++import { ERC20Upgradeable } from "@openzeppelin/contracts-upgradeable/token/ERC20/ERC20Upgradeable.sol"; ++import { OwnableUpgradeable } from "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol"; ++ ++import { IStableTokenV2 } from "./interfaces/IStableToken.sol"; ++import { CalledByVm } from "./CalledByVm.sol"; ++ ++/** ++ * @title ERC20 token with minting and burning permissioned to a broker and validators. ++ */ ++contract StableTokenV2 is IStableTokenV2, ERC20PermitUpgradeable, CalledByVm, OwnableUpgradeable { ++ address public validators; ++ address public broker; ++ address public exchange; ++ ++ event TransferComment(string comment); ++ event BrokerUpdated(address broker); ++ event ValidatorsUpdated(address validators); ++ event ExchangeUpdated(address exchange); ++ ++ /** ++ * @dev Restricts a function so it can only be executed by an address that's allowed to mint. ++ * Currently that's the broker, validators, or exchange. ++ */ ++ modifier onlyMinter() { ++ address sender = _msgSender(); ++ require(sender == broker || sender == validators || sender == exchange, "StableTokenV2: not allowed to mint"); ++ _; ++ } ++ ++ /** ++ * @dev Restricts a function so it can only be executed by an address that's allowed to burn. ++ * Currently that's the broker or exchange. ++ */ ++ modifier onlyBurner() { ++ address sender = _msgSender(); ++ require(sender == broker || sender == exchange, "StableTokenV2: not allowed to burn"); ++ _; ++ } ++ ++ /** ++ * @notice The constructor for the StableTokenV2 contract. ++ * @dev Should be called with disable=true in deployments when ++ * it's accessed through a Proxy. ++ * Call this with disable=false during testing, when used ++ * without a proxy. ++ * @param disable Set to true to run `_disableInitializers()` inherited from ++ * openzeppelin-contracts-upgradeable/Initializable.sol ++ */ ++ constructor(bool disable) { ++ if (disable) { ++ _disableInitializers(); ++ } ++ } ++ ++ /** ++ * @notice Initializes a StableTokenV2. ++ * It keeps the same signature as the original initialize() function ++ * in legacy/StableToken.sol ++ * @param _name The name of the stable token (English) ++ * @param _symbol A short symbol identifying the token (e.g. "cUSD") ++ * @param initialBalanceAddresses Array of addresses with an initial balance. ++ * @param initialBalanceValues Array of balance values corresponding to initialBalanceAddresses. 
++ * deprecated-param exchangeIdentifier String identifier of exchange in registry (for specific fiat pairs) ++ */ ++ function initialize( ++ // slither-disable-start shadowing-local ++ string calldata _name, ++ string calldata _symbol, ++ // slither-disable-end shadowing-local ++ address[] calldata initialBalanceAddresses, ++ uint256[] calldata initialBalanceValues ++ ) ++ external ++ initializer ++ { ++ __ERC20_init_unchained(_name, _symbol); ++ __ERC20Permit_init(_symbol); ++ _transferOwnership(_msgSender()); ++ ++ require(initialBalanceAddresses.length == initialBalanceValues.length, "Array length mismatch"); ++ for (uint256 i = 0; i < initialBalanceAddresses.length; i += 1) { ++ _mint(initialBalanceAddresses[i], initialBalanceValues[i]); ++ } ++ } ++ ++ /** ++ * @notice Initializes a StableTokenV2 contract ++ * when upgrading from legacy/StableToken.sol. ++ * It sets the addresses that were previously read from the Registry. ++ * It runs the ERC20PermitUpgradeable initializer. ++ * @dev This function is only callable once. ++ * @param _broker The address of the Broker contract. ++ * @param _validators The address of the Validators contract. ++ * @param _exchange The address of the Exchange contract. ++ */ ++ function initializeV2( ++ address _broker, ++ address _validators, ++ address _exchange ++ ) ++ external ++ reinitializer(2) ++ onlyOwner ++ { ++ _setBroker(_broker); ++ _setValidators(_validators); ++ _setExchange(_exchange); ++ __ERC20Permit_init(symbol()); ++ } ++ ++ /** ++ * @notice Sets the address of the Broker contract. ++ * @dev This function is only callable by the owner. ++ * @param _broker The address of the Broker contract. ++ */ ++ function setBroker(address _broker) external onlyOwner { ++ _setBroker(_broker); ++ } ++ ++ /** ++ * @notice Sets the address of the Validators contract. ++ * @dev This function is only callable by the owner. ++ * @param _validators The address of the Validators contract. ++ */ ++ function setValidators(address _validators) external onlyOwner { ++ _setValidators(_validators); ++ } ++ ++ /** ++ * @notice Sets the address of the Exchange contract. ++ * @dev This function is only callable by the owner. ++ * @param _exchange The address of the Exchange contract. ++ */ ++ function setExchange(address _exchange) external onlyOwner { ++ _setExchange(_exchange); ++ } ++ ++ /** ++ * @notice Transfer token for a specified address ++ * @param to The address to transfer to. ++ * @param value The amount to be transferred. ++ * @param comment The transfer comment. ++ * @return True if the transaction succeeds. ++ */ ++ function transferWithComment(address to, uint256 value, string calldata comment) external returns (bool) { ++ emit TransferComment(comment); ++ return transfer(to, value); ++ } ++ ++ /** ++ * @notice Mints new StableToken and gives it to 'to'. ++ * @param to The account for which to mint tokens. ++ * @param value The amount of StableToken to mint. ++ */ ++ function mint(address to, uint256 value) external onlyMinter returns (bool) { ++ _mint(to, value); ++ return true; ++ } ++ ++ /** ++ * @notice Burns StableToken from the balance of msg.sender. ++ * @param value The amount of StableToken to burn. ++ */ ++ function burn(uint256 value) external onlyBurner returns (bool) { ++ _burn(msg.sender, value); ++ return true; ++ } ++ ++ /** ++ * @notice Set the address of the Broker contract and emit an event ++ * @param _broker The address of the Broker contract. 
++ */ ++ function _setBroker(address _broker) internal { ++ broker = _broker; ++ emit BrokerUpdated(_broker); ++ } ++ ++ /** ++ * @notice Set the address of the Validators contract and emit an event ++ * @param _validators The address of the Validators contract. ++ */ ++ function _setValidators(address _validators) internal { ++ validators = _validators; ++ emit ValidatorsUpdated(_validators); ++ } ++ ++ /** ++ * @notice Set the address of the Exchange contract and emit an event ++ * @param _exchange The address of the Exchange contract. ++ */ ++ function _setExchange(address _exchange) internal { ++ exchange = _exchange; ++ emit ExchangeUpdated(_exchange); ++ } ++ ++ /// @inheritdoc ERC20Upgradeable ++ function transferFrom( ++ address from, ++ address to, ++ uint256 amount ++ ) ++ public ++ override(ERC20Upgradeable, IStableTokenV2) ++ returns (bool) ++ { ++ return ERC20Upgradeable.transferFrom(from, to, amount); ++ } ++ ++ /// @inheritdoc ERC20Upgradeable ++ function transfer(address to, uint256 amount) public override(ERC20Upgradeable, IStableTokenV2) returns (bool) { ++ return ERC20Upgradeable.transfer(to, amount); ++ } ++ ++ /// @inheritdoc ERC20Upgradeable ++ function balanceOf(address account) public view override(ERC20Upgradeable, IStableTokenV2) returns (uint256) { ++ return ERC20Upgradeable.balanceOf(account); ++ } ++ ++ /// @inheritdoc ERC20Upgradeable ++ function approve( ++ address spender, ++ uint256 amount ++ ) ++ public ++ override(ERC20Upgradeable, IStableTokenV2) ++ returns (bool) ++ { ++ return ERC20Upgradeable.approve(spender, amount); ++ } ++ ++ /// @inheritdoc ERC20Upgradeable ++ function allowance( ++ address owner, ++ address spender ++ ) ++ public ++ view ++ override(ERC20Upgradeable, IStableTokenV2) ++ returns (uint256) ++ { ++ return ERC20Upgradeable.allowance(owner, spender); ++ } ++ ++ /// @inheritdoc ERC20Upgradeable ++ function totalSupply() public view override(ERC20Upgradeable, IStableTokenV2) returns (uint256) { ++ return ERC20Upgradeable.totalSupply(); ++ } ++ ++ /// @inheritdoc ERC20PermitUpgradeable ++ function permit( ++ address owner, ++ address spender, ++ uint256 value, ++ uint256 deadline, ++ uint8 v, ++ bytes32 r, ++ bytes32 s ++ ) ++ public ++ override(ERC20PermitUpgradeable, IStableTokenV2) ++ { ++ ERC20PermitUpgradeable.permit(owner, spender, value, deadline, v, r, s); ++ } ++ ++ /** ++ * @notice Reserve balance for making payments for gas in this StableToken currency. ++ * @param from The account to reserve balance from ++ * @param value The amount of balance to reserve ++ * @dev Note that this function is called by the protocol when paying for tx fees in this ++ * currency. After the tx is executed, gas is refunded to the sender and credited to the ++ * various tx fee recipients via a call to `creditGasFees`. ++ */ ++ function debitGasFees(address from, uint256 value) external onlyVm { ++ _burn(from, value); ++ } ++ ++ /** ++ * @notice Alternative function to credit balance after making payments ++ * for gas in this StableToken currency. ++ * @param from The account to debit balance from ++ * @param feeRecipient Coinbase address ++ * @param gatewayFeeRecipient Gateway address ++ * @param communityFund Community fund address ++ * @param refund amount to be refunded by the VM ++ * @param tipTxFee Coinbase fee ++ * @param baseTxFee Community fund fee ++ * @param gatewayFee Gateway fee ++ * @dev Note that this function is called by the protocol when paying for tx fees in this ++ * currency. 
Before the tx is executed, gas is debited from the sender via a call to ++ * `debitGasFees`. ++ */ ++ function creditGasFees( ++ address from, ++ address feeRecipient, ++ address gatewayFeeRecipient, ++ address communityFund, ++ uint256 refund, ++ uint256 tipTxFee, ++ uint256 gatewayFee, ++ uint256 baseTxFee ++ ) ++ external ++ onlyVm ++ { ++ // slither-disable-next-line uninitialized-local ++ uint256 amountToBurn; ++ _mint(from, refund + tipTxFee + gatewayFee + baseTxFee); ++ ++ if (feeRecipient != address(0)) { ++ _transfer(from, feeRecipient, tipTxFee); ++ } else if (tipTxFee > 0) { ++ amountToBurn += tipTxFee; ++ } ++ ++ if (gatewayFeeRecipient != address(0)) { ++ _transfer(from, gatewayFeeRecipient, gatewayFee); ++ } else if (gatewayFee > 0) { ++ amountToBurn += gatewayFee; ++ } ++ ++ if (communityFund != address(0)) { ++ _transfer(from, communityFund, baseTxFee); ++ } else if (baseTxFee > 0) { ++ amountToBurn += baseTxFee; ++ } ++ ++ if (amountToBurn > 0) { ++ _burn(from, amountToBurn); ++ } ++ } ++}
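The `debitGasFees`/`creditGasFees` pair above is the heart of the fee-currency flow: the protocol burns the maximum fee from the sender before the transaction runs, then mints the refund plus all fees back and routes each fee to its recipient, burning any portion whose recipient is the zero address. A minimal Go sketch of that crediting arithmetic (hypothetical names and uint64 balances, purely illustrative, not the contract code):

```go
// Sketch of the creditGasFees accounting: mint everything back to the
// sender, pay out each fee whose recipient is set, burn the rest.
package main

import "fmt"

type addr string

const zero addr = ""

func creditGasFees(balances map[addr]uint64, supply *uint64,
	from, feeRecipient, gatewayRecipient, communityFund addr,
	refund, tip, gatewayFee, baseFee uint64) {
	// Mirrors _mint(from, refund + tipTxFee + gatewayFee + baseTxFee).
	minted := refund + tip + gatewayFee + baseFee
	balances[from] += minted
	*supply += minted

	var toBurn uint64
	pay := func(to addr, amount uint64) {
		if to != zero {
			balances[from] -= amount
			balances[to] += amount
		} else if amount > 0 {
			toBurn += amount // recipient unset: burn instead of paying
		}
	}
	pay(feeRecipient, tip)
	pay(gatewayRecipient, gatewayFee)
	pay(communityFund, baseFee)

	if toBurn > 0 { // mirrors the final _burn(from, amountToBurn)
		balances[from] -= toBurn
		*supply -= toBurn
	}
}

func main() {
	balances := map[addr]uint64{"sender": 0}
	var supply uint64
	// No gateway recipient set, so the 5-unit gateway fee is burned.
	creditGasFees(balances, &supply, "sender", "coinbase", zero, "fund", 100, 10, 5, 20)
	fmt.Println(balances["sender"], balances["coinbase"], balances["fund"], supply)
	// Output: 100 10 20 130
}
```

With a refund of 100, a tip of 10, a gateway fee of 5, and a base fee of 20 but no gateway recipient configured, the sender nets exactly the refund while the 5-unit gateway fee is burned, so total supply grows by 130 rather than 135.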
+
+ + +
+ + +
+
+
+ + (new) + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+193
+
-0
+ +
+ +
+
+
diff --git OP/packages/contracts-bedrock/src/celo/UniswapFeeHandlerSeller.sol CELO/packages/contracts-bedrock/src/celo/UniswapFeeHandlerSeller.sol
+new file mode 100644
+index 0000000000000000000000000000000000000000..54ce14eaf37cfd30695729e4a2990b294d589b86
+--- /dev/null
++++ CELO/packages/contracts-bedrock/src/celo/UniswapFeeHandlerSeller.sol
+@@ -0,0 +1,193 @@
++// SPDX-License-Identifier: MIT
++pragma solidity ^0.8.15;
++
++import "../../lib/openzeppelin-contracts/contracts/utils/math/Math.sol";
++import "../../lib/openzeppelin-contracts/contracts/access/Ownable.sol";
++import "../../lib/openzeppelin-contracts/contracts/utils/structs/EnumerableSet.sol";
++import "../../lib/openzeppelin-contracts/contracts/token/ERC20/IERC20.sol";
++
++import "./UsingRegistry.sol";
++
++import "./common/interfaces/IFeeHandlerSeller.sol";
++import "./stability/interfaces/ISortedOracles.sol";
++import "./common/FixidityLib.sol";
++import "./common/Initializable.sol";
++import "./FeeHandlerSeller.sol";
++
++import "./uniswap/interfaces/IUniswapV2RouterMin.sol";
++import "./uniswap/interfaces/IUniswapV2FactoryMin.sol";
++
++// An implementation of FeeHandlerSeller supporting interfaces compatible with
++// Uniswap V2 API
++// See https://github.com/celo-org/celo-proposals/blob/master/CIPs/cip-0052.md
++contract UniswapFeeHandlerSeller is FeeHandlerSeller {
++ using FixidityLib for FixidityLib.Fraction;
++ using EnumerableSet for EnumerableSet.AddressSet;
++
++ uint256 constant MAX_TIMESTAMP_BLOCK_EXCHANGE = 20;
++ uint256 constant MAX_NUMBER_ROUTERS_PER_TOKEN = 3;
++ mapping(address => EnumerableSet.AddressSet) private routerAddresses;
++
++ event ReceivedQuote(address indexed tokenAddress, address indexed router, uint256 quote);
++ event RouterUsed(address router);
++ event RouterAddressSet(address token, address router);
++ event RouterAddressRemoved(address token, address router);
++
++ /**
++ * @notice Sets initialized == true on implementation contracts.
++ * @param test Set to true to skip implementation initialisation.
++ */
++ constructor(bool test) FeeHandlerSeller(test) { }
++
++ // without this line the contract can't receive native Celo transfers
++ receive() external payable { }
++
++ /**
++ * @notice Returns the storage, major, minor, and patch version of the contract.
++ * @return Storage version of the contract.
++ * @return Major version of the contract.
++ * @return Minor version of the contract.
++ * @return Patch version of the contract.
++ */
++ function getVersionNumber() external pure returns (uint256, uint256, uint256, uint256) {
++ return (1, 1, 0, 0);
++ }
++
++ /**
++ * @notice Allows owner to set the router for a token.
++ * @param token Address of the token to set.
++ * @param router The new router.
++ */
++ function setRouter(address token, address router) external onlyOwner {
++ _setRouter(token, router);
++ }
++
++ function _setRouter(address token, address router) private {
++ require(router != address(0), "Router can't be address zero");
++ routerAddresses[token].add(router);
++ require(routerAddresses[token].values().length <= MAX_NUMBER_ROUTERS_PER_TOKEN, "Max number of routers reached");
++ emit RouterAddressSet(token, router);
++ }
++
++ /**
++ * @notice Allows owner to remove a router for a token.
++ * @param token Address of the token.
++ * @param router Address of the router to remove.
++ */
++ function removeRouter(address token, address router) external onlyOwner {
++ routerAddresses[token].remove(router);
++ emit RouterAddressRemoved(token, router);
++ }
++
++ /**
++ * @notice Get the list of routers for a token.
++ * @param token The address of the token to query.
++ * @return An array of all the allowed routers.
++ */
++ function getRoutersForToken(address token) external view returns (address[] memory) {
++ return routerAddresses[token].values();
++ }
++
++ /**
++ * @dev Calculates the minimum amount of tokens that can be received for a given amount of sell tokens,
++ * taking into account the slippage and the rates of the sell token and CELO token on the Uniswap V2 pair.
++ * @param sellTokenAddress The address of the sell token.
++ * @param maxSlippage The maximum slippage allowed.
++ * @param amount The amount of sell tokens to be traded.
++ * @param bestRouter The Uniswap V2 router with the best price.
++ * @return The minimum amount of tokens that can be received.
++ */
++ function calculateAllMinAmount(
++ address sellTokenAddress,
++ uint256 maxSlippage,
++ uint256 amount,
++ IUniswapV2RouterMin bestRouter
++ )
++ private
++ view
++ returns (uint256)
++ {
++ ISortedOracles sortedOracles = getSortedOracles();
++ uint256 minReports = minimumReports[sellTokenAddress];
++
++ require(sortedOracles.numRates(sellTokenAddress) >= minReports, "Number of reports for token not enough");
++
++ uint256 minimalSortedOracles = 0;
++ // if minimumReports for this token is zero, assume the check is not needed
++ if (minReports > 0) {
++ (uint256 rateNumerator, uint256 rateDenominator) = sortedOracles.medianRate(sellTokenAddress);
++
++ minimalSortedOracles = calculateMinAmount(rateNumerator, rateDenominator, amount, maxSlippage);
++ }
++
++ IERC20 celoToken = getGoldToken();
++ address pair = IUniswapV2FactoryMin(bestRouter.factory()).getPair(sellTokenAddress, address(celoToken));
++ uint256 minAmountPair =
++ calculateMinAmount(IERC20(sellTokenAddress).balanceOf(pair), celoToken.balanceOf(pair), amount, maxSlippage);
++
++ return Math.max(minAmountPair, minimalSortedOracles);
++ }
++
++ // This function explicitly defines a few variables because it was getting a "stack too deep" error
++ function sell(
++ address sellTokenAddress,
++ address buyTokenAddress,
++ uint256 amount,
++ uint256 maxSlippage // as fraction,
++ )
++ external
++ returns (uint256)
++ {
++ require(
++ buyTokenAddress == registry.getAddressForOrDie(GOLD_TOKEN_REGISTRY_ID), "Buy token can only be gold token"
++ );
++
++ require(routerAddresses[sellTokenAddress].values().length > 0, "routerAddresses should be non empty");
++
++ // An improvement to this function would be to allow the user to pass a path as argument
++ // and if it generates a better outcome than the ones enabled, that one gets used
++ // and the user gets a reward
++
++ IERC20 celoToken = getGoldToken();
++
++ IUniswapV2RouterMin bestRouter;
++ uint256 bestRouterQuote = 0;
++
++ address[] memory path = new address[](2);
++
++ path[0] = sellTokenAddress;
++ path[1] = address(celoToken);
++
++ for (uint256 i = 0; i < routerAddresses[sellTokenAddress].values().length; i++) {
++ address poolAddress = routerAddresses[sellTokenAddress].at(i);
++ IUniswapV2RouterMin router = IUniswapV2RouterMin(poolAddress);
++
++ // Using the second return value because it's the last argument,
++ // the previous values show how many tokens are exchanged in each path
++ // so the first value would be equivalent to balanceToBurn
++ uint256 wouldGet =
router.getAmountsOut(amount, path)[1]; ++ ++ emit ReceivedQuote(sellTokenAddress, poolAddress, wouldGet); ++ if (wouldGet > bestRouterQuote) { ++ bestRouterQuote = wouldGet; ++ bestRouter = router; ++ } ++ } ++ ++ require(bestRouterQuote != 0, "Can't exchange with zero quote"); ++ ++ uint256 minAmount = 0; ++ minAmount = calculateAllMinAmount(sellTokenAddress, maxSlippage, amount, bestRouter); ++ ++ IERC20(sellTokenAddress).approve(address(bestRouter), amount); ++ bestRouter.swapExactTokensForTokens( ++ amount, minAmount, path, address(this), block.timestamp + MAX_TIMESTAMP_BLOCK_EXCHANGE ++ ); ++ ++ uint256 celoAmount = celoToken.balanceOf(address(this)); ++ celoToken.transfer(msg.sender, celoAmount); ++ emit RouterUsed(address(bestRouter)); ++ emit TokenSold(sellTokenAddress, buyTokenAddress, amount); ++ return celoAmount; ++ } ++}
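The `sell` implementation above shops the trade across every registered router: it emits a `ReceivedQuote` per router, keeps the highest output, refuses to trade on a zero best quote, and then bounds slippage by the stricter of the oracle-derived and pair-reserve-derived minimums. A hedged Go sketch of just the quote-selection loop (the `Quoter` interface and `fixedQuoter` type are stand-ins invented for illustration, not real bindings for `IUniswapV2RouterMin`):

```go
// Sketch of the best-quote loop in sell(): query each router, log the
// quote, keep the best, and error out if every quote is zero.
package main

import (
	"errors"
	"fmt"
)

type Quoter interface {
	Name() string
	AmountOut(amountIn uint64) uint64 // analogous to the last element of getAmountsOut
}

type fixedQuoter struct {
	name string
	out  uint64
}

func (q fixedQuoter) Name() string              { return q.name }
func (q fixedQuoter) AmountOut(_ uint64) uint64 { return q.out }

// bestQuote mirrors the loop in sell(): one quote per router, highest wins.
func bestQuote(routers []Quoter, amountIn uint64) (Quoter, uint64, error) {
	var best Quoter
	var bestOut uint64
	for _, r := range routers {
		out := r.AmountOut(amountIn)
		fmt.Printf("ReceivedQuote router=%s quote=%d\n", r.Name(), out)
		if out > bestOut {
			bestOut, best = out, r
		}
	}
	if bestOut == 0 {
		return nil, 0, errors.New("can't exchange with zero quote")
	}
	return best, bestOut, nil
}

func main() {
	routers := []Quoter{fixedQuoter{"routerA", 95}, fixedQuoter{"routerB", 97}}
	r, out, err := bestQuote(routers, 100)
	if err != nil {
		panic(err)
	}
	fmt.Println("best:", r.Name(), out) // best: routerB 97
}
```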
+
+ + +
+ + +
+
+
+ + (new) + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+126
+
-0
+ +
+ +
+
+
diff --git OP/packages/contracts-bedrock/src/celo/UsingRegistry.sol CELO/packages/contracts-bedrock/src/celo/UsingRegistry.sol +new file mode 100644 +index 0000000000000000000000000000000000000000..0764125d65c19d7a1834b599a34bf2e4d0dafbf6 +--- /dev/null ++++ CELO/packages/contracts-bedrock/src/celo/UsingRegistry.sol +@@ -0,0 +1,126 @@ ++// SPDX-License-Identifier: MIT ++pragma solidity ^0.8.15; ++ ++import "../../lib/openzeppelin-contracts/contracts/access/Ownable.sol"; ++import "../../lib/openzeppelin-contracts/contracts/token/ERC20/IERC20.sol"; ++ ++import "./interfaces/IAccounts.sol"; ++import "./interfaces/IFeeCurrencyWhitelist.sol"; ++import "./interfaces/IFreezer.sol"; ++import "./interfaces/ICeloRegistry.sol"; ++ ++import "./governance/interfaces/IElection.sol"; ++import "./governance/interfaces/IGovernance.sol"; ++import "./governance/interfaces/ILockedGold.sol"; ++import "./governance/interfaces/IValidators.sol"; ++ ++import "./identity/interfaces/IRandom.sol"; ++import "./identity/interfaces/IAttestations.sol"; ++ ++import "./stability/interfaces/ISortedOracles.sol"; ++ ++import "./mento/interfaces/IExchange.sol"; ++import "./mento/interfaces/IReserve.sol"; ++import "./mento/interfaces/IStableToken.sol"; ++ ++contract UsingRegistry is Ownable { ++ event RegistrySet(address indexed registryAddress); ++ ++ // solhint-disable state-visibility ++ bytes32 constant ACCOUNTS_REGISTRY_ID = keccak256(abi.encodePacked("Accounts")); ++ bytes32 constant ATTESTATIONS_REGISTRY_ID = keccak256(abi.encodePacked("Attestations")); ++ bytes32 constant DOWNTIME_SLASHER_REGISTRY_ID = keccak256(abi.encodePacked("DowntimeSlasher")); ++ bytes32 constant DOUBLE_SIGNING_SLASHER_REGISTRY_ID = keccak256(abi.encodePacked("DoubleSigningSlasher")); ++ bytes32 constant ELECTION_REGISTRY_ID = keccak256(abi.encodePacked("Election")); ++ bytes32 constant EXCHANGE_REGISTRY_ID = keccak256(abi.encodePacked("Exchange")); ++ bytes32 constant FEE_CURRENCY_WHITELIST_REGISTRY_ID = keccak256(abi.encodePacked("FeeCurrencyWhitelist")); ++ bytes32 constant FREEZER_REGISTRY_ID = keccak256(abi.encodePacked("Freezer")); ++ bytes32 constant GOLD_TOKEN_REGISTRY_ID = keccak256(abi.encodePacked("GoldToken")); ++ bytes32 constant GOVERNANCE_REGISTRY_ID = keccak256(abi.encodePacked("Governance")); ++ bytes32 constant GOVERNANCE_SLASHER_REGISTRY_ID = keccak256(abi.encodePacked("GovernanceSlasher")); ++ bytes32 constant LOCKED_GOLD_REGISTRY_ID = keccak256(abi.encodePacked("LockedGold")); ++ bytes32 constant RESERVE_REGISTRY_ID = keccak256(abi.encodePacked("Reserve")); ++ bytes32 constant RANDOM_REGISTRY_ID = keccak256(abi.encodePacked("Random")); ++ bytes32 constant SORTED_ORACLES_REGISTRY_ID = keccak256(abi.encodePacked("SortedOracles")); ++ bytes32 constant STABLE_TOKEN_REGISTRY_ID = keccak256(abi.encodePacked("StableToken")); ++ bytes32 constant VALIDATORS_REGISTRY_ID = keccak256(abi.encodePacked("Validators")); ++ // solhint-enable state-visibility ++ ++ ICeloRegistry public registry; ++ ++ modifier onlyRegisteredContract(bytes32 identifierHash) { ++ require(registry.getAddressForOrDie(identifierHash) == msg.sender, "only registered contract"); ++ _; ++ } ++ ++ modifier onlyRegisteredContracts(bytes32[] memory identifierHashes) { ++ require(registry.isOneOf(identifierHashes, msg.sender), "only registered contracts"); ++ _; ++ } ++ ++ /** ++ * @notice Updates the address pointing to a Registry contract. ++ * @param registryAddress The address of a registry contract for routing to other contracts. 
++ */ ++ function setRegistry(address registryAddress) public onlyOwner { ++ require(registryAddress != address(0), "Cannot register the null address"); ++ registry = ICeloRegistry(registryAddress); ++ emit RegistrySet(registryAddress); ++ } ++ ++ function getAccounts() internal view returns (IAccounts) { ++ return IAccounts(registry.getAddressForOrDie(ACCOUNTS_REGISTRY_ID)); ++ } ++ ++ function getAttestations() internal view returns (IAttestations) { ++ return IAttestations(registry.getAddressForOrDie(ATTESTATIONS_REGISTRY_ID)); ++ } ++ ++ function getElection() internal view returns (IElection) { ++ return IElection(registry.getAddressForOrDie(ELECTION_REGISTRY_ID)); ++ } ++ ++ function getExchange() internal view returns (IExchange) { ++ return IExchange(registry.getAddressForOrDie(EXCHANGE_REGISTRY_ID)); ++ } ++ ++ function getFeeCurrencyWhitelistRegistry() internal view returns (IFeeCurrencyWhitelist) { ++ return IFeeCurrencyWhitelist(registry.getAddressForOrDie(FEE_CURRENCY_WHITELIST_REGISTRY_ID)); ++ } ++ ++ function getFreezer() internal view returns (IFreezer) { ++ return IFreezer(registry.getAddressForOrDie(FREEZER_REGISTRY_ID)); ++ } ++ ++ function getGoldToken() internal view returns (IERC20) { ++ return IERC20(registry.getAddressForOrDie(GOLD_TOKEN_REGISTRY_ID)); ++ } ++ ++ function getGovernance() internal view returns (IGovernance) { ++ return IGovernance(registry.getAddressForOrDie(GOVERNANCE_REGISTRY_ID)); ++ } ++ ++ function getLockedGold() internal view returns (ILockedGold) { ++ return ILockedGold(registry.getAddressForOrDie(LOCKED_GOLD_REGISTRY_ID)); ++ } ++ ++ function getRandom() internal view returns (IRandom) { ++ return IRandom(registry.getAddressForOrDie(RANDOM_REGISTRY_ID)); ++ } ++ ++ function getReserve() internal view returns (IReserve) { ++ return IReserve(registry.getAddressForOrDie(RESERVE_REGISTRY_ID)); ++ } ++ ++ function getSortedOracles() internal view returns (ISortedOracles) { ++ return ISortedOracles(registry.getAddressForOrDie(SORTED_ORACLES_REGISTRY_ID)); ++ } ++ ++ function getStableToken() internal view returns (IStableToken) { ++ return IStableToken(registry.getAddressForOrDie(STABLE_TOKEN_REGISTRY_ID)); ++ } ++ ++ function getValidators() internal view returns (IValidators) { ++ return IValidators(registry.getAddressForOrDie(VALIDATORS_REGISTRY_ID)); ++ } ++}
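Each `*_REGISTRY_ID` constant above is `keccak256(abi.encodePacked(<name>))`, and because `abi.encodePacked` of a single string is just its raw bytes, the identifiers can be reproduced off-chain by hashing the bare contract name. A small Go sketch, assuming the `golang.org/x/crypto/sha3` package:

```go
// Reproduces the registry identifiers used by UsingRegistry: keccak-256
// over the contract name's bytes, matching keccak256(abi.encodePacked(name)).
package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func registryID(name string) [32]byte {
	h := sha3.NewLegacyKeccak256()
	h.Write([]byte(name))
	var id [32]byte
	copy(id[:], h.Sum(nil))
	return id
}

func main() {
	for _, name := range []string{"Accounts", "GoldToken", "SortedOracles"} {
		fmt.Printf("%-14s %x\n", name, registryID(name))
	}
}
```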
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+9
+
-1
+ +
+ +
+
+
diff --git OP/packages/contracts-bedrock/src/libraries/SafeCall.sol CELO/packages/contracts-bedrock/src/libraries/SafeCall.sol +index 78603993b8ad0be09d426be9473d9d30494eb6c5..c2c4e635f0fbea9f04ff10565b4c25df712324f3 100644 +--- OP/packages/contracts-bedrock/src/libraries/SafeCall.sol ++++ CELO/packages/contracts-bedrock/src/libraries/SafeCall.sol +@@ -1,5 +1,5 @@ + // SPDX-License-Identifier: MIT +-pragma solidity 0.8.15; ++pragma solidity ^0.8.0; +  + /// @title SafeCall + /// @notice Perform low level safe calls +@@ -57,6 +57,14 @@ 0, // outloc + 0 // outlen + ) + } ++ } ++ ++ /// @notice Perform a low level call without copying any returndata ++ /// @param _target Address to call ++ /// @param _value Amount of value to pass to the call ++ /// @param _calldata Calldata to pass to the call ++ function call(address _target, uint256 _value, bytes memory _calldata) internal returns (bool success_) { ++ success_ = call({ _target: _target, _gas: gasleft(), _value: _value, _calldata: _calldata }); + } +  + /// @notice Helper function to determine if there is sufficient gas remaining within the context
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+2
+
-1
+ +
+ +
+
+
diff --git OP/packages/contracts-bedrock/src/universal/OptimismMintableERC20.sol CELO/packages/contracts-bedrock/src/universal/OptimismMintableERC20.sol
+index 43ddd65424f895b007d2eab8f5907b281c714f74..915dcefc1761de10fdd1e851387f956150de51d7 100644
+--- OP/packages/contracts-bedrock/src/universal/OptimismMintableERC20.sol
++++ CELO/packages/contracts-bedrock/src/universal/OptimismMintableERC20.sol
+@@ -5,6 +5,7 @@ import { ERC20 } from "@openzeppelin/contracts/token/ERC20/ERC20.sol";
+ import { IERC165 } from "@openzeppelin/contracts/utils/introspection/IERC165.sol";
+ import { ILegacyMintableERC20, IOptimismMintableERC20 } from "src/universal/IOptimismMintableERC20.sol";
+ import { ISemver } from "src/universal/ISemver.sol";
++import { FeeCurrency } from "src/celo/FeeCurrency.sol";
+ 
+ /// @title OptimismMintableERC20
+ /// @notice OptimismMintableERC20 is a standard extension of the base ERC20 token contract designed
+@@ -12,7 +13,7 @@ /// to allow the StandardBridge contracts to mint and burn tokens. This makes it possible to
+ /// use an OptimismMintableERC20 as the L2 representation of an L1 token, or vice-versa.
+ /// Designed to be backwards compatible with the older StandardL2ERC20 token which was only
+ /// meant for use on L2.
+-contract OptimismMintableERC20 is IOptimismMintableERC20, ILegacyMintableERC20, ERC20, ISemver {
++contract OptimismMintableERC20 is IOptimismMintableERC20, ILegacyMintableERC20, ERC20, ISemver, FeeCurrency {
+ /// @notice Address of the corresponding version of this token on the remote chain.
+ address public immutable REMOTE_TOKEN;
+
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+3
+
-1
+ +
+ +
+
+
diff --git OP/packages/contracts-bedrock/test/L2Genesis.t.sol CELO/packages/contracts-bedrock/test/L2Genesis.t.sol +index f851f62d634d87b5b688acc59c208b8a4c9f4c90..85009526983f8b12030d6f19b19aecba2b6f1fa1 100644 +--- OP/packages/contracts-bedrock/test/L2Genesis.t.sol ++++ CELO/packages/contracts-bedrock/test/L2Genesis.t.sol +@@ -37,7 +37,7 @@ string[] memory commands = new string[](3); + commands[0] = "bash"; + commands[1] = "-c"; + commands[2] = string.concat("rm ", path); +- Process.run(commands); ++ Process.run({ _command: commands, _allowEmpty: true }); + } +  + /// @notice Returns the number of top level keys in a JSON object at a given +@@ -181,6 +181,7 @@ + /// @notice Tests the number of accounts in the genesis setup + function _test_allocs_size(string memory _path) internal { + genesis.cfg().setFundDevAccounts(false); ++ genesis.cfg().setDeployCeloContracts(true); + genesis.runWithLatestLocal(_dummyL1Deps()); + genesis.writeGenesisAllocs(_path); +  +@@ -190,6 +191,7 @@ expected += 21; // predeploy implementations (excl. legacy erc20-style eth and legacy message sender) + expected += 256; // precompiles + expected += 12; // preinstalls + expected += 1; // 4788 deployer account ++ expected += 16; // Celo contracts + // 16 prefunded dev accounts are excluded + assertEq(expected, getJSONKeyCount(_path), "key count check"); +
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+1
+
-1
+ +
+ +
+
+
diff --git OP/packages/contracts-bedrock/test/Safe/DeployOwnership.t.sol CELO/packages/contracts-bedrock/test/Safe/DeployOwnership.t.sol +index cc4f58508ab1397afc197c48fc0a3f8dc7ea1844..b3a856e68a406ed3ccdec503437374447c0e74ed 100644 +--- OP/packages/contracts-bedrock/test/Safe/DeployOwnership.t.sol ++++ CELO/packages/contracts-bedrock/test/Safe/DeployOwnership.t.sol +@@ -8,7 +8,7 @@ SecurityCouncilConfig, + GuardianConfig, + DeputyGuardianModuleConfig, + LivenessModuleConfig +-} from "scripts/DeployOwnership.s.sol"; ++} from "scripts/deploy/DeployOwnership.s.sol"; + import { Test } from "forge-std/Test.sol"; +  + import { GnosisSafe as Safe } from "safe-contracts/GnosisSafe.sol";
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+95
+
-9
+ +
+ +
+
+
diff --git OP/packages/contracts-bedrock/test/Specs.t.sol CELO/packages/contracts-bedrock/test/Specs.t.sol +index e3d0d7cc7d6f23dc0462b2cdbd10ff8fce9c6657..4f495ebd07e5937a67b4d6eb6ec80321f832aa39 100644 +--- OP/packages/contracts-bedrock/test/Specs.t.sol ++++ CELO/packages/contracts-bedrock/test/Specs.t.sol +@@ -25,13 +25,16 @@ PROPOSER, + CHALLENGER, + SYSTEMCONFIGOWNER, + GUARDIAN, ++ DEPUTYGUARDIAN, + MESSENGER, + L1PROXYADMINOWNER, + GOVERNANCETOKENOWNER, + MINTMANAGEROWNER, + DATAAVAILABILITYCHALLENGEOWNER, + DISPUTEGAMEFACTORYOWNER, +- DELAYEDWETHOWNER ++ DELAYEDWETHOWNER, ++ COUNCILSAFE, ++ COUNCILSAFEOWNER + } +  + /// @notice Represents the specification of a function. +@@ -47,6 +50,7 @@ bool pausable; + } +  + mapping(string => mapping(bytes4 => Spec)) specs; ++ mapping(Role => Spec[]) public specsByRole; + mapping(string => uint256) public numEntries; + uint256 numSpecs; +  +@@ -725,9 +729,13 @@ }); + _addSpec({ + _name: "DisputeGameFactory", + _sel: _getSel("setImplementation(uint32,address)"), +- _auth: Role.GUARDIAN ++ _auth: Role.DISPUTEGAMEFACTORYOWNER ++ }); ++ _addSpec({ ++ _name: "DisputeGameFactory", ++ _sel: _getSel("setInitBond(uint32,uint256)"), ++ _auth: Role.DISPUTEGAMEFACTORYOWNER + }); +- _addSpec({ _name: "DisputeGameFactory", _sel: _getSel("setInitBond(uint32,uint256)"), _auth: Role.GUARDIAN }); + _addSpec({ + _name: "DisputeGameFactory", + _sel: _getSel("transferOwnership(address)"), +@@ -743,11 +751,11 @@ _addSpec({ _name: "DelayedWETH", _sel: _getSel("config()") }); + _addSpec({ _name: "DelayedWETH", _sel: _getSel("decimals()") }); + _addSpec({ _name: "DelayedWETH", _sel: _getSel("delay()") }); + _addSpec({ _name: "DelayedWETH", _sel: _getSel("deposit()") }); +- _addSpec({ _name: "DelayedWETH", _sel: _getSel("hold(address,uint256)"), _auth: Role.GUARDIAN }); ++ _addSpec({ _name: "DelayedWETH", _sel: _getSel("hold(address,uint256)"), _auth: Role.DELAYEDWETHOWNER }); + _addSpec({ _name: "DelayedWETH", _sel: _getSel("initialize(address,address)") }); + _addSpec({ _name: "DelayedWETH", _sel: _getSel("name()") }); + _addSpec({ _name: "DelayedWETH", _sel: _getSel("owner()") }); +- _addSpec({ _name: "DelayedWETH", _sel: _getSel("recover(uint256)"), _auth: Role.GUARDIAN }); ++ _addSpec({ _name: "DelayedWETH", _sel: _getSel("recover(uint256)"), _auth: Role.DELAYEDWETHOWNER }); + _addSpec({ _name: "DelayedWETH", _sel: _getSel("renounceOwnership()"), _auth: Role.DELAYEDWETHOWNER }); + _addSpec({ _name: "DelayedWETH", _sel: _getSel("symbol()") }); + _addSpec({ _name: "DelayedWETH", _sel: _getSel("totalSupply()") }); +@@ -772,6 +780,51 @@ _addSpec({ _name: "WETH98", _sel: _getSel("totalSupply()") }); + _addSpec({ _name: "WETH98", _sel: _getSel("transfer(address,uint256)") }); + _addSpec({ _name: "WETH98", _sel: _getSel("transferFrom(address,address,uint256)") }); + _addSpec({ _name: "WETH98", _sel: _getSel("withdraw(uint256)") }); ++ ++ // DeputyGuardianModule ++ _addSpec({ ++ _name: "DeputyGuardianModule", ++ _sel: _getSel("blacklistDisputeGame(address,address)"), ++ _auth: Role.DEPUTYGUARDIAN ++ }); ++ _addSpec({ ++ _name: "DeputyGuardianModule", ++ _sel: _getSel("setRespectedGameType(address,uint32)"), ++ _auth: Role.DEPUTYGUARDIAN ++ }); ++ _addSpec({ _name: "DeputyGuardianModule", _sel: _getSel("pause()"), _auth: Role.DEPUTYGUARDIAN }); ++ _addSpec({ _name: "DeputyGuardianModule", _sel: _getSel("unpause()"), _auth: Role.DEPUTYGUARDIAN }); ++ _addSpec({ _name: "DeputyGuardianModule", _sel: _getSel("deputyGuardian()") }); ++ _addSpec({ _name: "DeputyGuardianModule", 
_sel: _getSel("safe()") }); ++ _addSpec({ _name: "DeputyGuardianModule", _sel: _getSel("superchainConfig()") }); ++ _addSpec({ _name: "DeputyGuardianModule", _sel: _getSel("version()") }); ++ ++ // LivenessGuard ++ _addSpec({ _name: "LivenessGuard", _sel: _getSel("checkAfterExecution(bytes32,bool)"), _auth: Role.COUNCILSAFE }); ++ _addSpec({ ++ _name: "LivenessGuard", ++ _sel: _getSel( ++ "checkTransaction(address,uint256,bytes,uint8,uint256,uint256,uint256,address,address,bytes,address)" ++ ), ++ _auth: Role.COUNCILSAFE ++ }); ++ _addSpec({ _name: "LivenessGuard", _sel: _getSel("lastLive(address)") }); ++ _addSpec({ _name: "LivenessGuard", _sel: _getSel("safe()") }); ++ _addSpec({ _name: "LivenessGuard", _sel: _getSel("showLiveness()"), _auth: Role.COUNCILSAFEOWNER }); ++ _addSpec({ _name: "LivenessGuard", _sel: _getSel("version()") }); ++ ++ // LivenessModule ++ _addSpec({ _name: "LivenessModule", _sel: _getSel("canRemove(address)") }); ++ _addSpec({ _name: "LivenessModule", _sel: _getSel("fallbackOwner()") }); ++ _addSpec({ _name: "LivenessModule", _sel: _getSel("getRequiredThreshold(uint256)") }); ++ _addSpec({ _name: "LivenessModule", _sel: _getSel("livenessGuard()") }); ++ _addSpec({ _name: "LivenessModule", _sel: _getSel("livenessInterval()") }); ++ _addSpec({ _name: "LivenessModule", _sel: _getSel("minOwners()") }); ++ _addSpec({ _name: "LivenessModule", _sel: _getSel("ownershipTransferredToFallback()") }); ++ _addSpec({ _name: "LivenessModule", _sel: _getSel("removeOwners(address[],address[])") }); ++ _addSpec({ _name: "LivenessModule", _sel: _getSel("safe()") }); ++ _addSpec({ _name: "LivenessModule", _sel: _getSel("thresholdPercentage()") }); ++ _addSpec({ _name: "LivenessModule", _sel: _getSel("version()") }); + } +  + /// @dev Computes the selector from a function signature. +@@ -781,8 +834,10 @@ } +  + /// @dev Adds a spec for a function. + function _addSpec(string memory _name, bytes4 _sel, Role _auth, bool _pausable) internal { +- specs[_name][_sel] = Spec({ name: _name, sel: _sel, auth: _auth, pausable: _pausable }); ++ Spec memory spec = Spec({ name: _name, sel: _sel, auth: _auth, pausable: _pausable }); ++ specs[_name][_sel] = spec; + numEntries[_name]++; ++ specsByRole[_auth].push(spec); + numSpecs++; + } +  +@@ -803,11 +858,13 @@ } +  + /// @notice Ensures that there's an auth spec for every L1 contract function. + function testContractAuth() public { +- string[] memory pathExcludes = new string[](2); ++ string[] memory pathExcludes = new string[](3); + pathExcludes[0] = "src/dispute/interfaces/*"; + pathExcludes[1] = "src/dispute/lib/*"; +- Abi[] memory abis = +- ForgeArtifacts.getContractFunctionAbis("src/{L1,dispute,governance,universal/ProxyAdmin.sol}", pathExcludes); ++ pathExcludes[2] = "src/Safe/SafeSigners.sol"; ++ Abi[] memory abis = ForgeArtifacts.getContractFunctionAbis( ++ "src/{L1,dispute,governance,Safe,universal/ProxyAdmin.sol}", pathExcludes ++ ); +  + uint256 numCheckedEntries = 0; + for (uint256 i = 0; i < abis.length; i++) { +@@ -839,5 +896,34 @@ numCheckedEntries++; + } + } + assertEq(numSpecs, numCheckedEntries, "Some specs were not checked"); ++ } ++ ++ /// @dev Asserts that two roles are equal by comparing their uint256 representations. ++ function _assertRolesEq(Role leftRole, Role rightRole) internal pure { ++ assertEq(uint256(leftRole), uint256(rightRole)); ++ } ++ ++ /// @notice Ensures that the DeputyGuardian is authorized to take all Guardian actions. 
++ function testDeputyGuardianAuth() public view { ++ assertEq(specsByRole[Role.DEPUTYGUARDIAN].length, specsByRole[Role.GUARDIAN].length); ++ assertEq(specsByRole[Role.DEPUTYGUARDIAN].length, 4); ++ ++ mapping(bytes4 => Spec) storage dgmFuncSpecs = specs["DeputyGuardianModule"]; ++ mapping(bytes4 => Spec) storage superchainConfigFuncSpecs = specs["SuperchainConfig"]; ++ mapping(bytes4 => Spec) storage portal2FuncSpecs = specs["OptimismPortal2"]; ++ ++ // Ensure that for each of the DeputyGuardianModule's methods there is a corresponding method on another ++ // system contract authed to the Guardian role. ++ _assertRolesEq(dgmFuncSpecs[_getSel("pause()")].auth, Role.DEPUTYGUARDIAN); ++ _assertRolesEq(superchainConfigFuncSpecs[_getSel("pause(string)")].auth, Role.GUARDIAN); ++ ++ _assertRolesEq(dgmFuncSpecs[_getSel("unpause()")].auth, Role.DEPUTYGUARDIAN); ++ _assertRolesEq(superchainConfigFuncSpecs[_getSel("unpause()")].auth, Role.GUARDIAN); ++ ++ _assertRolesEq(dgmFuncSpecs[_getSel("blacklistDisputeGame(address,address)")].auth, Role.DEPUTYGUARDIAN); ++ _assertRolesEq(portal2FuncSpecs[_getSel("blacklistDisputeGame(address)")].auth, Role.GUARDIAN); ++ ++ _assertRolesEq(dgmFuncSpecs[_getSel("setRespectedGameType(address,uint32)")].auth, Role.DEPUTYGUARDIAN); ++ _assertRolesEq(portal2FuncSpecs[_getSel("setRespectedGameType(uint32)")].auth, Role.GUARDIAN); + } + }
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+1
+
-1
+ +
+ +
+
+
diff --git OP/packages/contracts-bedrock/test/cannon/MIPS.t.sol CELO/packages/contracts-bedrock/test/cannon/MIPS.t.sol +index f5c9c3066c7b6714c22c0b49fb99b2a1efc62d64..3f843f3e0d028e3f6045942ce5a6c518c99ab7e1 100644 +--- OP/packages/contracts-bedrock/test/cannon/MIPS.t.sol ++++ CELO/packages/contracts-bedrock/test/cannon/MIPS.t.sol +@@ -1470,7 +1470,7 @@ + function test_fcntl_succeeds() external { + uint32 insn = 0x0000000c; // syscall + (MIPS.State memory state, bytes memory proof) = constructMIPSState(0, insn, 0x4, 0); +- state.registers[2] = 4055; // fnctl syscall ++ state.registers[2] = 4055; // fcntl syscall + state.registers[4] = 0x0; // a0 + state.registers[5] = 0x3; // a1 +
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+1
+
-1
+ +
+ +
+
+
diff --git OP/packages/contracts-bedrock/test/invariants/InvariantTest.sol CELO/packages/contracts-bedrock/test/invariants/InvariantTest.sol +index a188bcdf27a05fc11141134a95bd91c847b0c5f9..eea6c158b3577d456a20654eeebd9bc1a9ef40ad 100644 +--- OP/packages/contracts-bedrock/test/invariants/InvariantTest.sol ++++ CELO/packages/contracts-bedrock/test/invariants/InvariantTest.sol +@@ -2,7 +2,7 @@ // SPDX-License-Identifier: MIT + pragma solidity 0.8.15; +  + import { FFIInterface } from "test/setup/FFIInterface.sol"; +-import { Deploy } from "scripts/Deploy.s.sol"; ++import { Deploy } from "scripts/deploy/Deploy.s.sol"; + import { Test } from "forge-std/Test.sol"; +  + /// @title InvariantTest
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+1
+
-1
+ +
+ +
+
+
diff --git OP/packages/contracts-bedrock/test/kontrol/README.md CELO/packages/contracts-bedrock/test/kontrol/README.md +index ccf8781fce0be9a56bf453198756c0e08d787da1..e539c3c109d535b00d0c31f7cc2f306857a51489 100644 +--- OP/packages/contracts-bedrock/test/kontrol/README.md ++++ CELO/packages/contracts-bedrock/test/kontrol/README.md +@@ -111,7 +111,7 @@ These are the instructions to add a new proof to this project. If all functions involved in the new proof are from a contract already deployed by [`KontrolDeployment`](./deployment/KontrolDeployment.sol) the first two steps can be skipped. +  + #### Make Kontrol aware of the new contract being tested +  +-The `runKontrolDeployment` function of [`KontrolDeployment`](./deployment/KontrolDeployment.sol) partially reproduces the deployment process laid out in the `_run` function of [`Deploy.s.sol`](../../scripts/Deploy.s.sol). `runKontrolDeployment` has the `stateDiff` modifier to make use of [Foundry's state diff cheatcodes](https://book.getfoundry.sh/cheatcodes/start-state-diff-recording). Kontrol utilizes the JSON resulting from this modifier for two purposes: ++The `runKontrolDeployment` function of [`KontrolDeployment`](./deployment/KontrolDeployment.sol) partially reproduces the deployment process laid out in the `_run` function of [`Deploy.s.sol`](../../scripts/deploy/Deploy.s.sol). `runKontrolDeployment` has the `stateDiff` modifier to make use of [Foundry's state diff cheatcodes](https://book.getfoundry.sh/cheatcodes/start-state-diff-recording). Kontrol utilizes the JSON resulting from this modifier for two purposes: + 1. Load all the state updates generated by `runKontrolDeployment` as the initial configuration for all proofs, effectively offloading the computation of the deployment process to `forge` and thus improving performance. + 2. Produce the [`DeploymentSummary`](./proofs/utils/DeploymentSummary.sol) script contract to test that the produced JSON contains correct updates. +
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+1
+
-1
+ +
+ +
+
+
diff --git OP/packages/contracts-bedrock/test/setup/CommonTest.sol CELO/packages/contracts-bedrock/test/setup/CommonTest.sol +index 86ed109fd7b3f7d7e18b4fcac107cf3eff085f6f..d80cd31f260370bd2299738c95608f44fe547510 100644 +--- OP/packages/contracts-bedrock/test/setup/CommonTest.sol ++++ CELO/packages/contracts-bedrock/test/setup/CommonTest.sol +@@ -6,7 +6,7 @@ import { Setup } from "test/setup/Setup.sol"; + import { Events } from "test/setup/Events.sol"; + import { FFIInterface } from "test/setup/FFIInterface.sol"; + import { Constants } from "src/libraries/Constants.sol"; +-import "scripts/DeployConfig.s.sol"; ++import "scripts/deploy/DeployConfig.s.sol"; +  + /// @title CommonTest + /// @dev An extenstion to `Test` that sets up the optimism smart contracts.
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+2
+
-2
+ +
+ +
+
+
diff --git OP/packages/contracts-bedrock/test/setup/Setup.sol CELO/packages/contracts-bedrock/test/setup/Setup.sol +index 3193ce80e6e6c2c865801c3aa98d1256571fb0fb..7cf4d2a47d08f4ff6d4b1bf334fd7b24e0f62277 100644 +--- OP/packages/contracts-bedrock/test/setup/Setup.sol ++++ CELO/packages/contracts-bedrock/test/setup/Setup.sol +@@ -24,9 +24,9 @@ import { DisputeGameFactory } from "src/dispute/DisputeGameFactory.sol"; + import { DelayedWETH } from "src/dispute/weth/DelayedWETH.sol"; + import { AnchorStateRegistry } from "src/dispute/AnchorStateRegistry.sol"; + import { L1CrossDomainMessenger } from "src/L1/L1CrossDomainMessenger.sol"; +-import { DeployConfig } from "scripts/DeployConfig.s.sol"; ++import { DeployConfig } from "scripts/deploy/DeployConfig.s.sol"; ++import { Deploy } from "scripts/deploy/Deploy.s.sol"; + import { Fork, LATEST_FORK } from "scripts/Config.sol"; +-import { Deploy } from "scripts/Deploy.s.sol"; + import { L2Genesis, L1Dependencies } from "scripts/L2Genesis.s.sol"; + import { OutputMode, Fork, ForkUtils } from "scripts/Config.sol"; + import { L2OutputOracle } from "src/L1/L2OutputOracle.sol";
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+1
+
-1
+ +
+ +
+
+
diff --git OP/packages/contracts-bedrock/test/vendor/Initializable.t.sol CELO/packages/contracts-bedrock/test/vendor/Initializable.t.sol +index 349bdaef6023463764b8eaae1d7396be422738c4..05fff737bd6e4d302c21f3e7d9f1a56965ec6d70 100644 +--- OP/packages/contracts-bedrock/test/vendor/Initializable.t.sol ++++ CELO/packages/contracts-bedrock/test/vendor/Initializable.t.sol +@@ -13,7 +13,7 @@ import { ForgeArtifacts } from "scripts/ForgeArtifacts.sol"; + import { Process } from "scripts/libraries/Process.sol"; + import "src/L1/ProtocolVersions.sol"; + import "src/dispute/lib/Types.sol"; +-import "scripts/Deployer.sol"; ++import "scripts/deploy/Deployer.sol"; +  + /// @title Initializer_Test + /// @dev Ensures that the `initialize()` function on contracts cannot be called more than
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + (deleted) + +
+
+
+ +
+ +
+ +
+0
+
-8
+ +
+ +
+
+
diff --git OP/packages/contracts-bedrock/tsconfig.build.json CELO/packages/contracts-bedrock/tsconfig.build.json +deleted file mode 100644 +index e5df8a66de3182d1c71103da1bf42f7ea3e8b4f8..0000000000000000000000000000000000000000 +--- OP/packages/contracts-bedrock/tsconfig.build.json ++++ /dev/null +@@ -1,8 +0,0 @@ +-{ +- "extends": "../../tsconfig.json", +- "compilerOptions": { +- "rootDir": "./src", +- "outDir": "./dist" +- }, +- "include": ["src/**/*"] +-}
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+21
+
-2
+ +
+ +
+
+
diff --git OP/packages/contracts-bedrock/tsconfig.json CELO/packages/contracts-bedrock/tsconfig.json +index 5091614ef561939cc6cd5f5cdc788d76413c82bd..7c7a62708773da00a9c837cdce771d7823e4af87 100644 +--- OP/packages/contracts-bedrock/tsconfig.json ++++ CELO/packages/contracts-bedrock/tsconfig.json +@@ -1,8 +1,27 @@ + { +- "extends": "../../tsconfig.json", + "compilerOptions": { +- "outDir": "./dist" ++ "outDir": "./dist", ++ "skipLibCheck": true, ++ "module": "commonjs", ++ "target": "es2017", ++ "sourceMap": true, ++ "esModuleInterop": true, ++ "composite": true, ++ "resolveJsonModule": true, ++ "declaration": true, ++ "noImplicitAny": false, ++ "removeComments": true, ++ "noLib": false, ++ "emitDecoratorMetadata": true, ++ "experimentalDecorators": true, ++ "typeRoots": [ ++ "node_modules/@types" ++ ] + }, ++ "exclude": [ ++ "node_modules", ++ "dist" ++ ], + "include": [ + "deploy-config/**/*", + "deploy-config/**/*.json",
+
+ + + +
+
+ +
+
+
+ + +
+ +
+
+
+ + +
+
+ +
+
+
+ + +
+ +
+
+
+ +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+0
+
-10
+ +
+ +
+
+
diff --git OP/packages/chain-mon/.env.example CELO/packages/chain-mon/.env.example +index 0e10b0aaf530f07a255be3237405b3a558a455ac..07fbfc9856936f3acdb0a008e0c414fc16c9128e 100644 +--- OP/packages/chain-mon/.env.example ++++ CELO/packages/chain-mon/.env.example +@@ -32,16 +32,6 @@ # Defaults to the first bedrock block if unset. + WALLET_MON__START_BLOCK_NUMBER= +  + ############################################################################### +-# ↓ drippie-mon ↓ # +-############################################################################### +- +-# RPC pointing to network where Drippie is deployed +-DRIPPIE_MON__RPC= +- +-# Address of the Drippie contract +-DRIPPIE_MON__DRIPPIE_ADDRESS= +- +-############################################################################### + # ↓ wd-mon ↓ # + ############################################################################### +
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+2
+
-2
+ +
+ +
+
+
diff --git OP/packages/chain-mon/README.md CELO/packages/chain-mon/README.md +index 2a7a74c72eb98fd28f994eff7965a826329e6b93..28ab6ffdf4ee1ca10479f6f20c79420fb085a067 100644 +--- OP/packages/chain-mon/README.md ++++ CELO/packages/chain-mon/README.md +@@ -23,8 +23,8 @@ ``` + pnpm start:<service name> + ``` +  +-For example, to run `drippie-mon`, execute: ++For example, to run `balance-mon`, execute: +  + ``` +-pnpm start:drippie-mon ++pnpm start:balance-mon + ```
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+0
+
-2
+ +
+ +
+
+
diff --git OP/packages/chain-mon/package.json CELO/packages/chain-mon/package.json +index 456639c9ccdaef41ffd718a1452ac55cb0a3d74b..9ff9bf353382f04dc272b8b3b056c00e5b232ce3 100644 +--- OP/packages/chain-mon/package.json ++++ CELO/packages/chain-mon/package.json +@@ -10,7 +10,6 @@ "dist/*" + ], + "scripts": { + "dev:balance-mon": "tsx watch ./internal/balance-mon/service.ts", +- "dev:drippie-mon": "tsx watch ./contrib/drippie/service.ts", + "dev:fault-mon": "tsx watch ./src/fault-mon/service.ts", + "dev:multisig-mon": "tsx watch ./internal/multisig-mon/service.ts", + "dev:replica-mon": "tsx watch ./contrib/replica-mon/service.ts", +@@ -19,7 +18,6 @@ "dev:wd-mon": "tsx watch ./src/wd-mon/service.ts", + "dev:faultproof-wd-mon": "tsx ./src/faultproof-wd-mon/service.ts", + "dev:initialized-upgraded-mon": "tsx watch ./contrib/initialized-upgraded-mon/service.ts", + "start:balance-mon": "tsx ./internal/balance-mon/service.ts", +- "start:drippie-mon": "tsx ./contrib/drippie/service.ts", + "start:fault-mon": "tsx ./src/fault-mon/service.ts", + "start:multisig-mon": "tsx ./internal/multisig-mon/service.ts", + "start:replica-mon": "tsx ./contrib/replica-mon/service.ts",
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+20
+
-2
+ +
+ +
+
+
diff --git OP/packages/chain-mon/tsconfig.json CELO/packages/chain-mon/tsconfig.json +index f9bea541e657224b2de8265af0423a970af46dff..46e4d8fe12ca94f71b236a13922fd33ec4a591e2 100644 +--- OP/packages/chain-mon/tsconfig.json ++++ CELO/packages/chain-mon/tsconfig.json +@@ -1,9 +1,27 @@ + { +- "extends": "../../tsconfig.json", + "compilerOptions": { + "outDir": "./dist", +- "skipLibCheck": true ++ "skipLibCheck": true, ++ "module": "commonjs", ++ "target": "es2017", ++ "sourceMap": true, ++ "esModuleInterop": true, ++ "composite": true, ++ "resolveJsonModule": true, ++ "declaration": true, ++ "noImplicitAny": false, ++ "removeComments": true, ++ "noLib": false, ++ "emitDecoratorMetadata": true, ++ "experimentalDecorators": true, ++ "typeRoots": [ ++ "node_modules/@types" ++ ] + }, ++ "exclude": [ ++ "node_modules", ++ "dist" ++ ], + "include": [ + "package.json", + "src/abi/IGnosisSafe.0.8.19.json",
+
+ + + +
+
+ +
+
+
+ + +
+ +
+
+
+ + +
+
+ +
+
+
+ + + + + + + +
+ +
+
+
+ + +
+
+ +
+
+
+ + +
+ +
+
+
+ + +
+
+ +
+
+
+ + +
+ +
+
+
+ + +
+
+ +
+
+
+ + +
+ +
+
+
+ +
+ + +
+
+
+ + (new) + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+29
+
-0
+ +
+ +
+
+
diff --git OP/op-chain-ops/Dockerfile CELO/op-chain-ops/Dockerfile +new file mode 100644 +index 0000000000000000000000000000000000000000..532a73bb5acbbae5c99f47cf60c9fb5a64c6a69a +--- /dev/null ++++ CELO/op-chain-ops/Dockerfile +@@ -0,0 +1,29 @@ ++FROM golang:1.21.1-alpine3.18 as builder ++ ++RUN apk --no-cache add make ++ ++COPY ./go.mod /app/go.mod ++COPY ./go.sum /app/go.sum ++ ++WORKDIR /app ++ ++RUN go mod download ++ ++COPY ./op-service /app/op-service ++COPY ./op-node /app/op-node ++COPY ./op-plasma /app/op-plasma ++COPY ./op-chain-ops /app/op-chain-ops ++WORKDIR /app/op-chain-ops ++RUN make celo-migrate ++ ++FROM alpine:3.18 ++RUN apk --no-cache add ca-certificates bash rsync ++ ++# RUN addgroup -S app && adduser -S app -G app ++# USER app ++WORKDIR /app ++ ++COPY --from=builder /app/op-chain-ops/bin/celo-migrate /app ++ENV PATH="/app:${PATH}" ++ ++ENTRYPOINT ["/app/celo-migrate"]
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+3
+
-3
+ +
+ +
+
+
diff --git OP/op-chain-ops/Makefile CELO/op-chain-ops/Makefile +index 1808807c73781785d406aa674659b07a91b84942..d93243a215226aa24247b0928a60f7e2c994249d 100644 +--- OP/op-chain-ops/Makefile ++++ CELO/op-chain-ops/Makefile +@@ -3,9 +3,6 @@ ifeq ($(shell uname),Darwin) + FUZZLDFLAGS := -ldflags=-extldflags=-Wl,-ld_classic + endif +  +-op-version-check: +- go build -o ./bin/op-version-check ./cmd/op-version-check/main.go +- + ecotone-scalar: + go build -o ./bin/ecotone-scalar ./cmd/ecotone-scalar/main.go +  +@@ -14,6 +11,9 @@ go build -o ./bin/receipt-reference-builder ./cmd/receipt-reference-builder/*.go +  + op-upgrade: + go build -o ./bin/op-upgrade ./cmd/op-upgrade/main.go ++ ++celo-migrate: ++ go build -o ./bin/celo-migrate ./cmd/celo-migrate/*.go +  + test: + go test ./...
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + (deleted) + +
+
+
+ +
+ +
+ +
+0
+
-52
+ +
+ +
+
+
diff --git OP/op-chain-ops/README.md CELO/op-chain-ops/README.md +deleted file mode 100644 +index a40232fe7d17a784e9d60710d6097d6217b99045..0000000000000000000000000000000000000000 +--- OP/op-chain-ops/README.md ++++ /dev/null +@@ -1,52 +0,0 @@ +-# op-chain-ops +- +-This package contains utilities for working with chain state. +- +-## op-version-check +- +-A CLI tool for determining which contract versions are deployed for +-chains in a superchain. It will output a JSON file that contains a +-list of each chain's versions. It is assumed that the implementations +-that are being checked have already been deployed and their contract +-addresses exist inside of the `superchain-registry` repository. It is +-also assumed that the semantic version file in the `superchain-registry` +-has been updated. The tool will output the semantic versioning to +-determine which contract versions are deployed. +- +-### Configuration +- +-#### L1 RPC URL +- +-The L1 RPC URL is used to determine which superchain to target. All +-L2s that are not based on top of the L1 chain that corresponds to the +-L1 RPC URL are filtered out from being checked. It also is used to +-double check that the data in the `superchain-registry` is correct. +- +-#### Chain IDs +- +-A list of L2 chain IDs can be passed that will be used to filter which +-L2 chains will have their versions checked. Omitting this argument will +-result in all chains in the superchain being considered. +- +-#### Deploy Config +- +-The path to the `deploy-config` directory in the contracts package. +-Since multiple L2 networks may be considered in the check, the `deploy-config` +-directory must be passed and then the particular deploy config files will +-be read out of the directory as needed. +- +-#### Outfile +- +-The file that the versions should be written to. If omitted, the file +-will be written to stdout +- +-#### Usage +- +-It can be built and run using the [Makefile](./Makefile) `op-version-check` +-target. Run `make op-version-check` to create a binary in [./bin/op-version-check](./bin/op-version-check) +-that can be executed, optionally providing the `--l1-rpc-url`, `--chain-ids`, +-`--superchain-target`, and `--outfile` flags. +- +-```sh +-./bin/op-version-check +-```
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+1
+
-1
+ +
+ +
+
+
diff --git OP/op-chain-ops/clients/clients.go CELO/op-chain-ops/clients/clients.go +index 475a130a1f031cf8ab042dfa04367bd32e53e363..16cf1970728c652499250a1f10091af315ac2c35 100644 +--- OP/op-chain-ops/clients/clients.go ++++ CELO/op-chain-ops/clients/clients.go +@@ -10,7 +10,7 @@ "github.com/ethereum/go-ethereum/rpc" + "github.com/urfave/cli/v2" + ) +  +-// clients represents a set of initialized RPC clients ++// Clients represents a set of initialized RPC clients + type Clients struct { + L1Client *ethclient.Client + L2Client *ethclient.Client
+
+ + +
+ + +
+
+
+ + (new) + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+117
+
-0
+ +
+ +
+
+
diff --git OP/op-chain-ops/cmd/celo-migrate/README.md CELO/op-chain-ops/cmd/celo-migrate/README.md
+new file mode 100644
+index 0000000000000000000000000000000000000000..9cf2493263a5bdf5087c51ee26ab0d860c4f0475
+--- /dev/null
++++ CELO/op-chain-ops/cmd/celo-migrate/README.md
+@@ -0,0 +1,117 @@
++# Celo L2 Migration Script
++
++## Overview
++
++This script has two main sections. The first migrates Celo blocks to a format compatible with `op-geth`, and the second performs necessary state changes such as deploying L2 smart contracts.
++
++### Block migration
++
++The block migration itself has two parts: it first migrates the ancient / frozen blocks, which are all blocks before the last 90000. Because the ancients db is append-only, it copies these blocks into a new database after making the necessary transformations. The script then copies the rest of the chaindata directory (excluding `/ancients`) using the system level `rsync` command. All non-ancient blocks are then transformed in-place in the new db, leaving the old db unchanged.
++
++### State migration
++
++After all blocks have been migrated, the script performs a series of modifications to the state db. This is also done in-place in the `--new-db` directory. First, the state migration deploys the L2 smart contracts by iterating through the genesis allocs passed to the script and setting the nonce, balance, code and storage for each address accordingly, overwriting existing data if necessary. Finally, the state migration will commit the state changes to produce a new state root and create the first Cel2 block.
++
++### Notes
++
++Once the state changes are complete, the migration is finished. The longest running section of the script is the ancients migration, and it can be resumed / skipped if interrupted partway. The rest of the script cannot be resumed and will restart from the last migrated ancient block if interrupted or re-run.
++
++The script outputs a `rollup-config.json` file that is passed to the sequencer in order to start the L2 network.
++
++See `--help` for how to run each portion of the script individually, along with other configuration options.
++
++### Running the script
++
++First, build the script by running
++
++```bash
++make celo-migrate
++```
++
++from the `op-chain-ops` directory.
++
++You can then run the script as follows.
++
++```bash
++go run ./cmd/celo-migrate --help
++```
++
++NOTE: You will need `rsync` to run this script if it's not already installed.
++
++#### Running with local test setup (Alfajores / Holesky)
++
++To test the script locally, we can migrate an Alfajores database and use Holesky as our L1. The input files needed for this can be found in `./testdata`. The necessary smart contracts have already been deployed on Holesky.
++
++##### Pull down the latest Alfajores database snapshot
++
++```bash
++gcloud alpha storage cp gs://celo-chain-backup/alfajores/chaindata-latest.tar.zst alfajores.tar.zst
++```
++
++Unzip and rename
++
++```bash
++tar --use-compress-program=unzstd -xvf alfajores.tar.zst
++mv chaindata ./data/alfajores_old
++```
++
++##### Generate test allocs file
++
++The state migration takes in an allocs file that specifies the L2 state changes to be made during the migration. This file can be generated from the deploy config and L1 contract addresses by running the following from the `contracts-bedrock` directory.
++
++```bash
++CONTRACT_ADDRESSES_PATH=../../op-chain-ops/cmd/celo-migrate/testdata/deployment-l1-holesky.json \
++DEPLOY_CONFIG_PATH=../../op-chain-ops/cmd/celo-migrate/testdata/deploy-config-holesky-alfajores.json \
++STATE_DUMP_PATH=../../op-chain-ops/cmd/celo-migrate/testdata/l2-allocs-alfajores.json \
++forge script ./scripts/L2Genesis.s.sol:L2Genesis \
++--sig 'runWithStateDump()'
++```
++
++This should output the allocs file to `./testdata/l2-allocs-alfajores.json`. If you encounter difficulties with this and want to just continue testing the script, you can alternatively find the allocs file [here](https://gist.github.com/jcortejoso/7f90ba9b67c669791014661ccb6de81a).
++
++##### Run script with test configuration
++
++```bash
++go run ./cmd/celo-migrate full \
++--deploy-config ./cmd/celo-migrate/testdata/deploy-config-holesky-alfajores.json \
++--l1-deployments ./cmd/celo-migrate/testdata/deployment-l1-holesky.json \
++--l1-rpc https://ethereum-holesky-rpc.publicnode.com \
++--l2-allocs ./cmd/celo-migrate/testdata/l2-allocs-alfajores.json \
++--outfile.rollup-config ./cmd/celo-migrate/testdata/rollup-config.json \
++--old-db ./data/alfajores_old \
++--new-db ./data/alfajores_new
++```
++
++The first time you run the script it should take ~5 minutes. The first part of the script will migrate ancient blocks, and will take the majority of the time.
++
++During the ancients migration you can play around with stopping and re-running the script, which should always resume where it left off. If you run the script subsequent times after ancient migrations have been run, the script should skip ancient migrations and proceed to migrating non-ancient blocks quickly.
++
++Note that partial migration progress beyond the ancient blocks (i.e. non-frozen blocks and state changes) will not be preserved between runs by default.
++
++#### Running for Cel2 migration
++
++##### Generate allocs file
++
++You can generate the allocs file needed to run the migration with the following script in `contracts-bedrock`.
++
++```bash
++CONTRACT_ADDRESSES_PATH=<PATH_TO_CONTRACT_ADDRESSES> \
++DEPLOY_CONFIG_PATH=<PATH_TO_MY_DEPLOY_CONFIG> \
++STATE_DUMP_PATH=<PATH_TO_WRITE_L2_ALLOCS> \
++forge script scripts/L2Genesis.s.sol:L2Genesis \
++--sig 'runWithStateDump()'
++```
++
++##### Dress rehearsal / pre-migration
++
++To minimize downtime caused by the migration, node operators can prepare their Cel2 databases by running this script a day ahead of the actual migration. This will pre-populate the new database with most of the ancient blocks needed for the final migration, and will also serve as a dress rehearsal for the rest of the migration.
++
++NOTE: The pre-migration should be run using a chaindata snapshot, rather than a db that is being used by a node. To avoid network downtime, we recommend that node operators do not stop any nodes in order to perform the pre-migration.
++
++Node operators should inspect their migration logs after the dress rehearsal to ensure the migration completed successfully and direct any questions to the Celo developer community on Discord before the actual migration.
++
++##### Final migration
++
++On the day of the actual Cel2 migration, this script can be re-run using the same parameters as for the dress rehearsal but with the latest Celo Mainnet database snapshot as `--old-db`. The script will only need to migrate any ancient blocks frozen after the dress rehearsal, all non-frozen blocks, and state.
++ ++Unlike the pre-migration, the final migration can be run directly on the db used by the Celo node rather than a snapshot.
+
+ + +
+ + +
+
+
+ + (new) + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+195
+
-0
+ +
+ +
+
+
diff --git OP/op-chain-ops/cmd/celo-migrate/ancients.go CELO/op-chain-ops/cmd/celo-migrate/ancients.go +new file mode 100644 +index 0000000000000000000000000000000000000000..b9e1fb975974f98c02d4b08aff2ca40d45a1d39c +--- /dev/null ++++ CELO/op-chain-ops/cmd/celo-migrate/ancients.go +@@ -0,0 +1,195 @@ ++package main ++ ++import ( ++ "context" ++ "fmt" ++ "path/filepath" ++ ++ "github.com/ethereum/go-ethereum/common" ++ "github.com/ethereum/go-ethereum/core/rawdb" ++ "github.com/ethereum/go-ethereum/ethdb" ++ "github.com/ethereum/go-ethereum/log" ++ "golang.org/x/sync/errgroup" ++) ++ ++// RLPBlockRange is a range of blocks in RLP format ++type RLPBlockRange struct { ++ start uint64 ++ hashes [][]byte ++ headers [][]byte ++ bodies [][]byte ++ receipts [][]byte ++ tds [][]byte ++} ++ ++func migrateAncientsDb(oldDBPath, newDBPath string, batchSize, bufferSize uint64) (uint64, uint64, error) { ++ oldFreezer, err := rawdb.NewChainFreezer(filepath.Join(oldDBPath, "ancient"), "", false) // Can't be readonly because we need the .meta files to be created ++ if err != nil { ++ return 0, 0, fmt.Errorf("failed to open old freezer: %w", err) ++ } ++ defer oldFreezer.Close() ++ ++ newFreezer, err := rawdb.NewChainFreezer(filepath.Join(newDBPath, "ancient"), "", false) ++ if err != nil { ++ return 0, 0, fmt.Errorf("failed to open new freezer: %w", err) ++ } ++ defer newFreezer.Close() ++ ++ numAncientsOld, err := oldFreezer.Ancients() ++ if err != nil { ++ return 0, 0, fmt.Errorf("failed to get number of ancients in old freezer: %w", err) ++ } ++ ++ numAncientsNewBefore, err := newFreezer.Ancients() ++ if err != nil { ++ return 0, 0, fmt.Errorf("failed to get number of ancients in new freezer: %w", err) ++ } ++ ++ if numAncientsNewBefore >= numAncientsOld { ++ log.Info("Ancient Block Migration Skipped", "process", "ancients", "ancientsInOldDB", numAncientsOld, "ancientsInNewDB", numAncientsNewBefore) ++ return numAncientsNewBefore, numAncientsNewBefore, nil ++ } ++ ++ log.Info("Ancient Block Migration Started", "process", "ancients", "startBlock", numAncientsNewBefore, "endBlock", numAncientsOld, "count", numAncientsOld-numAncientsNewBefore, "step", batchSize) ++ ++ g, ctx := errgroup.WithContext(context.Background()) ++ readChan := make(chan RLPBlockRange, bufferSize) ++ transformChan := make(chan RLPBlockRange, bufferSize) ++ ++ g.Go(func() error { ++ return readAncientBlocks(ctx, oldFreezer, numAncientsNewBefore, numAncientsOld, batchSize, readChan) ++ }) ++ g.Go(func() error { return transformBlocks(ctx, readChan, transformChan) }) ++ g.Go(func() error { return writeAncientBlocks(ctx, newFreezer, transformChan) }) ++ ++ if err = g.Wait(); err != nil { ++ return 0, 0, fmt.Errorf("failed to migrate ancients: %w", err) ++ } ++ ++ numAncientsNewAfter, err := newFreezer.Ancients() ++ if err != nil { ++ return 0, 0, fmt.Errorf("failed to get number of ancients in new freezer: %w", err) ++ } ++ ++ log.Info("Ancient Block Migration Ended", "process", "ancients", "ancientsInOldDB", numAncientsOld, "ancientsInNewDB", numAncientsNewAfter, "migrated", numAncientsNewAfter-numAncientsNewBefore) ++ return numAncientsNewBefore, numAncientsNewAfter, nil ++} ++ ++func readAncientBlocks(ctx context.Context, freezer *rawdb.Freezer, startBlock, endBlock, batchSize uint64, out chan<- RLPBlockRange) error { ++ defer close(out) ++ ++ for i := startBlock; i < endBlock; i += batchSize { ++ select { ++ case <-ctx.Done(): ++ return ctx.Err() ++ default: ++ count := min(batchSize, endBlock-i+1) ++ start := i ++ ++ blockRange := 
RLPBlockRange{ ++ start: start, ++ hashes: make([][]byte, count), ++ headers: make([][]byte, count), ++ bodies: make([][]byte, count), ++ receipts: make([][]byte, count), ++ tds: make([][]byte, count), ++ } ++ var err error ++ ++ blockRange.hashes, err = freezer.AncientRange(rawdb.ChainFreezerHashTable, start, count, 0) ++ if err != nil { ++ return fmt.Errorf("failed to read hashes from old freezer: %w", err) ++ } ++ blockRange.headers, err = freezer.AncientRange(rawdb.ChainFreezerHeaderTable, start, count, 0) ++ if err != nil { ++ return fmt.Errorf("failed to read headers from old freezer: %w", err) ++ } ++ blockRange.bodies, err = freezer.AncientRange(rawdb.ChainFreezerBodiesTable, start, count, 0) ++ if err != nil { ++ return fmt.Errorf("failed to read bodies from old freezer: %w", err) ++ } ++ blockRange.receipts, err = freezer.AncientRange(rawdb.ChainFreezerReceiptTable, start, count, 0) ++ if err != nil { ++ return fmt.Errorf("failed to read receipts from old freezer: %w", err) ++ } ++ blockRange.tds, err = freezer.AncientRange(rawdb.ChainFreezerDifficultyTable, start, count, 0) ++ if err != nil { ++ return fmt.Errorf("failed to read tds from old freezer: %w", err) ++ } ++ ++ out <- blockRange ++ } ++ } ++ return nil ++} ++ ++func transformBlocks(ctx context.Context, in <-chan RLPBlockRange, out chan<- RLPBlockRange) error { ++ // Transform blocks from the in channel and send them to the out channel ++ defer close(out) ++ for blockRange := range in { ++ select { ++ case <-ctx.Done(): ++ return ctx.Err() ++ default: ++ for i := range blockRange.hashes { ++ blockNumber := blockRange.start + uint64(i) ++ ++ newHeader, err := transformHeader(blockRange.headers[i]) ++ if err != nil { ++ return fmt.Errorf("can't transform header: %w", err) ++ } ++ newBody, err := transformBlockBody(blockRange.bodies[i]) ++ if err != nil { ++ return fmt.Errorf("can't transform body: %w", err) ++ } ++ ++ if yes, newHash := hasSameHash(newHeader, blockRange.hashes[i]); !yes { ++ log.Error("Hash mismatch", "block", blockNumber, "oldHash", common.BytesToHash(blockRange.hashes[i]), "newHash", newHash) ++ return fmt.Errorf("hash mismatch at block %d", blockNumber) ++ } ++ ++ blockRange.headers[i] = newHeader ++ blockRange.bodies[i] = newBody ++ } ++ out <- blockRange ++ } ++ } ++ return nil ++} ++ ++func writeAncientBlocks(ctx context.Context, freezer *rawdb.Freezer, in <-chan RLPBlockRange) error { ++ // Write blocks from the in channel to the newDb ++ for blockRange := range in { ++ select { ++ case <-ctx.Done(): ++ return ctx.Err() ++ default: ++ _, err := freezer.ModifyAncients(func(aWriter ethdb.AncientWriteOp) error { ++ for i := range blockRange.hashes { ++ blockNumber := blockRange.start + uint64(i) ++ if err := aWriter.AppendRaw(rawdb.ChainFreezerHashTable, blockNumber, blockRange.hashes[i]); err != nil { ++ return fmt.Errorf("can't write hash to Freezer: %w", err) ++ } ++ if err := aWriter.AppendRaw(rawdb.ChainFreezerHeaderTable, blockNumber, blockRange.headers[i]); err != nil { ++ return fmt.Errorf("can't write header to Freezer: %w", err) ++ } ++ if err := aWriter.AppendRaw(rawdb.ChainFreezerBodiesTable, blockNumber, blockRange.bodies[i]); err != nil { ++ return fmt.Errorf("can't write body to Freezer: %w", err) ++ } ++ if err := aWriter.AppendRaw(rawdb.ChainFreezerReceiptTable, blockNumber, blockRange.receipts[i]); err != nil { ++ return fmt.Errorf("can't write receipts to Freezer: %w", err) ++ } ++ if err := aWriter.AppendRaw(rawdb.ChainFreezerDifficultyTable, blockNumber, blockRange.tds[i]); err != 
nil { ++ return fmt.Errorf("can't write td to Freezer: %w", err) ++ } ++ } ++ return nil ++ }) ++ if err != nil { ++ return fmt.Errorf("failed to write block range: %w", err) ++ } ++ log.Info("Wrote ancient blocks", "start", blockRange.start, "end", blockRange.start+uint64(len(blockRange.hashes)-1), "count", len(blockRange.hashes)) ++ } ++ } ++ return nil ++}
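Taken together, readAncientBlocks, transformBlocks and writeAncientBlocks form a producer/transformer/consumer pipeline over RLPBlockRange batches. The wiring lives in migrateAncientsDb, which is not part of this excerpt, so the sketch below is only one plausible way to connect the stages with errgroup; the readAncientBlocks parameters are guessed from its call sites, and bufferSize mirrors the --buffer-size flag.

```go
// Sketch only: plausible wiring for the three-stage ancient-block pipeline.
// migrateAncientsDb is not shown in this excerpt, so the readAncientBlocks
// signature and the freezer arguments below are assumptions.
func runAncientPipeline(ctx context.Context, oldFreezer ethdb.AncientReader, newFreezer *rawdb.Freezer,
	startBlock, count, batchSize, bufferSize uint64) error {
	g, ctx := errgroup.WithContext(ctx) // golang.org/x/sync/errgroup

	readCh := make(chan RLPBlockRange, bufferSize)      // read -> transform
	transformCh := make(chan RLPBlockRange, bufferSize) // transform -> write

	g.Go(func() error {
		// Assumed signature; the real definition is cut off above.
		return readAncientBlocks(ctx, oldFreezer, startBlock, count, batchSize, readCh)
	})
	g.Go(func() error { return transformBlocks(ctx, readCh, transformCh) })
	g.Go(func() error { return writeAncientBlocks(ctx, newFreezer, transformCh) })

	// An error in any stage cancels ctx, which unwinds the other stages.
	return g.Wait()
}
```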
(new) CELO: +102 / -0
diff --git OP/op-chain-ops/cmd/celo-migrate/db.go CELO/op-chain-ops/cmd/celo-migrate/db.go +new file mode 100644 +index 0000000000000000000000000000000000000000..e7f685909d5361503472c397f4f689f279863f87 +--- /dev/null ++++ CELO/op-chain-ops/cmd/celo-migrate/db.go +@@ -0,0 +1,102 @@ ++package main ++ ++import ( ++ "encoding/binary" ++ "errors" ++ "fmt" ++ "os" ++ "path/filepath" ++ ++ "github.com/ethereum/go-ethereum/common" ++ "github.com/ethereum/go-ethereum/core/rawdb" ++ "github.com/ethereum/go-ethereum/ethdb" ++) ++ ++// Constants for the database ++const ( ++ DBCache = 1024 // size of the cache in MB ++ DBHandles = 60 // number of handles ++ LastMigratedNonAncientBlockKey = "celoLastMigratedNonAncientBlock" ++) ++ ++var ( ++ headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header ++) ++ ++// encodeBlockNumber encodes a block number as big endian uint64 ++func encodeBlockNumber(number uint64) []byte { ++ enc := make([]byte, 8) ++ binary.BigEndian.PutUint64(enc, number) ++ return enc ++} ++ ++// headerKey = headerPrefix + num (uint64 big endian) + hash ++func headerKey(number uint64, hash common.Hash) []byte { ++ return append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...) ++} ++ ++// readLastMigratedNonAncientBlock returns the last migration number. If it doesn't exist, it returns 0. ++func readLastMigratedNonAncientBlock(db ethdb.KeyValueReader) uint64 { ++ data, err := db.Get([]byte(LastMigratedNonAncientBlockKey)) ++ if err != nil { ++ return 0 ++ } ++ number := binary.BigEndian.Uint64(data) ++ return number ++} ++ ++// writeLastMigratedNonAncientBlock stores the last migration number. ++func writeLastMigratedNonAncientBlock(db ethdb.KeyValueWriter, number uint64) error { ++ enc := make([]byte, 8) ++ binary.BigEndian.PutUint64(enc, number) ++ return db.Put([]byte(LastMigratedNonAncientBlockKey), enc) ++} ++ ++// deleteLastMigratedNonAncientBlock removes the last migration number. ++func deleteLastMigratedNonAncientBlock(db ethdb.KeyValueWriter) error { ++ return db.Delete([]byte(LastMigratedNonAncientBlockKey)) ++} ++ ++// openDB opens the chaindata database at the given path. Note this path is below the datadir ++func openDB(chaindataPath string) (ethdb.Database, error) { ++ if _, err := os.Stat(chaindataPath); errors.Is(err, os.ErrNotExist) { ++ return nil, err ++ } ++ ++ ldb, err := rawdb.Open(rawdb.OpenOptions{ ++ Type: "leveldb", ++ Directory: chaindataPath, ++ AncientsDirectory: filepath.Join(chaindataPath, "ancient"), ++ Namespace: "", ++ Cache: DBCache, ++ Handles: DBHandles, ++ ReadOnly: false, ++ }) ++ if err != nil { ++ return nil, err ++ } ++ return ldb, nil ++} ++ ++func createNewDbIfNotExists(newDBPath string) error { ++ if err := os.MkdirAll(newDBPath, 0755); err != nil { ++ return fmt.Errorf("failed to create new database directory: %w", err) ++ } ++ return nil ++} ++ ++func cleanupNonAncientDb(dir string) error { ++ files, err := os.ReadDir(dir) ++ if err != nil { ++ return fmt.Errorf("failed to read directory: %w", err) ++ } ++ for _, file := range files { ++ if file.Name() != "ancient" { ++ err := os.RemoveAll(filepath.Join(dir, file.Name())) ++ if err != nil { ++ return fmt.Errorf("failed to remove file: %w", err) ++ } ++ } ++ } ++ return nil ++}
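headerKey reproduces go-ethereum's canonical header key layout: the one-byte "h" prefix, the 8-byte big-endian block number, then the 32-byte hash. A standalone check of that layout (the hash value is made up):

```go
package main

import (
	"encoding/binary"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	number := uint64(1234)
	hash := common.HexToHash("0xabc1") // hypothetical hash

	// Same construction as encodeBlockNumber + headerKey in db.go.
	enc := make([]byte, 8)
	binary.BigEndian.PutUint64(enc, number)
	key := append(append([]byte("h"), enc...), hash.Bytes()...)

	fmt.Printf("key length: %d (1 prefix + 8 number + 32 hash)\n", len(key)) // 41
	fmt.Printf("key: %x\n", key)
}
```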
(new) CELO: +362 / -0
diff --git OP/op-chain-ops/cmd/celo-migrate/main.go CELO/op-chain-ops/cmd/celo-migrate/main.go +new file mode 100644 +index 0000000000000000000000000000000000000000..bab69eb9b2857b2d0b6a099253c5d4aa974bee5f +--- /dev/null ++++ CELO/op-chain-ops/cmd/celo-migrate/main.go +@@ -0,0 +1,362 @@ ++package main ++ ++import ( ++ "context" ++ "errors" ++ "fmt" ++ "math/big" ++ "os" ++ "os/exec" ++ "runtime/debug" ++ ++ "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" ++ "github.com/ethereum-optimism/optimism/op-chain-ops/genesis" ++ "github.com/ethereum-optimism/optimism/op-service/jsonutil" ++ oplog "github.com/ethereum-optimism/optimism/op-service/log" ++ "github.com/ethereum/go-ethereum/core/types" ++ "github.com/ethereum/go-ethereum/ethclient" ++ "github.com/ethereum/go-ethereum/log" ++ "github.com/ethereum/go-ethereum/rpc" ++ ++ "github.com/mattn/go-isatty" ++ "github.com/urfave/cli/v2" ++ "golang.org/x/exp/slog" ++) ++ ++var ( ++ deployConfigFlag = &cli.PathFlag{ ++ Name: "deploy-config", ++ Usage: "Path to the JSON file that was used for the l1 contracts deployment. A test example can be found here 'op-chain-ops/genesis/testdata/test-deploy-config-full.json' and documentation for the fields is at https://docs.optimism.io/builders/chain-operators/management/configuration", ++ Required: true, ++ } ++ l1DeploymentsFlag = &cli.PathFlag{ ++ Name: "l1-deployments", ++ Usage: "Path to L1 deployments JSON file, the output of running the bedrock contracts deployment for the given 'deploy-config'", ++ Required: true, ++ } ++ l1RPCFlag = &cli.StringFlag{ ++ Name: "l1-rpc", ++ Usage: "RPC URL for a node of the L1 defined in the 'deploy-config'", ++ Required: true, ++ } ++ l2AllocsFlag = &cli.PathFlag{ ++ Name: "l2-allocs", ++ Usage: "Path to L2 genesis allocs file. You can find instructions on how to generate this file in the README", ++ Required: true, ++ } ++ outfileRollupConfigFlag = &cli.PathFlag{ ++ Name: "outfile.rollup-config", ++ Usage: "Path to write the rollup config JSON file, to be provided to op-node with the 'rollup.config' flag", ++ Required: true, ++ } ++ oldDBPathFlag = &cli.PathFlag{ ++ Name: "old-db", ++ Usage: "Path to the old Celo chaindata dir, can be found at '<datadir>/celo/chaindata'", ++ Required: true, ++ } ++ newDBPathFlag = &cli.PathFlag{ ++ Name: "new-db", ++ Usage: "Path to write migrated Celo chaindata, note the new node implementation expects to find this chaindata at the following path '<datadir>/geth/chaindata'", ++ Required: true, ++ } ++ batchSizeFlag = &cli.Uint64Flag{ ++ Name: "batch-size", ++ Usage: "Batch size to use for block migration, larger batch sizes can speed up migration but require more memory. If increasing the batch size consider also increasing the memory-limit", ++ Value: 50000, // TODO(Alec) optimize default parameters ++ } ++ bufferSizeFlag = &cli.Uint64Flag{ ++ Name: "buffer-size", ++ Usage: "Buffer size to use for ancient block migration channels. Defaults to 0. Included to facilitate testing for performance improvements.", ++ Value: 0, ++ } ++ memoryLimitFlag = &cli.Int64Flag{ ++ Name: "memory-limit", ++ Usage: "Memory limit in MiB, should be set lower than the available amount of memory in your system to prevent out of memory errors", ++ Value: 7500, ++ } ++ clearAllFlag = &cli.BoolFlag{ ++ Name: "clear-all", ++ Usage: "Use this to start with a fresh new db, deleting all data including ancients. 
CAUTION: Re-migrating ancients takes time.", ++ } ++ keepNonAncientsFlag = &cli.BoolFlag{ ++ Name: "keep-non-ancients", ++ Usage: "CAUTION: Not recommended for production. Use to keep all data in the new db as is, including any partially migrated non-ancient blocks and state data. If non-ancient blocks are partially migrated, the script will attempt to resume the migration.", ++ } ++ onlyAncientsFlag = &cli.BoolFlag{ ++ Name: "only-ancients", ++ Usage: "Use to only migrate ancient blocks. Ignored when running full migration", ++ } ++ ++ blockMigrationFlags = []cli.Flag{ ++ onlyAncientsFlag, ++ oldDBPathFlag, ++ newDBPathFlag, ++ batchSizeFlag, ++ bufferSizeFlag, ++ memoryLimitFlag, ++ clearAllFlag, ++ keepNonAncientsFlag, ++ } ++ stateMigrationFlags = []cli.Flag{ ++ newDBPathFlag, ++ deployConfigFlag, ++ l1DeploymentsFlag, ++ l1RPCFlag, ++ l2AllocsFlag, ++ outfileRollupConfigFlag, ++ } ++ // Ignore onlyAncients flag and duplicate newDBPathFlag for full migration ++ fullMigrationFlags = append(blockMigrationFlags[1:], stateMigrationFlags[1:]...) ++) ++ ++type blockMigrationOptions struct { ++ oldDBPath string ++ newDBPath string ++ batchSize uint64 ++ bufferSize uint64 ++ memoryLimit int64 ++ clearAll bool ++ keepNonAncients bool ++ onlyAncients bool ++} ++ ++type stateMigrationOptions struct { ++ deployConfig string ++ l1Deployments string ++ l1RPC string ++ l2AllocsPath string ++ outfileRollupConfig string ++ newDBPath string ++} ++ ++func parseBlockMigrationOptions(ctx *cli.Context) blockMigrationOptions { ++ return blockMigrationOptions{ ++ oldDBPath: ctx.String(oldDBPathFlag.Name), ++ newDBPath: ctx.String(newDBPathFlag.Name), ++ batchSize: ctx.Uint64(batchSizeFlag.Name), ++ bufferSize: ctx.Uint64(bufferSizeFlag.Name), ++ memoryLimit: ctx.Int64(memoryLimitFlag.Name), ++ clearAll: ctx.Bool(clearAllFlag.Name), ++ keepNonAncients: ctx.Bool(keepNonAncientsFlag.Name), ++ onlyAncients: ctx.Bool(onlyAncientsFlag.Name), ++ } ++} ++ ++func parseStateMigrationOptions(ctx *cli.Context) stateMigrationOptions { ++ return stateMigrationOptions{ ++ newDBPath: ctx.String(newDBPathFlag.Name), ++ deployConfig: ctx.Path(deployConfigFlag.Name), ++ l1Deployments: ctx.Path(l1DeploymentsFlag.Name), ++ l1RPC: ctx.String(l1RPCFlag.Name), ++ l2AllocsPath: ctx.Path(l2AllocsFlag.Name), ++ outfileRollupConfig: ctx.Path(outfileRollupConfigFlag.Name), ++ } ++} ++ ++func main() { ++ ++ color := isatty.IsTerminal(os.Stderr.Fd()) ++ handler := log.NewTerminalHandlerWithLevel(os.Stderr, slog.LevelDebug, color) ++ oplog.SetGlobalLogHandler(handler) ++ ++ log.Info("Beginning Cel2 Migration") ++ ++ app := &cli.App{ ++ Name: "celo-migrate", ++ Usage: "Migrate Celo block and state data to a CeL2 DB", ++ Commands: []*cli.Command{ ++ { ++ Name: "blocks", ++ Aliases: []string{"b"}, ++ Usage: "Migrate Celo block data to a CeL2 DB", ++ Flags: blockMigrationFlags, ++ Action: func(ctx *cli.Context) error { ++ return runBlockMigration(parseBlockMigrationOptions(ctx)) ++ }, ++ }, ++ { ++ Name: "state", ++ Aliases: []string{"s"}, ++ Usage: "Migrate Celo state data to a CeL2 DB. 
Makes necessary state changes and generates a rollup config file.", ++ Flags: stateMigrationFlags, ++ Action: func(ctx *cli.Context) error { ++ return runStateMigration(parseStateMigrationOptions(ctx)) ++ }, ++ }, ++ { ++ Name: "full", ++ Aliases: []string{"f", "all", "a"}, ++ Usage: "Perform a full migration of both block and state data to a CeL2 DB", ++ Flags: fullMigrationFlags, ++ Action: func(ctx *cli.Context) error { ++ if err := runBlockMigration(parseBlockMigrationOptions(ctx)); err != nil { ++ return fmt.Errorf("failed to run block migration: %w", err) ++ } ++ ++ if err := runStateMigration(parseStateMigrationOptions(ctx)); err != nil { ++ return fmt.Errorf("failed to run state migration: %w", err) ++ } ++ ++ return nil ++ }, ++ }, ++ }, ++ OnUsageError: func(ctx *cli.Context, err error, isSubcommand bool) error { ++ if isSubcommand { ++ return err ++ } ++ _ = cli.ShowAppHelp(ctx) ++ return fmt.Errorf("please provide a valid command") ++ }, ++ } ++ ++ if err := app.Run(os.Args); err != nil { ++ log.Crit("error in migration", "err", err) ++ } ++ log.Info("Finished migration successfully!") ++} ++ ++func runBlockMigration(opts blockMigrationOptions) error { ++ ++ // Check that `rsync` command is available. We use this to copy the db excluding ancients, which we will copy separately ++ if _, err := exec.LookPath("rsync"); err != nil { ++ return fmt.Errorf("please install `rsync` to run block migration") ++ } ++ ++ debug.SetMemoryLimit(opts.memoryLimit * 1 << 20) // Set memory limit, converting from MiB to bytes ++ ++ log.Info("Block Migration Started", "oldDBPath", opts.oldDBPath, "newDBPath", opts.newDBPath, "batchSize", opts.batchSize, "memoryLimit", opts.memoryLimit, "clearAll", opts.clearAll, "keepNonAncients", opts.keepNonAncients, "onlyAncients", opts.onlyAncients) ++ ++ var err error ++ ++ if err = createNewDbIfNotExists(opts.newDBPath); err != nil { ++ return fmt.Errorf("failed to create new database: %w", err) ++ } ++ ++ if opts.clearAll { ++ if err = os.RemoveAll(opts.newDBPath); err != nil { ++ return fmt.Errorf("failed to remove new database: %w", err) ++ } ++ } else if !opts.keepNonAncients { ++ if err = cleanupNonAncientDb(opts.newDBPath); err != nil { ++ return fmt.Errorf("failed to reset non-ancient database: %w", err) ++ } ++ } ++ ++ var numAncientsNewBefore uint64 ++ var numAncientsNewAfter uint64 ++ if numAncientsNewBefore, numAncientsNewAfter, err = migrateAncientsDb(opts.oldDBPath, opts.newDBPath, opts.batchSize, opts.bufferSize); err != nil { ++ return fmt.Errorf("failed to migrate ancients database: %w", err) ++ } ++ ++ var numNonAncients uint64 ++ if !opts.onlyAncients { ++ if numNonAncients, err = migrateNonAncientsDb(opts.oldDBPath, opts.newDBPath, numAncientsNewAfter-1, opts.batchSize); err != nil { ++ return fmt.Errorf("failed to migrate non-ancients database: %w", err) ++ } ++ } else { ++ log.Info("Skipping non-ancients migration") ++ } ++ ++ log.Info("Block Migration Completed", "migratedAncients", numAncientsNewAfter-numAncientsNewBefore, "migratedNonAncients", numNonAncients) ++ ++ return nil ++} ++ ++func runStateMigration(opts stateMigrationOptions) error { ++ log.Info("State Migration Started", "newDBPath", opts.newDBPath, "deployConfig", opts.deployConfig, "l1Deployments", opts.l1Deployments, "l1RPC", opts.l1RPC, "l2AllocsPath", opts.l2AllocsPath, "outfileRollupConfig", opts.outfileRollupConfig) ++ ++ // Read deployment configuration ++ config, err := genesis.NewDeployConfig(opts.deployConfig) ++ if err != nil { ++ return err ++ } ++ ++ if 
config.DeployCeloContracts { ++ return errors.New("DeployCeloContracts is not supported in migration") ++ } ++ if config.FundDevAccounts { ++ return errors.New("FundDevAccounts is not supported in migration") ++ } ++ ++ // Try reading the L1 deployment information ++ deployments, err := genesis.NewL1Deployments(opts.l1Deployments) ++ if err != nil { ++ return fmt.Errorf("cannot read L1 deployments at %s: %w", opts.l1Deployments, err) ++ } ++ config.SetDeployments(deployments) ++ ++ // Get latest block information from L1 ++ var l1StartBlock *types.Block ++ client, err := ethclient.Dial(opts.l1RPC) ++ if err != nil { ++ return fmt.Errorf("cannot dial %s: %w", opts.l1RPC, err) ++ } ++ ++ if config.L1StartingBlockTag == nil { ++ l1StartBlock, err = client.BlockByNumber(context.Background(), nil) ++ if err != nil { ++ return fmt.Errorf("cannot fetch latest block: %w", err) ++ } ++ tag := rpc.BlockNumberOrHashWithHash(l1StartBlock.Hash(), true) ++ config.L1StartingBlockTag = (*genesis.MarshalableRPCBlockNumberOrHash)(&tag) ++ } else if config.L1StartingBlockTag.BlockHash != nil { ++ l1StartBlock, err = client.BlockByHash(context.Background(), *config.L1StartingBlockTag.BlockHash) ++ if err != nil { ++ return fmt.Errorf("cannot fetch block by hash: %w", err) ++ } ++ } else if config.L1StartingBlockTag.BlockNumber != nil { ++ l1StartBlock, err = client.BlockByNumber(context.Background(), big.NewInt(config.L1StartingBlockTag.BlockNumber.Int64())) ++ if err != nil { ++ return fmt.Errorf("cannot fetch block by number: %w", err) ++ } ++ } ++ ++ // Ensure that there is a starting L1 block ++ if l1StartBlock == nil { ++ return fmt.Errorf("no starting L1 block") ++ } ++ ++ // Sanity check the config. Do this after filling in the L1StartingBlockTag ++ // if it is not defined. ++ if err := config.Check(); err != nil { ++ return err ++ } ++ ++ log.Info("Using L1 Start Block", "number", l1StartBlock.Number(), "hash", l1StartBlock.Hash().Hex()) ++ ++ // Build the L2 genesis block ++ l2Allocs, err := foundry.LoadForgeAllocs(opts.l2AllocsPath) ++ if err != nil { ++ return err ++ } ++ ++ l2Genesis, err := genesis.BuildL2Genesis(config, l2Allocs, l1StartBlock) ++ if err != nil { ++ return fmt.Errorf("error creating l2 genesis: %w", err) ++ } ++ ++ // Write changes to state to actual state database ++ cel2Header, err := applyStateMigrationChanges(config, l2Genesis, opts.newDBPath) ++ if err != nil { ++ return err ++ } ++ log.Info("Updated Cel2 state") ++ ++ rollupConfig, err := config.RollupConfig(l1StartBlock, cel2Header.Hash(), cel2Header.Number.Uint64()) ++ if err != nil { ++ return err ++ } ++ if err := rollupConfig.Check(); err != nil { ++ return fmt.Errorf("generated rollup config does not pass validation: %w", err) ++ } ++ ++ log.Info("Writing rollup config", "file", opts.outfileRollupConfig) ++ if err := jsonutil.WriteJSON(opts.outfileRollupConfig, rollupConfig, OutFilePerm); err != nil { ++ return err ++ } ++ ++ log.Info("State Migration Completed") ++ ++ return nil ++}
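With the commands defined above, a full migration is a single CLI invocation. The sketch below drives the built binary via os/exec; the binary location, data paths and RPC URL are all placeholders, and only the flag names come from the definitions in main.go.

```go
package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	// All paths and the RPC URL are placeholders; flag names match main.go above.
	cmd := exec.Command("./celo-migrate", "full",
		"--old-db", "/data/celo/chaindata",
		"--new-db", "/data/geth/chaindata",
		"--deploy-config", "deploy-config.json",
		"--l1-deployments", "l1-deployments.json",
		"--l1-rpc", "https://l1-rpc.example.invalid",
		"--l2-allocs", "l2-allocs.json",
		"--outfile.rollup-config", "rollup-config.json",
		"--batch-size", "50000",
	)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		log.Fatalf("migration failed: %v", err)
	}
}
```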
(new) CELO: +123 / -0
diff --git OP/op-chain-ops/cmd/celo-migrate/non-ancients.go CELO/op-chain-ops/cmd/celo-migrate/non-ancients.go +new file mode 100644 +index 0000000000000000000000000000000000000000..bab92aacd3f19cea45630ccf54c0de35b1e8b6ca +--- /dev/null ++++ CELO/op-chain-ops/cmd/celo-migrate/non-ancients.go +@@ -0,0 +1,123 @@ ++package main ++ ++import ( ++ "fmt" ++ "os" ++ "os/exec" ++ "strings" ++ ++ "github.com/ethereum/go-ethereum/core/rawdb" ++ "github.com/ethereum/go-ethereum/log" ++) ++ ++func migrateNonAncientsDb(oldDbPath, newDbPath string, lastAncientBlock, batchSize uint64) (uint64, error) { ++ // First copy files from old database to new database ++ log.Info("Copy files from old database (excluding ancients)", "process", "non-ancients") ++ ++ // Get rsync help output ++ cmdHelp := exec.Command("rsync", "--help") ++ output, _ := cmdHelp.CombinedOutput() ++ ++ // Convert output to string ++ outputStr := string(output) ++ ++ // TODO(Alec) have rsync run as part of pre-migration (but not the transformation or state) ++ // can use --update and --delete to keep things synced between dbs ++ ++ // Check for supported options ++ var cmd *exec.Cmd ++ // Prefer --info=progress2 over --progress ++ if strings.Contains(outputStr, "--info") { ++ cmd = exec.Command("rsync", "-v", "-a", "--info=progress2", "--exclude=ancient", oldDbPath+"/", newDbPath) ++ } else if strings.Contains(outputStr, "--progress") { ++ cmd = exec.Command("rsync", "-v", "-a", "--progress", "--exclude=ancient", oldDbPath+"/", newDbPath) ++ } else { ++ cmd = exec.Command("rsync", "-v", "-a", "--exclude=ancient", oldDbPath+"/", newDbPath) ++ } ++ log.Info("Running rsync command", "command", cmd.String()) ++ cmd.Stdout = os.Stdout ++ cmd.Stderr = os.Stderr ++ if err := cmd.Run(); err != nil { ++ return 0, fmt.Errorf("failed to copy old database to new database: %w", err) ++ } ++ ++ // Open the new database without access to AncientsDb ++ newDB, err := rawdb.NewLevelDBDatabase(newDbPath, DBCache, DBHandles, "", false) ++ if err != nil { ++ return 0, fmt.Errorf("failed to open new database: %w", err) ++ } ++ defer newDB.Close() ++ ++ // get the last block number ++ hash := rawdb.ReadHeadHeaderHash(newDB) ++ lastBlock := *rawdb.ReadHeaderNumber(newDB, hash) ++ lastMigratedNonAncientBlock := readLastMigratedNonAncientBlock(newDB) // returns 0 if not found ++ ++ // if migration was interrupted, start from the last migrated block ++ fromBlock := max(lastAncientBlock, lastMigratedNonAncientBlock) + 1 ++ ++ if fromBlock >= lastBlock { ++ log.Info("Non-Ancient Block Migration Skipped", "process", "non-ancients", "lastAncientBlock", lastAncientBlock, "endBlock", lastBlock, "lastMigratedNonAncientBlock", lastMigratedNonAncientBlock) ++ if lastMigratedNonAncientBlock != lastBlock { ++ return 0, fmt.Errorf("migration range empty but last migrated block is not the last block in the database") ++ } ++ return 0, nil ++ } ++ ++ log.Info("Non-Ancient Block Migration Started", "process", "non-ancients", "startBlock", fromBlock, "endBlock", lastBlock, "count", lastBlock-fromBlock, "lastAncientBlock", lastAncientBlock, "lastMigratedNonAncientBlock", lastMigratedNonAncientBlock) ++ ++ for i := fromBlock; i <= lastBlock; i += batchSize { ++ numbersHash := rawdb.ReadAllHashesInRange(newDB, i, i+batchSize-1) ++ ++ log.Info("Processing Block Range", "process", "non-ancients", "from", i, "to(inclusive)", i+batchSize-1, "count", len(numbersHash)) ++ for _, numberHash := range numbersHash { ++ // read header and body ++ header := rawdb.ReadHeaderRLP(newDB, 
numberHash.Hash, numberHash.Number) ++ body := rawdb.ReadBodyRLP(newDB, numberHash.Hash, numberHash.Number) ++ ++ // transform header and body ++ newHeader, err := transformHeader(header) ++ if err != nil { ++ return 0, fmt.Errorf("failed to transform header: block %d - %x: %w", numberHash.Number, numberHash.Hash, err) ++ } ++ newBody, err := transformBlockBody(body) ++ if err != nil { ++ return 0, fmt.Errorf("failed to transform body: block %d - %x: %w", numberHash.Number, numberHash.Hash, err) ++ } ++ ++ if yes, newHash := hasSameHash(newHeader, numberHash.Hash[:]); !yes { ++ log.Error("Hash mismatch", "block", numberHash.Number, "oldHash", numberHash.Hash, "newHash", newHash) ++ return 0, fmt.Errorf("hash mismatch at block %d - %x", numberHash.Number, numberHash.Hash) ++ } ++ ++ // write header and body ++ batch := newDB.NewBatch() ++ rawdb.WriteBodyRLP(batch, numberHash.Hash, numberHash.Number, newBody) ++ _ = batch.Put(headerKey(numberHash.Number, numberHash.Hash), newHeader) ++ _ = writeLastMigratedNonAncientBlock(batch, numberHash.Number) ++ if err := batch.Write(); err != nil { ++ return 0, fmt.Errorf("failed to write header and body: block %d - %x: %w", numberHash.Number, numberHash.Hash, err) ++ } ++ } ++ } ++ ++ toBeRemoved := rawdb.ReadAllHashesInRange(newDB, 1, lastAncientBlock) ++ log.Info("Removing frozen blocks", "process", "non-ancients", "count", len(toBeRemoved)) ++ batch := newDB.NewBatch() ++ for _, numberHash := range toBeRemoved { ++ rawdb.DeleteBlockWithoutNumber(batch, numberHash.Hash, numberHash.Number) ++ rawdb.DeleteCanonicalHash(batch, numberHash.Number) ++ } ++ if err := batch.Write(); err != nil { ++ return 0, fmt.Errorf("failed to delete frozen blocks: %w", err) ++ } ++ ++ // if migration finished, remove the last migration number ++ if err := deleteLastMigratedNonAncientBlock(newDB); err != nil { ++ return 0, fmt.Errorf("failed to delete last migration number: %w", err) ++ } ++ ++ log.Info("Non-Ancient Block Migration Ended", "process", "non-ancients", "migratedBlocks", lastBlock-fromBlock+1, "removedBlocks", len(toBeRemoved)) ++ ++ return lastBlock - fromBlock + 1, nil ++}
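The resume behaviour above hinges on the celoLastMigratedNonAncientBlock marker from db.go: it is advanced with every batch write and deleted once the migration completes. A round-trip sketch against an in-memory database (same package as db.go; the block number is made up):

```go
// Sketch, same package as db.go: the marker round-trips through any
// ethdb key-value store; here an in-memory database stands in for LevelDB.
func ExampleResumeMarker() {
	db := rawdb.NewMemoryDatabase()

	fmt.Println(readLastMigratedNonAncientBlock(db)) // 0: no marker yet

	// Simulate an interrupted run whose last written batch ended here
	// (made-up block number).
	_ = writeLastMigratedNonAncientBlock(db, 21_000_000)
	fmt.Println(readLastMigratedNonAncientBlock(db)) // 21000000

	// A completed migration deletes the marker again.
	_ = deleteLastMigratedNonAncientBlock(db)
	fmt.Println(readLastMigratedNonAncientBlock(db)) // 0
}
```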
(new) CELO: +317 / -0
diff --git OP/op-chain-ops/cmd/celo-migrate/state.go CELO/op-chain-ops/cmd/celo-migrate/state.go +new file mode 100644 +index 0000000000000000000000000000000000000000..7511e45869e1cf3b63973d6efa4f150e28611ce2 +--- /dev/null ++++ CELO/op-chain-ops/cmd/celo-migrate/state.go +@@ -0,0 +1,317 @@ ++package main ++ ++import ( ++ "bytes" ++ "encoding/json" ++ "errors" ++ "fmt" ++ "math/big" ++ "os" ++ ++ "github.com/ethereum-optimism/optimism/op-chain-ops/genesis" ++ "github.com/ethereum-optimism/optimism/op-service/predeploys" ++ "github.com/ethereum/go-ethereum/common" ++ "github.com/ethereum/go-ethereum/contracts/addresses" ++ "github.com/ethereum/go-ethereum/core" ++ "github.com/ethereum/go-ethereum/core/rawdb" ++ "github.com/ethereum/go-ethereum/core/state" ++ "github.com/ethereum/go-ethereum/core/types" ++ "github.com/ethereum/go-ethereum/log" ++ "github.com/ethereum/go-ethereum/params" ++ "github.com/ethereum/go-ethereum/trie" ++ "github.com/ethereum/go-ethereum/triedb" ++ ++ "github.com/holiman/uint256" ++) ++ ++var ( ++ Big10 = uint256.NewInt(10) ++ Big9 = uint256.NewInt(9) ++ Big18 = uint256.NewInt(18) ++ ++ OutFilePerm = os.FileMode(0o440) ++ ++ alfajoresChainId uint64 = 44787 ++ mainnetChainId uint64 = 42220 ++ ++ accountOverwriteWhitelist = map[uint64]map[common.Address]struct{}{ ++ // Add any addresses that should be allowed to overwrite existing accounts here. ++ alfajoresChainId: { ++ // Create2Deployer ++ common.HexToAddress("0x13b0D85CcB8bf860b6b79AF3029fCA081AE9beF2"): {}, ++ }, ++ } ++ distributionScheduleAddressMap = map[uint64]common.Address{ ++ alfajoresChainId: common.HexToAddress("0x78af211ad79bce6bf636640ce8c2c2b29e02365a"), ++ } ++ celoTokenAddressMap = map[uint64]common.Address{ ++ alfajoresChainId: addresses.CeloTokenAlfajoresAddress, ++ mainnetChainId: addresses.CeloTokenAddress, ++ } ++) ++ ++func applyStateMigrationChanges(config *genesis.DeployConfig, genesis *core.Genesis, dbPath string) (*types.Header, error) { ++ log.Info("Opening Celo database", "dbPath", dbPath) ++ ++ ldb, err := openDB(dbPath) ++ if err != nil { ++ return nil, fmt.Errorf("cannot open DB: %w", err) ++ } ++ log.Info("Loaded Celo L1 DB", "db", ldb) ++ ++ // Grab the hash of the tip of the legacy chain. ++ hash := rawdb.ReadHeadHeaderHash(ldb) ++ log.Info("Reading chain tip from database", "hash", hash) ++ ++ // Grab the header number. ++ num := rawdb.ReadHeaderNumber(ldb, hash) ++ if num == nil { ++ return nil, fmt.Errorf("cannot find header number for %s", hash) ++ } ++ log.Info("Reading chain tip num from database", "number", num) ++ ++ // Grab the full header. ++ header := rawdb.ReadHeader(ldb, hash, *num) ++ log.Info("Read header from database", "header", header) ++ ++ // We need to update the chain config to set the correct hardforks. ++ genesisHash := rawdb.ReadCanonicalHash(ldb, 0) ++ cfg := rawdb.ReadChainConfig(ldb, genesisHash) ++ if cfg == nil { ++ log.Crit("chain config not found") ++ } ++ log.Info("Read chain config from database", "config", cfg) ++ ++ // Set up the backing store. ++ // TODO(pl): Do we need the preimages setting here? ++ underlyingDB := state.NewDatabaseWithConfig(ldb, &triedb.Config{Preimages: true}) ++ ++ // Open up the state database. ++ db, err := state.New(header.Root, underlyingDB, nil) ++ if err != nil { ++ return nil, fmt.Errorf("cannot open StateDB: %w", err) ++ } ++ ++ // Apply the changes to the state DB. 
++ applyAllocsToState(db, genesis, cfg) ++ ++ // Initialize the distribution schedule contract ++ // This uses the original config which won't enable recent hardforks (and things like the PUSH0 opcode) ++ // This is fine, as the token uses solc 0.5.x and therefore compatible bytecode ++ err = setupDistributionSchedule(db, cfg) ++ if err != nil { ++ // An error here shouldn't stop the migration, just log it ++ log.Warn("Error setting up distribution schedule", "error", err) ++ } ++ ++ migrationBlock := new(big.Int).Add(header.Number, common.Big1) ++ ++ // We're done messing around with the database, so we can now commit the changes to the DB. ++ // Note that this doesn't actually write the changes to disk. ++ log.Info("Committing state DB") ++ newRoot, err := db.Commit(migrationBlock.Uint64(), true) ++ if err != nil { ++ return nil, err ++ } ++ ++ baseFee := new(big.Int).SetUint64(params.InitialBaseFee) ++ if header.BaseFee != nil { ++ baseFee = header.BaseFee ++ } ++ ++ // Create the header for the Cel2 transition block. ++ cel2Header := &types.Header{ ++ ParentHash: header.Hash(), ++ UncleHash: types.EmptyUncleHash, ++ Coinbase: predeploys.SequencerFeeVaultAddr, ++ Root: newRoot, ++ TxHash: types.EmptyTxsHash, ++ ReceiptHash: types.EmptyReceiptsHash, ++ Bloom: types.Bloom{}, ++ Difficulty: new(big.Int).Set(common.Big0), ++ Number: migrationBlock, ++ GasLimit: header.GasLimit, ++ GasUsed: 0, ++ Time: header.Time + 5, ++ Extra: []byte("CeL2 migration"), ++ MixDigest: common.Hash{}, ++ Nonce: types.BlockNonce{}, ++ BaseFee: baseFee, ++ WithdrawalsHash: &types.EmptyWithdrawalsHash, ++ BlobGasUsed: new(uint64), ++ ExcessBlobGas: new(uint64), ++ ParentBeaconRoot: &common.Hash{}, ++ } ++ log.Info("Build Cel2 migration header", "header", cel2Header) ++ ++ // Create the Cel2 transition block from the header. Note that there are no transactions, ++ // uncle blocks, or receipts in the Cel2 transition block. ++ cel2Block := types.NewBlock(cel2Header, nil, nil, nil, trie.NewStackTrie(nil)) ++ ++ // We did it! ++ log.Info( ++ "Built Cel2 migration block", ++ "hash", cel2Block.Hash(), ++ "root", cel2Block.Root(), ++ "number", cel2Block.NumberU64(), ++ ) ++ ++ log.Info("Committing trie DB") ++ if err := db.Database().TrieDB().Commit(newRoot, true); err != nil { ++ return nil, err ++ } ++ ++ // Next we write the Cel2 genesis block to the database. ++ rawdb.WriteTd(ldb, cel2Block.Hash(), cel2Block.NumberU64(), cel2Block.Difficulty()) ++ rawdb.WriteBlock(ldb, cel2Block) ++ rawdb.WriteReceipts(ldb, cel2Block.Hash(), cel2Block.NumberU64(), nil) ++ rawdb.WriteCanonicalHash(ldb, cel2Block.Hash(), cel2Block.NumberU64()) ++ rawdb.WriteHeadBlockHash(ldb, cel2Block.Hash()) ++ rawdb.WriteHeadFastBlockHash(ldb, cel2Block.Hash()) ++ rawdb.WriteHeadHeaderHash(ldb, cel2Block.Hash()) ++ ++ // Mark the first CeL2 block as finalized ++ rawdb.WriteFinalizedBlockHash(ldb, cel2Block.Hash()) ++ ++ // Set the standard options. ++ cfg.LondonBlock = cel2Block.Number() ++ cfg.BerlinBlock = cel2Block.Number() ++ cfg.ArrowGlacierBlock = cel2Block.Number() ++ cfg.GrayGlacierBlock = cel2Block.Number() ++ cfg.MergeNetsplitBlock = cel2Block.Number() ++ cfg.TerminalTotalDifficulty = big.NewInt(0) ++ cfg.TerminalTotalDifficultyPassed = true ++ cfg.ShanghaiTime = &cel2Header.Time ++ cfg.CancunTime = &cel2Header.Time ++ ++ // Set the Optimism options. ++ cfg.BedrockBlock = cel2Block.Number() ++ // Enable Regolith from the start of Bedrock ++ cfg.RegolithTime = new(uint64) // what are those? do we need those? 
++ cfg.Optimism = &params.OptimismConfig{ ++ EIP1559Denominator: config.EIP1559Denominator, ++ EIP1559DenominatorCanyon: config.EIP1559DenominatorCanyon, ++ EIP1559Elasticity: config.EIP1559Elasticity, ++ } ++ cfg.CanyonTime = &cel2Header.Time ++ cfg.EcotoneTime = &cel2Header.Time ++ cfg.FjordTime = &cel2Header.Time ++ cfg.Cel2Time = &cel2Header.Time ++ ++ // Write the chain config to disk. ++ // TODO(pl): Why do we need to write this with the genesis hash, not `cel2Block.Hash()`?` ++ rawdb.WriteChainConfig(ldb, genesisHash, cfg) ++ marshalledConfig, err := json.Marshal(cfg) ++ if err != nil { ++ return nil, fmt.Errorf("failed to marshal chain config to JSON: %w", err) ++ } ++ log.Info("Wrote updated chain config", "config", string(marshalledConfig)) ++ ++ // We're done! ++ log.Info( ++ "Wrote CeL2 migration block", ++ "height", cel2Header.Number, ++ "root", cel2Header.Root.String(), ++ "hash", cel2Header.Hash().String(), ++ "timestamp", cel2Header.Time, ++ ) ++ ++ // Close the database handle ++ if err := ldb.Close(); err != nil { ++ return nil, err ++ } ++ ++ return cel2Header, nil ++} ++ ++// applyAllocsToState applies the account allocations from the allocation file to the state database. ++// It creates new accounts, sets their nonce, balance, code, and storage values. ++// If an account already exists, it adds the balance of the new account to the existing balance. ++// If the code of an existing account is different from the code in the genesis block, it logs a warning. ++// This changes the state root, so `Commit` needs to be called after this function. ++func applyAllocsToState(db *state.StateDB, genesis *core.Genesis, config *params.ChainConfig) { ++ log.Info("Starting to migrate OP contracts into state DB") ++ ++ accountCounter := 0 ++ overwriteCounter := 0 ++ for k, v := range genesis.Alloc { ++ accountCounter++ ++ ++ balance := uint256.MustFromBig(v.Balance) ++ ++ if db.Exist(k) { ++ // If the account already has balance, add it to the balance of the new account ++ balance = balance.Add(balance, db.GetBalance(k)) ++ ++ currentCode := db.GetCode(k) ++ equalCode := bytes.Equal(currentCode, v.Code) ++ if currentCode != nil && !equalCode { ++ if whitelist, exists := accountOverwriteWhitelist[config.ChainID.Uint64()]; exists { ++ if _, ok := whitelist[k]; ok { ++ log.Info("Account already exists with different code and is whitelisted, overwriting...", "address", k) ++ } else { ++ log.Warn("Account already exists with different code and is not whitelisted, overwriting...", "address", k, "oldCode", db.GetCode(k), "newCode", v.Code) ++ } ++ } else { ++ log.Warn("Account already exists with different code and no whitelist exists", "address", k, "oldCode", db.GetCode(k), "newCode", v.Code) ++ } ++ ++ overwriteCounter++ ++ } ++ } ++ db.CreateAccount(k) ++ ++ db.SetNonce(k, v.Nonce) ++ db.SetBalance(k, balance) ++ db.SetCode(k, v.Code) ++ for key, value := range v.Storage { ++ db.SetState(k, key, value) ++ } ++ ++ log.Info("Moved account", "address", k) ++ } ++ log.Info("Migrated OP contracts into state DB", "copiedAccounts", accountCounter, "overwrittenAccounts", overwriteCounter) ++} ++ ++// setupDistributionSchedule sets up the distribution schedule contract with the correct balance ++// The balance is set to the difference between the ceiling and the total supply of the token ++func setupDistributionSchedule(db *state.StateDB, config *params.ChainConfig) error { ++ log.Info("Setting up CeloDistributionSchedule balance") ++ ++ celoDistributionScheduleAddress, exists := 
distributionScheduleAddressMap[config.ChainID.Uint64()] ++ if !exists { ++ return errors.New("DistributionSchedule address not configured for this chain, skipping migration step") ++ } ++ ++ if !db.Exist(celoDistributionScheduleAddress) { ++ return errors.New("DistributionSchedule account does not exist, skipping migration step") ++ } ++ ++ tokenAddress, exists := celoTokenAddressMap[config.ChainID.Uint64()] ++ if !exists { ++ return errors.New("celo token address not configured for this chain, skipping migration step") ++ } ++ log.Info("Read contract addresses", "tokenAddress", tokenAddress, "distributionScheduleAddress", celoDistributionScheduleAddress) ++ ++ // totalSupply is stored in the third slot ++ totalSupply := db.GetState(tokenAddress, common.HexToHash("0x02")).Big() ++ ++ // Get total supply of celo token ++ billion := new(uint256.Int).Exp(Big10, Big9) ++ ethInWei := new(uint256.Int).Exp(Big10, Big18) ++ ++ ceiling := new(uint256.Int).Mul(billion, ethInWei) ++ ++ supplyU256 := uint256.MustFromBig(totalSupply) ++ if supplyU256.Cmp(ceiling) > 0 { ++ return fmt.Errorf("supply %s is greater than ceiling %s", totalSupply, ceiling) ++ } ++ ++ balance := new(uint256.Int).Sub(ceiling, supplyU256) ++ // Don't discard existing balance of the account ++ balance = new(uint256.Int).Add(balance, db.GetBalance(celoDistributionScheduleAddress)) ++ db.SetBalance(celoDistributionScheduleAddress, balance) ++ ++ log.Info("Set up CeloDistributionSchedule balance", "distributionScheduleAddress", celoDistributionScheduleAddress, "balance", balance, "total_supply", supplyU256, "ceiling", ceiling) ++ return nil ++}
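The balance computed in setupDistributionSchedule is simply the fixed one-billion-CELO ceiling (in wei) minus the token's on-chain total supply. The arithmetic in isolation, with a made-up supply value:

```go
package main

import (
	"fmt"

	"github.com/holiman/uint256"
)

func main() {
	big10 := uint256.NewInt(10)

	billion := new(uint256.Int).Exp(big10, uint256.NewInt(9))   // 10^9
	ethInWei := new(uint256.Int).Exp(big10, uint256.NewInt(18)) // 10^18 wei per CELO
	ceiling := new(uint256.Int).Mul(billion, ethInWei)          // 10^27 wei

	// Hypothetical total supply; the real value is read from the token's storage slot.
	supply := new(uint256.Int).Mul(uint256.NewInt(700_000_000), ethInWei)

	balance := new(uint256.Int).Sub(ceiling, supply)
	fmt.Printf("ceiling=%s supply=%s scheduled balance=%s\n", ceiling, supply, balance)
}
```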
(new) CELO: +109 / -0
diff --git OP/op-chain-ops/cmd/celo-migrate/transform.go CELO/op-chain-ops/cmd/celo-migrate/transform.go +new file mode 100644 +index 0000000000000000000000000000000000000000..5a80e8a51566f47408976d16f94885e4937108d3 +--- /dev/null ++++ CELO/op-chain-ops/cmd/celo-migrate/transform.go +@@ -0,0 +1,109 @@ ++package main ++ ++import ( ++ "bytes" ++ "errors" ++ "fmt" ++ "math/big" ++ ++ "github.com/ethereum/go-ethereum/common" ++ "github.com/ethereum/go-ethereum/core/types" ++ "github.com/ethereum/go-ethereum/crypto" ++ "github.com/ethereum/go-ethereum/rlp" ++) ++ ++var ( ++ IstanbulExtraVanity = 32 // Fixed number of extra-data bytes reserved for validator vanity ++) ++ ++// IstanbulAggregatedSeal is the aggregated seal for Istanbul blocks ++type IstanbulAggregatedSeal struct { ++ // Bitmap is a bitmap having an active bit for each validator that signed this block ++ Bitmap *big.Int ++ // Signature is an aggregated BLS signature resulting from signatures by each validator that signed this block ++ Signature []byte ++ // Round is the round in which the signature was created. ++ Round *big.Int ++} ++ ++// IstanbulExtra is the extra-data for Istanbul blocks ++type IstanbulExtra struct { ++ // AddedValidators are the validators that have been added in the block ++ AddedValidators []common.Address ++ // AddedValidatorsPublicKeys are the BLS public keys for the validators added in the block ++ AddedValidatorsPublicKeys [][96]byte ++ // RemovedValidators is a bitmap having an active bit for each removed validator in the block ++ RemovedValidators *big.Int ++ // Seal is an ECDSA signature by the proposer ++ Seal []byte ++ // AggregatedSeal contains the aggregated BLS signature created via IBFT consensus. ++ AggregatedSeal IstanbulAggregatedSeal ++ // ParentAggregatedSeal contains an aggregated BLS signature for the previous block. ++ ParentAggregatedSeal IstanbulAggregatedSeal ++} ++ ++// transformHeader removes the aggregated seal from the header ++func transformHeader(header []byte) ([]byte, error) { ++ newHeader := new(types.Header) ++ err := rlp.DecodeBytes(header, &newHeader) ++ if err != nil { ++ return nil, err ++ } ++ ++ if len(newHeader.Extra) < IstanbulExtraVanity { ++ return nil, errors.New("invalid istanbul header extra-data") ++ } ++ ++ istanbulExtra := IstanbulExtra{} ++ err = rlp.DecodeBytes(newHeader.Extra[IstanbulExtraVanity:], &istanbulExtra) ++ if err != nil { ++ return nil, err ++ } ++ ++ istanbulExtra.AggregatedSeal = IstanbulAggregatedSeal{} ++ ++ payload, err := rlp.EncodeToBytes(&istanbulExtra) ++ if err != nil { ++ return nil, err ++ } ++ ++ newHeader.Extra = append(newHeader.Extra[:IstanbulExtraVanity], payload...) 
++ ++ return rlp.EncodeToBytes(newHeader) ++} ++ ++func hasSameHash(newHeader, oldHash []byte) (bool, common.Hash) { ++ newHash := crypto.Keccak256Hash(newHeader) ++ return bytes.Equal(oldHash, newHash.Bytes()), newHash ++} ++ ++// transformBlockBody migrates the block body from the old format to the new format (works with []byte input/output) ++func transformBlockBody(oldBodyData []byte) ([]byte, error) { ++ // decode body into celo-blockchain Body structure ++ // remove epochSnarkData and randomness data ++ var celoBody struct { ++ Transactions types.Transactions ++ Randomness rlp.RawValue ++ EpochSnarkData rlp.RawValue ++ } ++ if err := rlp.DecodeBytes(oldBodyData, &celoBody); err != nil { ++ // body may have already been transformed in a previous migration ++ body := types.Body{} ++ if err := rlp.DecodeBytes(oldBodyData, &body); err == nil { ++ return oldBodyData, nil ++ } ++ return nil, fmt.Errorf("failed to RLP decode body: %w", err) ++ } ++ ++ // transform into op-geth types.Body structure ++ newBody := types.Body{ ++ Transactions: celoBody.Transactions, ++ Uncles: []*types.Header{}, ++ } ++ newBodyData, err := rlp.EncodeToBytes(newBody) ++ if err != nil { ++ return nil, fmt.Errorf("failed to RLP encode body: %w", err) ++ } ++ ++ return newBodyData, nil ++}
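Note the fallback in transformBlockBody: if the bytes already decode as an op-geth types.Body, they are returned unchanged, which is what makes re-running an interrupted migration safe. A sketch of that idempotency (same package as transform.go):

```go
// Sketch, same package as transform.go: a body that is already in the
// op-geth format passes through transformBlockBody unchanged.
func ExampleTransformBodyIdempotent() {
	alreadyMigrated, err := rlp.EncodeToBytes(&types.Body{
		Transactions: types.Transactions{},
		Uncles:       []*types.Header{},
	})
	if err != nil {
		panic(err)
	}

	// The celo-body decode fails (too few list elements), the types.Body
	// decode succeeds, so the input is returned as-is.
	out, err := transformBlockBody(alreadyMigrated)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(out, alreadyMigrated)) // true
}
```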
OP -> CELO: +3 / -3
diff --git OP/op-chain-ops/cmd/check-derivation/main.go CELO/op-chain-ops/cmd/check-derivation/main.go +index 499b128f8767fe560d92ee66f3ed770c373d7bf4..88a13f6efce7ffb37d36b6b709c0e698495793b7 100644 +--- OP/op-chain-ops/cmd/check-derivation/main.go ++++ CELO/op-chain-ops/cmd/check-derivation/main.go +@@ -225,7 +225,7 @@ data := testutils.RandomData(rng, 10) + var txData types.TxData + switch txType { + case types.LegacyTxType: +- gasLimit, err := core.IntrinsicGas(data, nil, false, true, true, false) ++ gasLimit, err := core.IntrinsicGas(data, nil, false, true, true, false, nil) + if err != nil { + return nil, fmt.Errorf("failed to get intrinsicGas: %w", err) + } +@@ -242,7 +242,7 @@ accessList := types.AccessList{types.AccessTuple{ + Address: randomAddress, + StorageKeys: []common.Hash{common.HexToHash("0x1234")}, + }} +- gasLimit, err := core.IntrinsicGas(data, accessList, false, true, true, false) ++ gasLimit, err := core.IntrinsicGas(data, accessList, false, true, true, false, nil) + if err != nil { + return nil, fmt.Errorf("failed to get intrinsicGas: %w", err) + } +@@ -257,7 +257,7 @@ AccessList: accessList, + Data: data, + } + case types.DynamicFeeTxType: +- gasLimit, err := core.IntrinsicGas(data, nil, false, true, true, false) ++ gasLimit, err := core.IntrinsicGas(data, nil, false, true, true, false, nil) + if err != nil { + return nil, fmt.Errorf("failed to get intrinsicGas: %w", err) + }
OP -> (deleted): +0 / -48
diff --git OP/op-chain-ops/cmd/op-version-check/README.md CELO/op-chain-ops/cmd/op-version-check/README.md +deleted file mode 100644 +index 29e5f08ab4de4f273213a2561374d844a0d191be..0000000000000000000000000000000000000000 +--- OP/op-chain-ops/cmd/op-version-check/README.md ++++ /dev/null +@@ -1,48 +0,0 @@ +-# op-version-check +- +-A CLI tool for determining which contract versions are deployed for +-chains in a superchain. It will output a JSON file that contains a +-list of each chain's versions. It is assumed that the implementations +-that are being checked have already been deployed and their contract +-addresses exist inside of the `superchain-registry` repository. It is +-also assumed that the semantic version file in the `superchain-registry` +-has been updated. The tool will output the semantic versioning to +-determine which contract versions are deployed. +- +-### Configuration +- +-#### L1 RPC URL +- +-The L1 RPC URL is used to determine which superchain to target. All +-L2s that are not based on top of the L1 chain that corresponds to the +-L1 RPC URL are filtered out from being checked. It also is used to +-double check that the data in the `superchain-registry` is correct. +- +-#### Chain IDs +- +-A list of L2 chain IDs can be passed that will be used to filter which +-L2 chains will have their versions checked. Omitting this argument will +-result in all chains in the superchain being considered. +- +-#### Deploy Config +- +-The path to the `deploy-config` directory in the contracts package. +-Since multiple L2 networks may be considered in the check, the `deploy-config` +-directory must be passed and then the particular deploy config files will +-be read out of the directory as needed. +- +-#### Outfile +- +-The file that the versions should be written to. If omitted, the file +-will be written to stdout +- +-#### Usage +- +-It can be built and run using the [Makefile](../../Makefile) `op-version-check` +-target. Run `make op-version-check` to create a binary in [../../bin/op-version-check](../../bin/op-version-check) +-that can be executed, optionally providing the `--l1-rpc-url`, `--chain-ids`, +-`--superchain-target`, and `--outfile` flags. +- +-```sh +-./bin/op-version-check +-```
OP -> (deleted): +0 / -169
diff --git OP/op-chain-ops/cmd/op-version-check/main.go CELO/op-chain-ops/cmd/op-version-check/main.go +deleted file mode 100644 +index 163ff8599ef56a63a0640924fb18a005c2abb372..0000000000000000000000000000000000000000 +--- OP/op-chain-ops/cmd/op-version-check/main.go ++++ /dev/null +@@ -1,169 +0,0 @@ +-package main +- +-import ( +- "encoding/json" +- "errors" +- "fmt" +- "os" +- +- "github.com/ethereum/go-ethereum/ethclient" +- "github.com/ethereum/go-ethereum/log" +- "github.com/mattn/go-isatty" +- "github.com/urfave/cli/v2" +- "golang.org/x/exp/maps" +- +- "github.com/ethereum-optimism/optimism/op-chain-ops/upgrades" +- "github.com/ethereum-optimism/optimism/op-service/jsonutil" +- oplog "github.com/ethereum-optimism/optimism/op-service/log" +- +- "github.com/ethereum-optimism/superchain-registry/superchain" +-) +- +-type Contract struct { +- Version string `yaml:"version"` +- Address superchain.Address `yaml:"address"` +-} +- +-type ChainVersionCheck struct { +- Name string `yaml:"name"` +- ChainID uint64 `yaml:"chain_id"` +- Contracts map[string]Contract `yaml:"contracts"` +-} +- +-func main() { +- color := isatty.IsTerminal(os.Stderr.Fd()) +- oplog.SetGlobalLogHandler(log.NewTerminalHandler(os.Stderr, color)) +- +- app := &cli.App{ +- Name: "op-version-check", +- Usage: "Determine which contract versions are deployed for chains in a superchain", +- Flags: []cli.Flag{ +- &cli.StringSliceFlag{ +- Name: "l1-rpc-urls", +- Usage: "L1 RPC URLs, the chain ID will be used to determine the superchain", +- EnvVars: []string{"L1_RPC_URLS"}, +- }, +- &cli.StringSliceFlag{ +- Name: "l2-rpc-urls", +- Usage: "L2 RPC URLs, corresponding to chains to check versions for. Corresponds to all chains if empty", +- EnvVars: []string{"L2_RPC_URLS"}, +- }, +- &cli.PathFlag{ +- Name: "outfile", +- Usage: "The file to write the output to. 
If not specified, output is written to stdout", +- EnvVars: []string{"OUTFILE"}, +- }, +- }, +- Action: entrypoint, +- } +- +- if err := app.Run(os.Args); err != nil { +- log.Crit("error op-version-check", "err", err) +- } +-} +- +-// entrypoint contains the main logic of the script +-func entrypoint(ctx *cli.Context) error { +- l1RPCURLs := ctx.StringSlice("l1-rpc-urls") +- l2RPCURLs := ctx.StringSlice("l2-rpc-urls") +- +- var l2ChainIDs []uint64 +- +- // If no L2 RPC URLs are specified, we check all chains for the L1 RPC URL +- if len(l2RPCURLs) == 0 { +- l2ChainIDs = maps.Keys(superchain.OPChains) +- } else { +- for _, l2RPCURL := range l2RPCURLs { +- client, err := ethclient.Dial(l2RPCURL) +- if err != nil { +- return errors.New("cannot create L2 client") +- } +- +- l2ChainID, err := client.ChainID(ctx.Context) +- if err != nil { +- return fmt.Errorf("cannot fetch L2 chain ID: %w", err) +- } +- +- l2ChainIDs = append(l2ChainIDs, l2ChainID.Uint64()) +- } +- } +- +- output := []ChainVersionCheck{} +- +- for _, l2ChainID := range l2ChainIDs { +- chainConfig := superchain.OPChains[l2ChainID] +- +- if chainConfig.ChainID != l2ChainID { +- return fmt.Errorf("mismatched chain IDs: %d != %d", chainConfig.ChainID, l2ChainID) +- } +- +- for _, l1RPCURL := range l1RPCURLs { +- client, err := ethclient.Dial(l1RPCURL) +- if err != nil { +- return errors.New("cannot create L1 client") +- } +- +- l1ChainID, err := client.ChainID(ctx.Context) +- if err != nil { +- return fmt.Errorf("cannot fetch L1 chain ID: %w", err) +- } +- +- sc, ok := superchain.Superchains[chainConfig.Superchain] +- if !ok { +- return fmt.Errorf("superchain name %s not registered", chainConfig.Superchain) +- } +- +- declaredL1ChainID := sc.Config.L1.ChainID +- +- if l1ChainID.Uint64() != declaredL1ChainID { +- // L2 corresponds to a different superchain than L1, skip +- log.Info("Ignoring L1/L2", "l1-chain-id", l1ChainID, "l2-chain-id", l2ChainID) +- continue +- } +- +- log.Info(chainConfig.Name, "l1-chain-id", l1ChainID, "l2-chain-id", l2ChainID) +- +- log.Info("Detecting on chain contracts") +- // Tracking the individual addresses can be deprecated once the system is upgraded +- // to the new contracts where the system config has a reference to each address. 
+- addresses, ok := superchain.Addresses[l2ChainID] +- if !ok { +- return fmt.Errorf("no addresses for chain ID %d", l2ChainID) +- } +- versions, err := upgrades.GetContractVersions(ctx.Context, addresses, chainConfig, client) +- if err != nil { +- return fmt.Errorf("error getting contract versions: %w", err) +- } +- +- contracts := make(map[string]Contract) +- +- contracts["AddressManager"] = Contract{Version: "null", Address: addresses.AddressManager} +- contracts["L1CrossDomainMessenger"] = Contract{Version: versions.L1CrossDomainMessenger, Address: addresses.L1CrossDomainMessengerProxy} +- contracts["L1ERC721Bridge"] = Contract{Version: versions.L1ERC721Bridge, Address: addresses.L1ERC721BridgeProxy} +- contracts["L1StandardBridge"] = Contract{Version: versions.L1ERC721Bridge, Address: addresses.L1StandardBridgeProxy} +- contracts["L2OutputOracle"] = Contract{Version: versions.L2OutputOracle, Address: addresses.L2OutputOracleProxy} +- contracts["OptimismMintableERC20Factory"] = Contract{Version: versions.OptimismMintableERC20Factory, Address: addresses.OptimismMintableERC20FactoryProxy} +- contracts["OptimismPortal"] = Contract{Version: versions.OptimismPortal, Address: addresses.OptimismPortalProxy} +- contracts["SystemConfig"] = Contract{Version: versions.SystemConfig, Address: addresses.SystemConfigProxy} +- contracts["ProxyAdmin"] = Contract{Version: "null", Address: addresses.ProxyAdmin} +- +- output = append(output, ChainVersionCheck{Name: chainConfig.Name, ChainID: l2ChainID, Contracts: contracts}) +- +- log.Info("Successfully processed contract versions", "chain", chainConfig.Name, "l1-chain-id", l1ChainID, "l2-chain-id", l2ChainID) +- break +- } +- } +- // Write contract versions to disk or stdout +- if outfile := ctx.Path("outfile"); outfile != "" { +- if err := jsonutil.WriteJSON(outfile, output, 0o666); err != nil { +- return err +- } +- } else { +- data, err := json.MarshalIndent(output, "", " ") +- if err != nil { +- return err +- } +- fmt.Println(string(data)) +- } +- return nil +-}
OP -> CELO: +58 / -2
diff --git OP/op-chain-ops/foundry/artifact.go CELO/op-chain-ops/foundry/artifact.go +index 93791ef806d08b68692c0ec9b76f07ac9eaec3ee..0e0dfcb463f9236ef86f35501b473ee9b1c942ad 100644 +--- OP/op-chain-ops/foundry/artifact.go ++++ CELO/op-chain-ops/foundry/artifact.go +@@ -4,11 +4,17 @@ import ( + "encoding/json" + "fmt" + "os" ++ "path/filepath" + "strings" +  ++ "github.com/holiman/uint256" ++ "golang.org/x/exp/maps" ++ + "github.com/ethereum-optimism/optimism/op-chain-ops/solc" + "github.com/ethereum/go-ethereum/accounts/abi" ++ "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" ++ "github.com/ethereum/go-ethereum/core/types" + ) +  + // Artifact represents a foundry compilation artifact. +@@ -49,7 +55,7 @@ } + return json.Marshal(artifact) + } +  +-// artifactMarshaling is a helper struct for marshaling and unmarshaling ++// artifactMarshaling is a helper struct for marshaling and unmarshalling + // foundry artifacts. + type artifactMarshaling struct { + ABI json.RawMessage `json:"abi"` +@@ -66,7 +72,7 @@ LinkReferences json.RawMessage `json:"linkReferences"` + ImmutableReferences json.RawMessage `json:"immutableReferences,omitempty"` + } +  +-// DeployedBytecode represents the bytecode section of the solc compiler output. ++// Bytecode represents the bytecode section of the solc compiler output. + type Bytecode struct { + SourceMap string `json:"sourceMap"` + Object hexutil.Bytes `json:"object"` +@@ -86,3 +92,53 @@ return nil, err + } + return &artifact, nil + } ++ ++type ForgeAllocs struct { ++ Accounts types.GenesisAlloc ++} ++ ++func (d *ForgeAllocs) Copy() *ForgeAllocs { ++ out := make(types.GenesisAlloc, len(d.Accounts)) ++ maps.Copy(out, d.Accounts) ++ return &ForgeAllocs{Accounts: out} ++} ++ ++func (d *ForgeAllocs) UnmarshalJSON(b []byte) error { ++ // forge, since integrating Alloy, likes to hex-encode everything. ++ type forgeAllocAccount struct { ++ Balance hexutil.U256 `json:"balance"` ++ Nonce hexutil.Uint64 `json:"nonce"` ++ Code hexutil.Bytes `json:"code,omitempty"` ++ Storage map[common.Hash]common.Hash `json:"storage,omitempty"` ++ } ++ var allocs map[common.Address]forgeAllocAccount ++ if err := json.Unmarshal(b, &allocs); err != nil { ++ return err ++ } ++ d.Accounts = make(types.GenesisAlloc, len(allocs)) ++ for addr, acc := range allocs { ++ acc := acc ++ d.Accounts[addr] = types.Account{ ++ Code: acc.Code, ++ Storage: acc.Storage, ++ Balance: (*uint256.Int)(&acc.Balance).ToBig(), ++ Nonce: (uint64)(acc.Nonce), ++ PrivateKey: nil, ++ } ++ } ++ return nil ++} ++ ++func LoadForgeAllocs(allocsPath string) (*ForgeAllocs, error) { ++ path := filepath.Join(allocsPath) ++ f, err := os.OpenFile(path, os.O_RDONLY, 0644) ++ if err != nil { ++ return nil, fmt.Errorf("failed to open forge allocs %q: %w", path, err) ++ } ++ defer f.Close() ++ var out ForgeAllocs ++ if err := json.NewDecoder(f).Decode(&out); err != nil { ++ return nil, fmt.Errorf("failed to json-decode forge allocs %q: %w", path, err) ++ } ++ return &out, nil ++}
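The ForgeAllocs decoder accepts forge's hex-encoded allocs dump: balance, nonce, and code all arrive as hex strings. A minimal load of a hand-written file (address, values, and file name are made up):

```go
package main

import (
	"fmt"
	"os"

	"github.com/ethereum-optimism/optimism/op-chain-ops/foundry"
)

func main() {
	// Hypothetical one-account allocs file in forge's hex-encoded format.
	allocsJSON := `{
	  "0x4200000000000000000000000000000000000042": {
	    "balance": "0xde0b6b3a7640000",
	    "nonce": "0x1",
	    "code": "0x60006000f3"
	  }
	}`
	path := "allocs-example.json"
	if err := os.WriteFile(path, []byte(allocsJSON), 0o644); err != nil {
		panic(err)
	}
	defer os.Remove(path)

	allocs, err := foundry.LoadForgeAllocs(path)
	if err != nil {
		panic(err)
	}
	for addr, acc := range allocs.Accounts {
		fmt.Printf("%s balance=%s nonce=%d code=%d bytes\n", addr, acc.Balance, acc.Nonce, len(acc.Code))
	}
}
```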
OP -> CELO: +8 / -43
diff --git OP/op-chain-ops/genesis/config.go CELO/op-chain-ops/genesis/config.go +index a2661b057ecb0cbddb499ab29ba570a0ea264616..007feeb915a9c6d4a088f0a731e386258e9d64d1 100644 +--- OP/op-chain-ops/genesis/config.go ++++ CELO/op-chain-ops/genesis/config.go +@@ -10,9 +10,6 @@ "os" + "path/filepath" + "reflect" +  +- "github.com/holiman/uint256" +- "golang.org/x/exp/maps" +- + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" +@@ -292,6 +289,9 @@ L1CancunTimeOffset *hexutil.Uint64 `json:"l1CancunTimeOffset,omitempty"` +  + // UseInterop is a flag that indicates if the system is using interop + UseInterop bool `json:"useInterop,omitempty"` ++ ++ // DeployCeloContracts indicates whether to deploy Celo contracts. ++ DeployCeloContracts bool `json:"deployCeloContracts"` + } +  + // Copy will deeply copy the DeployConfig. This does a JSON roundtrip to copy +@@ -441,10 +441,10 @@ log.Warn("DisputeGameFinalityDelaySeconds is 0") + } + if d.UsePlasma { + if d.DAChallengeWindow == 0 { +- return fmt.Errorf("%w: DAChallengeWindow cannot be 0 when using plasma mode", ErrInvalidDeployConfig) ++ return fmt.Errorf("%w: DAChallengeWindow cannot be 0 when using alt-da mode", ErrInvalidDeployConfig) + } + if d.DAResolveWindow == 0 { +- return fmt.Errorf("%w: DAResolveWindow cannot be 0 when using plasma mode", ErrInvalidDeployConfig) ++ return fmt.Errorf("%w: DAResolveWindow cannot be 0 when using alt-da mode", ErrInvalidDeployConfig) + } + if !(d.DACommitmentType == plasma.KeccakCommitmentString || d.DACommitmentType == plasma.GenericCommitmentString) { + return fmt.Errorf("%w: DACommitmentType must be either KeccakCommtiment or GenericCommitment", ErrInvalidDeployConfig) +@@ -520,9 +520,9 @@ if d.OptimismPortalProxy == (common.Address{}) { + return fmt.Errorf("%w: OptimismPortalProxy cannot be address(0)", ErrInvalidDeployConfig) + } + if d.UsePlasma && d.DACommitmentType == plasma.KeccakCommitmentString && d.DAChallengeProxy == (common.Address{}) { +- return fmt.Errorf("%w: DAChallengeContract cannot be address(0) when using plasma mode", ErrInvalidDeployConfig) ++ return fmt.Errorf("%w: DAChallengeContract cannot be address(0) when using alt-da mode", ErrInvalidDeployConfig) + } else if d.UsePlasma && d.DACommitmentType == plasma.GenericCommitmentString && d.DAChallengeProxy != (common.Address{}) { +- return fmt.Errorf("%w: DAChallengeContract must be address(0) when using generic commitments in plasma mode", ErrInvalidDeployConfig) ++ return fmt.Errorf("%w: DAChallengeContract must be address(0) when using generic commitments in alt-da mode", ErrInvalidDeployConfig) + } + return nil + } +@@ -660,6 +660,7 @@ EcotoneTime: d.EcotoneTime(l1StartBlock.Time()), + FjordTime: d.FjordTime(l1StartBlock.Time()), + InteropTime: d.InteropTime(l1StartBlock.Time()), + PlasmaConfig: plasma, ++ Cel2Time: d.RegolithTime(l1StartBlock.Time()), + }, nil + } +  +@@ -794,42 +795,6 @@ return nil, fmt.Errorf("cannot unmarshal L1 deployments: %w", err) + } +  + return &deployments, nil +-} +- +-type ForgeAllocs struct { +- Accounts types.GenesisAlloc +-} +- +-func (d *ForgeAllocs) Copy() *ForgeAllocs { +- out := make(types.GenesisAlloc, len(d.Accounts)) +- maps.Copy(out, d.Accounts) +- return &ForgeAllocs{Accounts: out} +-} +- +-func (d *ForgeAllocs) UnmarshalJSON(b []byte) error { +- // forge, since integrating Alloy, likes to hex-encode everything. 
+- type forgeAllocAccount struct { +- Balance hexutil.U256 `json:"balance"` +- Nonce hexutil.Uint64 `json:"nonce"` +- Code hexutil.Bytes `json:"code,omitempty"` +- Storage map[common.Hash]common.Hash `json:"storage,omitempty"` +- } +- var allocs map[common.Address]forgeAllocAccount +- if err := json.Unmarshal(b, &allocs); err != nil { +- return err +- } +- d.Accounts = make(types.GenesisAlloc, len(allocs)) +- for addr, acc := range allocs { +- acc := acc +- d.Accounts[addr] = types.Account{ +- Code: acc.Code, +- Storage: acc.Storage, +- Balance: (*uint256.Int)(&acc.Balance).ToBig(), +- Nonce: (uint64)(acc.Nonce), +- PrivateKey: nil, +- } +- } +- return nil + } +  + type MarshalableRPCBlockNumberOrHash rpc.BlockNumberOrHash
OP -> CELO: +1 / -0
diff --git OP/op-chain-ops/genesis/genesis.go CELO/op-chain-ops/genesis/genesis.go +index bbfb15c346496a1f19cccd5915f4e457bd3f8a2b..93953a06089cda171548e38d9538ee6ead06a2b1 100644 +--- OP/op-chain-ops/genesis/genesis.go ++++ CELO/op-chain-ops/genesis/genesis.go +@@ -68,6 +68,7 @@ CancunTime: config.EcotoneTime(block.Time()), + EcotoneTime: config.EcotoneTime(block.Time()), + FjordTime: config.FjordTime(block.Time()), + InteropTime: config.InteropTime(block.Time()), ++ Cel2Time: config.RegolithTime(block.Time()), + Optimism: &params.OptimismConfig{ + EIP1559Denominator: eip1559Denom, + EIP1559Elasticity: eip1559Elasticity,
+ [file header: op-chain-ops/genesis/layer_one.go; OP/CELO source links; +3 / -1 lines]
diff --git OP/op-chain-ops/genesis/layer_one.go CELO/op-chain-ops/genesis/layer_one.go +index ee9d408212a9ddde0dba96bba9bfffdcb29cbafc..16f1a3c48c2dface5b8742452a16d8e2f3c7ec24 100644 +--- OP/op-chain-ops/genesis/layer_one.go ++++ CELO/op-chain-ops/genesis/layer_one.go +@@ -4,6 +4,8 @@ import ( + "fmt" + "math/big" +  ++ "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" ++ + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/log" +@@ -18,7 +20,7 @@ // BuildL1DeveloperGenesis will create a L1 genesis block after creating + // all of the state required for an Optimism network to function. + // It is expected that the dump contains all of the required state to bootstrap + // the L1 chain. +-func BuildL1DeveloperGenesis(config *DeployConfig, dump *ForgeAllocs, l1Deployments *L1Deployments) (*core.Genesis, error) { ++func BuildL1DeveloperGenesis(config *DeployConfig, dump *foundry.ForgeAllocs, l1Deployments *L1Deployments) (*core.Genesis, error) { + log.Info("Building developer L1 genesis block") + genesis, err := NewL1Genesis(config) + if err != nil {
+ [file header: op-chain-ops/genesis/layer_two.go; OP/CELO source links; +3 / -19 lines]
diff --git OP/op-chain-ops/genesis/layer_two.go CELO/op-chain-ops/genesis/layer_two.go +index 6cd0bbe7d27f59be70721d6b148eb9347d1eaed6..a898afc70aaa5557d270910bcf66b0bb09e33a2b 100644 +--- OP/op-chain-ops/genesis/layer_two.go ++++ CELO/op-chain-ops/genesis/layer_two.go +@@ -1,11 +1,8 @@ + package genesis +  + import ( +- "encoding/json" + "fmt" + "math/big" +- "os" +- "path/filepath" +  + hdwallet "github.com/ethereum-optimism/go-ethereum-hdwallet" + "github.com/holiman/uint256" +@@ -16,6 +13,7 @@ "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" +  ++ "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" + "github.com/ethereum-optimism/optimism/op-service/predeploys" + ) +  +@@ -34,10 +32,10 @@ // mnemonic for the test accounts in hardhat/foundry + testMnemonic = "test test test test test test test test test test test junk" + ) +  +-type AllocsLoader func(mode L2AllocsMode) *ForgeAllocs ++type AllocsLoader func(mode L2AllocsMode) *foundry.ForgeAllocs +  + // BuildL2Genesis will build the L2 genesis block. +-func BuildL2Genesis(config *DeployConfig, dump *ForgeAllocs, l1StartBlock *types.Block) (*core.Genesis, error) { ++func BuildL2Genesis(config *DeployConfig, dump *foundry.ForgeAllocs, l1StartBlock *types.Block) (*core.Genesis, error) { + genspec, err := NewL2Genesis(config, l1StartBlock) + if err != nil { + return nil, err +@@ -94,17 +92,3 @@ } + } + return false, nil + } +- +-func LoadForgeAllocs(allocsPath string) (*ForgeAllocs, error) { +- path := filepath.Join(allocsPath) +- f, err := os.OpenFile(path, os.O_RDONLY, 0644) +- if err != nil { +- return nil, fmt.Errorf("failed to open forge allocs %q: %w", path, err) +- } +- defer f.Close() +- var out ForgeAllocs +- if err := json.NewDecoder(f).Decode(&out); err != nil { +- return nil, fmt.Errorf("failed to json-decode forge allocs %q: %w", path, err) +- } +- return &out, nil +-}
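ForgeAllocs and its JSON loader move out of the genesis package into the shared op-chain-ops/foundry package, so L1 and L2 genesis building consume the same type. A sketch of the call site after the move, assuming the foundry package exposes a loader equivalent to the removed genesis.LoadForgeAllocs (that function name is an assumption here; the Accounts field comes from the removed struct definition):

package main

import (
	"fmt"
	"log"

	"github.com/ethereum-optimism/optimism/op-chain-ops/foundry"
)

func main() {
	// Assumed equivalent of the removed genesis.LoadForgeAllocs: open the
	// forge state dump and json-decode it into a foundry.ForgeAllocs.
	allocs, err := foundry.LoadForgeAllocs("path/to/allocs.json")
	if err != nil {
		log.Fatalf("failed to load forge allocs: %v", err)
	}
	fmt.Printf("loaded %d accounts\n", len(allocs.Accounts))
	// allocs can then be passed to genesis.BuildL2Genesis, whose signature
	// now takes a *foundry.ForgeAllocs per the hunk above.
}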
+ [file header: op-chain-ops/genesis/testdata/test-deploy-config-full.json; OP/CELO source links; +2 / -1 lines]
diff --git OP/op-chain-ops/genesis/testdata/test-deploy-config-full.json CELO/op-chain-ops/genesis/testdata/test-deploy-config-full.json +index c0aefac625ff5b0a11560dd40ee2ed22c53a7416..09415e40bfd164a02482b916eb2ac2ca8be04479 100644 +--- OP/op-chain-ops/genesis/testdata/test-deploy-config-full.json ++++ CELO/op-chain-ops/genesis/testdata/test-deploy-config-full.json +@@ -92,5 +92,6 @@ "daCommitmentType": "KeccakCommtiment", + "daChallengeProxy": "0x0000000000000000000000000000000000000000", + "daChallengeWindow": 0, + "daResolveWindow": 0, +- "daResolverRefundPercentage": 0 ++ "daResolverRefundPercentage": 0, ++ "deployCeloContracts": false + }
+ [file header: op-chain-ops/justfile; OP/CELO source links; +15 / -0 lines]
diff --git OP/op-chain-ops/justfile CELO/op-chain-ops/justfile +index 9775f535d227d4d2dacfa690fe318b2f1b78f23f..b0ec8ee270a586b46ea9383d29bf5041e46273bd 100644 +--- OP/op-chain-ops/justfile ++++ CELO/op-chain-ops/justfile +@@ -23,3 +23,18 @@ build_abi SystemConfig + #build_abi ISemver + build_abi ProxyAdmin + build_abi StorageSetter ++ ++bindings-celo-migrate: ++ #!/usr/bin/env bash ++ set -euxo pipefail ++ ++ build_abi() { ++ local lowercase=$(echo "$2" | awk '{print tolower($0)}') ++ abigen \ ++ --abi "{{abis}}/$1.json" \ ++ --pkg bindings \ ++ --out "cmd/celo-migrate/bindings/$lowercase.go" \ ++ --type $2 + } -+] -\ No newline at end of file
++ ++ build_abi GoldToken CeloToken
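The new recipe generates Go bindings for the GoldToken ABI under the type name CeloToken, for use by the celo-migrate command. By abigen's usual conventions, `--pkg bindings --type CeloToken` yields a bindings.CeloToken wrapper with a NewCeloToken constructor; a hedged usage sketch (the RPC URL is a placeholder, and the address shown is Celo mainnet's CELO/GoldToken ERC-20 address, used purely as an example):

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"

	// Generated into cmd/celo-migrate/bindings by the justfile recipe above.
	"github.com/ethereum-optimism/optimism/op-chain-ops/cmd/celo-migrate/bindings"
)

func main() {
	client, err := ethclient.Dial("http://localhost:8545") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	// Example address only: CELO (GoldToken) on Celo mainnet.
	addr := common.HexToAddress("0x471EcE3750Da237f93B8E339c536989b8978a438")
	token, err := bindings.NewCeloToken(addr, client) // abigen-convention constructor
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("bound CeloToken at", addr, token != nil)
}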
+ [section divider and file header: op-challenger/cmd/main_test.go; OP/CELO source links; +21 / -21 lines]
diff --git OP/op-challenger/cmd/main_test.go CELO/op-challenger/cmd/main_test.go +index 7e77f19408e8b5832cd152c1443fcfcb367d3c94..e6234b063b3400b5954271cc3ae5018769d6558b 100644 +--- OP/op-challenger/cmd/main_test.go ++++ CELO/op-challenger/cmd/main_test.go +@@ -277,7 +277,7 @@ }) +  + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgsExcept(traceType, "--asterisc-bin", "--asterisc-bin=./asterisc")) +- require.Equal(t, "./asterisc", cfg.AsteriscBin) ++ require.Equal(t, "./asterisc", cfg.Asterisc.VmBin) + }) + }) +  +@@ -292,7 +292,7 @@ }) +  + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgsExcept(traceType, "--asterisc-server", "--asterisc-server=./op-program")) +- require.Equal(t, "./op-program", cfg.AsteriscServer) ++ require.Equal(t, "./op-program", cfg.Asterisc.Server) + }) + }) +  +@@ -349,12 +349,12 @@ + t.Run(fmt.Sprintf("TestAsteriscSnapshotFreq-%v", traceType), func(t *testing.T) { + t.Run("UsesDefault", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(traceType)) +- require.Equal(t, config.DefaultAsteriscSnapshotFreq, cfg.AsteriscSnapshotFreq) ++ require.Equal(t, config.DefaultAsteriscSnapshotFreq, cfg.Asterisc.SnapshotFreq) + }) +  + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(traceType, "--asterisc-snapshot-freq=1234")) +- require.Equal(t, uint(1234), cfg.AsteriscSnapshotFreq) ++ require.Equal(t, uint(1234), cfg.Asterisc.SnapshotFreq) + }) +  + t.Run("Invalid", func(t *testing.T) { +@@ -366,12 +366,12 @@ + t.Run(fmt.Sprintf("TestAsteriscInfoFreq-%v", traceType), func(t *testing.T) { + t.Run("UsesDefault", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(traceType)) +- require.Equal(t, config.DefaultAsteriscInfoFreq, cfg.AsteriscInfoFreq) ++ require.Equal(t, config.DefaultAsteriscInfoFreq, cfg.Asterisc.InfoFreq) + }) +  + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(traceType, "--asterisc-info-freq=1234")) +- require.Equal(t, uint(1234), cfg.AsteriscInfoFreq) ++ require.Equal(t, uint(1234), cfg.Asterisc.InfoFreq) + }) +  + t.Run("Invalid", func(t *testing.T) { +@@ -432,7 +432,7 @@ delete(args, "--asterisc-network") + delete(args, "--game-factory-address") + args["--network"] = "op-sepolia" + cfg := configForArgs(t, toArgList(args)) +- require.Equal(t, "op-sepolia", cfg.AsteriscNetwork) ++ require.Equal(t, "op-sepolia", cfg.Asterisc.Network) + }) +  + t.Run("MustNotSpecifyNetworkAndAsteriscNetwork", func(t *testing.T) { +@@ -442,7 +442,7 @@ }) +  + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgsExcept(traceType, "--asterisc-network", "--asterisc-network", testNetwork)) +- require.Equal(t, testNetwork, cfg.AsteriscNetwork) ++ require.Equal(t, testNetwork, cfg.Asterisc.Network) + }) + }) +  +@@ -453,7 +453,7 @@ }) +  + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgsExcept(traceType, "--asterisc-network", "--asterisc-rollup-config=rollup.json", "--asterisc-l2-genesis=genesis.json")) +- require.Equal(t, "rollup.json", cfg.AsteriscRollupConfigPath) ++ require.Equal(t, "rollup.json", cfg.Asterisc.RollupConfigPath) + }) + }) +  +@@ -464,7 +464,7 @@ }) +  + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgsExcept(traceType, "--asterisc-network", "--asterisc-rollup-config=rollup.json", "--asterisc-l2-genesis=genesis.json")) +- require.Equal(t, "genesis.json", cfg.AsteriscL2GenesisPath) ++ require.Equal(t, "genesis.json", cfg.Asterisc.L2GenesisPath) + }) 
+ }) + } +@@ -502,7 +502,7 @@ }) +  + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgsExcept(traceType, "--cannon-bin", "--cannon-bin=./cannon")) +- require.Equal(t, "./cannon", cfg.CannonBin) ++ require.Equal(t, "./cannon", cfg.Cannon.VmBin) + }) + }) +  +@@ -517,7 +517,7 @@ }) +  + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgsExcept(traceType, "--cannon-server", "--cannon-server=./op-program")) +- require.Equal(t, "./op-program", cfg.CannonServer) ++ require.Equal(t, "./op-program", cfg.Cannon.Server) + }) + }) +  +@@ -570,12 +570,12 @@ + t.Run(fmt.Sprintf("TestCannonSnapshotFreq-%v", traceType), func(t *testing.T) { + t.Run("UsesDefault", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(traceType)) +- require.Equal(t, config.DefaultCannonSnapshotFreq, cfg.CannonSnapshotFreq) ++ require.Equal(t, config.DefaultCannonSnapshotFreq, cfg.Cannon.SnapshotFreq) + }) +  + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(traceType, "--cannon-snapshot-freq=1234")) +- require.Equal(t, uint(1234), cfg.CannonSnapshotFreq) ++ require.Equal(t, uint(1234), cfg.Cannon.SnapshotFreq) + }) +  + t.Run("Invalid", func(t *testing.T) { +@@ -587,12 +587,12 @@ + t.Run(fmt.Sprintf("TestCannonInfoFreq-%v", traceType), func(t *testing.T) { + t.Run("UsesDefault", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(traceType)) +- require.Equal(t, config.DefaultCannonInfoFreq, cfg.CannonInfoFreq) ++ require.Equal(t, config.DefaultCannonInfoFreq, cfg.Cannon.InfoFreq) + }) +  + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(traceType, "--cannon-info-freq=1234")) +- require.Equal(t, uint(1234), cfg.CannonInfoFreq) ++ require.Equal(t, uint(1234), cfg.Cannon.InfoFreq) + }) +  + t.Run("Invalid", func(t *testing.T) { +@@ -653,7 +653,7 @@ delete(args, "--cannon-network") + delete(args, "--game-factory-address") + args["--network"] = "op-sepolia" + cfg := configForArgs(t, toArgList(args)) +- require.Equal(t, "op-sepolia", cfg.CannonNetwork) ++ require.Equal(t, "op-sepolia", cfg.Cannon.Network) + }) +  + t.Run("MustNotSpecifyNetworkAndCannonNetwork", func(t *testing.T) { +@@ -663,7 +663,7 @@ }) +  + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgsExcept(traceType, "--cannon-network", "--cannon-network", testNetwork)) +- require.Equal(t, testNetwork, cfg.CannonNetwork) ++ require.Equal(t, testNetwork, cfg.Cannon.Network) + }) + }) +  +@@ -674,7 +674,7 @@ }) +  + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgsExcept(traceType, "--cannon-network", "--cannon-rollup-config=rollup.json", "--cannon-l2-genesis=genesis.json")) +- require.Equal(t, "rollup.json", cfg.CannonRollupConfigPath) ++ require.Equal(t, "rollup.json", cfg.Cannon.RollupConfigPath) + }) + }) +  +@@ -685,7 +685,7 @@ }) +  + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgsExcept(traceType, "--cannon-network", "--cannon-rollup-config=rollup.json", "--cannon-l2-genesis=genesis.json")) +- require.Equal(t, "genesis.json", cfg.CannonL2GenesisPath) ++ require.Equal(t, "genesis.json", cfg.Cannon.L2GenesisPath) + }) + }) + } +@@ -729,7 +729,7 @@ }) +  + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(config.TraceTypeAlphabet, "--game-window=1m")) +- require.Equal(t, time.Duration(time.Minute), cfg.GameWindow) ++ require.Equal(t, time.Minute, cfg.GameWindow) + }) +  + t.Run("ParsesDefault", func(t *testing.T) {
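Besides mechanically following the cfg.Cannon / cfg.Asterisc field moves, this file picks up one small cleanup: `time.Duration(time.Minute)` becomes `time.Minute`. The conversion was a no-op, since the time package's duration constants are already typed. A trivial check, for the record:

package main

import (
	"fmt"
	"time"
)

func main() {
	var w time.Duration = time.Minute // no conversion needed: time.Minute is a time.Duration
	fmt.Println(w == time.Duration(time.Minute)) // true; the removed cast changed nothing
}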
+ [file header: op-challenger/config/config.go; OP/CELO source links; +43 / -43 lines]
diff --git OP/op-challenger/config/config.go CELO/op-challenger/config/config.go +index 9586eb54906b257816b9913cd0f5bd47a38d9655..3c6f8c846aff9fc0f79e7f4e663da1bcb1a66fdf 100644 +--- OP/op-challenger/config/config.go ++++ CELO/op-challenger/config/config.go +@@ -8,12 +8,12 @@ "runtime" + "slices" + "time" +  +- "github.com/ethereum/go-ethereum/common" +- ++ "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/vm" + "github.com/ethereum-optimism/optimism/op-node/chaincfg" + opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" + "github.com/ethereum-optimism/optimism/op-service/oppprof" + "github.com/ethereum-optimism/optimism/op-service/txmgr" ++ "github.com/ethereum/go-ethereum/common" + ) +  + var ( +@@ -129,26 +129,14 @@ + L2Rpc string // L2 RPC Url +  + // Specific to the cannon trace provider +- CannonBin string // Path to the cannon executable to run when generating trace data +- CannonServer string // Path to the op-program executable that provides the pre-image oracle server ++ Cannon vm.Config + CannonAbsolutePreState string // File to load the absolute pre-state for Cannon traces from + CannonAbsolutePreStateBaseURL *url.URL // Base URL to retrieve absolute pre-states for Cannon traces from +- CannonNetwork string +- CannonRollupConfigPath string +- CannonL2GenesisPath string +- CannonSnapshotFreq uint // Frequency of snapshots to create when executing cannon (in VM instructions) +- CannonInfoFreq uint // Frequency of cannon progress log messages (in VM instructions) +  + // Specific to the asterisc trace provider +- AsteriscBin string // Path to the asterisc executable to run when generating trace data +- AsteriscServer string // Path to the op-program executable that provides the pre-image oracle server ++ Asterisc vm.Config + AsteriscAbsolutePreState string // File to load the absolute pre-state for Asterisc traces from + AsteriscAbsolutePreStateBaseURL *url.URL // Base URL to retrieve absolute pre-states for Asterisc traces from +- AsteriscNetwork string +- AsteriscRollupConfigPath string +- AsteriscL2GenesisPath string +- AsteriscSnapshotFreq uint // Frequency of snapshots to create when executing asterisc (in VM instructions) +- AsteriscInfoFreq uint // Frequency of asterisc progress log messages (in VM instructions) +  + MaxPendingTx uint64 // Maximum number of pending transactions (0 == no limit) +  +@@ -185,11 +173,23 @@ PprofConfig: oppprof.DefaultCLIConfig(), +  + Datadir: datadir, +  +- CannonSnapshotFreq: DefaultCannonSnapshotFreq, +- CannonInfoFreq: DefaultCannonInfoFreq, +- AsteriscSnapshotFreq: DefaultAsteriscSnapshotFreq, +- AsteriscInfoFreq: DefaultAsteriscInfoFreq, +- GameWindow: DefaultGameWindow, ++ Cannon: vm.Config{ ++ VmType: TraceTypeCannon.String(), ++ L1: l1EthRpc, ++ L1Beacon: l1BeaconApi, ++ L2: l2EthRpc, ++ SnapshotFreq: DefaultCannonSnapshotFreq, ++ InfoFreq: DefaultCannonInfoFreq, ++ }, ++ Asterisc: vm.Config{ ++ VmType: TraceTypeAsterisc.String(), ++ L1: l1EthRpc, ++ L1Beacon: l1BeaconApi, ++ L2: l2EthRpc, ++ SnapshotFreq: DefaultAsteriscSnapshotFreq, ++ InfoFreq: DefaultAsteriscInfoFreq, ++ }, ++ GameWindow: DefaultGameWindow, + } + } +  +@@ -223,28 +223,28 @@ if c.MaxConcurrency == 0 { + return ErrMaxConcurrencyZero + } + if c.TraceTypeEnabled(TraceTypeCannon) || c.TraceTypeEnabled(TraceTypePermissioned) { +- if c.CannonBin == "" { ++ if c.Cannon.VmBin == "" { + return ErrMissingCannonBin + } +- if c.CannonServer == "" { ++ if c.Cannon.Server == "" { + return ErrMissingCannonServer + } +- if c.CannonNetwork == "" 
{ +- if c.CannonRollupConfigPath == "" { ++ if c.Cannon.Network == "" { ++ if c.Cannon.RollupConfigPath == "" { + return ErrMissingCannonRollupConfig + } +- if c.CannonL2GenesisPath == "" { ++ if c.Cannon.L2GenesisPath == "" { + return ErrMissingCannonL2Genesis + } + } else { +- if c.CannonRollupConfigPath != "" { ++ if c.Cannon.RollupConfigPath != "" { + return ErrCannonNetworkAndRollupConfig + } +- if c.CannonL2GenesisPath != "" { ++ if c.Cannon.L2GenesisPath != "" { + return ErrCannonNetworkAndL2Genesis + } +- if ch := chaincfg.ChainByName(c.CannonNetwork); ch == nil { +- return fmt.Errorf("%w: %v", ErrCannonNetworkUnknown, c.CannonNetwork) ++ if ch := chaincfg.ChainByName(c.Cannon.Network); ch == nil { ++ return fmt.Errorf("%w: %v", ErrCannonNetworkUnknown, c.Cannon.Network) + } + } + if c.CannonAbsolutePreState == "" && c.CannonAbsolutePreStateBaseURL == nil { +@@ -253,36 +253,36 @@ } + if c.CannonAbsolutePreState != "" && c.CannonAbsolutePreStateBaseURL != nil { + return ErrCannonAbsolutePreStateAndBaseURL + } +- if c.CannonSnapshotFreq == 0 { ++ if c.Cannon.SnapshotFreq == 0 { + return ErrMissingCannonSnapshotFreq + } +- if c.CannonInfoFreq == 0 { ++ if c.Cannon.InfoFreq == 0 { + return ErrMissingCannonInfoFreq + } + } + if c.TraceTypeEnabled(TraceTypeAsterisc) { +- if c.AsteriscBin == "" { ++ if c.Asterisc.VmBin == "" { + return ErrMissingAsteriscBin + } +- if c.AsteriscServer == "" { ++ if c.Asterisc.Server == "" { + return ErrMissingAsteriscServer + } +- if c.AsteriscNetwork == "" { +- if c.AsteriscRollupConfigPath == "" { ++ if c.Asterisc.Network == "" { ++ if c.Asterisc.RollupConfigPath == "" { + return ErrMissingAsteriscRollupConfig + } +- if c.AsteriscL2GenesisPath == "" { ++ if c.Asterisc.L2GenesisPath == "" { + return ErrMissingAsteriscL2Genesis + } + } else { +- if c.AsteriscRollupConfigPath != "" { ++ if c.Asterisc.RollupConfigPath != "" { + return ErrAsteriscNetworkAndRollupConfig + } +- if c.AsteriscL2GenesisPath != "" { ++ if c.Asterisc.L2GenesisPath != "" { + return ErrAsteriscNetworkAndL2Genesis + } +- if ch := chaincfg.ChainByName(c.AsteriscNetwork); ch == nil { +- return fmt.Errorf("%w: %v", ErrAsteriscNetworkUnknown, c.AsteriscNetwork) ++ if ch := chaincfg.ChainByName(c.Asterisc.Network); ch == nil { ++ return fmt.Errorf("%w: %v", ErrAsteriscNetworkUnknown, c.Asterisc.Network) + } + } + if c.AsteriscAbsolutePreState == "" && c.AsteriscAbsolutePreStateBaseURL == nil { +@@ -291,10 +291,10 @@ } + if c.AsteriscAbsolutePreState != "" && c.AsteriscAbsolutePreStateBaseURL != nil { + return ErrAsteriscAbsolutePreStateAndBaseURL + } +- if c.AsteriscSnapshotFreq == 0 { ++ if c.Asterisc.SnapshotFreq == 0 { + return ErrMissingAsteriscSnapshotFreq + } +- if c.AsteriscInfoFreq == 0 { ++ if c.Asterisc.InfoFreq == 0 { + return ErrMissingAsteriscInfoFreq + } + }
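The flat Cannon*/Asterisc* field pairs collapse into two values of a shared vm.Config type, which also carries the L1/L1Beacon/L2 endpoints so the VM executor no longer needs them passed separately. A sketch of a vm.Config literal using only field names that appear in the hunks above; the values are placeholders, not recommended settings:

package main

import (
	"fmt"

	"github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/vm"
)

func main() {
	cannon := vm.Config{
		VmType:       "cannon",
		L1:           "http://localhost:8545", // placeholder endpoints
		L1Beacon:     "http://localhost:5052",
		L2:           "http://localhost:9545",
		VmBin:        "./bin/cannon",     // path to the cannon executable
		Server:       "./bin/op-program", // pre-image oracle server
		Network:      "op-sepolia",
		SnapshotFreq: 1_000_000,  // VM instructions between snapshots
		InfoFreq:     10_000_000, // VM instructions between progress logs
	}
	fmt.Println(cannon.VmType)
}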
+ [file header: op-challenger/config/config_test.go; OP/CELO source links; +44 / -44 lines]
diff --git OP/op-challenger/config/config_test.go CELO/op-challenger/config/config_test.go +index 297bc60b97dc35f3c5c9f2f1d591426c0d3725ad..6cfae373277ce11e30994ae26642e09a236b5fa1 100644 +--- OP/op-challenger/config/config_test.go ++++ CELO/op-challenger/config/config_test.go +@@ -36,17 +36,17 @@ var cannonTraceTypes = []TraceType{TraceTypeCannon, TraceTypePermissioned} + var asteriscTraceTypes = []TraceType{TraceTypeAsterisc} +  + func applyValidConfigForCannon(cfg *Config) { +- cfg.CannonBin = validCannonBin +- cfg.CannonServer = validCannonOpProgramBin ++ cfg.Cannon.VmBin = validCannonBin ++ cfg.Cannon.Server = validCannonOpProgramBin + cfg.CannonAbsolutePreStateBaseURL = validCannonAbsolutPreStateBaseURL +- cfg.CannonNetwork = validCannonNetwork ++ cfg.Cannon.Network = validCannonNetwork + } +  + func applyValidConfigForAsterisc(cfg *Config) { +- cfg.AsteriscBin = validAsteriscBin +- cfg.AsteriscServer = validAsteriscOpProgramBin ++ cfg.Asterisc.VmBin = validAsteriscBin ++ cfg.Asterisc.Server = validAsteriscOpProgramBin + cfg.AsteriscAbsolutePreStateBaseURL = validAsteriscAbsolutPreStateBaseURL +- cfg.AsteriscNetwork = validAsteriscNetwork ++ cfg.Asterisc.Network = validAsteriscNetwork + } +  + func validConfig(traceType TraceType) Config { +@@ -115,13 +115,13 @@ traceType := traceType +  + t.Run(fmt.Sprintf("TestCannonBinRequired-%v", traceType), func(t *testing.T) { + config := validConfig(traceType) +- config.CannonBin = "" ++ config.Cannon.VmBin = "" + require.ErrorIs(t, config.Check(), ErrMissingCannonBin) + }) +  + t.Run(fmt.Sprintf("TestCannonServerRequired-%v", traceType), func(t *testing.T) { + config := validConfig(traceType) +- config.CannonServer = "" ++ config.Cannon.Server = "" + require.ErrorIs(t, config.Check(), ErrMissingCannonServer) + }) +  +@@ -162,7 +162,7 @@ + t.Run(fmt.Sprintf("TestCannonSnapshotFreq-%v", traceType), func(t *testing.T) { + t.Run("MustNotBeZero", func(t *testing.T) { + cfg := validConfig(traceType) +- cfg.CannonSnapshotFreq = 0 ++ cfg.Cannon.SnapshotFreq = 0 + require.ErrorIs(t, cfg.Check(), ErrMissingCannonSnapshotFreq) + }) + }) +@@ -170,46 +170,46 @@ + t.Run(fmt.Sprintf("TestCannonInfoFreq-%v", traceType), func(t *testing.T) { + t.Run("MustNotBeZero", func(t *testing.T) { + cfg := validConfig(traceType) +- cfg.CannonInfoFreq = 0 ++ cfg.Cannon.InfoFreq = 0 + require.ErrorIs(t, cfg.Check(), ErrMissingCannonInfoFreq) + }) + }) +  + t.Run(fmt.Sprintf("TestCannonNetworkOrRollupConfigRequired-%v", traceType), func(t *testing.T) { + cfg := validConfig(traceType) +- cfg.CannonNetwork = "" +- cfg.CannonRollupConfigPath = "" +- cfg.CannonL2GenesisPath = "genesis.json" ++ cfg.Cannon.Network = "" ++ cfg.Cannon.RollupConfigPath = "" ++ cfg.Cannon.L2GenesisPath = "genesis.json" + require.ErrorIs(t, cfg.Check(), ErrMissingCannonRollupConfig) + }) +  + t.Run(fmt.Sprintf("TestCannonNetworkOrL2GenesisRequired-%v", traceType), func(t *testing.T) { + cfg := validConfig(traceType) +- cfg.CannonNetwork = "" +- cfg.CannonRollupConfigPath = "foo.json" +- cfg.CannonL2GenesisPath = "" ++ cfg.Cannon.Network = "" ++ cfg.Cannon.RollupConfigPath = "foo.json" ++ cfg.Cannon.L2GenesisPath = "" + require.ErrorIs(t, cfg.Check(), ErrMissingCannonL2Genesis) + }) +  + t.Run(fmt.Sprintf("TestMustNotSpecifyNetworkAndRollup-%v", traceType), func(t *testing.T) { + cfg := validConfig(traceType) +- cfg.CannonNetwork = validCannonNetwork +- cfg.CannonRollupConfigPath = "foo.json" +- cfg.CannonL2GenesisPath = "" ++ cfg.Cannon.Network = validCannonNetwork ++ cfg.Cannon.RollupConfigPath = 
"foo.json" ++ cfg.Cannon.L2GenesisPath = "" + require.ErrorIs(t, cfg.Check(), ErrCannonNetworkAndRollupConfig) + }) +  + t.Run(fmt.Sprintf("TestMustNotSpecifyNetworkAndL2Genesis-%v", traceType), func(t *testing.T) { + cfg := validConfig(traceType) +- cfg.CannonNetwork = validCannonNetwork +- cfg.CannonRollupConfigPath = "" +- cfg.CannonL2GenesisPath = "foo.json" ++ cfg.Cannon.Network = validCannonNetwork ++ cfg.Cannon.RollupConfigPath = "" ++ cfg.Cannon.L2GenesisPath = "foo.json" + require.ErrorIs(t, cfg.Check(), ErrCannonNetworkAndL2Genesis) + }) +  + t.Run(fmt.Sprintf("TestNetworkMustBeValid-%v", traceType), func(t *testing.T) { + cfg := validConfig(traceType) +- cfg.CannonNetwork = "unknown" ++ cfg.Cannon.Network = "unknown" + require.ErrorIs(t, cfg.Check(), ErrCannonNetworkUnknown) + }) + } +@@ -221,13 +221,13 @@ traceType := traceType +  + t.Run(fmt.Sprintf("TestAsteriscBinRequired-%v", traceType), func(t *testing.T) { + config := validConfig(traceType) +- config.AsteriscBin = "" ++ config.Asterisc.VmBin = "" + require.ErrorIs(t, config.Check(), ErrMissingAsteriscBin) + }) +  + t.Run(fmt.Sprintf("TestAsteriscServerRequired-%v", traceType), func(t *testing.T) { + config := validConfig(traceType) +- config.AsteriscServer = "" ++ config.Asterisc.Server = "" + require.ErrorIs(t, config.Check(), ErrMissingAsteriscServer) + }) +  +@@ -268,7 +268,7 @@ + t.Run(fmt.Sprintf("TestAsteriscSnapshotFreq-%v", traceType), func(t *testing.T) { + t.Run("MustNotBeZero", func(t *testing.T) { + cfg := validConfig(traceType) +- cfg.AsteriscSnapshotFreq = 0 ++ cfg.Asterisc.SnapshotFreq = 0 + require.ErrorIs(t, cfg.Check(), ErrMissingAsteriscSnapshotFreq) + }) + }) +@@ -276,46 +276,46 @@ + t.Run(fmt.Sprintf("TestAsteriscInfoFreq-%v", traceType), func(t *testing.T) { + t.Run("MustNotBeZero", func(t *testing.T) { + cfg := validConfig(traceType) +- cfg.AsteriscInfoFreq = 0 ++ cfg.Asterisc.InfoFreq = 0 + require.ErrorIs(t, cfg.Check(), ErrMissingAsteriscInfoFreq) + }) + }) +  + t.Run(fmt.Sprintf("TestAsteriscNetworkOrRollupConfigRequired-%v", traceType), func(t *testing.T) { + cfg := validConfig(traceType) +- cfg.AsteriscNetwork = "" +- cfg.AsteriscRollupConfigPath = "" +- cfg.AsteriscL2GenesisPath = "genesis.json" ++ cfg.Asterisc.Network = "" ++ cfg.Asterisc.RollupConfigPath = "" ++ cfg.Asterisc.L2GenesisPath = "genesis.json" + require.ErrorIs(t, cfg.Check(), ErrMissingAsteriscRollupConfig) + }) +  + t.Run(fmt.Sprintf("TestAsteriscNetworkOrL2GenesisRequired-%v", traceType), func(t *testing.T) { + cfg := validConfig(traceType) +- cfg.AsteriscNetwork = "" +- cfg.AsteriscRollupConfigPath = "foo.json" +- cfg.AsteriscL2GenesisPath = "" ++ cfg.Asterisc.Network = "" ++ cfg.Asterisc.RollupConfigPath = "foo.json" ++ cfg.Asterisc.L2GenesisPath = "" + require.ErrorIs(t, cfg.Check(), ErrMissingAsteriscL2Genesis) + }) +  + t.Run(fmt.Sprintf("TestMustNotSpecifyNetworkAndRollup-%v", traceType), func(t *testing.T) { + cfg := validConfig(traceType) +- cfg.AsteriscNetwork = validAsteriscNetwork +- cfg.AsteriscRollupConfigPath = "foo.json" +- cfg.AsteriscL2GenesisPath = "" ++ cfg.Asterisc.Network = validAsteriscNetwork ++ cfg.Asterisc.RollupConfigPath = "foo.json" ++ cfg.Asterisc.L2GenesisPath = "" + require.ErrorIs(t, cfg.Check(), ErrAsteriscNetworkAndRollupConfig) + }) +  + t.Run(fmt.Sprintf("TestMustNotSpecifyNetworkAndL2Genesis-%v", traceType), func(t *testing.T) { + cfg := validConfig(traceType) +- cfg.AsteriscNetwork = validAsteriscNetwork +- cfg.AsteriscRollupConfigPath = "" +- cfg.AsteriscL2GenesisPath = "foo.json" ++ 
cfg.Asterisc.Network = validAsteriscNetwork ++ cfg.Asterisc.RollupConfigPath = "" ++ cfg.Asterisc.L2GenesisPath = "foo.json" + require.ErrorIs(t, cfg.Check(), ErrAsteriscNetworkAndL2Genesis) + }) +  + t.Run(fmt.Sprintf("TestNetworkMustBeValid-%v", traceType), func(t *testing.T) { + cfg := validConfig(traceType) +- cfg.AsteriscNetwork = "unknown" ++ cfg.Asterisc.Network = "unknown" + require.ErrorIs(t, cfg.Check(), ErrAsteriscNetworkUnknown) + }) + } +@@ -404,9 +404,9 @@ cfg.RollupRpc = validRollupRpc + require.NoError(t, cfg.Check()) +  + // Require cannon specific args +- cfg.CannonBin = "" ++ cfg.Cannon.VmBin = "" + require.ErrorIs(t, cfg.Check(), ErrMissingCannonBin) +- cfg.CannonBin = validCannonBin ++ cfg.Cannon.VmBin = validCannonBin +  + // Require asterisc specific args + cfg.AsteriscAbsolutePreState = "" +@@ -415,9 +415,9 @@ require.ErrorIs(t, cfg.Check(), ErrMissingAsteriscAbsolutePreState) + cfg.AsteriscAbsolutePreState = validAsteriscAbsolutPreState +  + // Require cannon specific args +- cfg.AsteriscServer = "" ++ cfg.Asterisc.Server = "" + require.ErrorIs(t, cfg.Check(), ErrMissingAsteriscServer) +- cfg.AsteriscServer = validAsteriscOpProgramBin ++ cfg.Asterisc.Server = validAsteriscOpProgramBin +  + // Check final config is valid + require.NoError(t, cfg.Check())
+ [file header: op-challenger/flags/flags.go; OP/CELO source links; +44 / -29 lines]
diff --git OP/op-challenger/flags/flags.go CELO/op-challenger/flags/flags.go +index 5c2c6ea199599cbb1819ac273a2623e994b4b4a5..726501ec65ec954d27bdeadd6fc130f3ef0fd516 100644 +--- OP/op-challenger/flags/flags.go ++++ CELO/op-challenger/flags/flags.go +@@ -7,6 +7,7 @@ "runtime" + "slices" + "strings" +  ++ "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/vm" + "github.com/ethereum-optimism/optimism/op-service/flags" + "github.com/ethereum-optimism/superchain-registry/superchain" + "github.com/ethereum/go-ethereum/common" +@@ -496,39 +497,53 @@ asteriscNetwork := ctx.String(AsteriscNetworkFlag.Name) + if ctx.IsSet(flags.NetworkFlagName) { + asteriscNetwork = ctx.String(flags.NetworkFlagName) + } ++ l1EthRpc := ctx.String(L1EthRpcFlag.Name) ++ l1Beacon := ctx.String(L1BeaconFlag.Name) + return &config.Config{ + // Required Flags +- L1EthRpc: ctx.String(L1EthRpcFlag.Name), +- L1Beacon: ctx.String(L1BeaconFlag.Name), +- TraceTypes: traceTypes, +- GameFactoryAddress: gameFactoryAddress, +- GameAllowlist: allowedGames, +- GameWindow: ctx.Duration(GameWindowFlag.Name), +- MaxConcurrency: maxConcurrency, +- L2Rpc: l2Rpc, +- MaxPendingTx: ctx.Uint64(MaxPendingTransactionsFlag.Name), +- PollInterval: ctx.Duration(HTTPPollInterval.Name), +- AdditionalBondClaimants: claimants, +- RollupRpc: ctx.String(RollupRpcFlag.Name), +- CannonNetwork: cannonNetwork, +- CannonRollupConfigPath: ctx.String(CannonRollupConfigFlag.Name), +- CannonL2GenesisPath: ctx.String(CannonL2GenesisFlag.Name), +- CannonBin: ctx.String(CannonBinFlag.Name), +- CannonServer: ctx.String(CannonServerFlag.Name), +- CannonAbsolutePreState: ctx.String(CannonPreStateFlag.Name), +- CannonAbsolutePreStateBaseURL: cannonPrestatesURL, +- Datadir: ctx.String(DatadirFlag.Name), +- CannonSnapshotFreq: ctx.Uint(CannonSnapshotFreqFlag.Name), +- CannonInfoFreq: ctx.Uint(CannonInfoFreqFlag.Name), +- AsteriscNetwork: asteriscNetwork, +- AsteriscRollupConfigPath: ctx.String(AsteriscRollupConfigFlag.Name), +- AsteriscL2GenesisPath: ctx.String(AsteriscL2GenesisFlag.Name), +- AsteriscBin: ctx.String(AsteriscBinFlag.Name), +- AsteriscServer: ctx.String(AsteriscServerFlag.Name), ++ L1EthRpc: l1EthRpc, ++ L1Beacon: l1Beacon, ++ TraceTypes: traceTypes, ++ GameFactoryAddress: gameFactoryAddress, ++ GameAllowlist: allowedGames, ++ GameWindow: ctx.Duration(GameWindowFlag.Name), ++ MaxConcurrency: maxConcurrency, ++ L2Rpc: l2Rpc, ++ MaxPendingTx: ctx.Uint64(MaxPendingTransactionsFlag.Name), ++ PollInterval: ctx.Duration(HTTPPollInterval.Name), ++ AdditionalBondClaimants: claimants, ++ RollupRpc: ctx.String(RollupRpcFlag.Name), ++ Cannon: vm.Config{ ++ VmType: config.TraceTypeCannon.String(), ++ L1: l1EthRpc, ++ L1Beacon: l1Beacon, ++ L2: l2Rpc, ++ VmBin: ctx.String(CannonBinFlag.Name), ++ Server: ctx.String(CannonServerFlag.Name), ++ Network: cannonNetwork, ++ RollupConfigPath: ctx.String(CannonRollupConfigFlag.Name), ++ L2GenesisPath: ctx.String(CannonL2GenesisFlag.Name), ++ SnapshotFreq: ctx.Uint(CannonSnapshotFreqFlag.Name), ++ InfoFreq: ctx.Uint(CannonInfoFreqFlag.Name), ++ }, ++ CannonAbsolutePreState: ctx.String(CannonPreStateFlag.Name), ++ CannonAbsolutePreStateBaseURL: cannonPrestatesURL, ++ Datadir: ctx.String(DatadirFlag.Name), ++ Asterisc: vm.Config{ ++ VmType: config.TraceTypeAsterisc.String(), ++ L1: l1EthRpc, ++ L1Beacon: l1Beacon, ++ L2: l2Rpc, ++ VmBin: ctx.String(AsteriscBinFlag.Name), ++ Server: ctx.String(AsteriscServerFlag.Name), ++ Network: asteriscNetwork, ++ RollupConfigPath: ctx.String(AsteriscRollupConfigFlag.Name), ++ 
L2GenesisPath: ctx.String(AsteriscL2GenesisFlag.Name), ++ SnapshotFreq: ctx.Uint(AsteriscSnapshotFreqFlag.Name), ++ InfoFreq: ctx.Uint(AsteriscInfoFreqFlag.Name), ++ }, + AsteriscAbsolutePreState: ctx.String(AsteriscPreStateFlag.Name), + AsteriscAbsolutePreStateBaseURL: asteriscPreStatesURL, +- AsteriscSnapshotFreq: ctx.Uint(AsteriscSnapshotFreqFlag.Name), +- AsteriscInfoFreq: ctx.Uint(AsteriscInfoFreqFlag.Name), + TxMgrConfig: txMgrConfig, + MetricsConfig: metricsConfig, + PprofConfig: pprofConfig,
+ [file header: op-challenger/game/fault/register.go; OP/CELO source links; +2 / -2 lines]
diff --git OP/op-challenger/game/fault/register.go CELO/op-challenger/game/fault/register.go +index cb23266a2b1236e5bfab45c89cf72562704c1c5f..7478dd4b23fedd092c184d8fef574c68e27b7dbf 100644 +--- OP/op-challenger/game/fault/register.go ++++ CELO/op-challenger/game/fault/register.go +@@ -254,7 +254,7 @@ asteriscPrestate, err := prestateSource.PrestatePath(requiredPrestatehash) + if err != nil { + return nil, fmt.Errorf("failed to get asterisc prestate: %w", err) + } +- accessor, err := outputs.NewOutputAsteriscTraceAccessor(logger, m, cfg, l2Client, prestateProvider, asteriscPrestate, rollupClient, dir, l1HeadID, splitDepth, prestateBlock, poststateBlock) ++ accessor, err := outputs.NewOutputAsteriscTraceAccessor(logger, m, cfg.Asterisc, l2Client, prestateProvider, asteriscPrestate, rollupClient, dir, l1HeadID, splitDepth, prestateBlock, poststateBlock) + if err != nil { + return nil, err + } +@@ -349,7 +349,7 @@ cannonPrestate, err := prestateSource.PrestatePath(requiredPrestatehash) + if err != nil { + return nil, fmt.Errorf("failed to get cannon prestate: %w", err) + } +- accessor, err := outputs.NewOutputCannonTraceAccessor(logger, m, cfg, l2Client, prestateProvider, cannonPrestate, rollupClient, dir, l1HeadID, splitDepth, prestateBlock, poststateBlock) ++ accessor, err := outputs.NewOutputCannonTraceAccessor(logger, m, cfg.Cannon, l2Client, prestateProvider, cannonPrestate, rollupClient, dir, l1HeadID, splitDepth, prestateBlock, poststateBlock) + if err != nil { + return nil, err + }
+ [file header: op-challenger/game/monitor.go; OP/CELO source links; +20 / -25 lines]
diff --git OP/op-challenger/game/monitor.go CELO/op-challenger/game/monitor.go +index fe263a661692e84d2e0282b6e64485be241035ad..dbdcc26ab81c7343a271f757973b5b9348166a45 100644 +--- OP/op-challenger/game/monitor.go ++++ CELO/op-challenger/game/monitor.go +@@ -19,8 +19,6 @@ "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/log" + ) +  +-type blockNumberFetcher func(ctx context.Context) (uint64, error) +- + // gameSource loads information about the games available to play + type gameSource interface { + GetGamesAtOrAfter(ctx context.Context, blockHash common.Hash, earliestTimestamp uint64) ([]types.GameMetadata, error) +@@ -44,18 +42,17 @@ Schedule(blockNumber uint64, games []types.GameMetadata) error + } +  + type gameMonitor struct { +- logger log.Logger +- clock RWClock +- source gameSource +- scheduler gameScheduler +- preimages preimageScheduler +- gameWindow time.Duration +- claimer claimer +- fetchBlockNumber blockNumberFetcher +- allowedGames []common.Address +- l1HeadsSub ethereum.Subscription +- l1Source *headSource +- runState sync.Mutex ++ logger log.Logger ++ clock RWClock ++ source gameSource ++ scheduler gameScheduler ++ preimages preimageScheduler ++ gameWindow time.Duration ++ claimer claimer ++ allowedGames []common.Address ++ l1HeadsSub ethereum.Subscription ++ l1Source *headSource ++ runState sync.Mutex + } +  + type MinimalSubscriber interface { +@@ -78,21 +75,19 @@ scheduler gameScheduler, + preimages preimageScheduler, + gameWindow time.Duration, + claimer claimer, +- fetchBlockNumber blockNumberFetcher, + allowedGames []common.Address, + l1Source MinimalSubscriber, + ) *gameMonitor { + return &gameMonitor{ +- logger: logger, +- clock: cl, +- scheduler: scheduler, +- preimages: preimages, +- source: source, +- gameWindow: gameWindow, +- claimer: claimer, +- fetchBlockNumber: fetchBlockNumber, +- allowedGames: allowedGames, +- l1Source: &headSource{inner: l1Source}, ++ logger: logger, ++ clock: cl, ++ scheduler: scheduler, ++ preimages: preimages, ++ source: source, ++ gameWindow: gameWindow, ++ claimer: claimer, ++ allowedGames: allowedGames, ++ l1Source: &headSource{inner: l1Source}, + } + } +
+ [file header: op-challenger/game/monitor_test.go; OP/CELO source links; +0 / -6 lines]
diff --git OP/op-challenger/game/monitor_test.go CELO/op-challenger/game/monitor_test.go +index 7a3da241aa247b429a9968479d6368edc72e817c..ff2871ed4efb7c65648b8e355eb971118f38a529 100644 +--- OP/op-challenger/game/monitor_test.go ++++ CELO/op-challenger/game/monitor_test.go +@@ -155,11 +155,6 @@ allowedGames []common.Address, + ) (*gameMonitor, *stubGameSource, *stubScheduler, *mockNewHeadSource, *stubPreimageScheduler, *mockScheduler) { + logger := testlog.Logger(t, log.LevelDebug) + source := &stubGameSource{} +- i := uint64(1) +- fetchBlockNum := func(ctx context.Context) (uint64, error) { +- i++ +- return i, nil +- } + sched := &stubScheduler{} + preimages := &stubPreimageScheduler{} + mockHeadSource := &mockNewHeadSource{} +@@ -172,7 +167,6 @@ sched, + preimages, + time.Duration(0), + stubClaimer, +- fetchBlockNum, + allowedGames, + mockHeadSource, + )
+ [file header: op-challenger/game/service.go; OP/CELO source links; +6 / -1 lines]
diff --git OP/op-challenger/game/service.go CELO/op-challenger/game/service.go +index 587de9a366ab887a32ad4f48a2462741a3357334..fea6bc2aaf63c603cba4d5b13010108269842ecf 100644 +--- OP/op-challenger/game/service.go ++++ CELO/op-challenger/game/service.go +@@ -251,7 +251,7 @@ return nil + } +  + func (s *Service) initMonitor(cfg *config.Config) { +- s.monitor = newGameMonitor(s.logger, s.l1Clock, s.factoryContract, s.sched, s.preimages, cfg.GameWindow, s.claimer, s.l1Client.BlockNumber, cfg.GameAllowlist, s.pollClient) ++ s.monitor = newGameMonitor(s.logger, s.l1Clock, s.factoryContract, s.sched, s.preimages, cfg.GameWindow, s.claimer, cfg.GameAllowlist, s.pollClient) + } +  + func (s *Service) Start(ctx context.Context) error { +@@ -280,6 +280,11 @@ } + } + if s.monitor != nil { + s.monitor.StopMonitoring() ++ } ++ if s.claimer != nil { ++ if err := s.claimer.Close(); err != nil { ++ result = errors.Join(result, fmt.Errorf("failed to close claimer: %w", err)) ++ } + } + if s.faultGamesCloser != nil { + s.faultGamesCloser()
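Besides dropping the now-unused block-number fetcher from the newGameMonitor call, Stop gains a claimer shutdown that joins its error into the aggregate result rather than returning early. The general shape, in a self-contained sketch (the stopAll helper is illustrative, not part of the codebase):

package main

import (
	"errors"
	"fmt"
	"io"
)

// stopAll mirrors the Stop pattern above: close each optional component,
// joining failures so a single bad closer cannot mask the others.
func stopAll(closers ...io.Closer) error {
	var result error
	for _, c := range closers {
		if c == nil {
			continue // components that were never initialized are skipped
		}
		if err := c.Close(); err != nil {
			result = errors.Join(result, fmt.Errorf("failed to close component: %w", err))
		}
	}
	return result
}

func main() {
	fmt.Println(stopAll(nil, nil)) // <nil>: nothing to close, no error
}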
+ [file header: op-challenger/metrics/metrics.go; OP/CELO source links; +11 / -24 lines]
diff --git OP/op-challenger/metrics/metrics.go CELO/op-challenger/metrics/metrics.go +index 9253f26cc63146d1d3d8f959c766d0900df04763..c46edcd67fccde32bbc6e36397be6a1da445d7e1 100644 +--- OP/op-challenger/metrics/metrics.go ++++ CELO/op-challenger/metrics/metrics.go +@@ -2,6 +2,7 @@ package metrics +  + import ( + "io" ++ "time" +  + "github.com/ethereum-optimism/optimism/op-service/httputil" + "github.com/ethereum-optimism/optimism/op-service/sources/caching" +@@ -37,8 +38,7 @@ + RecordGameStep() + RecordGameMove() + RecordGameL2Challenge() +- RecordCannonExecutionTime(t float64) +- RecordAsteriscExecutionTime(t float64) ++ RecordVmExecutionTime(vmType string, t time.Duration) + RecordClaimResolutionTime(t float64) + RecordGameActTime(t float64) +  +@@ -88,10 +88,9 @@ moves prometheus.Counter + steps prometheus.Counter + l2Challenges prometheus.Counter +  +- claimResolutionTime prometheus.Histogram +- gameActTime prometheus.Histogram +- cannonExecutionTime prometheus.Histogram +- asteriscExecutionTime prometheus.Histogram ++ claimResolutionTime prometheus.Histogram ++ gameActTime prometheus.Histogram ++ vmExecutionTime *prometheus.HistogramVec +  + trackedGames prometheus.GaugeVec + inflightGames prometheus.Gauge +@@ -152,14 +151,6 @@ Namespace: Namespace, + Name: "l2_challenges", + Help: "Number of L2 challenges made by the challenge agent", + }), +- cannonExecutionTime: factory.NewHistogram(prometheus.HistogramOpts{ +- Namespace: Namespace, +- Name: "cannon_execution_time", +- Help: "Time (in seconds) to execute cannon", +- Buckets: append( +- []float64{1.0, 10.0}, +- prometheus.ExponentialBuckets(30.0, 2.0, 14)...), +- }), + claimResolutionTime: factory.NewHistogram(prometheus.HistogramOpts{ + Namespace: Namespace, + Name: "claim_resolution_time", +@@ -174,14 +165,14 @@ Buckets: append( + []float64{1.0, 2.0, 5.0, 10.0}, + prometheus.ExponentialBuckets(30.0, 2.0, 14)...), + }), +- asteriscExecutionTime: factory.NewHistogram(prometheus.HistogramOpts{ ++ vmExecutionTime: factory.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: Namespace, +- Name: "asterisc_execution_time", +- Help: "Time (in seconds) to execute asterisc", ++ Name: "vm_execution_time", ++ Help: "Time (in seconds) to execute the fault proof VM", + Buckets: append( + []float64{1.0, 10.0}, + prometheus.ExponentialBuckets(30.0, 2.0, 14)...), +- }), ++ }, []string{"vm"}), + bondClaimFailures: factory.NewCounter(prometheus.CounterOpts{ + Namespace: Namespace, + Name: "claim_failures", +@@ -278,12 +269,8 @@ func (m *Metrics) RecordBondClaimed(amount uint64) { + m.bondsClaimed.Add(float64(amount)) + } +  +-func (m *Metrics) RecordCannonExecutionTime(t float64) { +- m.cannonExecutionTime.Observe(t) +-} +- +-func (m *Metrics) RecordAsteriscExecutionTime(t float64) { +- m.asteriscExecutionTime.Observe(t) ++func (m *Metrics) RecordVmExecutionTime(vmType string, dur time.Duration) { ++ m.vmExecutionTime.WithLabelValues(vmType).Observe(dur.Seconds()) + } +  + func (m *Metrics) RecordClaimResolutionTime(t float64) {
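Folding the two per-VM histograms into one vector keeps the bucket layout but moves the VM name into a label, so adding a third VM type needs no new metric. A self-contained sketch of the same pattern; the metric name, help text, buckets, and label come from the diff, while the namespace string is assumed (the real Namespace constant is not shown in this hunk):

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	vmExecutionTime := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: "op_challenger", // assumed; stand-in for the package's Namespace constant
		Name:      "vm_execution_time",
		Help:      "Time (in seconds) to execute the fault proof VM",
		Buckets: append(
			[]float64{1.0, 10.0},
			prometheus.ExponentialBuckets(30.0, 2.0, 14)...),
	}, []string{"vm"})
	prometheus.MustRegister(vmExecutionTime)

	// One labeled series per VM type instead of one metric per VM type.
	vmExecutionTime.WithLabelValues("cannon").Observe((90 * time.Second).Seconds())
	vmExecutionTime.WithLabelValues("asterisc").Observe((45 * time.Second).Seconds())
	fmt.Println("recorded")
}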
+ [file header: op-challenger/metrics/noop.go; OP/CELO source links; +4 / -4 lines]
diff --git OP/op-challenger/metrics/noop.go CELO/op-challenger/metrics/noop.go +index c238f03fcb7338370ed40efcd5c25f04d53748a3..fc0f6d077803bc8950f30963a274cb8c107608aa 100644 +--- OP/op-challenger/metrics/noop.go ++++ CELO/op-challenger/metrics/noop.go +@@ -2,6 +2,7 @@ package metrics +  + import ( + "io" ++ "time" +  + contractMetrics "github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts/metrics" + "github.com/ethereum/go-ethereum/common" +@@ -37,10 +38,9 @@ + func (*NoopMetricsImpl) RecordBondClaimFailed() {} + func (*NoopMetricsImpl) RecordBondClaimed(uint64) {} +  +-func (*NoopMetricsImpl) RecordCannonExecutionTime(t float64) {} +-func (*NoopMetricsImpl) RecordAsteriscExecutionTime(t float64) {} +-func (*NoopMetricsImpl) RecordClaimResolutionTime(t float64) {} +-func (*NoopMetricsImpl) RecordGameActTime(t float64) {} ++func (*NoopMetricsImpl) RecordVmExecutionTime(_ string, _ time.Duration) {} ++func (*NoopMetricsImpl) RecordClaimResolutionTime(t float64) {} ++func (*NoopMetricsImpl) RecordGameActTime(t float64) {} +  + func (*NoopMetricsImpl) RecordGamesStatus(inProgress, defenderWon, challengerWon int) {} +
+ [section divider and file header: op-e2e/actions/l2_batcher.go; OP/CELO source links; +2 / -2 lines]
diff --git OP/op-e2e/actions/l2_batcher.go CELO/op-e2e/actions/l2_batcher.go +index 310a6cade9516d26a8e55cc3273aca494eb7368b..fd249e58fe6ab2638ba2fec7b8abda9c6c796aa7 100644 +--- OP/op-e2e/actions/l2_batcher.go ++++ CELO/op-e2e/actions/l2_batcher.go +@@ -277,7 +277,7 @@ for _, opt := range txOpts { + opt(rawTx) + } +  +- gas, err := core.IntrinsicGas(rawTx.Data, nil, false, true, true, false) ++ gas, err := core.IntrinsicGas(rawTx.Data, nil, false, true, true, false, nil) + require.NoError(t, err, "need to compute intrinsic gas") + rawTx.Gas = gas + txData = rawTx +@@ -468,7 +468,7 @@ GasTipCap: gasTipCap, + GasFeeCap: gasFeeCap, + Data: outputFrame, + } +- gas, err := core.IntrinsicGas(rawTx.Data, nil, false, true, true, false) ++ gas, err := core.IntrinsicGas(rawTx.Data, nil, false, true, true, false, nil) + require.NoError(t, err, "need to compute intrinsic gas") + rawTx.Gas = gas +
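Celo's op-geth fork extends core.IntrinsicGas with one more parameter, and these call sites pass nil for it. The hunk does not show the parameter's type; passing nil evidently preserves the upstream behaviour, and a reasonable guess, given the fork's fee-currency feature, is that it carries fee-currency context. The backwards-compatible shape of such a change, as a generic illustration rather than geth's real signature:

package main

import "fmt"

// Not geth's real signature: a generic example of extending a function with
// a trailing nullable parameter so existing callers can pass nil unchanged.
func intrinsicGasLike(data []byte, extra *uint64) uint64 {
	gas := uint64(21_000) + uint64(len(data))*16
	if extra != nil {
		gas += *extra // fork-specific surcharge, applied only when provided
	}
	return gas
}

func main() {
	fmt.Println(intrinsicGasLike([]byte{1, 2, 3}, nil)) // 21048: nil keeps old behaviour
}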
+ [file header: op-e2e/actions/l2_batcher_test.go; OP/CELO source links; +3 / -2 lines]
diff --git OP/op-e2e/actions/l2_batcher_test.go CELO/op-e2e/actions/l2_batcher_test.go +index 3a137ce992af6458cc10688ec0a2b8fdfaf697b2..371dfe8a02ff0e1a0723316f025a3aebb7d186ea 100644 +--- OP/op-e2e/actions/l2_batcher_test.go ++++ CELO/op-e2e/actions/l2_batcher_test.go +@@ -17,6 +17,7 @@ + batcherFlags "github.com/ethereum-optimism/optimism/op-batcher/flags" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive" ++ "github.com/ethereum-optimism/optimism/op-node/rollup/finality" + "github.com/ethereum-optimism/optimism/op-node/rollup/sync" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/testlog" +@@ -246,7 +247,7 @@ // Now try to finalize block 4, but with a bad/malicious alternative hash. + // If we get this false signal, we shouldn't finalize the L2 chain. + altBlock4 := sequencer.SyncStatus().SafeL1 + altBlock4.Hash = common.HexToHash("0xdead") +- sequencer.finalizer.Finalize(t.Ctx(), altBlock4) ++ sequencer.synchronousEvents.Emit(finality.FinalizeL1Event{FinalizedL1: altBlock4}) + sequencer.ActL2PipelineFull(t) + require.Equal(t, uint64(3), sequencer.SyncStatus().FinalizedL1.Number) + require.Equal(t, heightToSubmit, sequencer.SyncStatus().FinalizedL2.Number, "unknown/bad finalized L1 blocks are ignored") +@@ -496,7 +497,7 @@ signer := types.LatestSigner(sd.L2Cfg.Config) + data := make([]byte, 120_000) // very large L2 txs, as large as the tx-pool will accept + _, err := rng.Read(data[:]) // fill with random bytes, to make compression ineffective + require.NoError(t, err) +- gas, err := core.IntrinsicGas(data, nil, false, true, true, false) ++ gas, err := core.IntrinsicGas(data, nil, false, true, true, false, nil) + require.NoError(t, err) + if gas > engine.engineApi.RemainingBlockGas() { + break
+ [file header: op-e2e/actions/l2_proposer_test.go; OP/CELO source links; +1 / -0 lines]
diff --git OP/op-e2e/actions/l2_proposer_test.go CELO/op-e2e/actions/l2_proposer_test.go +index 3de913f8e50ae833e1f1bbb48e58f4b62a427632..ba1f1d3640c0a4f388671ab054c69f9c3c7c9128 100644 +--- OP/op-e2e/actions/l2_proposer_test.go ++++ CELO/op-e2e/actions/l2_proposer_test.go +@@ -96,6 +96,7 @@ // derive and see the L2 chain fully finalize + sequencer.ActL2PipelineFull(t) + sequencer.ActL1SafeSignal(t) + sequencer.ActL1FinalizedSignal(t) ++ sequencer.ActL2PipelineFull(t) + require.Equal(t, sequencer.SyncStatus().UnsafeL2, sequencer.SyncStatus().FinalizedL2) + require.True(t, proposer.CanPropose(t)) +
+ [file header: op-e2e/actions/l2_sequencer.go; OP/CELO source links; +2 / -2 lines]
diff --git OP/op-e2e/actions/l2_sequencer.go CELO/op-e2e/actions/l2_sequencer.go +index fdbfb89b3426d3308a2456a0562f2c7a867d6be1..dd3a795e96ca26b09b645897d6e2cf61e6a62882 100644 +--- OP/op-e2e/actions/l2_sequencer.go ++++ CELO/op-e2e/actions/l2_sequencer.go +@@ -34,7 +34,7 @@ + // L2Sequencer is an actor that functions like a rollup node, + // without the full P2P/API/Node stack, but just the derivation state, and simplified driver with sequencing ability. + type L2Sequencer struct { +- L2Verifier ++ *L2Verifier +  + sequencer *driver.Sequencer +  +@@ -52,7 +52,7 @@ l1OriginSelector := &MockL1OriginSelector{ + actual: driver.NewL1OriginSelector(log, cfg, seqConfDepthL1), + } + return &L2Sequencer{ +- L2Verifier: *ver, ++ L2Verifier: ver, + sequencer: driver.NewSequencer(log, cfg, ver.engine, attrBuilder, l1OriginSelector, metrics.NoopMetrics), + mockL1OriginSelector: l1OriginSelector, + failL2GossipUnsafeBlock: nil,
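The sequencer now embeds *L2Verifier instead of L2Verifier. With value embedding, NewL2Sequencer copied the verifier, so the deriver callbacks mutated state the sequencer's copy never saw; embedding the pointer makes both handles share one state (the verifier side also gains a safego.NoCopy marker to document that it must not be copied). The difference in miniature, with throwaway types:

package main

import "fmt"

type inner struct{ n int }

type byValue struct{ inner }    // embeds a copy
type byPointer struct{ *inner } // shares the original

func main() {
	orig := &inner{}
	v := byValue{inner: *orig}
	p := byPointer{inner: orig}

	orig.n = 42           // mutate through the original handle
	fmt.Println(v.n, p.n) // 0 42: only the pointer embed observes the update
}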
+ [file header: op-e2e/actions/l2_verifier.go; OP/CELO source links; +114 / -59 lines]
diff --git OP/op-e2e/actions/l2_verifier.go CELO/op-e2e/actions/l2_verifier.go +index a0f513cfba38672c66969d00b1bf13c60977d72b..95fd95eaf0c0b6c75f5169ec27b149e82c41adcc 100644 +--- OP/op-e2e/actions/l2_verifier.go ++++ CELO/op-e2e/actions/l2_verifier.go +@@ -3,6 +3,7 @@ + import ( + "context" + "errors" ++ "fmt" + "io" +  + "github.com/ethereum/go-ethereum/common" +@@ -22,6 +23,7 @@ "github.com/ethereum-optimism/optimism/op-node/rollup/finality" + "github.com/ethereum-optimism/optimism/op-node/rollup/sync" + "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/eth" ++ "github.com/ethereum-optimism/optimism/op-service/safego" + "github.com/ethereum-optimism/optimism/op-service/sources" + "github.com/ethereum-optimism/optimism/op-service/testutils" + ) +@@ -36,6 +38,8 @@ engine.Engine + L2BlockRefByNumber(ctx context.Context, num uint64) (eth.L2BlockRef, error) + } +  ++ synchronousEvents *rollup.SynchronousEvents ++ + syncDeriver *driver.SyncDeriver +  + // L2 rollup +@@ -43,10 +47,9 @@ engine *engine.EngineController + derivation *derive.DerivationPipeline + clSync *clsync.CLSync +  +- attributesHandler driver.AttributesHandler +- safeHeadListener rollup.SafeHeadListener +- finalizer driver.Finalizer +- syncCfg *sync.Config ++ safeHeadListener rollup.SafeHeadListener ++ finalizer driver.Finalizer ++ syncCfg *sync.Config +  + l1 derive.L1Fetcher + l1State *driver.L1State +@@ -59,6 +62,10 @@ + rpc *rpc.Server +  + failRPC error // mock error ++ ++ // The L2Verifier actor is embedded in the L2Sequencer actor, ++ // but must not be copied for the deriver-functionality to modify the same state. ++ _ safego.NoCopy + } +  + type L2API interface { +@@ -76,47 +83,78 @@ node.SafeDBReader + } +  + func NewL2Verifier(t Testing, log log.Logger, l1 derive.L1Fetcher, blobsSrc derive.L1BlobsFetcher, plasmaSrc driver.PlasmaIface, eng L2API, cfg *rollup.Config, syncCfg *sync.Config, safeHeadListener safeDB) *L2Verifier { ++ ctx, cancel := context.WithCancel(context.Background()) ++ t.Cleanup(cancel) ++ ++ rootDeriver := &rollup.SynchronousDerivers{} ++ synchronousEvents := rollup.NewSynchronousEvents(log, ctx, rootDeriver) ++ + metrics := &testutils.TestDerivationMetrics{} +- engine := engine.NewEngineController(eng, log, metrics, cfg, syncCfg.SyncMode) ++ ec := engine.NewEngineController(eng, log, metrics, cfg, syncCfg.SyncMode, synchronousEvents) ++ engineResetDeriver := engine.NewEngineResetDeriver(ctx, log, cfg, l1, eng, syncCfg, synchronousEvents) +  +- clSync := clsync.NewCLSync(log, cfg, metrics, engine) ++ clSync := clsync.NewCLSync(log, cfg, metrics, synchronousEvents) +  + var finalizer driver.Finalizer + if cfg.PlasmaEnabled() { +- finalizer = finality.NewPlasmaFinalizer(log, cfg, l1, engine, plasmaSrc) ++ finalizer = finality.NewPlasmaFinalizer(ctx, log, cfg, l1, synchronousEvents, plasmaSrc) + } else { +- finalizer = finality.NewFinalizer(log, cfg, l1, engine) ++ finalizer = finality.NewFinalizer(ctx, log, cfg, l1, synchronousEvents) + } +  +- attributesHandler := attributes.NewAttributesHandler(log, cfg, engine, eng) ++ attributesHandler := attributes.NewAttributesHandler(log, cfg, ctx, eng, synchronousEvents) +  + pipeline := derive.NewDerivationPipeline(log, cfg, l1, blobsSrc, plasmaSrc, eng, metrics) ++ pipelineDeriver := derive.NewPipelineDeriver(ctx, pipeline, synchronousEvents) ++ ++ syncDeriver := &driver.SyncDeriver{ ++ Derivation: pipeline, ++ Finalizer: finalizer, ++ SafeHeadNotifs: safeHeadListener, ++ CLSync: clSync, ++ 
Engine: ec, ++ SyncCfg: syncCfg, ++ Config: cfg, ++ L1: l1, ++ L2: eng, ++ Emitter: synchronousEvents, ++ Log: log, ++ Ctx: ctx, ++ Drain: synchronousEvents.Drain, ++ } ++ ++ engDeriv := engine.NewEngDeriver(log, ctx, cfg, ec, synchronousEvents) +  + rollupNode := &L2Verifier{ + log: log, + eng: eng, +- engine: engine, ++ engine: ec, + clSync: clSync, + derivation: pipeline, + finalizer: finalizer, +- attributesHandler: attributesHandler, + safeHeadListener: safeHeadListener, + syncCfg: syncCfg, +- syncDeriver: &driver.SyncDeriver{ +- Derivation: pipeline, +- Finalizer: finalizer, +- AttributesHandler: attributesHandler, +- SafeHeadNotifs: safeHeadListener, +- CLSync: clSync, +- Engine: engine, +- }, +- l1: l1, +- l1State: driver.NewL1State(log, metrics), +- l2PipelineIdle: true, +- l2Building: false, +- rollupCfg: cfg, +- rpc: rpc.NewServer(), ++ syncDeriver: syncDeriver, ++ l1: l1, ++ l1State: driver.NewL1State(log, metrics), ++ l2PipelineIdle: true, ++ l2Building: false, ++ rollupCfg: cfg, ++ rpc: rpc.NewServer(), ++ synchronousEvents: synchronousEvents, ++ } ++ ++ *rootDeriver = rollup.SynchronousDerivers{ ++ syncDeriver, ++ engineResetDeriver, ++ engDeriv, ++ rollupNode, ++ clSync, ++ pipelineDeriver, ++ attributesHandler, ++ finalizer, + } ++ + t.Cleanup(rollupNode.rpc.Stop) +  + // setup RPC server for rollup node, hooked to the actor as backend +@@ -169,6 +207,10 @@ } +  + func (s *l2VerifierBackend) SequencerActive(ctx context.Context) (bool, error) { + return false, nil ++} ++ ++func (s *l2VerifierBackend) OverrideLeader(ctx context.Context) error { ++ return nil + } +  + func (s *l2VerifierBackend) OnUnsafeL2Payload(ctx context.Context, envelope *eth.ExecutionPayloadEnvelope) error { +@@ -250,63 +292,76 @@ func (s *L2Verifier) ActL1FinalizedSignal(t Testing) { + finalized, err := s.l1.L1BlockRefByLabel(t.Ctx(), eth.Finalized) + require.NoError(t, err) + s.l1State.HandleNewL1FinalizedBlock(finalized) +- s.finalizer.Finalize(t.Ctx(), finalized) ++ s.synchronousEvents.Emit(finality.FinalizeL1Event{FinalizedL1: finalized}) ++} ++ ++func (s *L2Verifier) OnEvent(ev rollup.Event) { ++ switch x := ev.(type) { ++ case rollup.L1TemporaryErrorEvent: ++ s.log.Warn("L1 temporary error", "err", x.Err) ++ case rollup.EngineTemporaryErrorEvent: ++ s.log.Warn("Engine temporary error", "err", x.Err) ++ if errors.Is(x.Err, sync.WrongChainErr) { // action-tests don't back off on temporary errors. Avoid a bad genesis setup from looping. 
++ panic(fmt.Errorf("genesis setup issue: %w", x.Err)) ++ } ++ case rollup.ResetEvent: ++ s.log.Warn("Derivation pipeline is being reset", "err", x.Err) ++ case rollup.CriticalErrorEvent: ++ panic(fmt.Errorf("derivation failed critically: %w", x.Err)) ++ case derive.DeriverIdleEvent: ++ s.l2PipelineIdle = true ++ } + } +  +-// syncStep represents the Driver.syncStep +-func (s *L2Verifier) syncStep(ctx context.Context) error { +- return s.syncDeriver.SyncStep(ctx) ++func (s *L2Verifier) ActL2EventsUntilPending(t Testing, num uint64) { ++ s.ActL2EventsUntil(t, func(ev rollup.Event) bool { ++ x, ok := ev.(engine.PendingSafeUpdateEvent) ++ return ok && x.PendingSafe.Number == num ++ }, 1000, false) + } +  +-// ActL2PipelineStep runs one iteration of the L2 derivation pipeline +-func (s *L2Verifier) ActL2PipelineStep(t Testing) { ++func (s *L2Verifier) ActL2EventsUntil(t Testing, fn func(ev rollup.Event) bool, max int, excl bool) { ++ t.Helper() + if s.l2Building { + t.InvalidAction("cannot derive new data while building L2 block") + return + } +- +- err := s.syncStep(t.Ctx()) +- if err == io.EOF || (err != nil && errors.Is(err, derive.EngineELSyncing)) { +- s.l2PipelineIdle = true +- return +- } else if err != nil && errors.Is(err, derive.NotEnoughData) { +- return +- } else if err != nil && errors.Is(err, derive.ErrReset) { +- s.log.Warn("Derivation pipeline is reset", "err", err) +- s.derivation.Reset() +- if err := engine.ResetEngine(t.Ctx(), s.log, s.rollupCfg, s.engine, s.l1, s.eng, s.syncCfg, s.safeHeadListener); err != nil { +- s.log.Error("Derivation pipeline not ready, failed to reset engine", "err", err) +- // Derivation-pipeline will return a new ResetError until we confirm the engine has been successfully reset. ++ for i := 0; i < max; i++ { ++ err := s.synchronousEvents.DrainUntil(fn, excl) ++ if err == nil { + return + } +- s.derivation.ConfirmEngineReset() +- return +- } else if err != nil && errors.Is(err, derive.ErrTemporary) { +- s.log.Warn("Derivation process temporary error", "err", err) +- if errors.Is(err, sync.WrongChainErr) { // action-tests don't back off on temporary errors. Avoid a bad genesis setup from looping. +- t.Fatalf("genesis setup issue: %v", err) ++ if err == io.EOF { ++ s.synchronousEvents.Emit(driver.StepEvent{}) + } +- return +- } else if err != nil && errors.Is(err, derive.ErrCritical) { +- t.Fatalf("derivation failed critically: %v", err) +- } else if err != nil { +- t.Fatalf("derivation failed: %v", err) +- } else { +- return + } ++ t.Fatalf("event condition did not hit, ran maximum number of steps: %d", max) + } +  + func (s *L2Verifier) ActL2PipelineFull(t Testing) { + s.l2PipelineIdle = false ++ i := 0 + for !s.l2PipelineIdle { +- s.ActL2PipelineStep(t) ++ i += 1 ++ // Some tests do generate a lot of derivation steps ++ // (e.g. thousand blocks span-batch, or deep reorgs). ++ // Hence we set the sanity limit to something really high. ++ if i > 10_000 { ++ t.Fatalf("ActL2PipelineFull running for too long. 
Is a deriver looping?") ++ } ++ if s.l2Building { ++ t.InvalidAction("cannot derive new data while building L2 block") ++ return ++ } ++ s.syncDeriver.Emitter.Emit(driver.StepEvent{}) ++ require.NoError(t, s.syncDeriver.Drain(), "complete all event processing triggered by deriver step") + } + } +  + // ActL2UnsafeGossipReceive creates an action that can receive an unsafe execution payload, like gossipsub + func (s *L2Verifier) ActL2UnsafeGossipReceive(payload *eth.ExecutionPayloadEnvelope) Action { + return func(t Testing) { +- s.clSync.AddUnsafePayload(payload) ++ s.synchronousEvents.Emit(clsync.ReceivedUnsafePayloadEvent{Envelope: payload}) + } + } +
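The largest rewrite in this section: the verifier's hand-rolled syncStep error handling is replaced by an event-driven loop. Derivers (sync, engine, clsync, finalizer, the verifier itself) register on a synchronous event queue; stepping means emitting driver.StepEvent and draining the queue until a derive.DeriverIdleEvent flips l2PipelineIdle. A self-contained toy model of that emit/drain shape (all types here are stand-ins, not the real rollup events):

package main

import "fmt"

// Toy model of the synchronous-events pattern: Emit queues an event, Drain
// dispatches to every registered deriver until the queue is empty.
type event interface{}

type queue struct {
	pending  []event
	derivers []func(event)
}

func (q *queue) Emit(ev event) { q.pending = append(q.pending, ev) }

func (q *queue) Drain() {
	for len(q.pending) > 0 {
		ev := q.pending[0]
		q.pending = q.pending[1:]
		for _, d := range q.derivers {
			d(ev) // derivers may Emit follow-up events while draining
		}
	}
}

type stepEvent struct{} // stand-in for driver.StepEvent
type idleEvent struct{} // stand-in for derive.DeriverIdleEvent

func main() {
	q := &queue{}
	idle, steps := false, 0
	q.derivers = append(q.derivers, func(ev event) {
		switch ev.(type) {
		case stepEvent:
			if steps++; steps >= 3 { // pretend derivation finishes after 3 steps
				q.Emit(idleEvent{})
			}
		case idleEvent:
			idle = true // mirrors OnEvent setting l2PipelineIdle above
		}
	})
	for !idle { // mirrors ActL2PipelineFull's loop (sans the 10k sanity cap)
		q.Emit(stepEvent{})
		q.Drain()
	}
	fmt.Println("derivation idle after", steps, "steps")
}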
+ [file header: op-e2e/actions/plasma_test.go; OP/CELO source links; +17 / -9 lines]
diff --git OP/op-e2e/actions/plasma_test.go CELO/op-e2e/actions/plasma_test.go +index 4307cab1867790760097ea91ba746331c8043b95..d1a5606d24fb82d1d1db6e540a79e42b6039c8a1 100644 +--- OP/op-e2e/actions/plasma_test.go ++++ CELO/op-e2e/actions/plasma_test.go +@@ -5,21 +5,24 @@ "math/big" + "math/rand" + "testing" +  ++ "github.com/stretchr/testify/require" ++ ++ "github.com/ethereum/go-ethereum/accounts/abi/bind" ++ "github.com/ethereum/go-ethereum/common" ++ "github.com/ethereum/go-ethereum/core/types" ++ "github.com/ethereum/go-ethereum/log" ++ + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils" + "github.com/ethereum-optimism/optimism/op-node/node/safedb" ++ "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-node/rollup/sync" + plasma "github.com/ethereum-optimism/optimism/op-plasma" + "github.com/ethereum-optimism/optimism/op-plasma/bindings" + "github.com/ethereum-optimism/optimism/op-service/sources" + "github.com/ethereum-optimism/optimism/op-service/testlog" +- "github.com/ethereum/go-ethereum/accounts/abi/bind" +- "github.com/ethereum/go-ethereum/common" +- "github.com/ethereum/go-ethereum/core/types" +- "github.com/ethereum/go-ethereum/log" +- "github.com/stretchr/testify/require" + ) +  +-// Devnet allocs should have plasma mode enabled for these tests to pass ++// Devnet allocs should have alt-da mode enabled for these tests to pass +  + // L2PlasmaDA is a test harness for manipulating plasma DA state. + type L2PlasmaDA struct { +@@ -497,9 +500,13 @@ require.NoError(t, err) +  + // advance the pipeline until it errors out as it is still stuck + // on deriving the first commitment +- for i := 0; i < 3; i++ { +- a.sequencer.ActL2PipelineStep(t) +- } ++ a.sequencer.ActL2EventsUntil(t, func(ev rollup.Event) bool { ++ x, ok := ev.(rollup.EngineTemporaryErrorEvent) ++ if ok { ++ require.ErrorContains(t, x.Err, "failed to fetch input data") ++ } ++ return ok ++ }, 100, false) +  + // keep track of the second commitment + comm2 := a.lastComm +@@ -618,6 +625,7 @@ + // advance derivation and finalize plasma via the L1 signal + a.sequencer.ActL2PipelineFull(t) + a.ActL1Finalized(t) ++ a.sequencer.ActL2PipelineFull(t) // finality event needs to be processed +  + // given 12s l1 time and 1s l2 time, l2 should be 12 * 3 = 36 blocks finalized + require.Equal(t, uint64(36), a.sequencer.SyncStatus().FinalizedL2.Number)
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+1
+
-1
+ +
+ +
+
+
diff --git OP/op-e2e/actions/reorg_test.go CELO/op-e2e/actions/reorg_test.go +index 1b4edbaf9cfeef648cf75533260b1159cb2dcd70..27cce2aa1086e86cb18cb56236d521398154272c 100644 +--- OP/op-e2e/actions/reorg_test.go ++++ CELO/op-e2e/actions/reorg_test.go +@@ -767,7 +767,7 @@ + // give the unsafe block to the verifier, and see if it reorgs because of any unsafe inputs + head, err := altSeqEngCl.PayloadByLabel(t.Ctx(), eth.Unsafe) + require.NoError(t, err) +- verifier.ActL2UnsafeGossipReceive(head) ++ verifier.ActL2UnsafeGossipReceive(head)(t) +  + // make sure verifier has processed everything + verifier.ActL2PipelineFull(t)
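The one-line reorg_test fix corrects a dropped invocation: ActL2UnsafeGossipReceive does not act by itself but returns an Action (a func(t Testing), as visible in the L2Verifier diff further up), so the returned closure must still be applied to t. A minimal sketch of the pattern:

    // Building the action has no effect on its own...
    act := verifier.ActL2UnsafeGossipReceive(head)
    // ...the payload is only handed to the verifier once it runs:
    act(t)
    // or inline, as in the fixed test:
    verifier.ActL2UnsafeGossipReceive(head)(t)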
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+2
+
-2
+ +
+ +
+
+
diff --git OP/op-e2e/actions/span_batch_test.go CELO/op-e2e/actions/span_batch_test.go +index 6ccb76a461bd6d9eab10aaf4ac2a3187f8239dbb..39dd3f817a7aef9febeb32da30d217f7eb898a34 100644 +--- OP/op-e2e/actions/span_batch_test.go ++++ CELO/op-e2e/actions/span_batch_test.go +@@ -524,7 +524,7 @@ signer := types.LatestSigner(sd.L2Cfg.Config) + data := make([]byte, rand.Intn(100)) + _, err := crand.Read(data[:]) // fill with random bytes + require.NoError(t, err) +- gas, err := core.IntrinsicGas(data, nil, false, true, true, false) ++ gas, err := core.IntrinsicGas(data, nil, false, true, true, false, nil) + require.NoError(t, err) + baseFee := seqEngine.l2Chain.CurrentBlock().BaseFee + nonce, err := cl.PendingNonceAt(t.Ctx(), addrs[userIdx]) +@@ -663,7 +663,7 @@ signer := types.LatestSigner(sdDeltaActivated.L2Cfg.Config) + data := make([]byte, rand.Intn(100)) + _, err := crand.Read(data[:]) // fill with random bytes + require.NoError(t, err) +- gas, err := core.IntrinsicGas(data, nil, false, true, true, false) ++ gas, err := core.IntrinsicGas(data, nil, false, true, true, false, nil) + require.NoError(t, err) + baseFee := seqEngine.l2Chain.CurrentBlock().BaseFee + nonce, err := seqEngCl.PendingNonceAt(t.Ctx(), addrs[userIdx])
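core.IntrinsicGas gains a trailing argument in the Celo fork of go-ethereum; every caller in these tests now passes nil for it. Upstream, the signature ends at the EIP-3860 flag, so the extra parameter is Celo-specific; it plausibly relates to fee-currency accounting, though that is an assumption here since the diffs only ever show nil being passed. The updated call shape used throughout:

    // Celo fork: one extra trailing argument compared to upstream go-ethereum.
    // The tests pass nil, keeping the default behaviour.
    gas, err := core.IntrinsicGas(data, nil, false, true, true, false, nil)
    require.NoError(t, err)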
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+47
+
-38
+ +
+ +
+
+
diff --git OP/op-e2e/actions/sync_test.go CELO/op-e2e/actions/sync_test.go +index e7521bdd8c94b0b800ca469b661074d03385f112..3cd110cf59d9ad9a25ffce84742c6220a7993210 100644 +--- OP/op-e2e/actions/sync_test.go ++++ CELO/op-e2e/actions/sync_test.go +@@ -7,13 +7,7 @@ "math/rand" + "testing" + "time" +  +- "github.com/ethereum-optimism/optimism/op-e2e/e2eutils" +- "github.com/ethereum-optimism/optimism/op-node/rollup/derive" +- "github.com/ethereum-optimism/optimism/op-node/rollup/sync" +- "github.com/ethereum-optimism/optimism/op-service/eth" +- "github.com/ethereum-optimism/optimism/op-service/sources" +- "github.com/ethereum-optimism/optimism/op-service/testlog" +- "github.com/ethereum-optimism/optimism/op-service/testutils" ++ "github.com/stretchr/testify/require" +  + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/beacon/engine" +@@ -22,7 +16,16 @@ "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" +- "github.com/stretchr/testify/require" ++ ++ "github.com/ethereum-optimism/optimism/op-e2e/e2eutils" ++ "github.com/ethereum-optimism/optimism/op-node/rollup" ++ "github.com/ethereum-optimism/optimism/op-node/rollup/derive" ++ engine2 "github.com/ethereum-optimism/optimism/op-node/rollup/engine" ++ "github.com/ethereum-optimism/optimism/op-node/rollup/sync" ++ "github.com/ethereum-optimism/optimism/op-service/eth" ++ "github.com/ethereum-optimism/optimism/op-service/sources" ++ "github.com/ethereum-optimism/optimism/op-service/testlog" ++ "github.com/ethereum-optimism/optimism/op-service/testutils" + ) +  + func newSpanChannelOut(t StatefulTesting, e e2eutils.SetupData) derive.ChannelOut { +@@ -262,10 +265,8 @@ // before stepping, make sure backupUnsafe is empty + require.Equal(t, eth.L2BlockRef{}, sequencer.L2BackupUnsafe()) + // pendingSafe must not be advanced as well + require.Equal(t, sequencer.L2PendingSafe().Number, uint64(0)) +- // Preheat engine queue and consume A1 from batch +- for i := 0; i < 4; i++ { +- sequencer.ActL2PipelineStep(t) +- } ++ // Run until we consume A1 from batch ++ sequencer.ActL2EventsUntilPending(t, 1) + // A1 is valid original block so pendingSafe is advanced + require.Equal(t, sequencer.L2PendingSafe().Number, uint64(1)) + require.Equal(t, sequencer.L2Unsafe().Number, uint64(5)) +@@ -273,8 +274,8 @@ // backupUnsafe is still empty + require.Equal(t, eth.L2BlockRef{}, sequencer.L2BackupUnsafe()) +  + // Process B2 +- sequencer.ActL2PipelineStep(t) +- sequencer.ActL2PipelineStep(t) ++ // Run until we consume B2 from batch ++ sequencer.ActL2EventsUntilPending(t, 2) + // B2 is valid different block, triggering unsafe chain reorg + require.Equal(t, sequencer.L2Unsafe().Number, uint64(2)) + // B2 is valid different block, triggering unsafe block backup +@@ -425,10 +426,8 @@ // before stepping, make sure backupUnsafe is empty + require.Equal(t, eth.L2BlockRef{}, sequencer.L2BackupUnsafe()) + // pendingSafe must not be advanced as well + require.Equal(t, sequencer.L2PendingSafe().Number, uint64(0)) +- // Preheat engine queue and consume A1 from batch +- for i := 0; i < 4; i++ { +- sequencer.ActL2PipelineStep(t) +- } ++ // Run till we consumed A1 from batch ++ sequencer.ActL2EventsUntilPending(t, 1) + // A1 is valid original block so pendingSafe is advanced + require.Equal(t, sequencer.L2PendingSafe().Number, uint64(1)) + require.Equal(t, sequencer.L2Unsafe().Number, uint64(5)) +@@ -436,8 +435,7 @@ // backupUnsafe is still empty + 
require.Equal(t, eth.L2BlockRef{}, sequencer.L2BackupUnsafe()) +  + // Process B2 +- sequencer.ActL2PipelineStep(t) +- sequencer.ActL2PipelineStep(t) ++ sequencer.ActL2EventsUntilPending(t, 2) + // B2 is valid different block, triggering unsafe chain reorg + require.Equal(t, sequencer.L2Unsafe().Number, uint64(2)) + // B2 is valid different block, triggering unsafe block backup +@@ -447,14 +445,14 @@ require.Equal(t, sequencer.L2PendingSafe().Number, uint64(2)) +  + // B3 is invalid block + // NextAttributes is called +- sequencer.ActL2PipelineStep(t) +- // forceNextSafeAttributes is called +- sequencer.ActL2PipelineStep(t) ++ sequencer.ActL2EventsUntil(t, func(ev rollup.Event) bool { ++ _, ok := ev.(engine2.ProcessAttributesEvent) ++ return ok ++ }, 100, true) + // mock forkChoiceUpdate error while restoring previous unsafe chain using backupUnsafe. + seqEng.ActL2RPCFail(t, eth.InputError{Inner: errors.New("mock L2 RPC error"), Code: eth.InvalidForkchoiceState}) +  +- // TryBackupUnsafeReorg is called +- sequencer.ActL2PipelineStep(t) ++ // The backup-unsafe rewind is applied +  + // try to process invalid leftovers: B4, B5 + sequencer.ActL2PipelineFull(t) +@@ -565,9 +563,7 @@ require.Equal(t, eth.L2BlockRef{}, sequencer.L2BackupUnsafe()) + // pendingSafe must not be advanced as well + require.Equal(t, sequencer.L2PendingSafe().Number, uint64(0)) + // Preheat engine queue and consume A1 from batch +- for i := 0; i < 4; i++ { +- sequencer.ActL2PipelineStep(t) +- } ++ sequencer.ActL2EventsUntilPending(t, 1) + // A1 is valid original block so pendingSafe is advanced + require.Equal(t, sequencer.L2PendingSafe().Number, uint64(1)) + require.Equal(t, sequencer.L2Unsafe().Number, uint64(5)) +@@ -575,8 +571,7 @@ // backupUnsafe is still empty + require.Equal(t, eth.L2BlockRef{}, sequencer.L2BackupUnsafe()) +  + // Process B2 +- sequencer.ActL2PipelineStep(t) +- sequencer.ActL2PipelineStep(t) ++ sequencer.ActL2EventsUntilPending(t, 2) + // B2 is valid different block, triggering unsafe chain reorg + require.Equal(t, sequencer.L2Unsafe().Number, uint64(2)) + // B2 is valid different block, triggering unsafe block backup +@@ -585,17 +580,21 @@ // B2 is valid different block, so pendingSafe is advanced + require.Equal(t, sequencer.L2PendingSafe().Number, uint64(2)) +  + // B3 is invalid block +- // NextAttributes is called +- sequencer.ActL2PipelineStep(t) +- // forceNextSafeAttributes is called +- sequencer.ActL2PipelineStep(t) ++ // wait till attributes processing (excl.) before mocking errors ++ sequencer.ActL2EventsUntil(t, func(ev rollup.Event) bool { ++ _, ok := ev.(engine2.ProcessAttributesEvent) ++ return ok ++ }, 100, true) +  + serverErrCnt := 2 + for i := 0; i < serverErrCnt; i++ { + // mock forkChoiceUpdate failure while restoring previous unsafe chain using backupUnsafe. 
+ seqEng.ActL2RPCFail(t, engine.GenericServerError) + // TryBackupUnsafeReorg is called - forkChoiceUpdate returns GenericServerError so retry +- sequencer.ActL2PipelineStep(t) ++ sequencer.ActL2EventsUntil(t, func(ev rollup.Event) bool { ++ _, ok := ev.(rollup.EngineTemporaryErrorEvent) ++ return ok ++ }, 100, false) + // backupUnsafeHead not emptied yet + require.Equal(t, targetUnsafeHeadHash, sequencer.L2BackupUnsafe().Hash) + } +@@ -896,7 +895,7 @@ // Create valid TX + aliceNonce, err := seqEng.EthClient().PendingNonceAt(t.Ctx(), dp.Addresses.Alice) + require.NoError(t, err) + data := make([]byte, rand.Intn(100)) +- gas, err := core.IntrinsicGas(data, nil, false, true, true, false) ++ gas, err := core.IntrinsicGas(data, nil, false, true, true, false, nil) + require.NoError(t, err) + baseFee := seqEng.l2Chain.CurrentBlock().BaseFee + tx := types.MustSignNewTx(dp.Secrets.Alice, signer, &types.DynamicFeeTx{ +@@ -980,7 +979,12 @@ // Start verifier safe sync + verifier.ActL1HeadSignal(t) + verifier.l2PipelineIdle = false + for !verifier.l2PipelineIdle { +- verifier.ActL2PipelineStep(t) ++ // wait for next pending block ++ verifier.ActL2EventsUntil(t, func(ev rollup.Event) bool { ++ _, pending := ev.(engine2.PendingSafeUpdateEvent) ++ _, idle := ev.(derive.DeriverIdleEvent) ++ return pending || idle ++ }, 1000, false) + if verifier.L2PendingSafe().Number < targetHeadNumber { + // If the span batch is not fully processed, the safe head must not advance. + require.Equal(t, verifier.L2Safe().Number, uint64(0)) +@@ -1027,7 +1031,12 @@ // Start verifier safe sync + verifier.ActL1HeadSignal(t) + verifier.l2PipelineIdle = false + for !verifier.l2PipelineIdle { +- verifier.ActL2PipelineStep(t) ++ // wait for next pending block ++ verifier.ActL2EventsUntil(t, func(ev rollup.Event) bool { ++ _, pending := ev.(engine2.PendingSafeUpdateEvent) ++ _, idle := ev.(derive.DeriverIdleEvent) ++ return pending || idle ++ }, 1000, false) + if verifier.L2PendingSafe().Number < targetHeadNumber { + // If the span batch is not fully processed, the safe head must not advance. + require.Equal(t, verifier.L2Safe().Number, uint64(0))
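The sync_test changes exercise both modes of ActL2EventsUntil. Judging from these call sites, excl=false processes the matching event before returning, while excl=true stops just short of it; the latter is what lets the test mock a forkChoiceUpdate failure immediately before ProcessAttributesEvent is handled. A condensed sketch of the exclusive wait (names from the diff, harness assumed):

    // Stop *before* the attributes-processing event is handled (excl = true)...
    sequencer.ActL2EventsUntil(t, func(ev rollup.Event) bool {
        _, ok := ev.(engine2.ProcessAttributesEvent)
        return ok
    }, 100, true)
    // ...so the mocked RPC failure is the first thing the engine sees:
    seqEng.ActL2RPCFail(t, engine.GenericServerError)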
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+1
+
-1
+ +
+ +
+
+
diff --git OP/op-e2e/brotli_batcher_test.go CELO/op-e2e/brotli_batcher_test.go +index 97211c471ba05df73b58aa7ea504b30dd3e11b0b..1ebb27efb5ad775b24cf3f566827e6e158df1121 100644 +--- OP/op-e2e/brotli_batcher_test.go ++++ CELO/op-e2e/brotli_batcher_test.go +@@ -85,7 +85,7 @@ receipt := SendL2Tx(t, cfg, l2Seq, ethPrivKey, func(opts *TxOpts) { + opts.Value = big.NewInt(1_000_000_000) + opts.Nonce = 1 // Already have deposit + opts.ToAddr = &common.Address{0xff, 0xff} +- opts.Gas, err = core.IntrinsicGas(opts.Data, nil, false, true, true, false) ++ opts.Gas, err = core.IntrinsicGas(opts.Data, nil, false, true, true, false, nil) + require.NoError(t, err) + opts.VerifyOnClients(l2Verif) + })
+
+ + +
+ + +
+
+
+ + (new) + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+56
+
-0
+ +
+ +
+
+
diff --git OP/op-e2e/celo/run_all_tests.sh CELO/op-e2e/celo/run_all_tests.sh +new file mode 100755 +index 0000000000000000000000000000000000000000..272dea975368579a77069d98e2f9fa97261eaf72 +--- /dev/null ++++ CELO/op-e2e/celo/run_all_tests.sh +@@ -0,0 +1,56 @@ ++#!/bin/bash ++set -eo pipefail ++ ++SCRIPT_DIR=$(readlink -f "$(dirname "$0")") ++TEST_GLOB=$1 ++cd "$SCRIPT_DIR" || exit 1 ++source "$SCRIPT_DIR/shared.sh" ++ ++## Start geth ++cd "$SCRIPT_DIR/../.." || exit 1 ++trap 'cd "$SCRIPT_DIR/../.." && make devnet-down' EXIT # kill bg job at exit ++make devnet-up ++ ++# Wait for geth to be ready ++for _ in {1..10} ++do ++ if cast block &> /dev/null ++ then ++ break ++ fi ++ sleep 0.2 ++done ++ ++## Run tests ++echo Geth ready, start tests ++failures=0 ++tests=0 ++cd "$SCRIPT_DIR" || exit 1 ++for f in test_*"$TEST_GLOB"* ++do ++ echo -e "\nRun $f" ++ if "./$f" ++ then ++ tput setaf 2 || true ++ echo "PASS $f" ++ else ++ tput setaf 1 || true ++ echo "FAIL $f ❌" ++ ((failures++)) || true ++ fi ++ tput sgr0 || true ++ ((tests++)) || true ++done ++ ++## Final summary ++echo ++if [[ $failures -eq 0 ]] ++then ++ tput setaf 2 || true ++ echo All tests succeeded! ++else ++ tput setaf 1 || true ++ echo $failures/$tests failed. ++fi ++tput sgr0 || true ++exit $failures
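Since the runner globs test_*"$TEST_GLOB"*, the optional argument filters which scripts run: ./run_all_tests.sh weth would execute only test_weth_bridge.sh, while no argument runs every test_* script. The exit status is the number of failed tests, so zero still means everything passed.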
+
+ + +
+ + +
+
+
+ + (new) + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+11
+
-0
+ +
+ +
+
+
diff --git OP/op-e2e/celo/shared.sh CELO/op-e2e/celo/shared.sh +new file mode 100644 +index 0000000000000000000000000000000000000000..5c09d3c03a4dfb41f197c8f369258963bd4d3519 +--- /dev/null ++++ CELO/op-e2e/celo/shared.sh +@@ -0,0 +1,11 @@ ++#!/bin/bash ++#shellcheck disable=SC2034 # unused vars make sense in a shared file ++ ++export ETH_RPC_URL=http://127.0.0.1:9545 ++export ETH_RPC_URL_L1=http://127.0.0.1:8545 ++ ++export ACC_PRIVKEY=ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 ++export ACC_ADDR=$(cast wallet address $ACC_PRIVKEY) ++export REGISTRY_ADDR=0x000000000000000000000000000000000000ce10 ++export TOKEN_ADDR=0x471ece3750da237f93b8e339c536989b8978a438 ++export FEE_CURRENCY_DIRECTORY_ADDR=0x71FFbD48E34bdD5a87c3c683E866dc63b8B2a685
+
+ + +
+ + +
+
+
+ + (new) + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+12
+
-0
+ +
+ +
+
+
diff --git OP/op-e2e/celo/test_token_duality.sh CELO/op-e2e/celo/test_token_duality.sh +new file mode 100755 +index 0000000000000000000000000000000000000000..355afef8c7ca71f39454d1e452c60d2046b6ebf8 +--- /dev/null ++++ CELO/op-e2e/celo/test_token_duality.sh +@@ -0,0 +1,12 @@ ++#!/bin/bash ++#shellcheck disable=SC2086 ++set -eo pipefail ++ ++source shared.sh ++ ++# Send token and check balance ++balance_before=$(cast balance 0x000000000000000000000000000000000000dEaD) ++cast send --private-key $ACC_PRIVKEY $TOKEN_ADDR 'transfer(address to, uint256 value) returns (bool)' 0x000000000000000000000000000000000000dEaD 100 ++balance_after=$(cast balance 0x000000000000000000000000000000000000dEaD) ++echo "Balance change: $balance_before -> $balance_after" ++[[ $((balance_before + 100)) -eq $balance_after ]] || (echo "Balance did not change as expected"; exit 1)
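This test exercises Celo's token duality: the CELO ERC-20 contract at TOKEN_ADDR and the native account balance are two views of the same asset, so a transfer() through the token contract must be reflected in cast balance (which reads the native balance) of the recipient. That is exactly what the final assertion checks: the dead address's native balance grows by the 100 units sent via the token contract.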
+
+ + +
+ + +
+
+
+ + (new) + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+42
+
-0
+ +
+ +
+
+
diff --git OP/op-e2e/celo/test_weth_bridge.sh CELO/op-e2e/celo/test_weth_bridge.sh +new file mode 100755 +index 0000000000000000000000000000000000000000..a25195d416b313be687deef5033a6d019b9d0dee +--- /dev/null ++++ CELO/op-e2e/celo/test_weth_bridge.sh +@@ -0,0 +1,42 @@ ++#!/bin/bash ++#shellcheck disable=SC2086 ++set -eo pipefail ++set -x ++ ++source shared.sh ++SCRIPT_DIR=$(readlink -f "$(dirname "$0")") ++CONTRACTS_DIR=$SCRIPT_DIR/../../packages/contracts-bedrock ++ ++# Deploy WETH ++L1_WETH=$( ++ ETH_RPC_URL=$ETH_RPC_URL_L1 forge create --private-key=$ACC_PRIVKEY --root $CONTRACTS_DIR $CONTRACTS_DIR/src/dispute/weth/WETH98.sol:WETH98 --json | jq .deployedTo -r ++) ++ ++# create ERC20 token on L2 ++L2_TOKEN=$( ++ cast send --private-key $ACC_PRIVKEY 0x4200000000000000000000000000000000000012 "createOptimismMintableERC20(address,string,string)" $L1_WETH "Wrapped Ether" "WETH" --json \ ++ | jq -r '.logs[0].topics[2]' | cast parse-bytes32-address ++) ++ ++# Wrap some ETH ++ETH_RPC_URL=$ETH_RPC_URL_L1 cast send --private-key $ACC_PRIVKEY $L1_WETH --value 1ether ++# Approve transfer to bridge ++L1_BRIDGE_ADDR=$(cast call 0x4200000000000000000000000000000000000010 'otherBridge() returns (address)') ++ETH_RPC_URL=$ETH_RPC_URL_L1 cast send --private-key $ACC_PRIVKEY $L1_WETH 'approve(address, uint256) returns (bool)' $L1_BRIDGE_ADDR 1ether ++# Bridge to L2 ++ETH_RPC_URL=$ETH_RPC_URL_L1 cast send --private-key $ACC_PRIVKEY $L1_BRIDGE_ADDR 'bridgeERC20(address _localToken, address _remoteToken, uint256 _amount, uint32 _minGasLimit, bytes calldata _extraData)' $L1_WETH $L2_TOKEN 0.3ether 50000 0x --gas-limit 6000000 ++ ++# Setup up oracle and FeeCurrencyDirectory ++ORACLE=$(forge create --private-key=$ACC_PRIVKEY --root $CONTRACTS_DIR $CONTRACTS_DIR/src/celo/testing/MockSortedOracles.sol:MockSortedOracles --json | jq .deployedTo -r) ++cast send --private-key $ACC_PRIVKEY $ORACLE 'setMedianRate(address, uint256)' $L2_TOKEN 100000000000000000 ++cast send --private-key $ACC_PRIVKEY $FEE_CURRENCY_DIRECTORY_ADDR 'setCurrencyConfig(address, address, uint256)' $L2_TOKEN $ORACLE 60000 ++ ++# Check balance from bridging (we intentionally don't do this right after bridging, since it takes a bit) ++L2_BALANCE=$(cast call $L2_TOKEN 'balanceOf(address) returns (uint256)' $ACC_ADDR) ++echo L2 balance: $L2_BALANCE ++[[ $(echo $L2_BALANCE | awk '{print $1}') -gt 0 ]] || (echo "Bridging to L2 failed!"; exit 1) ++ ++# Send fee currency tx! ++#TXHASH=$(~/op-geth/e2e_test/js-tests/send_tx.mjs 901 $ACC_PRIVKEY $L2_TOKEN) ++#cast receipt $TXHASH ++echo You can use privkey $ACC_PRIVKEY to pay for txs with $L2_TOKEN, now.
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+7
+
-6
+ +
+ +
+
+
diff --git OP/op-e2e/config/init.go CELO/op-e2e/config/init.go +index d53fd0697124144db2ca775e64958acf7044f49d..c5a38ad5a6af46394a0793f4ec958750bb7fa14f 100644 +--- OP/op-e2e/config/init.go ++++ CELO/op-e2e/config/init.go +@@ -15,6 +15,7 @@ + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/log" +  ++ "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" + "github.com/ethereum-optimism/optimism/op-chain-ops/genesis" + "github.com/ethereum-optimism/optimism/op-e2e/external" + op_service "github.com/ethereum-optimism/optimism/op-service" +@@ -39,12 +40,12 @@ // foundry deploy script. These are globally exported to be used + // in end to end tests. +  + // L1Allocs represents the L1 genesis block state. +- L1Allocs *genesis.ForgeAllocs ++ L1Allocs *foundry.ForgeAllocs + // L1Deployments maps contract names to accounts in the L1 + // genesis block state. + L1Deployments *genesis.L1Deployments + // l2Allocs represents the L2 allocs, by hardfork/mode (e.g. delta, ecotone, interop, other) +- l2Allocs map[genesis.L2AllocsMode]*genesis.ForgeAllocs ++ l2Allocs map[genesis.L2AllocsMode]*foundry.ForgeAllocs + // DeployConfig represents the deploy config used by the system. + DeployConfig *genesis.DeployConfig + // ExternalL2Shim is the shim to use if external ethereum client testing is +@@ -107,14 +108,14 @@ if err := allExist(l1AllocsPath, l1DeploymentsPath, deployConfigPath); err != nil { + return + } +  +- L1Allocs, err = genesis.LoadForgeAllocs(l1AllocsPath) ++ L1Allocs, err = foundry.LoadForgeAllocs(l1AllocsPath) + if err != nil { + panic(err) + } +- l2Allocs = make(map[genesis.L2AllocsMode]*genesis.ForgeAllocs) ++ l2Allocs = make(map[genesis.L2AllocsMode]*foundry.ForgeAllocs) + mustL2Allocs := func(mode genesis.L2AllocsMode) { + name := "allocs-l2-" + string(mode) +- allocs, err := genesis.LoadForgeAllocs(filepath.Join(l2AllocsDir, name+".json")) ++ allocs, err := foundry.LoadForgeAllocs(filepath.Join(l2AllocsDir, name+".json")) + if err != nil { + panic(err) + } +@@ -153,7 +154,7 @@ } + } + } +  +-func L2Allocs(mode genesis.L2AllocsMode) *genesis.ForgeAllocs { ++func L2Allocs(mode genesis.L2AllocsMode) *foundry.ForgeAllocs { + allocs, ok := l2Allocs[mode] + if !ok { + panic(fmt.Errorf("unknown L2 allocs mode: %q", mode))
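ForgeAllocs and its loader moved out of the genesis package into the new op-chain-ops/foundry package; the change above is mechanical. A migration sketch for any other caller (import path as in the diff, surrounding variables assumed):

    import "github.com/ethereum-optimism/optimism/op-chain-ops/foundry"

    // before: genesis.LoadForgeAllocs(path) returning *genesis.ForgeAllocs
    // after:  foundry.LoadForgeAllocs(path) returning *foundry.ForgeAllocs
    allocs, err := foundry.LoadForgeAllocs(l1AllocsPath)
    if err != nil {
        panic(err)
    }
    L1Allocs = allocs // package-level *foundry.ForgeAllocs, as in the diff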
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+9
+
-9
+ +
+ +
+
+
diff --git OP/op-e2e/e2eutils/challenger/helper.go CELO/op-e2e/e2eutils/challenger/helper.go +index 95b963666d633f12565399be771ff5197381d6c0..5431efbd3992b375fa65a62bb29af01cd9b54448 100644 +--- OP/op-e2e/e2eutils/challenger/helper.go ++++ CELO/op-e2e/e2eutils/challenger/helper.go +@@ -102,22 +102,22 @@ + func applyCannonConfig(c *config.Config, t *testing.T, rollupCfg *rollup.Config, l2Genesis *core.Genesis) { + require := require.New(t) + root := FindMonorepoRoot(t) +- c.CannonBin = root + "cannon/bin/cannon" +- c.CannonServer = root + "op-program/bin/op-program" ++ c.Cannon.VmBin = root + "cannon/bin/cannon" ++ c.Cannon.Server = root + "op-program/bin/op-program" + c.CannonAbsolutePreState = root + "op-program/bin/prestate.json" +- c.CannonSnapshotFreq = 10_000_000 ++ c.Cannon.SnapshotFreq = 10_000_000 +  + genesisBytes, err := json.Marshal(l2Genesis) + require.NoError(err, "marshall l2 genesis config") + genesisFile := filepath.Join(c.Datadir, "l2-genesis.json") + require.NoError(os.WriteFile(genesisFile, genesisBytes, 0o644)) +- c.CannonL2GenesisPath = genesisFile ++ c.Cannon.L2GenesisPath = genesisFile +  + rollupBytes, err := json.Marshal(rollupCfg) + require.NoError(err, "marshall rollup config") + rollupFile := filepath.Join(c.Datadir, "rollup.json") + require.NoError(os.WriteFile(rollupFile, rollupBytes, 0o644)) +- c.CannonRollupConfigPath = rollupFile ++ c.Cannon.RollupConfigPath = rollupFile + } +  + func WithCannon(t *testing.T, rollupCfg *rollup.Config, l2Genesis *core.Genesis) Option { +@@ -177,12 +177,12 @@ } + require.NotEmpty(t, cfg.TxMgrConfig.PrivateKey, "Missing private key for TxMgrConfig") + require.NoError(t, cfg.Check(), "op-challenger config should be valid") +  +- if cfg.CannonBin != "" { +- _, err := os.Stat(cfg.CannonBin) ++ if cfg.Cannon.VmBin != "" { ++ _, err := os.Stat(cfg.Cannon.VmBin) + require.NoError(t, err, "cannon should be built. Make sure you've run make cannon-prestate") + } +- if cfg.CannonServer != "" { +- _, err := os.Stat(cfg.CannonServer) ++ if cfg.Cannon.Server != "" { ++ _, err := os.Stat(cfg.Cannon.Server) + require.NoError(t, err, "op-program should be built. Make sure you've run make cannon-prestate") + } + if cfg.CannonAbsolutePreState != "" {
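The op-challenger configuration regrouped its Cannon settings into a nested struct. Field mapping as shown in this diff (note that CannonAbsolutePreState stays a top-level field):

    c.Cannon.VmBin = root + "cannon/bin/cannon"              // was c.CannonBin
    c.Cannon.Server = root + "op-program/bin/op-program"     // was c.CannonServer
    c.Cannon.SnapshotFreq = 10_000_000                       // was c.CannonSnapshotFreq
    c.CannonAbsolutePreState = root + "op-program/bin/prestate.json" // unchanged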
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+5
+
-6
+ +
+ +
+
+
diff --git OP/op-e2e/e2eutils/disputegame/helper.go CELO/op-e2e/e2eutils/disputegame/helper.go +index 6920a6737c10d58e5c6c99815fadc67de96c2b42..0ed6a6a6db035b049f91e93064e75f8b60e76d58 100644 +--- OP/op-e2e/e2eutils/disputegame/helper.go ++++ CELO/op-e2e/e2eutils/disputegame/helper.go +@@ -124,15 +124,14 @@ func (h *FactoryHelper) PreimageHelper(ctx context.Context) *preimage.Helper { + opts := &bind.CallOpts{Context: ctx} + gameAddr, err := h.Factory.GameImpls(opts, cannonGameType) + h.Require.NoError(err) +- game, err := bindings.NewFaultDisputeGameCaller(gameAddr, h.Client) ++ caller := batching.NewMultiCaller(h.Client.Client(), batching.DefaultBatchSize) ++ game, err := contracts.NewFaultDisputeGameContract(ctx, metrics.NoopContractMetrics, gameAddr, caller) + h.Require.NoError(err) +- vmAddr, err := game.Vm(opts) ++ vm, err := game.Vm(ctx) + h.Require.NoError(err) +- vm, err := bindings.NewMIPSCaller(vmAddr, h.Client) ++ oracle, err := vm.Oracle(ctx) + h.Require.NoError(err) +- oracleAddr, err := vm.Oracle(opts) +- h.Require.NoError(err) +- return preimage.NewHelper(h.T, h.Opts, h.Client, oracleAddr) ++ return preimage.NewHelper(h.T, h.PrivKey, h.Client, oracle) + } +  + func NewGameCfg(opts ...GameOpt) *GameCfg {
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+1
+
-1
+ +
+ +
+
+
diff --git OP/op-e2e/e2eutils/disputegame/output_cannon_helper.go CELO/op-e2e/e2eutils/disputegame/output_cannon_helper.go +index 6b6e6ff4ff7606a20dcda640efdb84d339aa37f9..e2f72c4915d96caa88693803fae92fe4ffcbefde 100644 +--- OP/op-e2e/e2eutils/disputegame/output_cannon_helper.go ++++ CELO/op-e2e/e2eutils/disputegame/output_cannon_helper.go +@@ -62,7 +62,7 @@ rollupClient := g.System.RollupClient(l2Node) + prestateProvider := outputs.NewPrestateProvider(rollupClient, prestateBlock) + l1Head := g.GetL1Head(ctx) + accessor, err := outputs.NewOutputCannonTraceAccessor( +- logger, metrics.NoopMetrics, cfg, l2Client, prestateProvider, cfg.CannonAbsolutePreState, rollupClient, dir, l1Head, splitDepth, prestateBlock, poststateBlock) ++ logger, metrics.NoopMetrics, cfg.Cannon, l2Client, prestateProvider, cfg.CannonAbsolutePreState, rollupClient, dir, l1Head, splitDepth, prestateBlock, poststateBlock) + g.Require.NoError(err, "Failed to create output cannon trace accessor") + return NewOutputHonestHelper(g.T, g.Require, &g.OutputGameHelper, g.Game, accessor) + }
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+4
+
-9
+ +
+ +
+
+
diff --git OP/op-e2e/e2eutils/disputegame/output_game_helper.go CELO/op-e2e/e2eutils/disputegame/output_game_helper.go +index f57b92a4508a82402342ce9fbc30cc25df524be9..e7b9a35d10b5618544fa3f3e2846c1d16a4ed4ec 100644 +--- OP/op-e2e/e2eutils/disputegame/output_game_helper.go ++++ CELO/op-e2e/e2eutils/disputegame/output_game_helper.go +@@ -3,7 +3,6 @@ + import ( + "context" + "crypto/ecdsa" +- "errors" + "fmt" + "math/big" + "testing" +@@ -19,6 +18,7 @@ "github.com/ethereum-optimism/optimism/op-e2e/bindings" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/transactions" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" + preimage "github.com/ethereum-optimism/optimism/op-preimage" ++ "github.com/ethereum-optimism/optimism/op-service/errutil" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" + "github.com/ethereum/go-ethereum/accounts/abi/bind" +@@ -578,20 +578,15 @@ } + return false + } +  +-type ErrWithData interface { +- ErrorData() interface{} +-} +- + // StepFails attempts to call step and verifies that it fails with ValidStep() + func (g *OutputGameHelper) StepFails(ctx context.Context, claimIdx int64, isAttack bool, stateData []byte, proof []byte) { + g.T.Logf("Attempting step against claim %v isAttack: %v", claimIdx, isAttack) + candidate, err := g.Game.StepTx(uint64(claimIdx), isAttack, stateData, proof) + g.Require.NoError(err, "Failed to create tx candidate") + _, _, err = transactions.SendTx(ctx, g.Client, candidate, g.PrivKey, transactions.WithReceiptFail()) +- var errData ErrWithData +- ok := errors.As(err, &errData) +- g.Require.Truef(ok, "Error should provide ErrorData method: %v", err) +- g.Require.Equal("0xfb4e40dd", errData.ErrorData(), "Revert reason should be abi encoded ValidStep()") ++ err = errutil.TryAddRevertReason(err) ++ g.Require.Error(err, "Transaction should fail") ++ g.Require.Contains(err.Error(), "0xfb4e40dd", "Revert reason should be abi encoded ValidStep()") + } +  + // ResolveClaim resolves a single subgame
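The ad-hoc ErrWithData interface assertion (also deleted from transactions/send.go below) gives way to errutil.TryAddRevertReason, which, as used here, folds any revert data into the error message so tests can assert on it with a plain Contains instead of type-asserting the error. Generic shape of the new pattern:

    // Wrap any client error so its revert data (if present) becomes part
    // of err.Error(), then assert on the message.
    err = errutil.TryAddRevertReason(err)
    require.ErrorContains(t, err, "0xfb4e40dd") // abi-encoded ValidStep()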
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+21
+
-38
+ +
+ +
+
+
diff --git OP/op-e2e/e2eutils/disputegame/preimage/preimage_helper.go CELO/op-e2e/e2eutils/disputegame/preimage/preimage_helper.go +index ad043d3e37a9ebd95f77cc43f6d0e081c6877209..e2eb49836865eaee24c354f55cae3688beaf308d 100644 +--- OP/op-e2e/e2eutils/disputegame/preimage/preimage_helper.go ++++ CELO/op-e2e/e2eutils/disputegame/preimage/preimage_helper.go +@@ -3,6 +3,7 @@ + import ( + "bytes" + "context" ++ "crypto/ecdsa" + "errors" + "io" + "math/big" +@@ -15,13 +16,12 @@ "github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/preimages" + "github.com/ethereum-optimism/optimism/op-challenger/game/keccak/matrix" + "github.com/ethereum-optimism/optimism/op-challenger/game/keccak/types" +- "github.com/ethereum-optimism/optimism/op-e2e/bindings" ++ "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/transactions" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" +- "github.com/ethereum-optimism/optimism/op-service/sources/batching" + "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" + "github.com/ethereum-optimism/optimism/op-service/testutils" +- "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" ++ "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/stretchr/testify/require" + ) +@@ -29,28 +29,21 @@ + const MinPreimageSize = 10000 +  + type Helper struct { +- t *testing.T +- require *require.Assertions +- client *ethclient.Client +- opts *bind.TransactOpts +- oracleBindings *bindings.PreimageOracle +- oracle *contracts.PreimageOracleContract +- uuidProvider atomic.Int64 ++ t *testing.T ++ require *require.Assertions ++ client *ethclient.Client ++ privKey *ecdsa.PrivateKey ++ oracle *contracts.PreimageOracleContract ++ uuidProvider atomic.Int64 + } +  +-func NewHelper(t *testing.T, opts *bind.TransactOpts, client *ethclient.Client, addr common.Address) *Helper { +- require := require.New(t) +- oracleBindings, err := bindings.NewPreimageOracle(addr, client) +- require.NoError(err) +- +- oracle := contracts.NewPreimageOracleContract(addr, batching.NewMultiCaller(client.Client(), batching.DefaultBatchSize)) ++func NewHelper(t *testing.T, privKey *ecdsa.PrivateKey, client *ethclient.Client, oracle *contracts.PreimageOracleContract) *Helper { + return &Helper{ +- t: t, +- require: require, +- client: client, +- opts: opts, +- oracleBindings: oracleBindings, +- oracle: oracle, ++ t: t, ++ require: require.New(t), ++ client: client, ++ privKey: privKey, ++ oracle: oracle, + } + } +  +@@ -82,14 +75,9 @@ func (h *Helper) UploadLargePreimage(ctx context.Context, dataSize int, modifiers ...InputModifier) types.LargePreimageIdent { + data := testutils.RandomData(rand.New(rand.NewSource(1234)), dataSize) + s := matrix.NewStateMatrix() + uuid := big.NewInt(h.uuidProvider.Add(1)) +- bondValue, err := h.oracleBindings.MINBONDSIZE(&bind.CallOpts{}) +- h.require.NoError(err) +- h.opts.Value = bondValue +- tx, err := h.oracleBindings.InitLPP(h.opts, uuid, 32, uint32(len(data))) +- h.require.NoError(err) +- _, err = wait.ForReceiptOK(ctx, h.client, tx.Hash()) ++ candidate, err := h.oracle.InitLargePreimage(uuid, 32, uint32(len(data))) + h.require.NoError(err) +- h.opts.Value = big.NewInt(0) ++ transactions.RequireSendTx(h.t, ctx, h.client, candidate, h.privKey) +  + startBlock := big.NewInt(0) + totalBlocks := len(data) / types.BlockSize +@@ -102,15 +90,10 @@ } + for _, modifier := range 
modifiers { + modifier(startBlock.Uint64(), &inputData) + } +- commitments := make([][32]byte, len(inputData.Commitments)) +- for i, commitment := range inputData.Commitments { +- commitments[i] = commitment +- } +- h.t.Logf("Uploading %v parts of preimage %v starting at block %v of about %v Finalize: %v", len(commitments), uuid.Uint64(), startBlock.Uint64(), totalBlocks, inputData.Finalize) +- tx, err := h.oracleBindings.AddLeavesLPP(h.opts, uuid, startBlock, inputData.Input, commitments, inputData.Finalize) ++ h.t.Logf("Uploading %v parts of preimage %v starting at block %v of about %v Finalize: %v", len(inputData.Commitments), uuid.Uint64(), startBlock.Uint64(), totalBlocks, inputData.Finalize) ++ tx, err := h.oracle.AddLeaves(uuid, startBlock, inputData.Input, inputData.Commitments, inputData.Finalize) + h.require.NoError(err) +- _, err = wait.ForReceiptOK(ctx, h.client, tx.Hash()) +- h.require.NoError(err) ++ transactions.RequireSendTx(h.t, ctx, h.client, tx, h.privKey) + startBlock = new(big.Int).Add(startBlock, big.NewInt(int64(len(inputData.Commitments)))) + if inputData.Finalize { + break +@@ -118,7 +101,7 @@ } + } +  + return types.LargePreimageIdent{ +- Claimant: h.opts.From, ++ Claimant: crypto.PubkeyToAddress(h.privKey.PublicKey), + UUID: uuid, + } + }
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+6
+
-13
+ +
+ +
+
+
diff --git OP/op-e2e/e2eutils/transactions/send.go CELO/op-e2e/e2eutils/transactions/send.go +index 09bc029daddc5e41845063aafb7294b1d536cdd7..d154a97515986d923995c75220502986486a5c47 100644 +--- OP/op-e2e/e2eutils/transactions/send.go ++++ CELO/op-e2e/e2eutils/transactions/send.go +@@ -3,12 +3,12 @@ + import ( + "context" + "crypto/ecdsa" +- "errors" + "fmt" + "math/big" + "testing" +  + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" ++ "github.com/ethereum-optimism/optimism/op-service/errutil" + "github.com/ethereum-optimism/optimism/op-service/txmgr" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/core/types" +@@ -20,10 +20,6 @@ ) +  + type SendTxOpt func(cfg *sendTxCfg) +  +-type ErrWithData interface { +- ErrorData() interface{} +-} +- + type sendTxCfg struct { + receiptStatus uint64 + } +@@ -44,9 +40,10 @@ cfg.receiptStatus = types.ReceiptStatusFailed + } + } +  +-func RequireSendTx(t *testing.T, ctx context.Context, client *ethclient.Client, candidate txmgr.TxCandidate, privKey *ecdsa.PrivateKey, opts ...SendTxOpt) { +- _, _, err := SendTx(ctx, client, candidate, privKey, opts...) ++func RequireSendTx(t *testing.T, ctx context.Context, client *ethclient.Client, candidate txmgr.TxCandidate, privKey *ecdsa.PrivateKey, opts ...SendTxOpt) (*types.Transaction, *types.Receipt) { ++ tx, rcpt, err := SendTx(ctx, client, candidate, privKey, opts...) + require.NoError(t, err, "Failed to send transaction") ++ return tx, rcpt + } +  + func SendTx(ctx context.Context, client *ethclient.Client, candidate txmgr.TxCandidate, privKey *ecdsa.PrivateKey, opts ...SendTxOpt) (*types.Transaction, *types.Receipt, error) { +@@ -82,11 +79,7 @@ Data: candidate.TxData, + } + gas, err := client.EstimateGas(ctx, msg) + if err != nil { +- var errWithData ErrWithData +- if errors.As(err, &errWithData) { +- return nil, nil, fmt.Errorf("failed to estimate gas. errdata: %v err: %w", errWithData.ErrorData(), err) +- } +- return nil, nil, fmt.Errorf("failed to estimate gas: %w", err) ++ return nil, nil, fmt.Errorf("failed to estimate gas: %w", errutil.TryAddRevertReason(err)) + } +  + tx := types.MustSignNewTx(privKey, types.LatestSignerForChainID(chainID), &types.DynamicFeeTx{ +@@ -101,7 +94,7 @@ Gas: gas, + }) + err = client.SendTransaction(ctx, tx) + if err != nil { +- return nil, nil, fmt.Errorf("failed to send transaction: %w", err) ++ return nil, nil, fmt.Errorf("failed to send transaction: %w", errutil.TryAddRevertReason(err)) + } + receipt, err := wait.ForReceipt(ctx, client, tx.Hash(), cfg.receiptStatus) + if err != nil {
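RequireSendTx now hands back the transaction and receipt instead of discarding them, and both gas-estimation and send failures get revert reasons attached through the same errutil.TryAddRevertReason helper. A minimal caller sketch (client and candidate setup assumed):

    // Still fails the test on any error, but now returns the results
    // so callers can assert on them directly.
    tx, rcpt := transactions.RequireSendTx(t, ctx, client, candidate, privKey)
    require.Equal(t, tx.Hash(), rcpt.TxHash)
    require.Equal(t, types.ReceiptStatusSuccessful, rcpt.Status)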
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+1
+
-1
+ +
+ +
+
+
diff --git OP/op-e2e/eip4844_test.go CELO/op-e2e/eip4844_test.go +index 5b5cc1d5308edf3d0f2fa927da4838c169128225..c6f481ab67fe7709a7ee9c17c2157134111f81bf 100644 +--- OP/op-e2e/eip4844_test.go ++++ CELO/op-e2e/eip4844_test.go +@@ -102,7 +102,7 @@ opts.Nonce = 1 // Already have deposit + opts.ToAddr = &common.Address{0xff, 0xff} + // put some random data in the tx to make it fill up 6 blobs (multi-blob case) + opts.Data = testutils.RandomData(rand.New(rand.NewSource(420)), 400) +- opts.Gas, err = core.IntrinsicGas(opts.Data, nil, false, true, true, false) ++ opts.Gas, err = core.IntrinsicGas(opts.Data, nil, false, true, true, false, nil) + require.NoError(t, err) + opts.VerifyOnClients(l2Verif) + })
+
+ + +
+ + +
+
+
+ + (new) + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+1
+
-0
+ +
+ +
+
+
diff --git OP/op-e2e/faultproofs/bigCodeCreateInput.data CELO/op-e2e/faultproofs/bigCodeCreateInput.data +new file mode 100644 +index 0000000000000000000000000000000000000000..44891a39e07f912df925cccb2caa0808872d80ce +--- /dev/null ++++ CELO/op-e2e/faultproofs/bigCodeCreateInput.data +@@ -0,0 +1 @@ ++0x6080604052348015600f57600080fd5b50615fdd8061001f6000396000f3fe608060405234801561001057600080fd5b506004361061002b5760003560e01c8063dbd8cd6314610030575b600080fd5b61003861003a565b005b60405180615f800160405280615f4e815260200161005a615f4e91395056feaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
[… remainder of this single-line creation input omitted: an uninterrupted run of repeated "aa" filler bytes, used to make the created contract code large …]
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa2646970667358221220b25c21f2147000f10799a57b6475538b627899e60949e1142a24c6c19e21af7364736f6c63430008190033
+ [diff viewer header for the next file: new in CELO, +196 / -0]
diff --git OP/op-e2e/faultproofs/cannon_benchmark_test.go CELO/op-e2e/faultproofs/cannon_benchmark_test.go +new file mode 100644 +index 0000000000000000000000000000000000000000..ecd128a3c956627baf258c1b06bac738f87ce0d9 +--- /dev/null ++++ CELO/op-e2e/faultproofs/cannon_benchmark_test.go +@@ -0,0 +1,196 @@ ++package faultproofs ++ ++import ( ++ "context" ++ "crypto/ecdsa" ++ "encoding/json" ++ "math/big" ++ "os" ++ "path" ++ "sync" ++ "testing" ++ "time" ++ ++ "github.com/ethereum-optimism/optimism/cannon/mipsevm" ++ "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/utils" ++ op_e2e "github.com/ethereum-optimism/optimism/op-e2e" ++ "github.com/ethereum-optimism/optimism/op-e2e/bindings" ++ "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" ++ "github.com/ethereum-optimism/optimism/op-service/client" ++ "github.com/ethereum-optimism/optimism/op-service/predeploys" ++ "github.com/ethereum-optimism/optimism/op-service/sources" ++ "github.com/ethereum-optimism/optimism/op-service/testlog" ++ "github.com/ethereum/go-ethereum/accounts/abi/bind" ++ "github.com/ethereum/go-ethereum/common" ++ "github.com/ethereum/go-ethereum/common/hexutil" ++ "github.com/ethereum/go-ethereum/core/types" ++ "github.com/ethereum/go-ethereum/crypto" ++ "github.com/ethereum/go-ethereum/ethclient" ++ "github.com/ethereum/go-ethereum/log" ++ "github.com/ethereum/go-ethereum/rpc" ++ "github.com/pkg/errors" ++ "github.com/stretchr/testify/require" ++) ++ ++func TestBenchmarkCannon_FPP(t *testing.T) { ++ t.Skip("TODO(client-pod#906): Compare total witness size for assertions against pages allocated by the VM") ++ ++ op_e2e.InitParallel(t, op_e2e.UsesCannon) ++ ctx := context.Background() ++ cfg := op_e2e.DefaultSystemConfig(t) ++ // We don't need a verifier - just the sequencer is enough ++ delete(cfg.Nodes, "verifier") ++ // Use a small sequencer window size to avoid test timeout while waiting for empty blocks ++ // But not too small to ensure that our claim and subsequent state change is published ++ cfg.DeployConfig.SequencerWindowSize = 16 ++ minTs := hexutil.Uint64(0) ++ cfg.DeployConfig.L2GenesisDeltaTimeOffset = &minTs ++ cfg.DeployConfig.L2GenesisEcotoneTimeOffset = &minTs ++ ++ sys, err := cfg.Start(t) ++ require.Nil(t, err, "Error starting up system") ++ defer sys.Close() ++ ++ log := testlog.Logger(t, log.LevelInfo) ++ log.Info("genesis", "l2", sys.RollupConfig.Genesis.L2, "l1", sys.RollupConfig.Genesis.L1, "l2_time", sys.RollupConfig.Genesis.L2Time) ++ ++ l1Client := sys.Clients["l1"] ++ l2Seq := sys.Clients["sequencer"] ++ rollupRPCClient, err := rpc.DialContext(context.Background(), sys.RollupNodes["sequencer"].HTTPEndpoint()) ++ require.Nil(t, err) ++ rollupClient := sources.NewRollupClient(client.NewBaseRPCClient(rollupRPCClient)) ++ require.NoError(t, wait.ForUnsafeBlock(ctx, rollupClient, 1)) ++ ++ // Agreed state: 200 Big Contracts deployed at max size - total codesize is 5.90 MB ++ // In Fault Proof: Perform multicalls calling each Big Contract ++ // - induces 200 oracle.CodeByHash preimage loads ++ // Assertion: Under 2000 pages requested by the program (i.e. max ~8 MB). Assumes derivation overhead; block finalization, etc, requires < 1 MB of program memory. 
++ ++ const numCreates = 200 ++ newContracts := createBigContracts(ctx, t, cfg, l2Seq, cfg.Secrets.Alice, numCreates) ++ receipt := callBigContracts(ctx, t, cfg, l2Seq, cfg.Secrets.Alice, newContracts) ++ ++ t.Log("Capture the latest L2 head that preceedes contract creations as agreed starting point") ++ agreedBlock, err := l2Seq.BlockByNumber(ctx, new(big.Int).Sub(receipt.BlockNumber, big.NewInt(1))) ++ require.NoError(t, err) ++ agreedL2Output, err := rollupClient.OutputAtBlock(ctx, agreedBlock.NumberU64()) ++ require.NoError(t, err, "could not retrieve l2 agreed block") ++ l2Head := agreedL2Output.BlockRef.Hash ++ l2OutputRoot := agreedL2Output.OutputRoot ++ ++ t.Log("Determine L2 claim") ++ l2ClaimBlockNumber := receipt.BlockNumber ++ l2Output, err := rollupClient.OutputAtBlock(ctx, l2ClaimBlockNumber.Uint64()) ++ require.NoError(t, err, "could not get expected output") ++ l2Claim := l2Output.OutputRoot ++ ++ t.Log("Determine L1 head that includes all batches required for L2 claim block") ++ require.NoError(t, wait.ForSafeBlock(ctx, rollupClient, l2ClaimBlockNumber.Uint64())) ++ l1HeadBlock, err := l1Client.BlockByNumber(ctx, nil) ++ require.NoError(t, err, "get l1 head block") ++ l1Head := l1HeadBlock.Hash() ++ ++ inputs := utils.LocalGameInputs{ ++ L1Head: l1Head, ++ L2Head: l2Head, ++ L2Claim: common.Hash(l2Claim), ++ L2OutputRoot: common.Hash(l2OutputRoot), ++ L2BlockNumber: l2ClaimBlockNumber, ++ } ++ debugfile := path.Join(t.TempDir(), "debug.json") ++ runCannon(t, ctx, sys, inputs, "sequencer", "--debug-info", debugfile) ++ data, err := os.ReadFile(debugfile) ++ require.NoError(t, err) ++ var debuginfo mipsevm.DebugInfo ++ require.NoError(t, json.Unmarshal(data, &debuginfo)) ++ t.Logf("Debug info: %#v", debuginfo) ++ // TODO(client-pod#906): Use maximum witness size for assertions against pages allocated by the VM ++} ++ ++func createBigContracts(ctx context.Context, t *testing.T, cfg op_e2e.SystemConfig, client *ethclient.Client, key *ecdsa.PrivateKey, numContracts int) []common.Address { ++ /* ++ contract Big { ++ bytes constant foo = hex"<24.4 KB of random data>"; ++ function ekans() external { foo; } ++ } ++ */ ++ createInputHex, err := os.ReadFile("bigCodeCreateInput.data") ++ createInput := common.FromHex(string(createInputHex[2:])) ++ require.NoError(t, err) ++ ++ nonce, err := client.NonceAt(ctx, crypto.PubkeyToAddress(key.PublicKey), nil) ++ require.NoError(t, err) ++ ++ type result struct { ++ addr common.Address ++ err error ++ } ++ ++ var wg sync.WaitGroup ++ wg.Add(numContracts) ++ results := make(chan result, numContracts) ++ for i := 0; i < numContracts; i++ { ++ tx := types.MustSignNewTx(key, types.LatestSignerForChainID(cfg.L2ChainIDBig()), &types.DynamicFeeTx{ ++ ChainID: cfg.L2ChainIDBig(), ++ Nonce: nonce + uint64(i), ++ To: nil, ++ GasTipCap: big.NewInt(10), ++ GasFeeCap: big.NewInt(200), ++ Gas: 10_000_000, ++ Data: createInput, ++ }) ++ go func() { ++ defer wg.Done() ++ ctx, cancel := context.WithTimeout(ctx, 120*time.Second) ++ defer cancel() ++ err := client.SendTransaction(ctx, tx) ++ if err != nil { ++ results <- result{err: errors.Wrap(err, "Sending L2 tx")} ++ return ++ } ++ receipt, err := wait.ForReceiptOK(ctx, client, tx.Hash()) ++ if err != nil { ++ results <- result{err: errors.Wrap(err, "Waiting for receipt")} ++ return ++ } ++ results <- result{addr: receipt.ContractAddress, err: nil} ++ }() ++ } ++ wg.Wait() ++ close(results) ++ ++ var addrs []common.Address ++ for r := range results { ++ require.NoError(t, r.err) ++ addrs = append(addrs, 
r.addr) ++ } ++ return addrs ++} ++ ++func callBigContracts(ctx context.Context, t *testing.T, cfg op_e2e.SystemConfig, client *ethclient.Client, key *ecdsa.PrivateKey, addrs []common.Address) *types.Receipt { ++ multicall3, err := bindings.NewMultiCall3(predeploys.MultiCall3Addr, client) ++ require.NoError(t, err) ++ ++ chainID, err := client.ChainID(ctx) ++ require.NoError(t, err) ++ opts, err := bind.NewKeyedTransactorWithChainID(key, chainID) ++ require.NoError(t, err) ++ ++ var calls []bindings.Multicall3Call3Value ++ calldata := crypto.Keccak256([]byte("ekans()"))[:4] ++ for _, addr := range addrs { ++ calls = append(calls, bindings.Multicall3Call3Value{ ++ Target: addr, ++ CallData: calldata, ++ Value: new(big.Int), ++ }) ++ } ++ opts.GasLimit = 20_000_000 ++ tx, err := multicall3.Aggregate3Value(opts, calls) ++ require.NoError(t, err) ++ ++ receipt, err := wait.ForReceiptOK(ctx, client, tx.Hash()) ++ require.NoError(t, err) ++ t.Logf("Initiated %d calls to the Big Contract. gas used: %d", len(addrs), receipt.GasUsed) ++ return receipt ++}
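
The bigCodeCreateInput.data fixture read by createBigContracts is the ~24 KB hex creation input elided earlier on this page. A minimal sketch of how such a fixture could be produced; the file name matches the test, but the generator itself is hypothetical, and a real fixture would be the full solc-produced creation input for the Big contract quoted in the test comment rather than just the constant bytes:

package main

import (
	"log"
	"os"
	"strings"
)

// Hypothetical generator for a bigCodeCreateInput.data-style fixture:
// a 0x-prefixed hex blob of repeated 0xaa bytes, the shape of the
// constant array embedded in the Big contract. The test strips the
// leading "0x" before decoding with common.FromHex.
func main() {
	const constBytes = 24 * 1024 // roughly the 24.4 KB mentioned in the test
	blob := "0x" + strings.Repeat("aa", constBytes)
	if err := os.WriteFile("bigCodeCreateInput.data", []byte(blob), 0o644); err != nil {
		log.Fatal(err)
	}
}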
+ [diff viewer header for the next file: OP → CELO, +4 / -4]
diff --git OP/op-e2e/faultproofs/precompile_test.go CELO/op-e2e/faultproofs/precompile_test.go +index 28807edee6ddef6833d54d3c210b77abd616381e..ab8d0394771dc2476d711db5461d54b6c96cfb45 100644 +--- OP/op-e2e/faultproofs/precompile_test.go ++++ CELO/op-e2e/faultproofs/precompile_test.go +@@ -10,8 +10,8 @@ "testing" +  + "github.com/ethereum-optimism/optimism/cannon/mipsevm" + "github.com/ethereum-optimism/optimism/op-challenger/config" +- "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/cannon" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/utils" ++ "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/vm" + "github.com/ethereum-optimism/optimism/op-challenger/metrics" + op_e2e "github.com/ethereum-optimism/optimism/op-e2e" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/challenger" +@@ -135,7 +135,7 @@ }) + } + } +  +-func runCannon(t *testing.T, ctx context.Context, sys *op_e2e.System, inputs utils.LocalGameInputs, l2Node string) { ++func runCannon(t *testing.T, ctx context.Context, sys *op_e2e.System, inputs utils.LocalGameInputs, l2Node string, extraVmArgs ...string) { + l1Endpoint := sys.NodeEndpoint("l1") + l1Beacon := sys.L1BeaconEndpoint() + rollupEndpoint := sys.RollupEndpoint("sequencer") +@@ -147,10 +147,10 @@ cfg := config.NewConfig(common.Address{}, l1Endpoint, l1Beacon, rollupEndpoint, l2Endpoint, dir) + cannonOpts(&cfg) +  + logger := testlog.Logger(t, log.LevelInfo).New("role", "cannon") +- executor := cannon.NewExecutor(logger, metrics.NoopMetrics, &cfg, cfg.CannonAbsolutePreState, inputs) ++ executor := vm.NewExecutor(logger, metrics.NoopMetrics, cfg.Cannon, cfg.CannonAbsolutePreState, inputs) +  + t.Log("Running cannon") +- err := executor.GenerateProof(ctx, proofsDir, math.MaxUint) ++ err := executor.DoGenerateProof(ctx, proofsDir, math.MaxUint, math.MaxUint, extraVmArgs...) + require.NoError(t, err, "failed to generate proof") +  + state, err := parseState(filepath.Join(proofsDir, "final.json.gz"))
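
The runCannon change above threads optional VM flags through to the cannon executor; the benchmark test earlier on this page uses exactly that to request a debug dump:

	debugfile := path.Join(t.TempDir(), "debug.json")
	runCannon(t, ctx, sys, inputs, "sequencer", "--debug-info", debugfile)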
+ [diff viewer header for the next file: OP → CELO, +11 / -2]
diff --git OP/op-e2e/sequencer_failover_setup.go CELO/op-e2e/sequencer_failover_setup.go +index 4b4aeb20379b36c8e92a684a562602433ed50a48..f1fd867469e61e34c5ad64a9203294ca62cad466 100644 +--- OP/op-e2e/sequencer_failover_setup.go ++++ CELO/op-e2e/sequencer_failover_setup.go +@@ -17,6 +17,7 @@ + bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" + batcherFlags "github.com/ethereum-optimism/optimism/op-batcher/flags" + con "github.com/ethereum-optimism/optimism/op-conductor/conductor" ++ "github.com/ethereum-optimism/optimism/op-conductor/consensus" + conrpc "github.com/ethereum-optimism/optimism/op-conductor/rpc" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" + rollupNode "github.com/ethereum-optimism/optimism/op-node/node" +@@ -74,8 +75,8 @@ c2 := conductors[Sequencer2Name] + c3 := conductors[Sequencer3Name] +  + require.NoError(t, waitForLeadership(t, c1)) +- require.NoError(t, c1.client.AddServerAsVoter(ctx, Sequencer2Name, c2.ConsensusEndpoint())) +- require.NoError(t, c1.client.AddServerAsVoter(ctx, Sequencer3Name, c3.ConsensusEndpoint())) ++ require.NoError(t, c1.client.AddServerAsVoter(ctx, Sequencer2Name, c2.ConsensusEndpoint(), 0)) ++ require.NoError(t, c1.client.AddServerAsVoter(ctx, Sequencer3Name, c3.ConsensusEndpoint(), 0)) + require.True(t, leader(t, ctx, c1)) + require.False(t, leader(t, ctx, c2)) + require.False(t, leader(t, ctx, c3)) +@@ -508,3 +509,11 @@ return leaders == 1, nil + } + require.NoError(t, wait.For(ctx, 1*time.Second, condition)) + } ++ ++func memberIDs(membership *consensus.ClusterMembership) []string { ++ ids := make([]string, len(membership.Servers)) ++ for _, member := range membership.Servers { ++ ids = append(ids, member.ID) ++ } ++ return ids ++}
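
One nit in the new memberIDs helper: make([]string, len(...)) pre-fills the slice with empty strings, so the appends produce a result of double length with blank leading entries. The Contains/NotContains assertions below still pass, but an allocation with zero length and explicit capacity avoids the surprise:

func memberIDs(membership *consensus.ClusterMembership) []string {
	// Zero length, pre-sized capacity: append fills from the start.
	ids := make([]string, 0, len(membership.Servers))
	for _, member := range membership.Servers {
		ids = append(ids, member.ID)
	}
	return ids
}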
+ [diff viewer header for the next file: OP → CELO, +72 / -13]
diff --git OP/op-e2e/sequencer_failover_test.go CELO/op-e2e/sequencer_failover_test.go +index 6d98dc2af9731a8f4a96b7f6d325cc499c44dc74..0fa38f54ba3c7865de040e4632354baf64703293 100644 +--- OP/op-e2e/sequencer_failover_test.go ++++ CELO/op-e2e/sequencer_failover_test.go +@@ -5,6 +5,7 @@ "context" + "sort" + "testing" +  ++ "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +  + "github.com/ethereum-optimism/optimism/op-conductor/consensus" +@@ -39,9 +40,9 @@ c2 := conductors[Sequencer2Name] + c3 := conductors[Sequencer3Name] + membership, err := c1.client.ClusterMembership(ctx) + require.NoError(t, err) +- require.Equal(t, 3, len(membership), "Expected 3 members in cluster") ++ require.Equal(t, 3, len(membership.Servers), "Expected 3 members in cluster") + ids := make([]string, 0) +- for _, member := range membership { ++ for _, member := range membership.Servers { + ids = append(ids, member.ID) + require.Equal(t, consensus.Voter, member.Suffrage, "Expected all members to be voters") + } +@@ -112,37 +113,54 @@ err = nonvoter.service.Stop(ctx) + require.NoError(t, err) + }() +  +- err = leader.client.AddServerAsNonvoter(ctx, VerifierName, nonvoter.ConsensusEndpoint()) ++ membership, err = leader.client.ClusterMembership(ctx) ++ require.NoError(t, err) ++ ++ err = leader.client.AddServerAsNonvoter(ctx, VerifierName, nonvoter.ConsensusEndpoint(), membership.Version-1) ++ require.ErrorContains(t, err, "configuration changed since", "Expected leader to fail to add nonvoter due to version mismatch") ++ membership, err = leader.client.ClusterMembership(ctx) ++ require.NoError(t, err) ++ require.Equal(t, 3, len(membership.Servers), "Expected 3 members in cluster") ++ ++ err = leader.client.AddServerAsNonvoter(ctx, VerifierName, nonvoter.ConsensusEndpoint(), 0) + require.NoError(t, err, "Expected leader to add non-voter") + membership, err = leader.client.ClusterMembership(ctx) + require.NoError(t, err) +- require.Equal(t, 4, len(membership), "Expected 4 members in cluster") +- require.Equal(t, consensus.Nonvoter, membership[3].Suffrage, "Expected last member to be non-voter") ++ require.Equal(t, 4, len(membership.Servers), "Expected 4 members in cluster") ++ require.Equal(t, consensus.Nonvoter, membership.Servers[3].Suffrage, "Expected last member to be non-voter") +  + t.Log("Testing RemoveServer, call remove on follower, expected to fail") + lid, leader = findLeader(t, conductors) + fid, follower = findFollower(t, conductors) +- err = follower.client.RemoveServer(ctx, lid) ++ err = follower.client.RemoveServer(ctx, lid, membership.Version) + require.ErrorContains(t, err, "node is not the leader", "Expected follower to fail to remove leader") + membership, err = c1.client.ClusterMembership(ctx) + require.NoError(t, err) +- require.Equal(t, 4, len(membership), "Expected 4 members in cluster") ++ require.Equal(t, 4, len(membership.Servers), "Expected 4 members in cluster") +  + t.Log("Testing RemoveServer, call remove on leader, expect non-voter to be removed") +- err = leader.client.RemoveServer(ctx, VerifierName) ++ err = leader.client.RemoveServer(ctx, VerifierName, membership.Version) + require.NoError(t, err, "Expected leader to remove non-voter") + membership, err = c1.client.ClusterMembership(ctx) + require.NoError(t, err) +- require.Equal(t, 3, len(membership), "Expected 2 members in cluster after removal") +- require.NotContains(t, membership, VerifierName, "Expected follower to be removed from cluster") ++ require.Equal(t, 3, len(membership.Servers), "Expected 
2 members in cluster after removal") ++ require.NotContains(t, memberIDs(membership), VerifierName, "Expected follower to be removed from cluster") ++ ++ t.Log("Testing RemoveServer, call remove on leader with incorrect version, expect voter not to be removed") ++ err = leader.client.RemoveServer(ctx, fid, membership.Version-1) ++ require.ErrorContains(t, err, "configuration changed since", "Expected leader to fail to remove follower due to version mismatch") ++ membership, err = c1.client.ClusterMembership(ctx) ++ require.NoError(t, err) ++ require.Equal(t, 3, len(membership.Servers), "Expected 3 members in cluster after failed removal") ++ require.Contains(t, memberIDs(membership), fid, "Expected follower to not be removed from cluster") +  + t.Log("Testing RemoveServer, call remove on leader, expect voter to be removed") +- err = leader.client.RemoveServer(ctx, fid) ++ err = leader.client.RemoveServer(ctx, fid, membership.Version) + require.NoError(t, err, "Expected leader to remove follower") + membership, err = c1.client.ClusterMembership(ctx) + require.NoError(t, err) +- require.Equal(t, 2, len(membership), "Expected 2 members in cluster after removal") +- require.NotContains(t, membership, fid, "Expected follower to be removed from cluster") ++ require.Equal(t, 2, len(membership.Servers), "Expected 2 members in cluster after removal") ++ require.NotContains(t, memberIDs(membership), fid, "Expected follower to be removed from cluster") + } +  + // [Category: Sequencer Failover] +@@ -172,3 +190,44 @@ active, err := sys.RollupClient(newLeaderId).SequencerActive(ctx) + require.NoError(t, err) + require.True(t, active, "Expected new leader to be sequencing") + } ++ ++// [Category: Disaster Recovery] ++// Test that sequencer can successfully be started with the overrideLeader flag set to true. ++func TestSequencerFailover_DisasterRecovery_OverrideLeader(t *testing.T) { ++ sys, conductors, cleanup := setupSequencerFailoverTest(t) ++ defer cleanup() ++ ++ // randomly stop 2 nodes in the cluster to simulate a disaster. 
++ ctx := context.Background() ++ err := conductors[Sequencer1Name].service.Stop(ctx) ++ require.NoError(t, err) ++ err = conductors[Sequencer2Name].service.Stop(ctx) ++ require.NoError(t, err) ++ ++ require.False(t, conductors[Sequencer3Name].service.Leader(ctx), "Expected sequencer to not be the leader") ++ active, err := sys.RollupClient(Sequencer3Name).SequencerActive(ctx) ++ require.NoError(t, err) ++ require.False(t, active, "Expected sequencer to be inactive") ++ ++ // Start sequencer without the overrideLeader flag set to true, should fail ++ err = sys.RollupClient(Sequencer3Name).StartSequencer(ctx, common.Hash{1, 2, 3}) ++ require.ErrorContains(t, err, "sequencer is not the leader, aborting.", "Expected sequencer to fail to start") ++ ++ // Start sequencer with the overrideLeader flag set to true, should succeed ++ err = sys.RollupClient(Sequencer3Name).OverrideLeader(ctx) ++ require.NoError(t, err) ++ blk, err := sys.NodeClient(Sequencer3Name).BlockByNumber(ctx, nil) ++ require.NoError(t, err) ++ err = sys.RollupClient(Sequencer3Name).StartSequencer(ctx, blk.Hash()) ++ require.NoError(t, err) ++ ++ active, err = sys.RollupClient(Sequencer3Name).SequencerActive(ctx) ++ require.NoError(t, err) ++ require.True(t, active, "Expected sequencer to be active") ++ ++ err = conductors[Sequencer3Name].client.OverrideLeader(ctx) ++ require.NoError(t, err) ++ leader, err := conductors[Sequencer3Name].client.Leader(ctx) ++ require.NoError(t, err) ++ require.True(t, leader, "Expected conductor to return leader true after override") ++}
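
The new version argument on AddServerAsVoter, AddServerAsNonvoter, and RemoveServer is an optimistic-concurrency guard: callers pass the Version from a fresh ClusterMembership read (or, as in the setup code above, 0 to skip the check), and the call fails with "configuration changed since" if the membership moved underneath them. A minimal sketch of the resulting read-check-retry pattern; the types below are simplified stand-ins for op-conductor's consensus and RPC types, and the version is assumed to be a uint64:

package failover

import (
	"context"
	"fmt"
	"strings"
)

// Simplified stand-ins for op-conductor's consensus/RPC types.
type clusterMembership struct {
	Version uint64
	Servers []struct{ ID string }
}

type conductorClient interface {
	ClusterMembership(ctx context.Context) (*clusterMembership, error)
	RemoveServer(ctx context.Context, id string, version uint64) error
}

// removeServerCAS removes a server compare-and-swap style, retrying a
// few times if the cluster configuration changes concurrently. The
// error-string match mirrors the assertion in the test above.
func removeServerCAS(ctx context.Context, c conductorClient, id string) error {
	for attempt := 0; attempt < 3; attempt++ {
		m, err := c.ClusterMembership(ctx)
		if err != nil {
			return err
		}
		err = c.RemoveServer(ctx, id, m.Version)
		if err == nil || !strings.Contains(err.Error(), "configuration changed since") {
			return err
		}
	}
	return fmt.Errorf("remove %s: membership kept changing", id)
}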
+ [diff viewer header for the next file: OP → CELO, +1 / -0]
diff --git OP/op-e2e/setup.go CELO/op-e2e/setup.go +index 2b255879180c15fbf4f70f898903d03eae68a23a..78120858c516c40ba48d44752e9e0ac6a12f9a27 100644 +--- OP/op-e2e/setup.go ++++ CELO/op-e2e/setup.go +@@ -539,6 +539,7 @@ DeltaTime: cfg.DeployConfig.DeltaTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)), + EcotoneTime: cfg.DeployConfig.EcotoneTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)), + FjordTime: cfg.DeployConfig.FjordTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)), + InteropTime: cfg.DeployConfig.InteropTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)), ++ Cel2Time: cfg.DeployConfig.RegolithTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)), + ProtocolVersionsAddress: cfg.L1Deployments.ProtocolVersionsProxy, + } + }
+ [diff viewer header for the next file: OP → CELO, +10 / -8]
diff --git OP/op-e2e/system_fpp_test.go CELO/op-e2e/system_fpp_test.go +index 4da5f75ca51c185843e6681d229ee0a160b3b19d..635276fe159a8271a181eb0224690b173b9454fb 100644 +--- OP/op-e2e/system_fpp_test.go ++++ CELO/op-e2e/system_fpp_test.go +@@ -6,20 +6,22 @@ "math/big" + "testing" + "time" +  ++ "github.com/stretchr/testify/require" ++ ++ "github.com/ethereum/go-ethereum/accounts/abi/bind" ++ "github.com/ethereum/go-ethereum/common" ++ "github.com/ethereum/go-ethereum/common/hexutil" ++ "github.com/ethereum/go-ethereum/log" ++ "github.com/ethereum/go-ethereum/rpc" ++ + "github.com/ethereum-optimism/optimism/op-chain-ops/genesis" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth" +- "github.com/ethereum-optimism/optimism/op-program/client/driver" ++ "github.com/ethereum-optimism/optimism/op-program/client/claim" + opp "github.com/ethereum-optimism/optimism/op-program/host" + oppconf "github.com/ethereum-optimism/optimism/op-program/host/config" + "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/sources" + "github.com/ethereum-optimism/optimism/op-service/testlog" +- "github.com/ethereum/go-ethereum/accounts/abi/bind" +- "github.com/ethereum/go-ethereum/common" +- "github.com/ethereum/go-ethereum/common/hexutil" +- "github.com/ethereum/go-ethereum/log" +- "github.com/ethereum/go-ethereum/rpc" +- "github.com/stretchr/testify/require" + ) +  + func TestVerifyL2OutputRoot(t *testing.T) { +@@ -320,7 +322,7 @@ err = opp.FaultProofProgram(ctx, log, fppConfig) + if s.Detached { + require.Error(t, err, "exit status 1") + } else { +- require.ErrorIs(t, err, driver.ErrClaimNotValid) ++ require.ErrorIs(t, err, claim.ErrClaimNotValid) + } + } +
+ [diff viewer header for the next file: OP → CELO, +20 / -10]
diff --git OP/op-e2e/system_test.go CELO/op-e2e/system_test.go +index 4dcea8bd96c368c7565a60c500245594c630b25d..029a056782f83b40d7de77df4917c2a0140153b8 100644 +--- OP/op-e2e/system_test.go ++++ CELO/op-e2e/system_test.go +@@ -11,6 +11,9 @@ "slices" + "testing" + "time" +  ++ "github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts" ++ metrics2 "github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts/metrics" ++ "github.com/ethereum-optimism/optimism/op-service/sources/batching" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" +@@ -192,21 +195,21 @@ latestGameCount, err := disputeGameFactory.GameCount(&bind.CallOpts{}) + require.Nil(t, err) +  + if latestGameCount.Cmp(initialGameCount) > 0 { ++ caller := batching.NewMultiCaller(l1Client.Client(), batching.DefaultBatchSize) + committedL2Output, err := disputeGameFactory.GameAtIndex(&bind.CallOpts{}, new(big.Int).Sub(latestGameCount, common.Big1)) + require.Nil(t, err) +- proxy, err := bindings.NewFaultDisputeGameCaller(committedL2Output.Proxy, l1Client) ++ proxy, err := contracts.NewFaultDisputeGameContract(context.Background(), metrics2.NoopContractMetrics, committedL2Output.Proxy, caller) + require.Nil(t, err) +- committedOutputRoot, err := proxy.RootClaim(&bind.CallOpts{}) ++ claim, err := proxy.GetClaim(context.Background(), 0) + require.Nil(t, err) +  + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() +- extradata, err := proxy.ExtraData(&bind.CallOpts{}) ++ _, gameBlockNumber, err := proxy.GetBlockRange(ctx) + require.Nil(t, err) +- gameBlockNumber := new(big.Int).SetBytes(extradata[0:32]) +- l2Output, err := rollupClient.OutputAtBlock(ctx, gameBlockNumber.Uint64()) ++ l2Output, err := rollupClient.OutputAtBlock(ctx, gameBlockNumber) + require.Nil(t, err) +- require.Equal(t, l2Output.OutputRoot[:], committedOutputRoot[:]) ++ require.EqualValues(t, l2Output.OutputRoot, claim.Value) + break + } +  +@@ -614,7 +617,6 @@ + // TestSystemMockP2P sets up a L1 Geth node, a rollup node, and a L2 geth node and then confirms that + // the nodes can sync L2 blocks before they are confirmed on L1. + func TestSystemMockP2P(t *testing.T) { +- t.Skip("flaky in CI") // TODO(CLI-3859): Re-enable this test. 
+ InitParallel(t) +  + cfg := DefaultSystemConfig(t) +@@ -1280,8 +1282,16 @@ require.Nil(t, err, "reading gpo decimals") +  + require.Equal(t, decimals.Uint64(), uint64(6), "wrong gpo decimals") +  ++ // Celo changes the base fee recipient ++ var baseFeeRecipient common.Address ++ if sys.RollupConfig.Cel2Time == nil { ++ baseFeeRecipient = predeploys.BaseFeeVaultAddr ++ } else { ++ baseFeeRecipient = predeploys.FeeHandlerAddr ++ } ++ + // BaseFee Recipient +- baseFeeRecipientStartBalance, err := l2Seq.BalanceAt(context.Background(), predeploys.BaseFeeVaultAddr, big.NewInt(rpc.EarliestBlockNumber.Int64())) ++ baseFeeRecipientStartBalance, err := l2Seq.BalanceAt(context.Background(), baseFeeRecipient, big.NewInt(rpc.EarliestBlockNumber.Int64())) + require.Nil(t, err) +  + // L1Fee Recipient +@@ -1324,7 +1334,7 @@ + endBalance, err := l2Seq.BalanceAt(context.Background(), fromAddr, header.Number) + require.Nil(t, err) +  +- baseFeeRecipientEndBalance, err := l2Seq.BalanceAt(context.Background(), predeploys.BaseFeeVaultAddr, header.Number) ++ baseFeeRecipientEndBalance, err := l2Seq.BalanceAt(context.Background(), baseFeeRecipient, header.Number) + require.Nil(t, err) +  + l1Header, err := l1.HeaderByNumber(context.Background(), nil) +@@ -1350,7 +1360,7 @@ require.Equal(t, l2Fee, sequencerFeeVaultDiff) +  + // Tally BaseFee + baseFee := new(big.Int).Mul(header.BaseFee, new(big.Int).SetUint64(receipt.GasUsed)) +- require.Equal(t, baseFee, baseFeeRecipientDiff, "base fee fee mismatch") ++ require.Equal(t, baseFee, baseFeeRecipientDiff, "base fee mismatch") +  + // Tally L1 Fee + tx, _, err := l2Seq.TransactionByHash(context.Background(), receipt.TxHash)
+ [diff viewer header for the next file: OP → CELO, +7 / -16]
diff --git OP/op-e2e/withdrawal_helper.go CELO/op-e2e/withdrawal_helper.go +index 47bb9e8ddfa9e535a35a1a975bf5b8138d1b24cd..bc6b516c3001c187c36f6e41ba5103c5f916f077 100644 +--- OP/op-e2e/withdrawal_helper.go ++++ CELO/op-e2e/withdrawal_helper.go +@@ -10,10 +10,10 @@ + "github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts/metrics" +- legacybindings "github.com/ethereum-optimism/optimism/op-e2e/bindings" + "github.com/ethereum-optimism/optimism/op-e2e/config" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth" ++ "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/transactions" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" + "github.com/ethereum-optimism/optimism/op-node/bindings" + bindingspreview "github.com/ethereum-optimism/optimism/op-node/bindings/preview" +@@ -201,9 +201,6 @@ game, err := portal2.ProvenWithdrawals(&bind.CallOpts{}, wdHash, opts.From) + require.Nil(t, err) + require.NotNil(t, game, "withdrawal should be proven") +  +- proxy, err := legacybindings.NewFaultDisputeGame(game.DisputeGameProxy, l1Client) +- require.Nil(t, err) +- + caller := batching.NewMultiCaller(l1Client.Client(), batching.DefaultBatchSize) + gameContract, err := contracts.NewFaultDisputeGameContract(context.Background(), metrics.NoopContractMetrics, game.DisputeGameProxy, caller) + require.Nil(t, err) +@@ -216,19 +213,13 @@ t.Logf("Could not resolve dispute game claim: %v", err) + return err == nil, nil + })) +  +- resolveClaimTx, err := proxy.ResolveClaim(opts, common.Big0, common.Big0) +- require.Nil(t, err) ++ tx, err := gameContract.ResolveClaimTx(0) ++ require.NoError(t, err, "create resolveClaim tx") ++ _, resolveClaimReceipt = transactions.RequireSendTx(t, ctx, l1Client, tx, privKey) +  +- resolveClaimReceipt, err = wait.ForReceiptOK(ctx, l1Client, resolveClaimTx.Hash()) +- require.Nil(t, err, "resolve claim") +- require.Equal(t, types.ReceiptStatusSuccessful, resolveClaimReceipt.Status) +- +- resolveTx, err := proxy.Resolve(opts) +- require.Nil(t, err) +- +- resolveReceipt, err = wait.ForReceiptOK(ctx, l1Client, resolveTx.Hash()) +- require.Nil(t, err, "resolve") +- require.Equal(t, types.ReceiptStatusSuccessful, resolveReceipt.Status) ++ tx, err = gameContract.ResolveTx() ++ require.NoError(t, err, "create resolve tx") ++ _, resolveReceipt = transactions.RequireSendTx(t, ctx, l1Client, tx, privKey) + } +  + if e2eutils.UseFaultProofs() {
+ [expand-context widgets and diff viewer header for the next file: OP → CELO, +5 / -4]
diff --git OP/op-node/cmd/genesis/cmd.go CELO/op-node/cmd/genesis/cmd.go +index 6d3d687444a24da3112f4157cb69f53daf3c0895..01c93b42164ee12450272f2ac961bb3ba96a2652 100644 +--- OP/op-node/cmd/genesis/cmd.go ++++ CELO/op-node/cmd/genesis/cmd.go +@@ -16,6 +16,7 @@ "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rpc" +  ++ "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" + "github.com/ethereum-optimism/optimism/op-chain-ops/genesis" + "github.com/ethereum-optimism/optimism/op-service/jsonutil" + ) +@@ -112,9 +113,9 @@ if err := config.CheckAddresses(); err != nil { + return fmt.Errorf("deploy config at %s invalid: %w", deployConfig, err) + } +  +- var dump *genesis.ForgeAllocs ++ var dump *foundry.ForgeAllocs + if l1Allocs := ctx.String("l1-allocs"); l1Allocs != "" { +- dump, err = genesis.LoadForgeAllocs(l1Allocs) ++ dump, err = foundry.LoadForgeAllocs(l1Allocs) + if err != nil { + return err + } +@@ -169,9 +170,9 @@ return fmt.Errorf("cannot read L1 starting block at %s: %w", l1StartBlockPath, err) + } + } +  +- var l2Allocs *genesis.ForgeAllocs ++ var l2Allocs *foundry.ForgeAllocs + if l2AllocsPath := ctx.String("l2-allocs"); l2AllocsPath != "" { +- l2Allocs, err = genesis.LoadForgeAllocs(l2AllocsPath) ++ l2Allocs, err = foundry.LoadForgeAllocs(l2AllocsPath) + if err != nil { + return err + }
+ [diff viewer header for the next file: OP → CELO, +3 / -3]
diff --git OP/op-node/flags/flags.go CELO/op-node/flags/flags.go +index 9e058350d1e18aaf15300a9d7a6f889736387f3a..59a7b0ef2de8152b87fa13e6677626adbf2bbdcf 100644 +--- OP/op-node/flags/flags.go ++++ CELO/op-node/flags/flags.go +@@ -25,7 +25,7 @@ L1RPCCategory = "2. L1 RPC" + SequencerCategory = "3. SEQUENCER" + OperationsCategory = "4. LOGGING, METRICS, DEBUGGING, AND API" + P2PCategory = "5. PEER-TO-PEER" +- PlasmaCategory = "6. PLASMA (EXPERIMENTAL)" ++ AltDACategory = "6. ALT-DA (EXPERIMENTAL)" + MiscCategory = "7. MISC" + ) +  +@@ -158,7 +158,7 @@ Category: L1RPCCategory, + } + L1RethDBPath = &cli.StringFlag{ + Name: "l1.rethdb", +- Usage: "The L1 RethDB path, used to fetch receipts for L1 blocks. Only applicable when using the `reth_db` RPC kind with `l1.rpckind`.", ++ Usage: "The L1 RethDB path, used to fetch receipts for L1 blocks.", + EnvVars: prefixEnvVars("L1_RETHDB"), + Hidden: true, + Category: L1RPCCategory, +@@ -424,7 +424,7 @@ optionalFlags = append(optionalFlags, oplog.CLIFlagsWithCategory(EnvVarPrefix, OperationsCategory)...) + optionalFlags = append(optionalFlags, oppprof.CLIFlagsWithCategory(EnvVarPrefix, OperationsCategory)...) + optionalFlags = append(optionalFlags, DeprecatedFlags...) + optionalFlags = append(optionalFlags, opflags.CLIFlags(EnvVarPrefix, RollupCategory)...) +- optionalFlags = append(optionalFlags, plasma.CLIFlags(EnvVarPrefix, PlasmaCategory)...) ++ optionalFlags = append(optionalFlags, plasma.CLIFlags(EnvVarPrefix, AltDACategory)...) + Flags = append(requiredFlags, optionalFlags...) + } +
+ [diff viewer header for the next file: OP → CELO, +6 / -1]
diff --git OP/op-node/flags/flags_test.go CELO/op-node/flags/flags_test.go +index c7e87a59830b8eac7ca61404cb862c7b83a90266..3a94041c7e6ff3a12fab822d68177896e5e997f5 100644 +--- OP/op-node/flags/flags_test.go ++++ CELO/op-node/flags/flags_test.go +@@ -5,6 +5,7 @@ "slices" + "strings" + "testing" +  ++ plasma "github.com/ethereum-optimism/optimism/op-plasma" + opservice "github.com/ethereum-optimism/optimism/op-service" +  + "github.com/stretchr/testify/require" +@@ -73,7 +74,11 @@ + func TestHasEnvVar(t *testing.T) { + // known exceptions to the number of env vars + expEnvVars := map[string]int{ +- BeaconFallbackAddrs.Name: 2, ++ BeaconFallbackAddrs.Name: 2, ++ plasma.EnabledFlagName: 2, ++ plasma.DaServerAddressFlagName: 2, ++ plasma.VerifyOnReadFlagName: 2, ++ plasma.DaServiceFlag: 2, + } +  + for _, flag := range Flags {
+ OP | CELO: +9 / -2
diff --git OP/op-node/node/api.go CELO/op-node/node/api.go
+index cb60d22d911b3267678130d1748e76f05c40bc29..a94e2477fe169b23537df5e56caccf5510fa816a 100644
+--- OP/op-node/node/api.go
++++ CELO/op-node/node/api.go
+@@ -5,11 +5,11 @@ "context"
+ "errors"
+ "fmt"
+ 
+- "github.com/ethereum-optimism/optimism/op-node/node/safedb"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/log"
+ 
++ "github.com/ethereum-optimism/optimism/op-node/node/safedb"
+ "github.com/ethereum-optimism/optimism/op-node/rollup"
+ "github.com/ethereum-optimism/optimism/op-node/version"
+ "github.com/ethereum-optimism/optimism/op-service/eth"
+@@ -33,6 +33,7 @@ StartSequencer(ctx context.Context, blockHash common.Hash) error
+ StopSequencer(context.Context) (common.Hash, error)
+ SequencerActive(context.Context) (bool, error)
+ OnUnsafeL2Payload(ctx context.Context, payload *eth.ExecutionPayloadEnvelope) error
++ OverrideLeader(ctx context.Context) error
+ }
+ 
+ type SafeDBReader interface {
+@@ -77,7 +78,6 @@ }
+ 
+ // PostUnsafePayload is a special API that allows posting an unsafe payload to the L2 derivation pipeline.
+ // It should only be used by op-conductor for sequencer failover scenarios.
+-// TODO(ethereum-optimism/optimism#9064): op-conductor Dencun changes.
+ func (n *adminAPI) PostUnsafePayload(ctx context.Context, envelope *eth.ExecutionPayloadEnvelope) error {
+ recordDur := n.M.RecordRPCServerRequest("admin_postUnsafePayload")
+ defer recordDur()
+@@ -89,6 +89,13 @@ return fmt.Errorf("payload has bad block hash: %s, actual block hash is: %s", payload.BlockHash.String(), actual.String())
+ }
+ 
+ return n.dr.OnUnsafeL2Payload(ctx, envelope)
++}
++
++// OverrideLeader disables sequencer conductor interactions and allows the sequencer to run in non-HA mode during disaster recovery scenarios.
++func (n *adminAPI) OverrideLeader(ctx context.Context) error {
++ recordDur := n.M.RecordRPCServerRequest("admin_overrideLeader")
++ defer recordDur()
++ return n.dr.OverrideLeader(ctx)
+ }
+ 
+ type nodeAPI struct {
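The new `OverrideLeader` method is exposed as the `admin_overrideLeader` JSON-RPC method, letting an operator tell a sequencer to ignore the conductor's leadership check during disaster recovery. A minimal sketch of invoking it with the go-ethereum RPC client, assuming only the method name added above (the endpoint is illustrative):

```go
package main

import (
	"context"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://localhost:9545") // hypothetical op-node admin RPC endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// admin_overrideLeader takes no parameters and returns no result.
	if err := client.CallContext(context.Background(), nil, "admin_overrideLeader"); err != nil {
		log.Fatalf("override failed: %v", err)
	}
	log.Println("sequencer leadership check overridden")
}
```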
+ OP | CELO: +28 / -4
diff --git OP/op-node/node/conductor.go CELO/op-node/node/conductor.go +index 938b9f28c5b1d89f2af3f6dc9d5f242b38a3d7c2..20e0638dc6869305b02d17ee524261dcc9e1b08a 100644 +--- OP/op-node/node/conductor.go ++++ CELO/op-node/node/conductor.go +@@ -3,16 +3,17 @@ + import ( + "context" + "fmt" ++ "sync/atomic" + "time" +  ++ "github.com/ethereum/go-ethereum/log" ++ ++ conductorRpc "github.com/ethereum-optimism/optimism/op-conductor/rpc" + "github.com/ethereum-optimism/optimism/op-node/metrics" + "github.com/ethereum-optimism/optimism/op-node/rollup/conductor" + "github.com/ethereum-optimism/optimism/op-service/dial" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/retry" +- "github.com/ethereum/go-ethereum/log" +- +- conductorRpc "github.com/ethereum-optimism/optimism/op-conductor/rpc" + ) +  + // ConductorClient is a client for the op-conductor RPC service. +@@ -21,13 +22,22 @@ cfg *Config + metrics *metrics.Metrics + log log.Logger + apiClient *conductorRpc.APIClient ++ ++ // overrideLeader is used to override the leader check for disaster recovery purposes. ++ // During disaster situations where the cluster is unhealthy (no leader, only 1 or less nodes up), ++ // set this to true to allow the node to assume sequencing responsibilities without being the leader. ++ overrideLeader atomic.Bool + } +  + var _ conductor.SequencerConductor = &ConductorClient{} +  + // NewConductorClient returns a new conductor client for the op-conductor RPC service. + func NewConductorClient(cfg *Config, log log.Logger, metrics *metrics.Metrics) *ConductorClient { +- return &ConductorClient{cfg: cfg, metrics: metrics, log: log} ++ return &ConductorClient{ ++ cfg: cfg, ++ metrics: metrics, ++ log: log, ++ } + } +  + // Initialize initializes the conductor client. +@@ -45,6 +55,10 @@ } +  + // Leader returns true if this node is the leader sequencer. + func (c *ConductorClient) Leader(ctx context.Context) (bool, error) { ++ if c.overrideLeader.Load() { ++ return true, nil ++ } ++ + if err := c.initialize(); err != nil { + return false, err + } +@@ -62,6 +76,10 @@ } +  + // CommitUnsafePayload commits an unsafe payload to the conductor log. + func (c *ConductorClient) CommitUnsafePayload(ctx context.Context, payload *eth.ExecutionPayloadEnvelope) error { ++ if c.overrideLeader.Load() { ++ return nil ++ } ++ + if err := c.initialize(); err != nil { + return err + } +@@ -76,6 +94,12 @@ record(err) + return true, err + }) + return err ++} ++ ++// OverrideLeader implements conductor.SequencerConductor. ++func (c *ConductorClient) OverrideLeader(ctx context.Context) error { ++ c.overrideLeader.Store(true) ++ return nil + } +  + func (c *ConductorClient) Close() {
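The conductor client implements the override with an `atomic.Bool` that short-circuits `Leader` and `CommitUnsafePayload` before any conductor RPC is made, so a lone sequencer can keep producing blocks when the HA cluster is down. A condensed, self-contained sketch of that pattern (the types are simplified stand-ins, not the op-node definitions):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type conductorClient struct {
	// set to true to bypass the conductor during disaster recovery
	overrideLeader atomic.Bool
}

// Leader short-circuits to true once the override is set, skipping the
// usual leadership query against the conductor cluster.
func (c *conductorClient) Leader() bool {
	if c.overrideLeader.Load() {
		return true
	}
	// ... normally: ask the op-conductor RPC whether this node leads ...
	return false
}

// OverrideLeader flips the switch; there is intentionally no way to unset it.
func (c *conductorClient) OverrideLeader() {
	c.overrideLeader.Store(true)
}

func main() {
	var c conductorClient
	fmt.Println("leader before override:", c.Leader()) // false
	c.OverrideLeader()
	fmt.Println("leader after override:", c.Leader()) // true
}
```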
+ OP | CELO: +1 / -1
diff --git OP/op-node/node/config.go CELO/op-node/node/config.go +index 223afaccf0f936649d9824de09172e5e0e0e5eca..0688c8a6cc755ee5c9693ac58a445beabef97ae1 100644 +--- OP/op-node/node/config.go ++++ CELO/op-node/node/config.go +@@ -175,7 +175,7 @@ if err := cfg.Plasma.Check(); err != nil { + return fmt.Errorf("plasma config error: %w", err) + } + if cfg.Plasma.Enabled { +- log.Warn("Plasma Mode is a Beta feature of the MIT licensed OP Stack. While it has received initial review from core contributors, it is still undergoing testing, and may have bugs or other issues.") ++ log.Warn("Alt-DA Mode is a Beta feature of the MIT licensed OP Stack. While it has received initial review from core contributors, it is still undergoing testing, and may have bugs or other issues.") + } + return nil + }
+ OP | CELO: +2 / -1
diff --git OP/op-node/node/node.go CELO/op-node/node/node.go +index f2a62cf58da0da0cd5e295a7ebd0ec7976ef2a57..fb5f981d8f14af1af154b609bc8c0a60a3a79e21 100644 +--- OP/op-node/node/node.go ++++ CELO/op-node/node/node.go +@@ -582,7 +582,8 @@ } +  + n.tracer.OnUnsafeL2Payload(ctx, from, envelope) +  +- n.log.Info("Received signed execution payload from p2p", "id", envelope.ExecutionPayload.ID(), "peer", from) ++ n.log.Info("Received signed execution payload from p2p", "id", envelope.ExecutionPayload.ID(), "peer", from, ++ "txs", len(envelope.ExecutionPayload.Transactions)) +  + // Pass on the event to the L2 Engine + ctx, cancel := context.WithTimeout(ctx, time.Second*30)
+ OP | CELO: +4 / -0
diff --git OP/op-node/node/server_test.go CELO/op-node/node/server_test.go +index 587befd6de805483a76792e483b14d89947299a7..7063b3ed2807cdc52065ffabe37e2b6a5a51f1a4 100644 +--- OP/op-node/node/server_test.go ++++ CELO/op-node/node/server_test.go +@@ -283,6 +283,10 @@ func (c *mockDriverClient) OnUnsafeL2Payload(ctx context.Context, payload *eth.ExecutionPayloadEnvelope) error { + return c.Mock.MethodCalled("OnUnsafeL2Payload").Get(0).(error) + } +  ++func (c *mockDriverClient) OverrideLeader(ctx context.Context) error { ++ return c.Mock.MethodCalled("OverrideLeader").Get(0).(error) ++} ++ + type mockSafeDBReader struct { + mock.Mock + }
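The server test only needs the mock driver to satisfy the widened interface, so `OverrideLeader` is stubbed in the usual testify style: record the call, return whatever the test programmed. A self-contained sketch of that pattern (the driver type here is a trimmed stand-in):

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/stretchr/testify/mock"
)

type mockDriverClient struct {
	mock.Mock
}

// OverrideLeader records the call and returns the programmed error.
func (c *mockDriverClient) OverrideLeader(ctx context.Context) error {
	return c.Mock.MethodCalled("OverrideLeader").Get(0).(error)
}

func main() {
	m := new(mockDriverClient)
	m.On("OverrideLeader").Return(errors.New("conductor unavailable"))
	fmt.Println(m.OverrideLeader(context.Background())) // conductor unavailable
}
```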
+ OP | CELO: +96 / -127
diff --git OP/op-node/rollup/attributes/attributes.go CELO/op-node/rollup/attributes/attributes.go +index 517c91f708633b5a51d981323c97038254a00942..99bd123598ac1ebb78ab1cdf9a526d5d79239a93 100644 +--- OP/op-node/rollup/attributes/attributes.go ++++ CELO/op-node/rollup/attributes/attributes.go +@@ -4,33 +4,18 @@ import ( + "context" + "errors" + "fmt" +- "io" ++ "sync" + "time" +  + "github.com/ethereum/go-ethereum" +- "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" +  + "github.com/ethereum-optimism/optimism/op-node/rollup" +- "github.com/ethereum-optimism/optimism/op-node/rollup/async" +- "github.com/ethereum-optimism/optimism/op-node/rollup/conductor" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-node/rollup/engine" + "github.com/ethereum-optimism/optimism/op-service/eth" + ) +  +-type Engine interface { +- engine.EngineControl +- +- SetUnsafeHead(eth.L2BlockRef) +- SetSafeHead(eth.L2BlockRef) +- SetBackupUnsafeL2Head(block eth.L2BlockRef, triggerReorg bool) +- SetPendingSafeL2Head(eth.L2BlockRef) +- +- PendingSafeL2Head() eth.L2BlockRef +- BackupUnsafeL2Head() eth.L2BlockRef +-} +- + type L2 interface { + PayloadByNumber(context.Context, uint64) (*eth.ExecutionPayloadEnvelope, error) + } +@@ -39,150 +24,134 @@ type AttributesHandler struct { + log log.Logger + cfg *rollup.Config +  +- ec Engine ++ // when the rollup node shuts down, stop any in-flight sub-processes of the attributes-handler ++ ctx context.Context ++ + l2 L2 +  ++ mu sync.Mutex ++ ++ emitter rollup.EventEmitter ++ + attributes *derive.AttributesWithParent + } +  +-func NewAttributesHandler(log log.Logger, cfg *rollup.Config, ec Engine, l2 L2) *AttributesHandler { ++func NewAttributesHandler(log log.Logger, cfg *rollup.Config, ctx context.Context, l2 L2, emitter rollup.EventEmitter) *AttributesHandler { + return &AttributesHandler{ + log: log, + cfg: cfg, +- ec: ec, ++ ctx: ctx, + l2: l2, ++ emitter: emitter, + attributes: nil, + } + } +  +-func (eq *AttributesHandler) HasAttributes() bool { +- return eq.attributes != nil +-} ++func (eq *AttributesHandler) OnEvent(ev rollup.Event) { ++ // Events may be concurrent in the future. Prevent unsafe concurrent modifications to the attributes. ++ eq.mu.Lock() ++ defer eq.mu.Unlock() +  +-func (eq *AttributesHandler) SetAttributes(attributes *derive.AttributesWithParent) { +- eq.attributes = attributes ++ switch x := ev.(type) { ++ case engine.PendingSafeUpdateEvent: ++ eq.onPendingSafeUpdate(x) ++ case derive.DerivedAttributesEvent: ++ eq.attributes = x.Attributes ++ eq.emitter.Emit(derive.ConfirmReceivedAttributesEvent{}) ++ // to make sure we have a pre-state signal to process the attributes from ++ eq.emitter.Emit(engine.PendingSafeRequestEvent{}) ++ case engine.InvalidPayloadAttributesEvent: ++ // If the engine signals that attributes are invalid, ++ // that should match our last applied attributes, which we should thus drop. ++ eq.attributes = nil ++ // Time to re-evaluate without attributes. ++ // (the pending-safe state will then be forwarded to our source of attributes). ++ eq.emitter.Emit(engine.PendingSafeRequestEvent{}) ++ } + } +  +-// Proceed processes block attributes, if any. +-// Proceed returns io.EOF if there are no attributes to process. +-// Proceed returns a temporary, reset, or critical error like other derivers. +-// Proceed returns no error if the safe-head may have changed. 
+-func (eq *AttributesHandler) Proceed(ctx context.Context) error { ++// onPendingSafeUpdate applies the queued-up block attributes, if any, on top of the signaled pending state. ++// The event is also used to clear the queued-up attributes, when successfully processed. ++// On processing failure this may emit a temporary, reset, or critical error like other derivers. ++func (eq *AttributesHandler) onPendingSafeUpdate(x engine.PendingSafeUpdateEvent) { ++ if x.Unsafe.Number < x.PendingSafe.Number { ++ // invalid chain state, reset to try and fix it ++ eq.emitter.Emit(rollup.ResetEvent{Err: fmt.Errorf("pending-safe label (%d) may not be ahead of unsafe head label (%d)", x.PendingSafe.Number, x.Unsafe.Number)}) ++ return ++ } ++ + if eq.attributes == nil { +- return io.EOF ++ // Request new attributes to be generated, only if we don't currently have attributes that have yet to be processed. ++ // It is safe to request the pipeline, the attributes-handler is the only user of it, ++ // and the pipeline will not generate another set of attributes until the last set is recognized. ++ eq.emitter.Emit(derive.PipelineStepEvent{PendingSafe: x.PendingSafe}) ++ return + } +- // validate the safe attributes before processing them. The engine may have completed processing them through other means. +- if eq.ec.PendingSafeL2Head() != eq.attributes.Parent { +- // Previously the attribute's parent was the pending safe head. If the pending safe head advances so pending safe head's parent is the same as the +- // attribute's parent then we need to cancel the attributes. +- if eq.ec.PendingSafeL2Head().ParentHash == eq.attributes.Parent.Hash { +- eq.log.Warn("queued safe attributes are stale, safehead progressed", +- "pending_safe_head", eq.ec.PendingSafeL2Head(), "pending_safe_head_parent", eq.ec.PendingSafeL2Head().ParentID(), +- "attributes_parent", eq.attributes.Parent) +- eq.attributes = nil +- return nil +- } +- // If something other than a simple advance occurred, perform a full reset +- return derive.NewResetError(fmt.Errorf("pending safe head changed to %s with parent %s, conflicting with queued safe attributes on top of %s", +- eq.ec.PendingSafeL2Head(), eq.ec.PendingSafeL2Head().ParentID(), eq.attributes.Parent)) ++ ++ // Drop attributes if they don't apply on top of the pending safe head ++ if eq.attributes.Parent.Number != x.PendingSafe.Number { ++ eq.log.Warn("dropping stale attributes", ++ "pending", x.PendingSafe, "attributes_parent", eq.attributes.Parent) ++ eq.attributes = nil ++ return + } +- if eq.ec.PendingSafeL2Head().Number < eq.ec.UnsafeL2Head().Number { +- if err := eq.consolidateNextSafeAttributes(ctx, eq.attributes); err != nil { +- return err +- } +- eq.attributes = nil +- return nil +- } else if eq.ec.PendingSafeL2Head().Number == eq.ec.UnsafeL2Head().Number { +- if err := eq.forceNextSafeAttributes(ctx, eq.attributes); err != nil { +- return err ++ ++ if eq.attributes.Parent != x.PendingSafe { ++ // If the attributes are supposed to follow the pending safe head, but don't build on the exact block, ++ // then there's some reorg inconsistency. Either bad attributes, or bad pending safe head. ++ // Trigger a reset, and the system can derive attributes on top of the pending safe head. ++ // Until the reset is complete we don't clear the attributes state, ++ // so we can re-emit the ResetEvent until the reset actually happens. 
++ ++ eq.emitter.Emit(rollup.ResetEvent{Err: fmt.Errorf("pending safe head changed to %s with parent %s, conflicting with queued safe attributes on top of %s", ++ x.PendingSafe, x.PendingSafe.ParentID(), eq.attributes.Parent)}) ++ } else { ++ // if there already exists a block we can just consolidate it ++ if x.PendingSafe.Number < x.Unsafe.Number { ++ eq.consolidateNextSafeAttributes(eq.attributes, x.PendingSafe) ++ } else { ++ // append to tip otherwise ++ eq.emitter.Emit(engine.ProcessAttributesEvent{Attributes: eq.attributes}) + } +- eq.attributes = nil +- return nil +- } else { +- // For some reason the unsafe head is behind the pending safe head. Log it, and correct it. +- eq.log.Error("invalid sync state, unsafe head is behind pending safe head", "unsafe", eq.ec.UnsafeL2Head(), "pending_safe", eq.ec.PendingSafeL2Head()) +- eq.ec.SetUnsafeHead(eq.ec.PendingSafeL2Head()) +- return nil + } + } +  + // consolidateNextSafeAttributes tries to match the next safe attributes against the existing unsafe chain, + // to avoid extra processing or unnecessary unwinding of the chain. +-// However, if the attributes do not match, they will be forced with forceNextSafeAttributes. +-func (eq *AttributesHandler) consolidateNextSafeAttributes(ctx context.Context, attributes *derive.AttributesWithParent) error { +- ctx, cancel := context.WithTimeout(ctx, time.Second*10) ++// However, if the attributes do not match, they will be forced to process the attributes. ++func (eq *AttributesHandler) consolidateNextSafeAttributes(attributes *derive.AttributesWithParent, onto eth.L2BlockRef) { ++ ctx, cancel := context.WithTimeout(eq.ctx, time.Second*10) + defer cancel() +  +- envelope, err := eq.l2.PayloadByNumber(ctx, eq.ec.PendingSafeL2Head().Number+1) ++ envelope, err := eq.l2.PayloadByNumber(ctx, attributes.Parent.Number+1) + if err != nil { + if errors.Is(err, ethereum.NotFound) { + // engine may have restarted, or inconsistent safe head. 
We need to reset +- return derive.NewResetError(fmt.Errorf("expected engine was synced and had unsafe block to reconcile, but cannot find the block: %w", err)) ++ eq.emitter.Emit(rollup.ResetEvent{Err: fmt.Errorf("expected engine was synced and had unsafe block to reconcile, but cannot find the block: %w", err)}) ++ return + } +- return derive.NewTemporaryError(fmt.Errorf("failed to get existing unsafe payload to compare against derived attributes from L1: %w", err)) ++ eq.emitter.Emit(rollup.EngineTemporaryErrorEvent{Err: fmt.Errorf("failed to get existing unsafe payload to compare against derived attributes from L1: %w", err)}) ++ return + } +- if err := AttributesMatchBlock(eq.cfg, attributes.Attributes, eq.ec.PendingSafeL2Head().Hash, envelope, eq.log); err != nil { +- eq.log.Warn("L2 reorg: existing unsafe block does not match derived attributes from L1", "err", err, "unsafe", eq.ec.UnsafeL2Head(), "pending_safe", eq.ec.PendingSafeL2Head(), "safe", eq.ec.SafeL2Head()) ++ if err := AttributesMatchBlock(eq.cfg, attributes.Attributes, onto.Hash, envelope, eq.log); err != nil { ++ eq.log.Warn("L2 reorg: existing unsafe block does not match derived attributes from L1", ++ "err", err, "unsafe", envelope.ExecutionPayload.ID(), "pending_safe", onto) ++ + // geth cannot wind back a chain without reorging to a new, previously non-canonical, block +- return eq.forceNextSafeAttributes(ctx, attributes) ++ eq.emitter.Emit(engine.ProcessAttributesEvent{Attributes: attributes}) ++ return ++ } else { ++ ref, err := derive.PayloadToBlockRef(eq.cfg, envelope.ExecutionPayload) ++ if err != nil { ++ eq.log.Error("Failed to compute block-ref from execution payload") ++ return ++ } ++ eq.emitter.Emit(engine.PromotePendingSafeEvent{ ++ Ref: ref, ++ Safe: attributes.IsLastInSpan, ++ DerivedFrom: attributes.DerivedFrom, ++ }) + } +- ref, err := derive.PayloadToBlockRef(eq.cfg, envelope.ExecutionPayload) +- if err != nil { +- return derive.NewResetError(fmt.Errorf("failed to decode L2 block ref from payload: %w", err)) +- } +- eq.ec.SetPendingSafeL2Head(ref) +- if attributes.IsLastInSpan { +- eq.ec.SetSafeHead(ref) +- } ++ + // unsafe head stays the same, we did not reorg the chain. +- return nil +-} +- +-// forceNextSafeAttributes inserts the provided attributes, reorging away any conflicting unsafe chain. +-func (eq *AttributesHandler) forceNextSafeAttributes(ctx context.Context, attributes *derive.AttributesWithParent) error { +- attrs := attributes.Attributes +- errType, err := eq.ec.StartPayload(ctx, eq.ec.PendingSafeL2Head(), attributes, true) +- if err == nil { +- _, errType, err = eq.ec.ConfirmPayload(ctx, async.NoOpGossiper{}, &conductor.NoOpConductor{}) +- } +- if err != nil { +- switch errType { +- case engine.BlockInsertTemporaryErr: +- // RPC errors are recoverable, we can retry the buffered payload attributes later. +- return derive.NewTemporaryError(fmt.Errorf("temporarily cannot insert new safe block: %w", err)) +- case engine.BlockInsertPrestateErr: +- _ = eq.ec.CancelPayload(ctx, true) +- return derive.NewResetError(fmt.Errorf("need reset to resolve pre-state problem: %w", err)) +- case engine.BlockInsertPayloadErr: +- _ = eq.ec.CancelPayload(ctx, true) +- eq.log.Warn("could not process payload derived from L1 data, dropping batch", "err", err) +- // Count the number of deposits to see if the tx list is deposit only. 
+- depositCount := 0 +- for _, tx := range attrs.Transactions { +- if len(tx) > 0 && tx[0] == types.DepositTxType { +- depositCount += 1 +- } +- } +- // Deposit transaction execution errors are suppressed in the execution engine, but if the +- // block is somehow invalid, there is nothing we can do to recover & we should exit. +- if len(attrs.Transactions) == depositCount { +- eq.log.Error("deposit only block was invalid", "parent", attributes.Parent, "err", err) +- return derive.NewCriticalError(fmt.Errorf("failed to process block with only deposit transactions: %w", err)) +- } +- // Revert the pending safe head to the safe head. +- eq.ec.SetPendingSafeL2Head(eq.ec.SafeL2Head()) +- // suppress the error b/c we want to retry with the next batch from the batch queue +- // If there is no valid batch the node will eventually force a deposit only block. If +- // the deposit only block fails, this will return the critical error above. +- +- // Try to restore to previous known unsafe chain. +- eq.ec.SetBackupUnsafeL2Head(eq.ec.BackupUnsafeL2Head(), true) +- +- // drop the payload (by returning no error) without inserting it into the engine +- return nil +- default: +- return derive.NewCriticalError(fmt.Errorf("unknown InsertHeadBlock error type %d: %w", errType, err)) +- } +- } +- return nil + }
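This is the largest upstream change pulled in here: the attributes-handler stops driving a blocking `Proceed` loop against an `Engine` interface and instead reacts to incoming events, emitting follow-up events through an `rollup.EventEmitter`. A toy, self-contained sketch of that deriver shape (all types below are simplified stand-ins, not the op-node definitions):

```go
package main

import "fmt"

type Event interface{ String() string }

type EventEmitter interface{ Emit(ev Event) }

type PendingSafeRequestEvent struct{}

func (PendingSafeRequestEvent) String() string { return "pending-safe-request" }

type DerivedAttributesEvent struct{ Attributes string }

func (DerivedAttributesEvent) String() string { return "derived-attributes" }

// attributesHandler queues derived attributes and asks for a pre-state
// signal, mirroring the OnEvent switch in the diff above.
type attributesHandler struct {
	emitter    EventEmitter
	attributes *string
}

func (eq *attributesHandler) OnEvent(ev Event) {
	switch x := ev.(type) {
	case DerivedAttributesEvent:
		eq.attributes = &x.Attributes
		// request the pending-safe pre-state before processing the attributes
		eq.emitter.Emit(PendingSafeRequestEvent{})
	}
}

type printEmitter struct{}

func (printEmitter) Emit(ev Event) { fmt.Println("emitted:", ev) }

func main() {
	ah := &attributesHandler{emitter: printEmitter{}}
	ah.OnEvent(DerivedAttributesEvent{Attributes: "payload-attributes-for-block-1"})
	fmt.Println("queued:", *ah.attributes)
}
```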
+ OP | CELO: +165 / -192
diff --git OP/op-node/rollup/attributes/attributes_test.go CELO/op-node/rollup/attributes/attributes_test.go +index 62931af8c3786f351a55bfbf338fac3b9251c674..1833604ff31755ac9df06190a88c28270a50fa97 100644 +--- OP/op-node/rollup/attributes/attributes_test.go ++++ CELO/op-node/rollup/attributes/attributes_test.go +@@ -2,7 +2,6 @@ package attributes +  + import ( + "context" +- "io" + "math/big" + "math/rand" // nosemgrep + "testing" +@@ -14,11 +13,9 @@ "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" +  +- "github.com/ethereum-optimism/optimism/op-node/metrics" + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-node/rollup/engine" +- "github.com/ethereum-optimism/optimism/op-node/rollup/sync" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum-optimism/optimism/op-service/testutils" +@@ -27,6 +24,13 @@ + func TestAttributesHandler(t *testing.T) { + rng := rand.New(rand.NewSource(1234)) + refA := testutils.RandomBlockRef(rng) ++ ++ refB := eth.L1BlockRef{ ++ Hash: testutils.RandomHash(rng), ++ Number: refA.Number + 1, ++ ParentHash: refA.Hash, ++ Time: refA.Time + 12, ++ } +  + aL1Info := &testutils.MockBlockInfo{ + InfoParentHash: refA.ParentHash, +@@ -153,161 +157,149 @@ + refA1Alt, err := derive.PayloadToBlockRef(cfg, payloadA1Alt.ExecutionPayload) + require.NoError(t, err) +  +- refA2 := eth.L2BlockRef{ +- Hash: testutils.RandomHash(rng), +- Number: refA1.Number + 1, +- ParentHash: refA1.Hash, +- Time: refA1.Time + cfg.BlockTime, +- L1Origin: refA.ID(), +- SequenceNumber: 1, +- } ++ t.Run("drop invalid attributes", func(t *testing.T) { ++ logger := testlog.Logger(t, log.LevelInfo) ++ l2 := &testutils.MockL2Client{} ++ emitter := &testutils.MockEmitter{} ++ ah := NewAttributesHandler(logger, cfg, context.Background(), l2, emitter) +  +- a2L1Info, err := derive.L1InfoDepositBytes(cfg, cfg.Genesis.SystemConfig, refA2.SequenceNumber, aL1Info, refA2.Time) +- require.NoError(t, err) +- attrA2 := &derive.AttributesWithParent{ +- Attributes: &eth.PayloadAttributes{ +- Timestamp: eth.Uint64Quantity(refA2.Time), +- PrevRandao: eth.Bytes32{}, +- SuggestedFeeRecipient: common.Address{}, +- Withdrawals: nil, +- ParentBeaconBlockRoot: &common.Hash{}, +- Transactions: []eth.Data{a2L1Info}, +- NoTxPool: false, +- GasLimit: &gasLimit, +- }, +- Parent: refA1, +- IsLastInSpan: true, +- } ++ emitter.ExpectOnce(derive.ConfirmReceivedAttributesEvent{}) ++ emitter.ExpectOnce(engine.PendingSafeRequestEvent{}) ++ ah.OnEvent(derive.DerivedAttributesEvent{ ++ Attributes: attrA1, ++ }) ++ emitter.AssertExpectations(t) ++ require.NotNil(t, ah.attributes, "queue the invalid attributes") +  ++ emitter.ExpectOnce(engine.PendingSafeRequestEvent{}) ++ ah.OnEvent(engine.InvalidPayloadAttributesEvent{ ++ Attributes: attrA1, ++ }) ++ emitter.AssertExpectations(t) ++ require.Nil(t, ah.attributes, "drop the invalid attributes") ++ }) + t.Run("drop stale attributes", func(t *testing.T) { + logger := testlog.Logger(t, log.LevelInfo) +- eng := &testutils.MockEngine{} +- ec := engine.NewEngineController(eng, logger, metrics.NoopMetrics, cfg, sync.CLSync) +- ah := NewAttributesHandler(logger, cfg, ec, eng) +- defer eng.AssertExpectations(t) ++ l2 := &testutils.MockL2Client{} ++ emitter := &testutils.MockEmitter{} ++ ah := NewAttributesHandler(logger, 
cfg, context.Background(), l2, emitter) +  +- ec.SetPendingSafeL2Head(refA1Alt) +- ah.SetAttributes(attrA1) +- require.True(t, ah.HasAttributes()) +- require.NoError(t, ah.Proceed(context.Background()), "drop stale attributes") +- require.False(t, ah.HasAttributes()) ++ emitter.ExpectOnce(derive.ConfirmReceivedAttributesEvent{}) ++ emitter.ExpectOnce(engine.PendingSafeRequestEvent{}) ++ ah.OnEvent(derive.DerivedAttributesEvent{ ++ Attributes: attrA1, ++ }) ++ emitter.AssertExpectations(t) ++ require.NotNil(t, ah.attributes) ++ ah.OnEvent(engine.PendingSafeUpdateEvent{ ++ PendingSafe: refA1Alt, ++ Unsafe: refA1Alt, ++ }) ++ l2.AssertExpectations(t) ++ emitter.AssertExpectations(t) ++ require.Nil(t, ah.attributes, "drop stale attributes") + }) +  + t.Run("pending gets reorged", func(t *testing.T) { + logger := testlog.Logger(t, log.LevelInfo) +- eng := &testutils.MockEngine{} +- ec := engine.NewEngineController(eng, logger, metrics.NoopMetrics, cfg, sync.CLSync) +- ah := NewAttributesHandler(logger, cfg, ec, eng) +- defer eng.AssertExpectations(t) ++ l2 := &testutils.MockL2Client{} ++ emitter := &testutils.MockEmitter{} ++ ah := NewAttributesHandler(logger, cfg, context.Background(), l2, emitter) ++ ++ emitter.ExpectOnce(derive.ConfirmReceivedAttributesEvent{}) ++ emitter.ExpectOnce(engine.PendingSafeRequestEvent{}) ++ ah.OnEvent(derive.DerivedAttributesEvent{ ++ Attributes: attrA1, ++ }) ++ emitter.AssertExpectations(t) ++ require.NotNil(t, ah.attributes) +  +- ec.SetPendingSafeL2Head(refA0Alt) +- ah.SetAttributes(attrA1) +- require.True(t, ah.HasAttributes()) +- require.ErrorIs(t, ah.Proceed(context.Background()), derive.ErrReset, "A1 does not fit on A0Alt") +- require.True(t, ah.HasAttributes(), "detected reorg does not clear state, reset is required") ++ emitter.ExpectOnceType("ResetEvent") ++ ah.OnEvent(engine.PendingSafeUpdateEvent{ ++ PendingSafe: refA0Alt, ++ Unsafe: refA0Alt, ++ }) ++ l2.AssertExpectations(t) ++ emitter.AssertExpectations(t) ++ require.NotNil(t, ah.attributes, "detected reorg does not clear state, reset is required") + }) +  + t.Run("pending older than unsafe", func(t *testing.T) { + t.Run("consolidation fails", func(t *testing.T) { + logger := testlog.Logger(t, log.LevelInfo) +- eng := &testutils.MockEngine{} +- ec := engine.NewEngineController(eng, logger, metrics.NoopMetrics, cfg, sync.CLSync) +- ah := NewAttributesHandler(logger, cfg, ec, eng) ++ l2 := &testutils.MockL2Client{} ++ emitter := &testutils.MockEmitter{} ++ ah := NewAttributesHandler(logger, cfg, context.Background(), l2, emitter) +  +- ec.SetUnsafeHead(refA1) +- ec.SetSafeHead(refA0) +- ec.SetFinalizedHead(refA0) +- ec.SetPendingSafeL2Head(refA0) +- +- defer eng.AssertExpectations(t) ++ // attrA1Alt does not match block A1, so will cause force-reorg. ++ emitter.ExpectOnce(derive.ConfirmReceivedAttributesEvent{}) ++ emitter.ExpectOnce(engine.PendingSafeRequestEvent{}) ++ ah.OnEvent(derive.DerivedAttributesEvent{Attributes: attrA1Alt}) ++ emitter.AssertExpectations(t) ++ require.NotNil(t, ah.attributes, "queued up derived attributes") +  + // Call during consolidation. 
+ // The payloadA1 is going to get reorged out in favor of attrA1Alt (turns into payloadA1Alt) +- eng.ExpectPayloadByNumber(refA1.Number, payloadA1, nil) ++ l2.ExpectPayloadByNumber(refA1.Number, payloadA1, nil) ++ // fail consolidation, perform force reorg ++ emitter.ExpectOnce(engine.ProcessAttributesEvent{Attributes: attrA1Alt}) ++ ah.OnEvent(engine.PendingSafeUpdateEvent{ ++ PendingSafe: refA0, ++ Unsafe: refA1, ++ }) ++ l2.AssertExpectations(t) ++ emitter.AssertExpectations(t) ++ require.NotNil(t, ah.attributes, "still have attributes, processing still unconfirmed") +  +- // attrA1Alt does not match block A1, so will cause force-reorg. +- { +- eng.ExpectForkchoiceUpdate(&eth.ForkchoiceState{ +- HeadBlockHash: payloadA1Alt.ExecutionPayload.ParentHash, // reorg +- SafeBlockHash: refA0.Hash, +- FinalizedBlockHash: refA0.Hash, +- }, attrA1Alt.Attributes, &eth.ForkchoiceUpdatedResult{ +- PayloadStatus: eth.PayloadStatusV1{Status: eth.ExecutionValid}, +- PayloadID: &eth.PayloadID{1, 2, 3}, +- }, nil) // to build the block +- eng.ExpectGetPayload(eth.PayloadID{1, 2, 3}, payloadA1Alt, nil) +- eng.ExpectNewPayload(payloadA1Alt.ExecutionPayload, payloadA1Alt.ParentBeaconBlockRoot, +- &eth.PayloadStatusV1{Status: eth.ExecutionValid}, nil) // to persist the block +- eng.ExpectForkchoiceUpdate(&eth.ForkchoiceState{ +- HeadBlockHash: payloadA1Alt.ExecutionPayload.BlockHash, +- SafeBlockHash: payloadA1Alt.ExecutionPayload.BlockHash, +- FinalizedBlockHash: refA0.Hash, +- }, nil, &eth.ForkchoiceUpdatedResult{ +- PayloadStatus: eth.PayloadStatusV1{Status: eth.ExecutionValid}, +- PayloadID: nil, +- }, nil) // to make it canonical +- } +- +- ah.SetAttributes(attrA1Alt) +- +- require.True(t, ah.HasAttributes()) +- require.NoError(t, ah.Proceed(context.Background()), "fail consolidation, perform force reorg") +- require.False(t, ah.HasAttributes()) +- +- require.Equal(t, refA1Alt.Hash, payloadA1Alt.ExecutionPayload.BlockHash, "hash") +- t.Log("ref A1: ", refA1.Hash) +- t.Log("ref A0: ", refA0.Hash) +- t.Log("ref alt: ", refA1Alt.Hash) +- require.Equal(t, refA1Alt, ec.UnsafeL2Head(), "unsafe head reorg complete") +- require.Equal(t, refA1Alt, ec.SafeL2Head(), "safe head reorg complete and updated") ++ // recognize reorg as complete ++ ah.OnEvent(engine.PendingSafeUpdateEvent{ ++ PendingSafe: refA1Alt, ++ Unsafe: refA1Alt, ++ }) ++ emitter.AssertExpectations(t) ++ require.Nil(t, ah.attributes, "drop when attributes are successful") + }) + t.Run("consolidation passes", func(t *testing.T) { + fn := func(t *testing.T, lastInSpan bool) { + logger := testlog.Logger(t, log.LevelInfo) +- eng := &testutils.MockEngine{} +- ec := engine.NewEngineController(eng, logger, metrics.NoopMetrics, cfg, sync.CLSync) +- ah := NewAttributesHandler(logger, cfg, ec, eng) +- +- ec.SetUnsafeHead(refA1) +- ec.SetSafeHead(refA0) +- ec.SetFinalizedHead(refA0) +- ec.SetPendingSafeL2Head(refA0) +- +- defer eng.AssertExpectations(t) +- +- // Call during consolidation. 
+- eng.ExpectPayloadByNumber(refA1.Number, payloadA1, nil) +- +- expectedSafeHash := refA0.Hash +- if lastInSpan { // if last in span, then it becomes safe +- expectedSafeHash = refA1.Hash +- } +- eng.ExpectForkchoiceUpdate(&eth.ForkchoiceState{ +- HeadBlockHash: refA1.Hash, +- SafeBlockHash: expectedSafeHash, +- FinalizedBlockHash: refA0.Hash, +- }, nil, &eth.ForkchoiceUpdatedResult{ +- PayloadStatus: eth.PayloadStatusV1{Status: eth.ExecutionValid}, +- PayloadID: nil, +- }, nil) ++ l2 := &testutils.MockL2Client{} ++ emitter := &testutils.MockEmitter{} ++ ah := NewAttributesHandler(logger, cfg, context.Background(), l2, emitter) +  + attr := &derive.AttributesWithParent{ + Attributes: attrA1.Attributes, // attributes will match, passing consolidation + Parent: attrA1.Parent, + IsLastInSpan: lastInSpan, ++ DerivedFrom: refB, + } +- ah.SetAttributes(attr) ++ emitter.ExpectOnce(derive.ConfirmReceivedAttributesEvent{}) ++ emitter.ExpectOnce(engine.PendingSafeRequestEvent{}) ++ ah.OnEvent(derive.DerivedAttributesEvent{Attributes: attr}) ++ emitter.AssertExpectations(t) ++ require.NotNil(t, ah.attributes, "queued up derived attributes") ++ ++ // Call during consolidation. ++ l2.ExpectPayloadByNumber(refA1.Number, payloadA1, nil) ++ ++ emitter.ExpectOnce(engine.PromotePendingSafeEvent{ ++ Ref: refA1, ++ Safe: lastInSpan, // last in span becomes safe instantaneously ++ DerivedFrom: refB, ++ }) ++ ah.OnEvent(engine.PendingSafeUpdateEvent{ ++ PendingSafe: refA0, ++ Unsafe: refA1, ++ }) ++ l2.AssertExpectations(t) ++ emitter.AssertExpectations(t) ++ require.NotNil(t, ah.attributes, "still have attributes, processing still unconfirmed") +  +- require.True(t, ah.HasAttributes()) +- require.NoError(t, ah.Proceed(context.Background()), "consolidate") +- require.False(t, ah.HasAttributes()) +- require.NoError(t, ec.TryUpdateEngine(context.Background()), "update to handle safe bump (lastinspan case)") +- if lastInSpan { +- require.Equal(t, refA1, ec.SafeL2Head(), "last in span becomes safe instantaneously") +- } else { +- require.Equal(t, refA1, ec.PendingSafeL2Head(), "pending as safe") +- require.Equal(t, refA0, ec.SafeL2Head(), "A1 not yet safe") +- } ++ ah.OnEvent(engine.PendingSafeUpdateEvent{ ++ PendingSafe: refA1, ++ Unsafe: refA1, ++ }) ++ emitter.AssertExpectations(t) ++ require.Nil(t, ah.attributes, "drop when attributes are successful") + } + t.Run("is last span", func(t *testing.T) { + fn(t, true) +@@ -321,89 +313,70 @@ }) +  + t.Run("pending equals unsafe", func(t *testing.T) { + // no consolidation to do, just force next attributes on tip of chain +- + logger := testlog.Logger(t, log.LevelInfo) +- eng := &testutils.MockEngine{} +- ec := engine.NewEngineController(eng, logger, metrics.NoopMetrics, cfg, sync.CLSync) +- ah := NewAttributesHandler(logger, cfg, ec, eng) +- +- ec.SetUnsafeHead(refA0) +- ec.SetSafeHead(refA0) +- ec.SetFinalizedHead(refA0) +- ec.SetPendingSafeL2Head(refA0) ++ l2 := &testutils.MockL2Client{} ++ emitter := &testutils.MockEmitter{} ++ ah := NewAttributesHandler(logger, cfg, context.Background(), l2, emitter) +  +- defer eng.AssertExpectations(t) ++ emitter.ExpectOnce(derive.ConfirmReceivedAttributesEvent{}) ++ emitter.ExpectOnce(engine.PendingSafeRequestEvent{}) ++ ah.OnEvent(derive.DerivedAttributesEvent{Attributes: attrA1Alt}) ++ emitter.AssertExpectations(t) ++ require.NotNil(t, ah.attributes, "queued up derived attributes") +  + // sanity check test setup + require.True(t, attrA1Alt.IsLastInSpan, "must be last in span for attributes to become safe") +  +- // process 
attrA1Alt on top +- { +- eng.ExpectForkchoiceUpdate(&eth.ForkchoiceState{ +- HeadBlockHash: payloadA1Alt.ExecutionPayload.ParentHash, // reorg +- SafeBlockHash: refA0.Hash, +- FinalizedBlockHash: refA0.Hash, +- }, attrA1Alt.Attributes, &eth.ForkchoiceUpdatedResult{ +- PayloadStatus: eth.PayloadStatusV1{Status: eth.ExecutionValid}, +- PayloadID: &eth.PayloadID{1, 2, 3}, +- }, nil) // to build the block +- eng.ExpectGetPayload(eth.PayloadID{1, 2, 3}, payloadA1Alt, nil) +- eng.ExpectNewPayload(payloadA1Alt.ExecutionPayload, payloadA1Alt.ParentBeaconBlockRoot, +- &eth.PayloadStatusV1{Status: eth.ExecutionValid}, nil) // to persist the block +- eng.ExpectForkchoiceUpdate(&eth.ForkchoiceState{ +- HeadBlockHash: payloadA1Alt.ExecutionPayload.BlockHash, +- SafeBlockHash: payloadA1Alt.ExecutionPayload.BlockHash, // it becomes safe +- FinalizedBlockHash: refA0.Hash, +- }, nil, &eth.ForkchoiceUpdatedResult{ +- PayloadStatus: eth.PayloadStatusV1{Status: eth.ExecutionValid}, +- PayloadID: nil, +- }, nil) // to make it canonical +- } ++ // attrA1Alt will fit right on top of A0 ++ emitter.ExpectOnce(engine.ProcessAttributesEvent{Attributes: attrA1Alt}) ++ ah.OnEvent(engine.PendingSafeUpdateEvent{ ++ PendingSafe: refA0, ++ Unsafe: refA0, ++ }) ++ l2.AssertExpectations(t) ++ emitter.AssertExpectations(t) ++ require.NotNil(t, ah.attributes) +  +- ah.SetAttributes(attrA1Alt) +- +- require.True(t, ah.HasAttributes()) +- require.NoError(t, ah.Proceed(context.Background()), "insert new block") +- require.False(t, ah.HasAttributes()) +- +- require.Equal(t, refA1Alt, ec.SafeL2Head(), "processing complete") ++ ah.OnEvent(engine.PendingSafeUpdateEvent{ ++ PendingSafe: refA1Alt, ++ Unsafe: refA1Alt, ++ }) ++ emitter.AssertExpectations(t) ++ require.Nil(t, ah.attributes, "clear attributes after successful processing") + }) +  + t.Run("pending ahead of unsafe", func(t *testing.T) { + // Legacy test case: if attributes fit on top of the pending safe block as expected, +- // but if the unsafe block is older, then we can recover by updating the unsafe head. +- ++ // but if the unsafe block is older, then we can recover by resetting. 
+ logger := testlog.Logger(t, log.LevelInfo) +- eng := &testutils.MockEngine{} +- ec := engine.NewEngineController(eng, logger, metrics.NoopMetrics, cfg, sync.CLSync) +- ah := NewAttributesHandler(logger, cfg, ec, eng) +- +- ec.SetUnsafeHead(refA0) +- ec.SetSafeHead(refA0) +- ec.SetFinalizedHead(refA0) +- ec.SetPendingSafeL2Head(refA1) +- +- defer eng.AssertExpectations(t) +- +- ah.SetAttributes(attrA2) ++ l2 := &testutils.MockL2Client{} ++ emitter := &testutils.MockEmitter{} ++ ah := NewAttributesHandler(logger, cfg, context.Background(), l2, emitter) +  +- require.True(t, ah.HasAttributes()) +- require.NoError(t, ah.Proceed(context.Background()), "detect unsafe - pending safe inconsistency") +- require.True(t, ah.HasAttributes(), "still need the attributes, after unsafe head is corrected") +- +- require.Equal(t, refA0, ec.SafeL2Head(), "still same safe head") +- require.Equal(t, refA1, ec.PendingSafeL2Head(), "still same pending safe head") +- require.Equal(t, refA1, ec.UnsafeL2Head(), "updated unsafe head") ++ emitter.ExpectOnceType("ResetEvent") ++ ah.OnEvent(engine.PendingSafeUpdateEvent{ ++ PendingSafe: refA1, ++ Unsafe: refA0, ++ }) ++ emitter.AssertExpectations(t) ++ l2.AssertExpectations(t) + }) +  + t.Run("no attributes", func(t *testing.T) { + logger := testlog.Logger(t, log.LevelInfo) +- eng := &testutils.MockEngine{} +- ec := engine.NewEngineController(eng, logger, metrics.NoopMetrics, cfg, sync.CLSync) +- ah := NewAttributesHandler(logger, cfg, ec, eng) +- defer eng.AssertExpectations(t) ++ l2 := &testutils.MockL2Client{} ++ emitter := &testutils.MockEmitter{} ++ ah := NewAttributesHandler(logger, cfg, context.Background(), l2, emitter) +  +- require.Equal(t, ah.Proceed(context.Background()), io.EOF, "no attributes to process") ++ // If there are no attributes, we expect the pipeline to be requested to generate attributes. ++ emitter.ExpectOnce(derive.PipelineStepEvent{PendingSafe: refA1}) ++ ah.OnEvent(engine.PendingSafeUpdateEvent{ ++ PendingSafe: refA1, ++ Unsafe: refA1, ++ }) ++ // no calls to L2 or emitter when there is nothing to process ++ l2.AssertExpectations(t) ++ emitter.AssertExpectations(t) + }) +  + }
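The rewritten tests follow the event model too: instead of programming a mock engine, they enqueue the events the handler is expected to emit, feed it input events, and assert the expectations drained. A hand-rolled sketch of that expectation-driven emitter style, standing in for `testutils.MockEmitter`:

```go
package main

import "fmt"

type Event interface{ String() string }

// mockEmitter is a stand-in for testutils.MockEmitter: expectations are
// queued up front and consumed in order as the component emits.
type mockEmitter struct{ expected []string }

func (m *mockEmitter) ExpectOnce(name string) { m.expected = append(m.expected, name) }

func (m *mockEmitter) Emit(ev Event) {
	if len(m.expected) == 0 || m.expected[0] != ev.String() {
		panic(fmt.Sprintf("unexpected event: %s", ev))
	}
	m.expected = m.expected[1:]
}

func (m *mockEmitter) AssertExpectations() {
	if len(m.expected) != 0 {
		panic(fmt.Sprintf("missing expected events: %v", m.expected))
	}
}

type pingEvent struct{}

func (pingEvent) String() string { return "ping" }

func main() {
	em := &mockEmitter{}
	em.ExpectOnce("ping")
	em.Emit(pingEvent{}) // the component under test would emit this
	em.AssertExpectations()
	fmt.Println("all expected events observed")
}
```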
+ OP | CELO: +111 / -57
diff --git OP/op-node/rollup/clsync/clsync.go CELO/op-node/rollup/clsync/clsync.go +index 989f1c7c98b6023fd09ec254ba750b7a91e7948a..faa4586105e30b924e046e7a1295bad96d534d15 100644 +--- OP/op-node/rollup/clsync/clsync.go ++++ CELO/op-node/rollup/clsync/clsync.go +@@ -1,9 +1,7 @@ + package clsync +  + import ( +- "context" +- "errors" +- "io" ++ "sync" +  + "github.com/ethereum/go-ethereum/log" +  +@@ -20,27 +18,26 @@ type Metrics interface { + RecordUnsafePayloadsBuffer(length uint64, memSize uint64, next eth.BlockID) + } +  +-type Engine interface { +- engine.EngineState +- InsertUnsafePayload(ctx context.Context, payload *eth.ExecutionPayloadEnvelope, ref eth.L2BlockRef) error +-} +- + // CLSync holds on to a queue of received unsafe payloads, + // and tries to apply them to the tip of the chain when requested to. + type CLSync struct { +- log log.Logger +- cfg *rollup.Config +- metrics Metrics +- ec Engine ++ log log.Logger ++ cfg *rollup.Config ++ metrics Metrics ++ ++ emitter rollup.EventEmitter ++ ++ mu sync.Mutex ++ + unsafePayloads *PayloadsQueue // queue of unsafe payloads, ordered by ascending block number, may have gaps and duplicates + } +  +-func NewCLSync(log log.Logger, cfg *rollup.Config, metrics Metrics, ec Engine) *CLSync { ++func NewCLSync(log log.Logger, cfg *rollup.Config, metrics Metrics, emitter rollup.EventEmitter) *CLSync { + return &CLSync{ + log: log, + cfg: cfg, + metrics: metrics, +- ec: ec, ++ emitter: emitter, + unsafePayloads: NewPayloadsQueue(log, maxUnsafePayloadsMemory, payloadMemSize), + } + } +@@ -58,67 +55,124 @@ } + return ref + } +  +-// AddUnsafePayload schedules an execution payload to be processed, ahead of deriving it from L1. +-func (eq *CLSync) AddUnsafePayload(envelope *eth.ExecutionPayloadEnvelope) { +- if envelope == nil { +- eq.log.Warn("cannot add nil unsafe payload") +- return ++type ReceivedUnsafePayloadEvent struct { ++ Envelope *eth.ExecutionPayloadEnvelope ++} ++ ++func (ev ReceivedUnsafePayloadEvent) String() string { ++ return "received-unsafe-payload" ++} ++ ++func (eq *CLSync) OnEvent(ev rollup.Event) { ++ // Events may be concurrent in the future. Prevent unsafe concurrent modifications to the payloads queue. ++ eq.mu.Lock() ++ defer eq.mu.Unlock() ++ ++ switch x := ev.(type) { ++ case engine.InvalidPayloadEvent: ++ eq.onInvalidPayload(x) ++ case engine.ForkchoiceUpdateEvent: ++ eq.onForkchoiceUpdate(x) ++ case ReceivedUnsafePayloadEvent: ++ eq.onUnsafePayload(x) + } ++} +  +- if err := eq.unsafePayloads.Push(envelope); err != nil { +- eq.log.Warn("Could not add unsafe payload", "id", envelope.ExecutionPayload.ID(), "timestamp", uint64(envelope.ExecutionPayload.Timestamp), "err", err) +- return ++// onInvalidPayload checks if the first next-up payload matches the invalid payload. ++// If so, the payload is dropped, to give the next payloads a try. 
++func (eq *CLSync) onInvalidPayload(x engine.InvalidPayloadEvent) { ++ eq.log.Debug("CL sync received invalid-payload report", x.Envelope.ExecutionPayload.ID()) ++ ++ block := x.Envelope.ExecutionPayload ++ if peek := eq.unsafePayloads.Peek(); peek != nil && ++ block.BlockHash == peek.ExecutionPayload.BlockHash { ++ eq.log.Warn("Dropping invalid unsafe payload", ++ "hash", block.BlockHash, "number", uint64(block.BlockNumber), ++ "timestamp", uint64(block.Timestamp)) ++ eq.unsafePayloads.Pop() + } +- p := eq.unsafePayloads.Peek() +- eq.metrics.RecordUnsafePayloadsBuffer(uint64(eq.unsafePayloads.Len()), eq.unsafePayloads.MemSize(), p.ExecutionPayload.ID()) +- eq.log.Trace("Next unsafe payload to process", "next", p.ExecutionPayload.ID(), "timestamp", uint64(p.ExecutionPayload.Timestamp)) + } +  +-// Proceed dequeues the next applicable unsafe payload, if any, to apply to the tip of the chain. +-// EOF error means we can't process the next unsafe payload. The caller should then try a different form of syncing. +-func (eq *CLSync) Proceed(ctx context.Context) error { ++// onForkchoiceUpdate peeks at the next applicable unsafe payload, if any, ++// to apply on top of the received forkchoice pre-state. ++// The payload is held on to until the forkchoice changes (success case) or the payload is reported to be invalid. ++func (eq *CLSync) onForkchoiceUpdate(x engine.ForkchoiceUpdateEvent) { ++ eq.log.Debug("CL sync received forkchoice update", ++ "unsafe", x.UnsafeL2Head, "safe", x.SafeL2Head, "finalized", x.FinalizedL2Head) ++ ++ for { ++ pop, abort := eq.fromQueue(x) ++ if abort { ++ return ++ } ++ if pop { ++ eq.unsafePayloads.Pop() ++ } else { ++ break ++ } ++ } ++ ++ firstEnvelope := eq.unsafePayloads.Peek() ++ ++ // We don't pop from the queue. If there is a temporary error then we can retry. ++ // Upon next forkchoice update or invalid-payload event we can remove it from the queue. ++ eq.emitter.Emit(engine.ProcessUnsafePayloadEvent{Envelope: firstEnvelope}) ++} ++ ++// fromQueue determines what to do with the tip of the payloads-queue, given the forkchoice pre-state. ++// If abort, there is nothing to process (either due to empty queue, or unsuitable tip). ++// If pop, the tip should be dropped, and processing can repeat from there. ++// If not abort or pop, the tip is ready to process. 
++func (eq *CLSync) fromQueue(x engine.ForkchoiceUpdateEvent) (pop bool, abort bool) { + if eq.unsafePayloads.Len() == 0 { +- return io.EOF ++ return false, true + } + firstEnvelope := eq.unsafePayloads.Peek() + first := firstEnvelope.ExecutionPayload +  +- if uint64(first.BlockNumber) <= eq.ec.SafeL2Head().Number { +- eq.log.Info("skipping unsafe payload, since it is older than safe head", "safe", eq.ec.SafeL2Head().ID(), "unsafe", eq.ec.UnsafeL2Head().ID(), "unsafe_payload", first.ID()) +- eq.unsafePayloads.Pop() +- return nil ++ if first.BlockHash == x.UnsafeL2Head.Hash { ++ eq.log.Debug("successfully processed payload, removing it from the payloads queue now") ++ return true, false ++ } ++ ++ if uint64(first.BlockNumber) <= x.SafeL2Head.Number { ++ eq.log.Info("skipping unsafe payload, since it is older than safe head", "safe", x.SafeL2Head.ID(), "unsafe", x.UnsafeL2Head.ID(), "unsafe_payload", first.ID()) ++ return true, false + } +- if uint64(first.BlockNumber) <= eq.ec.UnsafeL2Head().Number { +- eq.log.Info("skipping unsafe payload, since it is older than unsafe head", "unsafe", eq.ec.UnsafeL2Head().ID(), "unsafe_payload", first.ID()) +- eq.unsafePayloads.Pop() +- return nil ++ if uint64(first.BlockNumber) <= x.UnsafeL2Head.Number { ++ eq.log.Info("skipping unsafe payload, since it is older than unsafe head", "unsafe", x.UnsafeL2Head.ID(), "unsafe_payload", first.ID()) ++ return true, false + } +  + // Ensure that the unsafe payload builds upon the current unsafe head +- if first.ParentHash != eq.ec.UnsafeL2Head().Hash { +- if uint64(first.BlockNumber) == eq.ec.UnsafeL2Head().Number+1 { +- eq.log.Info("skipping unsafe payload, since it does not build onto the existing unsafe chain", "safe", eq.ec.SafeL2Head().ID(), "unsafe", eq.ec.UnsafeL2Head().ID(), "unsafe_payload", first.ID()) +- eq.unsafePayloads.Pop() ++ if first.ParentHash != x.UnsafeL2Head.Hash { ++ if uint64(first.BlockNumber) == x.UnsafeL2Head.Number+1 { ++ eq.log.Info("skipping unsafe payload, since it does not build onto the existing unsafe chain", "safe", x.SafeL2Head.ID(), "unsafe", x.UnsafeL2Head.ID(), "unsafe_payload", first.ID()) ++ return true, false + } +- return io.EOF // time to go to next stage if we cannot process the first unsafe payload ++ return false, true // rollup-node should try something different if it cannot process the first unsafe payload + } +  +- ref, err := derive.PayloadToBlockRef(eq.cfg, first) +- if err != nil { +- eq.log.Error("failed to decode L2 block ref from payload", "err", err) +- eq.unsafePayloads.Pop() +- return nil ++ return false, false ++} ++ ++// AddUnsafePayload schedules an execution payload to be processed, ahead of deriving it from L1. 
++func (eq *CLSync) onUnsafePayload(x ReceivedUnsafePayloadEvent) { ++ eq.log.Debug("CL sync received payload", "payload", x.Envelope.ExecutionPayload.ID()) ++ envelope := x.Envelope ++ if envelope == nil { ++ eq.log.Warn("cannot add nil unsafe payload") ++ return + } +  +- if err := eq.ec.InsertUnsafePayload(ctx, firstEnvelope, ref); errors.Is(err, derive.ErrTemporary) { +- eq.log.Debug("Temporary error while inserting unsafe payload", "hash", ref.Hash, "number", ref.Number, "timestamp", ref.Time, "l1Origin", ref.L1Origin) +- return err +- } else if err != nil { +- eq.log.Warn("Dropping invalid unsafe payload", "hash", ref.Hash, "number", ref.Number, "timestamp", ref.Time, "l1Origin", ref.L1Origin) +- eq.unsafePayloads.Pop() +- return err ++ if err := eq.unsafePayloads.Push(envelope); err != nil { ++ eq.log.Warn("Could not add unsafe payload", "id", envelope.ExecutionPayload.ID(), "timestamp", uint64(envelope.ExecutionPayload.Timestamp), "err", err) ++ return + } +- eq.unsafePayloads.Pop() +- eq.log.Trace("Executed unsafe payload", "hash", ref.Hash, "number", ref.Number, "timestamp", ref.Time, "l1Origin", ref.L1Origin) +- return nil ++ p := eq.unsafePayloads.Peek() ++ eq.metrics.RecordUnsafePayloadsBuffer(uint64(eq.unsafePayloads.Len()), eq.unsafePayloads.MemSize(), p.ExecutionPayload.ID()) ++ eq.log.Trace("Next unsafe payload to process", "next", p.ExecutionPayload.ID(), "timestamp", uint64(p.ExecutionPayload.Timestamp)) ++ ++ // request forkchoice signal, so we can process the payload maybe ++ eq.emitter.Emit(engine.ForkchoiceRequestEvent{}) + }
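CLSync gets the same event-driven treatment: payloads queue up on `ReceivedUnsafePayloadEvent`, and each `ForkchoiceUpdateEvent` decides whether the queue tip was already applied or is stale/conflicting (pop), cannot be used yet (abort), or is ready to send to the engine. A stripped-down sketch of that `fromQueue` decision (types simplified; not the op-node definitions):

```go
package main

import "fmt"

type block struct {
	Number     uint64
	Hash       string
	ParentHash string
}

// fromQueue mirrors the pop/abort contract in the diff above, given the
// first queued payload and the current forkchoice heads.
func fromQueue(first, unsafeHead, safeHead block) (pop, abort bool) {
	if first.Hash == unsafeHead.Hash {
		return true, false // already processed, drop it
	}
	if first.Number <= safeHead.Number || first.Number <= unsafeHead.Number {
		return true, false // older than the chain tip, drop it
	}
	if first.ParentHash != unsafeHead.Hash {
		if first.Number == unsafeHead.Number+1 {
			return true, false // same height, different parent: conflicting, drop
		}
		return false, true // a gap: nothing to do until more blocks arrive
	}
	return false, false // builds on the tip: ready to process
}

func main() {
	head := block{Number: 1, Hash: "a1", ParentHash: "a0"}
	next := block{Number: 2, Hash: "a2", ParentHash: "a1"}
	pop, abort := fromQueue(next, head, block{Number: 0, Hash: "a0"})
	fmt.Println(pop, abort) // false false: next builds on the tip, process it
}
```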
+ OP | CELO: +183 / -118
diff --git OP/op-node/rollup/clsync/clsync_test.go CELO/op-node/rollup/clsync/clsync_test.go +index 67bcc25f82eaa8416d6fc45a9a4ae09634dffe12..f42c67f9220e58556d13b6e924982191ed11d592 100644 +--- OP/op-node/rollup/clsync/clsync_test.go ++++ CELO/op-node/rollup/clsync/clsync_test.go +@@ -1,9 +1,6 @@ + package clsync +  + import ( +- "context" +- "errors" +- "io" + "math/big" + "math/rand" // nosemgrep + "testing" +@@ -17,38 +14,11 @@ "github.com/ethereum/go-ethereum/log" +  + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive" ++ "github.com/ethereum-optimism/optimism/op-node/rollup/engine" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum-optimism/optimism/op-service/testutils" + ) +- +-type fakeEngine struct { +- unsafe, safe, finalized eth.L2BlockRef +- +- err error +-} +- +-func (f *fakeEngine) Finalized() eth.L2BlockRef { +- return f.finalized +-} +- +-func (f *fakeEngine) UnsafeL2Head() eth.L2BlockRef { +- return f.unsafe +-} +- +-func (f *fakeEngine) SafeL2Head() eth.L2BlockRef { +- return f.safe +-} +- +-func (f *fakeEngine) InsertUnsafePayload(ctx context.Context, payload *eth.ExecutionPayloadEnvelope, ref eth.L2BlockRef) error { +- if f.err != nil { +- return f.err +- } +- f.unsafe = ref +- return nil +-} +- +-var _ Engine = (*fakeEngine)(nil) +  + func TestCLSync(t *testing.T) { + rng := rand.New(rand.NewSource(1234)) +@@ -155,157 +125,252 @@ + // When a previously received unsafe block is older than the tip of the chain, we want to drop it. + t.Run("drop old", func(t *testing.T) { + logger := testlog.Logger(t, log.LevelError) +- eng := &fakeEngine{ +- unsafe: refA2, +- safe: refA0, +- finalized: refA0, +- } +- cl := NewCLSync(logger, cfg, metrics, eng) +  +- cl.AddUnsafePayload(payloadA1) +- require.NoError(t, cl.Proceed(context.Background())) ++ emitter := &testutils.MockEmitter{} ++ cl := NewCLSync(logger, cfg, metrics, emitter) ++ ++ emitter.ExpectOnce(engine.ForkchoiceRequestEvent{}) ++ cl.OnEvent(ReceivedUnsafePayloadEvent{Envelope: payloadA1}) ++ emitter.AssertExpectations(t) ++ ++ cl.OnEvent(engine.ForkchoiceUpdateEvent{ ++ UnsafeL2Head: refA2, ++ SafeL2Head: refA0, ++ FinalizedL2Head: refA0, ++ }) ++ emitter.AssertExpectations(t) // no new events expected to be emitted +  + require.Nil(t, cl.unsafePayloads.Peek(), "pop because too old") +- require.Equal(t, refA2, eng.unsafe, "keep unsafe head") + }) +  + // When we already have the exact payload as tip, then no need to process it + t.Run("drop equal", func(t *testing.T) { + logger := testlog.Logger(t, log.LevelError) +- eng := &fakeEngine{ +- unsafe: refA1, +- safe: refA0, +- finalized: refA0, +- } +- cl := NewCLSync(logger, cfg, metrics, eng) +  +- cl.AddUnsafePayload(payloadA1) +- require.NoError(t, cl.Proceed(context.Background())) ++ emitter := &testutils.MockEmitter{} ++ cl := NewCLSync(logger, cfg, metrics, emitter) ++ ++ emitter.ExpectOnce(engine.ForkchoiceRequestEvent{}) ++ cl.OnEvent(ReceivedUnsafePayloadEvent{Envelope: payloadA1}) ++ emitter.AssertExpectations(t) ++ ++ cl.OnEvent(engine.ForkchoiceUpdateEvent{ ++ UnsafeL2Head: refA1, ++ SafeL2Head: refA0, ++ FinalizedL2Head: refA0, ++ }) ++ emitter.AssertExpectations(t) // no new events expected to be emitted +  + require.Nil(t, cl.unsafePayloads.Peek(), "pop because seen") +- require.Equal(t, refA1, eng.unsafe, "keep unsafe head") + }) +  + // When we have a different payload, at the same height, then we 
want to keep it. + // The unsafe chain consensus preserves the first-seen payload. + t.Run("ignore conflict", func(t *testing.T) { + logger := testlog.Logger(t, log.LevelError) +- eng := &fakeEngine{ +- unsafe: altRefA1, +- safe: refA0, +- finalized: refA0, +- } +- cl := NewCLSync(logger, cfg, metrics, eng) ++ ++ emitter := &testutils.MockEmitter{} ++ cl := NewCLSync(logger, cfg, metrics, emitter) +  +- cl.AddUnsafePayload(payloadA1) +- require.NoError(t, cl.Proceed(context.Background())) ++ emitter.ExpectOnce(engine.ForkchoiceRequestEvent{}) ++ cl.OnEvent(ReceivedUnsafePayloadEvent{Envelope: payloadA1}) ++ emitter.AssertExpectations(t) ++ ++ cl.OnEvent(engine.ForkchoiceUpdateEvent{ ++ UnsafeL2Head: altRefA1, ++ SafeL2Head: refA0, ++ FinalizedL2Head: refA0, ++ }) ++ emitter.AssertExpectations(t) // no new events expected to be emitted +  + require.Nil(t, cl.unsafePayloads.Peek(), "pop because alternative") +- require.Equal(t, altRefA1, eng.unsafe, "keep unsafe head") + }) +  + t.Run("ignore unsafe reorg", func(t *testing.T) { + logger := testlog.Logger(t, log.LevelError) +- eng := &fakeEngine{ +- unsafe: altRefA1, +- safe: refA0, +- finalized: refA0, +- } +- cl := NewCLSync(logger, cfg, metrics, eng) ++ ++ emitter := &testutils.MockEmitter{} ++ cl := NewCLSync(logger, cfg, metrics, emitter) ++ ++ emitter.ExpectOnce(engine.ForkchoiceRequestEvent{}) ++ cl.OnEvent(ReceivedUnsafePayloadEvent{Envelope: payloadA2}) ++ emitter.AssertExpectations(t) +  +- cl.AddUnsafePayload(payloadA2) +- require.ErrorIs(t, cl.Proceed(context.Background()), io.EOF, "payload2 does not fit onto alt1, thus retrieve next input from L1") ++ cl.OnEvent(engine.ForkchoiceUpdateEvent{ ++ UnsafeL2Head: altRefA1, ++ SafeL2Head: refA0, ++ FinalizedL2Head: refA0, ++ }) ++ emitter.AssertExpectations(t) // no new events expected, since A2 does not fit onto altA1 +  + require.Nil(t, cl.unsafePayloads.Peek(), "pop because not applicable") +- require.Equal(t, altRefA1, eng.unsafe, "keep unsafe head") + }) +  + t.Run("success", func(t *testing.T) { + logger := testlog.Logger(t, log.LevelError) +- eng := &fakeEngine{ +- unsafe: refA0, +- safe: refA0, +- finalized: refA0, +- } +- cl := NewCLSync(logger, cfg, metrics, eng) ++ ++ emitter := &testutils.MockEmitter{} ++ cl := NewCLSync(logger, cfg, metrics, emitter) ++ emitter.AssertExpectations(t) // nothing to process yet +  +- require.ErrorIs(t, cl.Proceed(context.Background()), io.EOF, "nothing to process yet") + require.Nil(t, cl.unsafePayloads.Peek(), "no payloads yet") +  +- cl.AddUnsafePayload(payloadA1) ++ emitter.ExpectOnce(engine.ForkchoiceRequestEvent{}) ++ cl.OnEvent(ReceivedUnsafePayloadEvent{Envelope: payloadA1}) ++ emitter.AssertExpectations(t) ++ + lowest := cl.LowestQueuedUnsafeBlock() + require.Equal(t, refA1, lowest, "expecting A1 next") +- require.NoError(t, cl.Proceed(context.Background())) ++ ++ // payload A1 should be possible to process on top of A0 ++ emitter.ExpectOnce(engine.ProcessUnsafePayloadEvent{Envelope: payloadA1}) ++ cl.OnEvent(engine.ForkchoiceUpdateEvent{ ++ UnsafeL2Head: refA0, ++ SafeL2Head: refA0, ++ FinalizedL2Head: refA0, ++ }) ++ emitter.AssertExpectations(t) ++ ++ // now pretend the payload was processed: we can drop A1 now ++ cl.OnEvent(engine.ForkchoiceUpdateEvent{ ++ UnsafeL2Head: refA1, ++ SafeL2Head: refA0, ++ FinalizedL2Head: refA0, ++ }) + require.Nil(t, cl.unsafePayloads.Peek(), "pop because applied") +- require.Equal(t, refA1, eng.unsafe, "new unsafe head") +  +- cl.AddUnsafePayload(payloadA2) ++ // repeat for A2 ++ 
emitter.ExpectOnce(engine.ForkchoiceRequestEvent{}) ++ cl.OnEvent(ReceivedUnsafePayloadEvent{Envelope: payloadA2}) ++ emitter.AssertExpectations(t) ++ + lowest = cl.LowestQueuedUnsafeBlock() + require.Equal(t, refA2, lowest, "expecting A2 next") +- require.NoError(t, cl.Proceed(context.Background())) ++ ++ emitter.ExpectOnce(engine.ProcessUnsafePayloadEvent{Envelope: payloadA2}) ++ cl.OnEvent(engine.ForkchoiceUpdateEvent{ ++ UnsafeL2Head: refA1, ++ SafeL2Head: refA0, ++ FinalizedL2Head: refA0, ++ }) ++ emitter.AssertExpectations(t) ++ ++ // now pretend the payload was processed: we can drop A2 now ++ cl.OnEvent(engine.ForkchoiceUpdateEvent{ ++ UnsafeL2Head: refA2, ++ SafeL2Head: refA0, ++ FinalizedL2Head: refA0, ++ }) + require.Nil(t, cl.unsafePayloads.Peek(), "pop because applied") +- require.Equal(t, refA2, eng.unsafe, "new unsafe head") + }) +  + t.Run("double buffer", func(t *testing.T) { + logger := testlog.Logger(t, log.LevelError) +- eng := &fakeEngine{ +- unsafe: refA0, +- safe: refA0, +- finalized: refA0, +- } +- cl := NewCLSync(logger, cfg, metrics, eng) ++ ++ emitter := &testutils.MockEmitter{} ++ cl := NewCLSync(logger, cfg, metrics, emitter) +  +- cl.AddUnsafePayload(payloadA1) +- cl.AddUnsafePayload(payloadA2) ++ emitter.ExpectOnce(engine.ForkchoiceRequestEvent{}) ++ cl.OnEvent(ReceivedUnsafePayloadEvent{Envelope: payloadA1}) ++ emitter.AssertExpectations(t) ++ emitter.ExpectOnce(engine.ForkchoiceRequestEvent{}) ++ cl.OnEvent(ReceivedUnsafePayloadEvent{Envelope: payloadA2}) ++ emitter.AssertExpectations(t) +  + lowest := cl.LowestQueuedUnsafeBlock() + require.Equal(t, refA1, lowest, "expecting A1 next") +  +- require.NoError(t, cl.Proceed(context.Background())) +- require.NotNil(t, cl.unsafePayloads.Peek(), "next is ready") +- require.Equal(t, refA1, eng.unsafe, "new unsafe head") +- require.NoError(t, cl.Proceed(context.Background())) ++ emitter.ExpectOnce(engine.ProcessUnsafePayloadEvent{Envelope: payloadA1}) ++ cl.OnEvent(engine.ForkchoiceUpdateEvent{ ++ UnsafeL2Head: refA0, ++ SafeL2Head: refA0, ++ FinalizedL2Head: refA0, ++ }) ++ emitter.AssertExpectations(t) ++ require.Equal(t, 2, cl.unsafePayloads.Len(), "still holding on to A1, and queued A2") ++ ++ // Now pretend the payload was processed: we can drop A1 now. ++ // The CL-sync will try to immediately continue with A2. 
++ emitter.ExpectOnce(engine.ProcessUnsafePayloadEvent{Envelope: payloadA2}) ++ cl.OnEvent(engine.ForkchoiceUpdateEvent{ ++ UnsafeL2Head: refA1, ++ SafeL2Head: refA0, ++ FinalizedL2Head: refA0, ++ }) ++ emitter.AssertExpectations(t) ++ ++ // now pretend the payload was processed: we can drop A2 now ++ cl.OnEvent(engine.ForkchoiceUpdateEvent{ ++ UnsafeL2Head: refA2, ++ SafeL2Head: refA0, ++ FinalizedL2Head: refA0, ++ }) + require.Nil(t, cl.unsafePayloads.Peek(), "done") +- require.Equal(t, refA2, eng.unsafe, "new unsafe head") + }) +  + t.Run("temporary error", func(t *testing.T) { + logger := testlog.Logger(t, log.LevelError) +- eng := &fakeEngine{ +- unsafe: refA0, +- safe: refA0, +- finalized: refA0, +- } +- cl := NewCLSync(logger, cfg, metrics, eng) ++ ++ emitter := &testutils.MockEmitter{} ++ cl := NewCLSync(logger, cfg, metrics, emitter) ++ ++ emitter.ExpectOnce(engine.ForkchoiceRequestEvent{}) ++ cl.OnEvent(ReceivedUnsafePayloadEvent{Envelope: payloadA1}) ++ emitter.AssertExpectations(t) +  +- testErr := derive.NewTemporaryError(errors.New("test error")) +- eng.err = testErr +- cl.AddUnsafePayload(payloadA1) +- require.ErrorIs(t, cl.Proceed(context.Background()), testErr) +- require.Equal(t, refA0, eng.unsafe, "old unsafe head after error") ++ emitter.ExpectOnce(engine.ProcessUnsafePayloadEvent{Envelope: payloadA1}) ++ cl.OnEvent(engine.ForkchoiceUpdateEvent{ ++ UnsafeL2Head: refA0, ++ SafeL2Head: refA0, ++ FinalizedL2Head: refA0, ++ }) ++ emitter.AssertExpectations(t) ++ ++ // On temporary errors we don't need any feedback from the engine. ++ // We just hold on to what payloads there are in the queue. + require.NotNil(t, cl.unsafePayloads.Peek(), "no pop because temporary error") +  +- eng.err = nil +- require.NoError(t, cl.Proceed(context.Background())) +- require.Equal(t, refA1, eng.unsafe, "new unsafe head after resolved error") ++ // Pretend we are still stuck on the same forkchoice. The CL-sync will retry sending the payload. 
++ emitter.ExpectOnce(engine.ProcessUnsafePayloadEvent{Envelope: payloadA1}) ++ cl.OnEvent(engine.ForkchoiceUpdateEvent{ ++ UnsafeL2Head: refA0, ++ SafeL2Head: refA0, ++ FinalizedL2Head: refA0, ++ }) ++ emitter.AssertExpectations(t) ++ require.NotNil(t, cl.unsafePayloads.Peek(), "no pop because retry still unconfirmed") ++ ++ // Now confirm we got the payload this time ++ cl.OnEvent(engine.ForkchoiceUpdateEvent{ ++ UnsafeL2Head: refA1, ++ SafeL2Head: refA0, ++ FinalizedL2Head: refA0, ++ }) + require.Nil(t, cl.unsafePayloads.Peek(), "pop because valid") + }) +  + t.Run("invalid payload error", func(t *testing.T) { + logger := testlog.Logger(t, log.LevelError) +- eng := &fakeEngine{ +- unsafe: refA0, +- safe: refA0, +- finalized: refA0, +- } +- cl := NewCLSync(logger, cfg, metrics, eng) ++ emitter := &testutils.MockEmitter{} ++ cl := NewCLSync(logger, cfg, metrics, emitter) ++ ++ // CLSync gets payload and requests engine state, to later determine if payload should be forwarded ++ emitter.ExpectOnce(engine.ForkchoiceRequestEvent{}) ++ cl.OnEvent(ReceivedUnsafePayloadEvent{Envelope: payloadA1}) ++ emitter.AssertExpectations(t) ++ ++ // Engine signals, CLSync sends the payload ++ emitter.ExpectOnce(engine.ProcessUnsafePayloadEvent{Envelope: payloadA1}) ++ cl.OnEvent(engine.ForkchoiceUpdateEvent{ ++ UnsafeL2Head: refA0, ++ SafeL2Head: refA0, ++ FinalizedL2Head: refA0, ++ }) ++ emitter.AssertExpectations(t) +  +- testErr := errors.New("test error") +- eng.err = testErr +- cl.AddUnsafePayload(payloadA1) +- require.ErrorIs(t, cl.Proceed(context.Background()), testErr) +- require.Equal(t, refA0, eng.unsafe, "old unsafe head after error") ++ // Pretend the payload is bad. It should not be retried after this. ++ cl.OnEvent(engine.InvalidPayloadEvent{Envelope: payloadA1}) ++ emitter.AssertExpectations(t) + require.Nil(t, cl.unsafePayloads.Peek(), "pop because invalid") + }) + }
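The rewritten tests above exercise CLSync purely through events: payloads arrive as ReceivedUnsafePayloadEvent, the engine answers with ForkchoiceUpdateEvent, and the MockEmitter records what CLSync emits in response (ExpectOnce registers an expected emission, AssertExpectations verifies it happened). Below is a minimal sketch of the event plumbing these tests assume; the authoritative definitions live in op-node's rollup package (and the MockEmitter in op-service/testutils) and may differ in detail.

```go
// Minimal sketch (an assumption, not the fork's verbatim code) of the
// event interfaces the rewritten clsync tests rely on.
package rollup

// Event is anything that can be broadcast between derivers.
type Event interface {
	String() string
}

// Deriver reacts to broadcasted events, e.g. CLSync.OnEvent above.
type Deriver interface {
	OnEvent(ev Event)
}

// EventEmitter is how a deriver publishes follow-up events.
type EventEmitter interface {
	Emit(ev Event)
}

// EmitterFunc adapts a plain function to the EventEmitter interface.
type EmitterFunc func(ev Event)

func (fn EmitterFunc) Emit(ev Event) { fn(ev) }
```

With the engine dependency replaced by an emitter, the tests no longer need a fakeEngine: they feed events in and assert on the events that come out.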
+ [OP → CELO: +12 / -0]
diff --git OP/op-node/rollup/conductor/conductor.go CELO/op-node/rollup/conductor/conductor.go +index 912b08cf071e2c954b686cc31e5ffbf52287fa8a..927d88035ccb146d571f100242ece76da41f402b 100644 +--- OP/op-node/rollup/conductor/conductor.go ++++ CELO/op-node/rollup/conductor/conductor.go +@@ -9,14 +9,21 @@ + // SequencerConductor is an interface for the driver to communicate with the sequencer conductor. + // It is used to determine if the current node is the active sequencer, and to commit unsafe payloads to the conductor log. + type SequencerConductor interface { ++ // Leader returns true if this node is the leader sequencer. + Leader(ctx context.Context) (bool, error) ++ // CommitUnsafePayload commits an unsafe payload to the conductor FSM. + CommitUnsafePayload(ctx context.Context, payload *eth.ExecutionPayloadEnvelope) error ++ // OverrideLeader forces current node to be considered leader and be able to start sequencing during disaster situations in HA mode. ++ OverrideLeader(ctx context.Context) error ++ // Close closes the conductor client. + Close() + } +  + // NoOpConductor is a no-op conductor that assumes this node is the leader sequencer. + type NoOpConductor struct{} +  ++var _ SequencerConductor = &NoOpConductor{} ++ + // Leader returns true if this node is the leader sequencer. NoOpConductor always returns true. + func (c *NoOpConductor) Leader(ctx context.Context) (bool, error) { + return true, nil +@@ -24,6 +31,11 @@ } +  + // CommitUnsafePayload commits an unsafe payload to the conductor log. + func (c *NoOpConductor) CommitUnsafePayload(ctx context.Context, payload *eth.ExecutionPayloadEnvelope) error { ++ return nil ++} ++ ++// OverrideLeader implements SequencerConductor. ++func (c *NoOpConductor) OverrideLeader(ctx context.Context) error { + return nil + } +
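The `var _ SequencerConductor = &NoOpConductor{}` line added above is a compile-time interface assertion: the build breaks immediately if NoOpConductor ever drifts out of sync with the widened interface. A sketch of the same pattern for a hypothetical custom conductor (every name besides SequencerConductor and the imports is made up for illustration):

```go
package conductor

import (
	"context"

	"github.com/ethereum-optimism/optimism/op-service/eth"
)

// alwaysFollowerConductor is a hypothetical conductor that never leads.
type alwaysFollowerConductor struct{}

// The blank-identifier assertion costs nothing at runtime; it exists
// purely so the compiler checks the interface is fully implemented.
var _ SequencerConductor = (*alwaysFollowerConductor)(nil)

func (alwaysFollowerConductor) Leader(ctx context.Context) (bool, error) { return false, nil }

func (alwaysFollowerConductor) CommitUnsafePayload(ctx context.Context, payload *eth.ExecutionPayloadEnvelope) error {
	return nil
}

func (alwaysFollowerConductor) OverrideLeader(ctx context.Context) error { return nil }

func (alwaysFollowerConductor) Close() {}
```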
+ [OP → CELO: +4 / -3]
diff --git OP/op-node/rollup/derive/channel_test.go CELO/op-node/rollup/derive/channel_test.go +index e853d622aa04ddf5f080a5867f2a81115e6ad05e..a6594492837b0e8d426d82c338bce1a848b9ebcb 100644 +--- OP/op-node/rollup/derive/channel_test.go ++++ CELO/op-node/rollup/derive/channel_test.go +@@ -7,9 +7,9 @@ "math/big" + "math/rand" + "testing" +  +- "github.com/DataDog/zstd" + "github.com/andybalholm/brotli" + "github.com/ethereum-optimism/optimism/op-service/eth" ++ "github.com/klauspost/compress/zstd" + "github.com/stretchr/testify/require" + ) +  +@@ -143,8 +143,9 @@ } + case ca == Zstd: // invalid algo + return func(buf *bytes.Buffer, t *testing.T) { + buf.WriteByte(0x02) // invalid channel version byte +- writer := zstd.NewWriter(buf) +- _, err := writer.Write(encodedBatch.Bytes()) ++ writer, err := zstd.NewWriter(buf) ++ require.NoError(t, err) ++ _, err = writer.Write(encodedBatch.Bytes()) + require.NoError(t, err) + require.NoError(t, writer.Close()) + }
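The import swap from DataDog/zstd (a cgo binding) to klauspost/compress/zstd (pure Go) changes the writer-construction API: klauspost's NewWriter returns an error alongside the writer, which is why the test gained the extra require.NoError. A minimal round-trip sketch with the new library:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer

	// Unlike DataDog/zstd's NewWriter, this constructor can fail
	// (e.g. on invalid options), so the error must be checked.
	w, err := zstd.NewWriter(&buf)
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("batch data")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	r, err := zstd.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	defer r.Close()

	var out bytes.Buffer
	if _, err := out.ReadFrom(r); err != nil {
		panic(err)
	}
	fmt.Println(out.String()) // batch data
}
```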
+ [OP → CELO: +2 / -2]
diff --git OP/op-node/rollup/derive/data_source.go CELO/op-node/rollup/derive/data_source.go +index 5f2e48199bf3704b229c4fb0549e92b521c892d3..39b62de2e54f0888ca30c6341d3c5aa49d2d0463 100644 +--- OP/op-node/rollup/derive/data_source.go ++++ CELO/op-node/rollup/derive/data_source.go +@@ -28,7 +28,7 @@ } +  + type PlasmaInputFetcher interface { + // GetInput fetches the input for the given commitment at the given block number from the DA storage service. +- GetInput(ctx context.Context, l1 plasma.L1Fetcher, c plasma.CommitmentData, blockId eth.BlockID) (eth.Data, error) ++ GetInput(ctx context.Context, l1 plasma.L1Fetcher, c plasma.CommitmentData, blockId eth.L1BlockRef) (eth.Data, error) + // AdvanceL1Origin advances the L1 origin to the given block number, syncing the DA challenge events. + AdvanceL1Origin(ctx context.Context, l1 plasma.L1Fetcher, blockId eth.BlockID) error + // Reset the challenge origin in case of L1 reorg +@@ -78,7 +78,7 @@ src = NewCalldataSource(ctx, ds.log, ds.dsCfg, ds.fetcher, ref, batcherAddr) + } + if ds.dsCfg.plasmaEnabled { + // plasma([calldata | blobdata](l1Ref)) -> data +- return NewPlasmaDataSource(ds.log, src, ds.fetcher, ds.plasmaFetcher, ref.ID()), nil ++ return NewPlasmaDataSource(ds.log, src, ds.fetcher, ds.plasmaFetcher, ref), nil + } + return src, nil + }
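Threading eth.L1BlockRef instead of eth.BlockID through GetInput and NewPlasmaDataSource means the plasma fetcher now receives the full block reference (parent hash and timestamp included), while call sites that only need the slim identifier downcast with .ID(), as the removed `ref.ID()` argument shows. A field-level sketch of the two types, inferred from their use here (see op-service/eth for the authoritative definitions):

```go
package eth

import "github.com/ethereum/go-ethereum/common"

// BlockID is the slim identifier: hash plus number.
type BlockID struct {
	Hash   common.Hash `json:"hash"`
	Number uint64      `json:"number"`
}

// L1BlockRef additionally carries the parent hash and timestamp,
// which is the extra context the plasma data source now keeps.
type L1BlockRef struct {
	Hash       common.Hash `json:"hash"`
	Number     uint64      `json:"number"`
	ParentHash common.Hash `json:"parentHash"`
	Time       uint64      `json:"timestamp"`
}

// ID downcasts to the slim identifier for call sites that only need
// hash and number (e.g. s.id.ID() in the plasma data source).
func (id L1BlockRef) ID() BlockID {
	return BlockID{Hash: id.Hash, Number: id.Number}
}
```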
+ [new file in CELO: +118 / -0]
diff --git OP/op-node/rollup/derive/deriver.go CELO/op-node/rollup/derive/deriver.go +new file mode 100644 +index 0000000000000000000000000000000000000000..da3a71577725c19954617ac3074e1e77cd0f1331 +--- /dev/null ++++ CELO/op-node/rollup/derive/deriver.go +@@ -0,0 +1,118 @@ ++package derive ++ ++import ( ++ "context" ++ "errors" ++ "io" ++ ++ "github.com/ethereum-optimism/optimism/op-node/rollup" ++ "github.com/ethereum-optimism/optimism/op-service/eth" ++) ++ ++type DeriverIdleEvent struct { ++ Origin eth.L1BlockRef ++} ++ ++func (d DeriverIdleEvent) String() string { ++ return "derivation-idle" ++} ++ ++type DeriverMoreEvent struct{} ++ ++func (d DeriverMoreEvent) String() string { ++ return "deriver-more" ++} ++ ++// ConfirmReceivedAttributesEvent signals that the derivation pipeline may generate new attributes. ++// After emitting DerivedAttributesEvent, no new attributes will be generated until a confirmation of reception. ++type ConfirmReceivedAttributesEvent struct{} ++ ++func (d ConfirmReceivedAttributesEvent) String() string { ++ return "confirm-received-attributes" ++} ++ ++type ConfirmPipelineResetEvent struct{} ++ ++func (d ConfirmPipelineResetEvent) String() string { ++ return "confirm-pipeline-reset" ++} ++ ++// DerivedAttributesEvent is emitted when new attributes are available to apply to the engine. ++type DerivedAttributesEvent struct { ++ Attributes *AttributesWithParent ++} ++ ++func (ev DerivedAttributesEvent) String() string { ++ return "derived-attributes" ++} ++ ++type PipelineStepEvent struct { ++ PendingSafe eth.L2BlockRef ++} ++ ++func (ev PipelineStepEvent) String() string { ++ return "pipeline-step" ++} ++ ++type PipelineDeriver struct { ++ pipeline *DerivationPipeline ++ ++ ctx context.Context ++ ++ emitter rollup.EventEmitter ++ ++ needAttributesConfirmation bool ++} ++ ++func NewPipelineDeriver(ctx context.Context, pipeline *DerivationPipeline, emitter rollup.EventEmitter) *PipelineDeriver { ++ return &PipelineDeriver{ ++ pipeline: pipeline, ++ ctx: ctx, ++ emitter: emitter, ++ } ++} ++ ++func (d *PipelineDeriver) OnEvent(ev rollup.Event) { ++ switch x := ev.(type) { ++ case rollup.ResetEvent: ++ d.pipeline.Reset() ++ case PipelineStepEvent: ++ // Don't generate attributes if there are already attributes in-flight ++ if d.needAttributesConfirmation { ++ d.pipeline.log.Debug("Previously sent attributes are unconfirmed to be received") ++ return ++ } ++ d.pipeline.log.Trace("Derivation pipeline step", "onto_origin", d.pipeline.Origin()) ++ attrib, err := d.pipeline.Step(d.ctx, x.PendingSafe) ++ if err == io.EOF { ++ d.pipeline.log.Debug("Derivation process went idle", "progress", d.pipeline.Origin(), "err", err) ++ d.emitter.Emit(DeriverIdleEvent{Origin: d.pipeline.Origin()}) ++ } else if err != nil && errors.Is(err, EngineELSyncing) { ++ d.pipeline.log.Debug("Derivation process went idle because the engine is syncing", "progress", d.pipeline.Origin(), "err", err) ++ d.emitter.Emit(DeriverIdleEvent{Origin: d.pipeline.Origin()}) ++ } else if err != nil && errors.Is(err, ErrReset) { ++ d.emitter.Emit(rollup.ResetEvent{Err: err}) ++ } else if err != nil && errors.Is(err, ErrTemporary) { ++ d.emitter.Emit(rollup.EngineTemporaryErrorEvent{Err: err}) ++ } else if err != nil && errors.Is(err, ErrCritical) { ++ d.emitter.Emit(rollup.CriticalErrorEvent{Err: err}) ++ } else if err != nil && errors.Is(err, NotEnoughData) { ++ // don't do a backoff for this error ++ d.emitter.Emit(DeriverMoreEvent{}) ++ } else if err != nil { ++ d.pipeline.log.Error("Derivation process 
error", "err", err) ++ d.emitter.Emit(rollup.EngineTemporaryErrorEvent{Err: err}) ++ } else { ++ if attrib != nil { ++ d.needAttributesConfirmation = true ++ d.emitter.Emit(DerivedAttributesEvent{Attributes: attrib}) ++ } else { ++ d.emitter.Emit(DeriverMoreEvent{}) // continue with the next step if we can ++ } ++ } ++ case ConfirmPipelineResetEvent: ++ d.pipeline.ConfirmEngineReset() ++ case ConfirmReceivedAttributesEvent: ++ d.needAttributesConfirmation = false ++ } ++}
+ [OP → CELO: +3 / -0]
diff --git OP/op-node/rollup/derive/pipeline.go CELO/op-node/rollup/derive/pipeline.go +index 31295870f40c9df5bbb620d01a6455e5e7190695..7d74fb9da800c19fb3473605953cb0042fe38bc5 100644 +--- OP/op-node/rollup/derive/pipeline.go ++++ CELO/op-node/rollup/derive/pipeline.go +@@ -22,6 +22,7 @@ RecordChannelTimedOut() + RecordFrame() + RecordDerivedBatches(batchType string) + SetDerivationIdle(idle bool) ++ RecordPipelineReset() + } +  + type L1Fetcher interface { +@@ -194,6 +195,8 @@ + // initialReset does the initial reset work of finding the L1 point to rewind back to + func (dp *DerivationPipeline) initialReset(ctx context.Context, resetL2Safe eth.L2BlockRef) error { + dp.log.Info("Rewinding derivation-pipeline L1 traversal to handle reset") ++ ++ dp.metrics.RecordPipelineReset() +  + // Walk back L2 chain to find the L1 origin that is old enough to start buffering channel data from. + pipelineL2 := resetL2Safe
+ [OP → CELO: +5 / -5]
diff --git OP/op-node/rollup/derive/plasma_data_source.go CELO/op-node/rollup/derive/plasma_data_source.go +index 9db4dd1cc55371e6475540106e7186fc6668326b..19beb145999b60c7b597166c7f1b3ec8dc07eded 100644 +--- OP/op-node/rollup/derive/plasma_data_source.go ++++ CELO/op-node/rollup/derive/plasma_data_source.go +@@ -17,12 +17,12 @@ log log.Logger + src DataIter + fetcher PlasmaInputFetcher + l1 L1Fetcher +- id eth.BlockID ++ id eth.L1BlockRef + // keep track of a pending commitment so we can keep trying to fetch the input. + comm plasma.CommitmentData + } +  +-func NewPlasmaDataSource(log log.Logger, src DataIter, l1 L1Fetcher, fetcher PlasmaInputFetcher, id eth.BlockID) *PlasmaDataSource { ++func NewPlasmaDataSource(log log.Logger, src DataIter, l1 L1Fetcher, fetcher PlasmaInputFetcher, id eth.L1BlockRef) *PlasmaDataSource { + return &PlasmaDataSource{ + log: log, + src: src, +@@ -37,7 +37,7 @@ // Process origin syncs the challenge contract events and updates the local challenge states + // before we can proceed to fetch the input data. This function can be called multiple times + // for the same origin and noop if the origin was already processed. It is also called if + there is no commitment in the current origin. +- if err := s.fetcher.AdvanceL1Origin(ctx, s.l1, s.id); err != nil { ++ if err := s.fetcher.AdvanceL1Origin(ctx, s.l1, s.id.ID()); err != nil { + if errors.Is(err, plasma.ErrReorgRequired) { + return nil, NewResetError(fmt.Errorf("new expired challenge")) + } +@@ -83,13 +83,13 @@ s.comm = nil + // skip the input + return s.Next(ctx) + } else if errors.Is(err, plasma.ErrMissingPastWindow) { +- return nil, NewCriticalError(fmt.Errorf("data for comm %x not available: %w", s.comm, err)) ++ return nil, NewCriticalError(fmt.Errorf("data for comm %s not available: %w", s.comm, err)) + } else if errors.Is(err, plasma.ErrPendingChallenge) { + // continue stepping without slowing down. + return nil, NotEnoughData + } else if err != nil { + // return temporary error so we can keep retrying. +- return nil, NewTemporaryError(fmt.Errorf("failed to fetch input data with comm %x from da service: %w", s.comm, err)) ++ return nil, NewTemporaryError(fmt.Errorf("failed to fetch input data with comm %s from da service: %w", s.comm, err)) + } + // inputs are limited to a max size to ensure they can be challenged in the DA contract. + if s.comm.CommitmentType() == plasma.Keccak256CommitmentType && len(data) > plasma.MaxInputSize {
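The NewResetError/NewTemporaryError/NewCriticalError constructors used above encode error severity that upstream derivers translate into events (see the branching in deriver.go earlier). Assuming the wrappers attach the derive package's sentinels, a caller classifies them roughly like this (a sketch, with classify being a hypothetical helper):

```go
package derive

import "errors"

// classify illustrates the severity convention; it assumes the
// New*Error wrappers attach ErrReset/ErrTemporary/ErrCritical so that
// errors.Is can recover the severity through any amount of wrapping.
func classify(err error) string {
	switch {
	case err == nil:
		return "ok"
	case errors.Is(err, ErrReset):
		return "reset: rewind the pipeline (e.g. L1 reorg)"
	case errors.Is(err, ErrTemporary):
		return "temporary: back off and retry"
	case errors.Is(err, ErrCritical):
		return "critical: stop deriving"
	default:
		return "unknown: treated as temporary by the deriver"
	}
}
```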
+ [OP → CELO: +11 / -8]
diff --git OP/op-node/rollup/derive/plasma_data_source_test.go CELO/op-node/rollup/derive/plasma_data_source_test.go +index ae13be8e37f877ca6e3043d8d8a71781ca71af13..606f754f9930f9969721eeb22b23c6b428219d77 100644 +--- OP/op-node/rollup/derive/plasma_data_source_test.go ++++ CELO/op-node/rollup/derive/plasma_data_source_test.go +@@ -56,7 +56,7 @@ ChallengeWindow: 90, ResolveWindow: 90, + } + metrics := &plasma.NoopMetrics{} +  +- daState := plasma.NewState(logger, metrics) ++ daState := plasma.NewState(logger, metrics, pcfg) +  + da := plasma.NewPlasmaDAWithState(logger, pcfg, storage, metrics, daState) +  +@@ -97,6 +97,7 @@ } + // keep track of random input data to validate against + var inputs [][]byte + var comms []plasma.CommitmentData ++ var inclusionBlocks []eth.L1BlockRef +  + signer := cfg.L1Signer() +  +@@ -131,6 +132,7 @@ // plasma da tests are designed for keccak256 commitments, so we type assert here + kComm := comm.(plasma.Keccak256Commitment) + inputs = append(inputs, input) + comms = append(comms, kComm) ++ inclusionBlocks = append(inclusionBlocks, ref) +  + tx, err := types.SignNewTx(batcherPriv, signer, &types.DynamicFeeTx{ + ChainID: signer.ChainID(), +@@ -161,7 +163,7 @@ // challenge the first 4 commitments as soon as we have collected them all + if len(comms) >= 4 && nc < 7 { + // skip a block between each challenge transaction + if nc%2 == 0 { +- daState.SetActiveChallenge(comms[nc/2].Encode(), ref.Number, pcfg.ResolveWindow) ++ daState.CreateChallenge(comms[nc/2], ref.ID(), inclusionBlocks[nc/2].Number) + logger.Info("setting active challenge", "comm", comms[nc/2]) + } + nc++ +@@ -275,11 +277,9 @@ } +  + } +  +- // trigger l1 finalization signal +- da.Finalize(l1Refs[len(l1Refs)-32]) +- ++ // finalize based on the second to last block, which will prune the commitment on block 2, and make it finalized ++ da.Finalize(l1Refs[len(l1Refs)-2]) + finalitySignal.AssertExpectations(t) +- l1F.AssertExpectations(t) + } +  + // This tests makes sure the pipeline returns a temporary error if data is not found. +@@ -299,7 +299,7 @@ } +  + metrics := &plasma.NoopMetrics{} +  +- daState := plasma.NewState(logger, metrics) ++ daState := plasma.NewState(logger, metrics, pcfg) +  + da := plasma.NewPlasmaDAWithState(logger, pcfg, storage, metrics, daState) +  +@@ -396,8 +396,11 @@ // not enough data + _, err = src.Next(ctx) + require.ErrorIs(t, err, NotEnoughData) +  ++ // create and resolve a challenge ++ daState.CreateChallenge(comm, ref.ID(), ref.Number) + // now challenge is resolved +- daState.SetResolvedChallenge(comm.Encode(), input, ref.Number+2) ++ err = daState.ResolveChallenge(comm, eth.BlockID{Number: ref.Number + 2}, ref.Number, input) ++ require.NoError(t, err) +  + // derivation can resume + data, err := src.Next(ctx)
+ [OP → CELO: +51 / -25]
diff --git OP/op-node/rollup/driver/driver.go CELO/op-node/rollup/driver/driver.go +index 7213eaf98bac822e4b6620b993b221b6bfd5795e..d65f996c4e5be75abaec62f16f4fe0508183248d 100644 +--- OP/op-node/rollup/driver/driver.go ++++ CELO/op-node/rollup/driver/driver.go +@@ -77,8 +77,6 @@ } +  + type CLSync interface { + LowestQueuedUnsafeBlock() eth.L2BlockRef +- AddUnsafePayload(payload *eth.ExecutionPayloadEnvelope) +- Proceed(ctx context.Context) error + } +  + type AttributesHandler interface { +@@ -93,9 +91,8 @@ Proceed(ctx context.Context) error + } +  + type Finalizer interface { +- Finalize(ctx context.Context, ref eth.L1BlockRef) + FinalizedL1() eth.L1BlockRef +- engine.FinalizerHooks ++ rollup.Deriver + } +  + type PlasmaIface interface { +@@ -173,53 +170,68 @@ syncCfg *sync.Config, + sequencerConductor conductor.SequencerConductor, + plasma PlasmaIface, + ) *Driver { ++ driverCtx, driverCancel := context.WithCancel(context.Background()) ++ rootDeriver := &rollup.SynchronousDerivers{} ++ synchronousEvents := rollup.NewSynchronousEvents(log, driverCtx, rootDeriver) ++ + l1 = NewMeteredL1Fetcher(l1, metrics) + l1State := NewL1State(log, metrics) + sequencerConfDepth := NewConfDepth(driverCfg.SequencerConfDepth, l1State.L1Head, l1) + findL1Origin := NewL1OriginSelector(log, cfg, sequencerConfDepth) + verifConfDepth := NewConfDepth(driverCfg.VerifierConfDepth, l1State.L1Head, l1) +- engine := engine.NewEngineController(l2, log, metrics, cfg, syncCfg.SyncMode) +- clSync := clsync.NewCLSync(log, cfg, metrics, engine) ++ ec := engine.NewEngineController(l2, log, metrics, cfg, syncCfg.SyncMode, synchronousEvents) ++ engineResetDeriver := engine.NewEngineResetDeriver(driverCtx, log, cfg, l1, l2, syncCfg, synchronousEvents) ++ clSync := clsync.NewCLSync(log, cfg, metrics, synchronousEvents) +  + var finalizer Finalizer + if cfg.PlasmaEnabled() { +- finalizer = finality.NewPlasmaFinalizer(log, cfg, l1, engine, plasma) ++ finalizer = finality.NewPlasmaFinalizer(driverCtx, log, cfg, l1, synchronousEvents, plasma) + } else { +- finalizer = finality.NewFinalizer(log, cfg, l1, engine) ++ finalizer = finality.NewFinalizer(driverCtx, log, cfg, l1, synchronousEvents) + } +  +- attributesHandler := attributes.NewAttributesHandler(log, cfg, engine, l2) ++ attributesHandler := attributes.NewAttributesHandler(log, cfg, driverCtx, l2, synchronousEvents) + derivationPipeline := derive.NewDerivationPipeline(log, cfg, verifConfDepth, l1Blobs, plasma, l2, metrics) ++ pipelineDeriver := derive.NewPipelineDeriver(driverCtx, derivationPipeline, synchronousEvents) + attrBuilder := derive.NewFetchingAttributesBuilder(cfg, l1, l2) +- meteredEngine := NewMeteredEngine(cfg, engine, metrics, log) // Only use the metered engine in the sequencer b/c it records sequencing metrics. ++ meteredEngine := NewMeteredEngine(cfg, ec, metrics, log) // Only use the metered engine in the sequencer b/c it records sequencing metrics. 
+ sequencer := NewSequencer(log, cfg, meteredEngine, attrBuilder, findL1Origin, metrics) +- driverCtx, driverCancel := context.WithCancel(context.Background()) + asyncGossiper := async.NewAsyncGossiper(driverCtx, network, log, metrics) +- return &Driver{ +- l1State: l1State, +- SyncDeriver: &SyncDeriver{ +- Derivation: derivationPipeline, +- Finalizer: finalizer, +- AttributesHandler: attributesHandler, +- SafeHeadNotifs: safeHeadListener, +- CLSync: clSync, +- Engine: engine, +- }, ++ ++ syncDeriver := &SyncDeriver{ ++ Derivation: derivationPipeline, ++ Finalizer: finalizer, ++ SafeHeadNotifs: safeHeadListener, ++ CLSync: clSync, ++ Engine: ec, ++ SyncCfg: syncCfg, ++ Config: cfg, ++ L1: l1, ++ L2: l2, ++ Emitter: synchronousEvents, ++ Log: log, ++ Ctx: driverCtx, ++ Drain: synchronousEvents.Drain, ++ } ++ engDeriv := engine.NewEngDeriver(log, driverCtx, cfg, ec, synchronousEvents) ++ schedDeriv := NewStepSchedulingDeriver(log, synchronousEvents) ++ ++ driver := &Driver{ ++ l1State: l1State, ++ SyncDeriver: syncDeriver, ++ sched: schedDeriv, ++ synchronousEvents: synchronousEvents, + stateReq: make(chan chan struct{}), + forceReset: make(chan chan struct{}, 10), + startSequencer: make(chan hashAndErrorChannel, 10), + stopSequencer: make(chan chan hashAndError, 10), + sequencerActive: make(chan chan bool, 10), + sequencerNotifs: sequencerStateListener, +- config: cfg, +- syncCfg: syncCfg, + driverConfig: driverCfg, + driverCtx: driverCtx, + driverCancel: driverCancel, + log: log, + snapshotLog: snapshotLog, +- l1: l1, +- l2: l2, + sequencer: sequencer, + network: network, + metrics: metrics, +@@ -231,4 +243,18 @@ altSync: altSync, + asyncGossiper: asyncGossiper, + sequencerConductor: sequencerConductor, + } ++ ++ *rootDeriver = []rollup.Deriver{ ++ syncDeriver, ++ engineResetDeriver, ++ engDeriv, ++ schedDeriv, ++ driver, ++ clSync, ++ pipelineDeriver, ++ attributesHandler, ++ finalizer, ++ } ++ ++ return driver + }
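NewDriver now hands the shared synchronousEvents emitter to every component first, and only fills in rootDeriver at the very end; the pointer-to-slice indirection is what breaks that construction cycle. A minimal sketch of the assumed broadcast semantics of the root deriver:

```go
// Sketch of the assumed fan-out semantics; the real type lives in
// op-node's rollup package.
package rollup

// SynchronousDerivers broadcasts each event, in order, to every
// registered deriver.
type SynchronousDerivers []Deriver

var _ Deriver = (*SynchronousDerivers)(nil)

func (s *SynchronousDerivers) OnEvent(ev Event) {
	// Pointer receiver: the driver can populate the slice after the
	// derivers were already constructed with the shared emitter.
	for _, d := range *s {
		d.OnEvent(ev)
	}
}
```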
+ [OP → CELO: +191 / -150]
diff --git OP/op-node/rollup/driver/state.go CELO/op-node/rollup/driver/state.go +index 7af9656bd3866e6723ba3908de62e4d03c03fa12..73ab3a0923bd03cd1112d6f8bbbd354b65f7af60 100644 +--- OP/op-node/rollup/driver/state.go ++++ CELO/op-node/rollup/driver/state.go +@@ -6,7 +6,6 @@ "context" + "encoding/json" + "errors" + "fmt" +- "io" + gosync "sync" + "time" +  +@@ -15,12 +14,13 @@ "github.com/ethereum/go-ethereum/log" +  + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-node/rollup/async" ++ "github.com/ethereum-optimism/optimism/op-node/rollup/clsync" + "github.com/ethereum-optimism/optimism/op-node/rollup/conductor" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-node/rollup/engine" ++ "github.com/ethereum-optimism/optimism/op-node/rollup/finality" + "github.com/ethereum-optimism/optimism/op-node/rollup/sync" + "github.com/ethereum-optimism/optimism/op-service/eth" +- "github.com/ethereum-optimism/optimism/op-service/retry" + ) +  + var ( +@@ -39,6 +39,10 @@ l1State L1StateIface +  + *SyncDeriver +  ++ sched *StepSchedulingDeriver ++ ++ synchronousEvents *rollup.SynchronousEvents ++ + // Requests to block the event loop for synchronous execution to avoid reading an inconsistent state + stateReq chan chan struct{} +  +@@ -61,18 +65,12 @@ sequencerActive chan chan bool +  + // sequencerNotifs is notified when the sequencer is started or stopped + sequencerNotifs SequencerStateListener +- +- // Rollup config: rollup chain configuration +- config *rollup.Config +  + sequencerConductor conductor.SequencerConductor +  + // Driver config: verifier and sequencer settings + driverConfig *Config +  +- // Sync Mod Config +- syncCfg *sync.Config +- + // L1 Signals: + // + // Not all L1 blocks, or all changes, have to be signalled: +@@ -94,8 +92,6 @@ // L2 Signals: +  + unsafeL2Payloads chan *eth.ExecutionPayloadEnvelope +  +- l1 L1Chain +- l2 L2Chain + sequencer SequencerIface + network Network // may be nil, network for is optional +  +@@ -191,39 +187,9 @@ defer s.log.Info("State loop returned") +  + defer s.driverCancel() +  +- // stepReqCh is used to request that the driver attempts to step forward by one L1 block. +- stepReqCh := make(chan struct{}, 1) +- +- // channel, nil by default (not firing), but used to schedule re-attempts with delay +- var delayedStepReq <-chan time.Time +- +- // keep track of consecutive failed attempts, to adjust the backoff time accordingly +- bOffStrategy := retry.Exponential() +- stepAttempts := 0 +- +- // step requests a derivation step to be taken. Won't deadlock if the channel is full. +- step := func() { +- select { +- case stepReqCh <- struct{}{}: +- // Don't deadlock if the channel is already full +- default: +- } +- } +- + // reqStep requests a derivation step nicely, with a delay if this is a reattempt, or not at all if we already scheduled a reattempt. 
+ reqStep := func() { +- if stepAttempts > 0 { +- // if this is not the first attempt, we re-schedule with a backoff, *without blocking other events* +- if delayedStepReq == nil { +- delay := bOffStrategy.Duration(stepAttempts) +- s.log.Debug("scheduling re-attempt with delay", "attempts", stepAttempts, "delay", delay) +- delayedStepReq = time.After(delay) +- } else { +- s.log.Debug("ignoring step request, already scheduled re-attempt after previous failure", "attempts", stepAttempts) +- } +- } else { +- step() +- } ++ s.Emit(StepReqEvent{}) + } +  + // We call reqStep right away to finish syncing to the tip of the chain if we're behind. +@@ -244,7 +210,7 @@ } +  + // Create a ticker to check if there is a gap in the engine queue. Whenever + // there is, we send requests to sync source to retrieve the missing payloads. +- syncCheckInterval := time.Duration(s.config.BlockTime) * time.Second * 2 ++ syncCheckInterval := time.Duration(s.Config.BlockTime) * time.Second * 2 + altSyncTicker := time.NewTicker(syncCheckInterval) + defer altSyncTicker.Stop() + lastUnsafeL2 := s.Engine.UnsafeL2Head() +@@ -254,6 +220,15 @@ if s.driverCtx.Err() != nil { // don't try to schedule/handle more work when we are closing. + return + } +  ++ // While event-processing is synchronous we have to drain ++ // (i.e. process all queued-up events) before creating any new events. ++ if err := s.synchronousEvents.Drain(); err != nil { ++ if s.driverCtx.Err() != nil { ++ return ++ } ++ s.log.Error("unexpected error from event-draining", "err", err) ++ } ++ + // If we are sequencing, and the L1 state is ready, update the trigger for the next sequencer action. + // This may adjust at any time based on fork-choice changes or previous errors. + // And avoid sequencing if the derivation pipeline indicates the engine is not ready. +@@ -294,7 +269,7 @@ // the payload publishing is handled by the async gossiper, which will begin gossiping as soon as available + // so, we don't need to receive the payload here + _, err := s.sequencer.RunNextSequencerAction(s.driverCtx, s.asyncGossiper, s.sequencerConductor) + if errors.Is(err, derive.ErrReset) { +- s.Derivation.Reset() ++ s.Emitter.Emit(rollup.ResetEvent{}) + } else if err != nil { + s.log.Error("Sequencer critical error", "err", err) + return +@@ -311,13 +286,13 @@ } + case envelope := <-s.unsafeL2Payloads: + s.snapshot("New unsafe payload") + // If we are doing CL sync or done with engine syncing, fallback to the unsafe payload queue & CL P2P sync. 
+- if s.syncCfg.SyncMode == sync.CLSync || !s.Engine.IsEngineSyncing() { ++ if s.SyncCfg.SyncMode == sync.CLSync || !s.Engine.IsEngineSyncing() { + s.log.Info("Optimistically queueing unsafe L2 execution payload", "id", envelope.ExecutionPayload.ID()) +- s.CLSync.AddUnsafePayload(envelope) ++ s.Emitter.Emit(clsync.ReceivedUnsafePayloadEvent{Envelope: envelope}) + s.metrics.RecordReceivedUnsafePayload(envelope) + reqStep() +- } else if s.syncCfg.SyncMode == sync.ELSync { +- ref, err := derive.PayloadToBlockRef(s.config, envelope.ExecutionPayload) ++ } else if s.SyncCfg.SyncMode == sync.ELSync { ++ ref, err := derive.PayloadToBlockRef(s.Config, envelope.ExecutionPayload) + if err != nil { + s.log.Info("Failed to turn execution payload into a block ref", "id", envelope.ExecutionPayload.ID(), "err", err) + continue +@@ -338,62 +313,12 @@ s.l1State.HandleNewL1SafeBlock(newL1Safe) + // no step, justified L1 information does not do anything for L2 derivation or status + case newL1Finalized := <-s.l1FinalizedSig: + s.l1State.HandleNewL1FinalizedBlock(newL1Finalized) +- ctx, cancel := context.WithTimeout(s.driverCtx, time.Second*5) +- s.Finalizer.Finalize(ctx, newL1Finalized) +- cancel() ++ s.Emit(finality.FinalizeL1Event{FinalizedL1: newL1Finalized}) + reqStep() // we may be able to mark more L2 data as finalized now +- case <-delayedStepReq: +- delayedStepReq = nil +- step() +- case <-stepReqCh: +- // Don't start the derivation pipeline until we are done with EL sync +- if s.Engine.IsEngineSyncing() { +- continue +- } +- s.log.Debug("Sync process step", "onto_origin", s.Derivation.Origin(), "attempts", stepAttempts) +- err := s.SyncStep(s.driverCtx) +- stepAttempts += 1 // count as attempt by default. We reset to 0 if we are making healthy progress. +- if err == io.EOF { +- s.log.Debug("Derivation process went idle", "progress", s.Derivation.Origin(), "err", err) +- stepAttempts = 0 +- continue +- } else if err != nil && errors.Is(err, derive.EngineELSyncing) { +- s.log.Debug("Derivation process went idle because the engine is syncing", "progress", s.Derivation.Origin(), "unsafe_head", s.Engine.UnsafeL2Head(), "err", err) +- stepAttempts = 0 +- continue +- } else if err != nil && errors.Is(err, derive.ErrReset) { +- // If the pipeline corrupts, e.g. due to a reorg, simply reset it +- s.log.Warn("Derivation pipeline is reset", "err", err) +- s.Derivation.Reset() +- s.Finalizer.Reset() +- s.metrics.RecordPipelineReset() +- reqStep() +- if err := engine.ResetEngine(s.driverCtx, s.log, s.config, s.Engine, s.l1, s.l2, s.syncCfg, s.SafeHeadNotifs); err != nil { +- s.log.Error("Derivation pipeline not ready, failed to reset engine", "err", err) +- // Derivation-pipeline will return a new ResetError until we confirm the engine has been successfully reset. 
+- continue +- } +- s.Derivation.ConfirmEngineReset() +- continue +- } else if err != nil && errors.Is(err, derive.ErrTemporary) { +- s.log.Warn("Derivation process temporary error", "attempts", stepAttempts, "err", err) +- reqStep() +- continue +- } else if err != nil && errors.Is(err, derive.ErrCritical) { +- s.log.Error("Derivation process critical error", "err", err) +- return +- } else if err != nil && errors.Is(err, derive.NotEnoughData) { +- stepAttempts = 0 // don't do a backoff for this error +- reqStep() +- continue +- } else if err != nil { +- s.log.Error("Derivation process error", "attempts", stepAttempts, "err", err) +- reqStep() +- continue +- } else { +- stepAttempts = 0 +- reqStep() // continue with the next step if we can +- } ++ case <-s.sched.NextDelayedStep(): ++ s.Emit(StepAttemptEvent{}) ++ case <-s.sched.NextStep(): ++ s.Emit(StepAttemptEvent{}) + case respCh := <-s.stateReq: + respCh <- struct{}{} + case respCh := <-s.forceReset: +@@ -440,6 +365,29 @@ } + } + } +  ++// OnEvent handles broadcasted events. ++// The Driver itself is a deriver to catch system-critical events. ++// Other event-handling should be encapsulated into standalone derivers. ++func (s *Driver) OnEvent(ev rollup.Event) { ++ switch x := ev.(type) { ++ case rollup.CriticalErrorEvent: ++ s.Log.Error("Derivation process critical error", "err", x.Err) ++ // we need to unblock event-processing to be able to close ++ go func() { ++ logger := s.Log ++ err := s.Close() ++ if err != nil { ++ logger.Error("Failed to shutdown driver on critical error", "err", err) ++ } ++ }() ++ return ++ } ++} ++ ++func (s *Driver) Emit(ev rollup.Event) { ++ s.synchronousEvents.Emit(ev) ++} ++ + type SyncDeriver struct { + // The derivation pipeline is reset whenever we reorg. + // The derivation pipeline determines the new l2Safe. +@@ -447,74 +395,164 @@ Derivation DerivationPipeline +  + Finalizer Finalizer +  +- AttributesHandler AttributesHandler +- +- SafeHeadNotifs rollup.SafeHeadListener // notified when safe head is updated +- lastNotifiedSafeHead eth.L2BlockRef ++ SafeHeadNotifs rollup.SafeHeadListener // notified when safe head is updated +  + CLSync CLSync +  + // The engine controller is used by the sequencer & Derivation components. + // We will also use it for EL sync in a future PR. + Engine EngineController ++ ++ // Sync Mod Config ++ SyncCfg *sync.Config ++ ++ Config *rollup.Config ++ ++ L1 L1Chain ++ L2 L2Chain ++ ++ Emitter rollup.EventEmitter ++ ++ Log log.Logger ++ ++ Ctx context.Context ++ ++ Drain func() error + } +  +-// SyncStep performs the sequence of encapsulated syncing steps. +-// Warning: this sequence will be broken apart as outlined in op-node derivers design doc. +-func (s *SyncDeriver) SyncStep(ctx context.Context) error { +- // If we don't need to call FCU to restore unsafeHead using backupUnsafe, keep going b/c +- // this was a no-op(except correcting invalid state when backupUnsafe is empty but TryBackupUnsafeReorg called). +- if fcuCalled, err := s.Engine.TryBackupUnsafeReorg(ctx); fcuCalled { +- // If we needed to perform a network call, then we should yield even if we did not encounter an error. +- return err +- } +- // If we don't need to call FCU, keep going b/c this was a no-op. If we needed to +- // perform a network call, then we should yield even if we did not encounter an error. 
+- if err := s.Engine.TryUpdateEngine(ctx); !errors.Is(err, engine.ErrNoFCUNeeded) { +- return err +- } ++func (s *SyncDeriver) OnEvent(ev rollup.Event) { ++ switch x := ev.(type) { ++ case StepEvent: ++ s.onStepEvent() ++ case rollup.ResetEvent: ++ s.onResetEvent(x) ++ case rollup.L1TemporaryErrorEvent: ++ s.Log.Warn("L1 temporary error", "err", x.Err) ++ s.Emitter.Emit(StepReqEvent{}) ++ case rollup.EngineTemporaryErrorEvent: ++ s.Log.Warn("Engine temporary error", "err", x.Err) ++ ++ // Make sure that for any temporarily failed attributes we retry processing. ++ s.Emitter.Emit(engine.PendingSafeRequestEvent{}) +  +- if s.Engine.IsEngineSyncing() { +- // The pipeline cannot move forwards if doing EL sync. +- return derive.EngineELSyncing ++ s.Emitter.Emit(StepReqEvent{}) ++ case engine.EngineResetConfirmedEvent: ++ s.onEngineConfirmedReset(x) ++ case derive.DeriverIdleEvent: ++ // Once derivation is idle the system is healthy ++ // and we can wait for new inputs. No backoff necessary. ++ s.Emitter.Emit(ResetStepBackoffEvent{}) ++ case derive.DeriverMoreEvent: ++ // If there is more data to process, ++ // continue derivation quickly ++ s.Emitter.Emit(StepReqEvent{ResetBackoff: true}) ++ case engine.SafeDerivedEvent: ++ s.onSafeDerivedBlock(x) + } ++} +  +- // Trying unsafe payload should be done before safe attributes +- // It allows the unsafe head to move forward while the long-range consolidation is in progress. +- if err := s.CLSync.Proceed(ctx); err != io.EOF { +- // EOF error means we can't process the next unsafe payload. Then we should process next safe attributes. +- return err +- } +- // Try safe attributes now. +- if err := s.AttributesHandler.Proceed(ctx); err != io.EOF { +- // EOF error means we can't process the next attributes. Then we should derive the next attributes. +- return err +- } +- derivationOrigin := s.Derivation.Origin() +- if s.SafeHeadNotifs != nil && s.SafeHeadNotifs.Enabled() && s.Derivation.DerivationReady() && +- s.lastNotifiedSafeHead != s.Engine.SafeL2Head() { +- s.lastNotifiedSafeHead = s.Engine.SafeL2Head() +- // make sure we track the last L2 safe head for every new L1 block +- if err := s.SafeHeadNotifs.SafeHeadUpdated(s.lastNotifiedSafeHead, derivationOrigin.ID()); err != nil { ++func (s *SyncDeriver) onSafeDerivedBlock(x engine.SafeDerivedEvent) { ++ if s.SafeHeadNotifs != nil && s.SafeHeadNotifs.Enabled() { ++ if err := s.SafeHeadNotifs.SafeHeadUpdated(x.Safe, x.DerivedFrom.ID()); err != nil { + // At this point our state is in a potentially inconsistent state as we've updated the safe head + // in the execution client but failed to post process it. Reset the pipeline so the safe head rolls back + // a little (it always rolls back at least 1 block) and then it will retry storing the entry +- return derive.NewResetError(fmt.Errorf("safe head notifications failed: %w", err)) ++ s.Emitter.Emit(rollup.ResetEvent{Err: fmt.Errorf("safe head notifications failed: %w", err)}) + } + } +- s.Finalizer.PostProcessSafeL2(s.Engine.SafeL2Head(), derivationOrigin) ++} +  +- // try to finalize the L2 blocks we have synced so far (no-op if L1 finality is behind) +- if err := s.Finalizer.OnDerivationL1End(ctx, derivationOrigin); err != nil { +- return fmt.Errorf("finalizer OnDerivationL1End error: %w", err) ++func (s *SyncDeriver) onEngineConfirmedReset(x engine.EngineResetConfirmedEvent) { ++ // If the listener update fails, we return, ++ // and don't confirm the engine-reset with the derivation pipeline. ++ // The pipeline will re-trigger a reset as necessary. 
++ if s.SafeHeadNotifs != nil { ++ if err := s.SafeHeadNotifs.SafeHeadReset(x.Safe); err != nil { ++ s.Log.Error("Failed to warn safe-head notifier of safe-head reset", "safe", x.Safe) ++ return ++ } ++ if s.SafeHeadNotifs.Enabled() && x.Safe.ID() == s.Config.Genesis.L2 { ++ // The rollup genesis block is always safe by definition. So if the pipeline resets this far back we know ++ // we will process all safe head updates and can record genesis as always safe from L1 genesis. ++ // Note that it is not safe to use cfg.Genesis.L1 here as it is the block immediately before the L2 genesis ++ // but the contracts may have been deployed earlier than that, allowing creating a dispute game ++ // with a L1 head prior to cfg.Genesis.L1 ++ l1Genesis, err := s.L1.L1BlockRefByNumber(s.Ctx, 0) ++ if err != nil { ++ s.Log.Error("Failed to retrieve L1 genesis, cannot notify genesis as safe block", "err", err) ++ return ++ } ++ if err := s.SafeHeadNotifs.SafeHeadUpdated(x.Safe, l1Genesis.ID()); err != nil { ++ s.Log.Error("Failed to notify safe-head listener of safe-head", "err", err) ++ return ++ } ++ } + } ++ s.Emitter.Emit(derive.ConfirmPipelineResetEvent{}) ++} +  +- attr, err := s.Derivation.Step(ctx, s.Engine.PendingSafeL2Head()) +- if err != nil { ++func (s *SyncDeriver) onStepEvent() { ++ s.Log.Debug("Sync process step") ++ // Note: while we refactor the SyncStep to be entirely event-based we have an intermediate phase ++ // where some things are triggered through events, and some through this synchronous step function. ++ // We just translate the results into their equivalent events, ++ // to merge the error-handling with that of the new event-based system. ++ err := s.SyncStep() ++ if err != nil && errors.Is(err, derive.EngineELSyncing) { ++ s.Log.Debug("Derivation process went idle because the engine is syncing", "unsafe_head", s.Engine.UnsafeL2Head(), "err", err) ++ s.Emitter.Emit(ResetStepBackoffEvent{}) ++ } else if err != nil && errors.Is(err, derive.ErrReset) { ++ s.Emitter.Emit(rollup.ResetEvent{Err: err}) ++ } else if err != nil && errors.Is(err, derive.ErrTemporary) { ++ s.Emitter.Emit(rollup.EngineTemporaryErrorEvent{Err: err}) ++ } else if err != nil && errors.Is(err, derive.ErrCritical) { ++ s.Emitter.Emit(rollup.CriticalErrorEvent{Err: err}) ++ } else if err != nil { ++ s.Log.Error("Derivation process error", "err", err) ++ s.Emitter.Emit(StepReqEvent{}) ++ } else { ++ s.Emitter.Emit(StepReqEvent{ResetBackoff: true}) // continue with the next step if we can ++ } ++} ++ ++func (s *SyncDeriver) onResetEvent(x rollup.ResetEvent) { ++ // If the system corrupts, e.g. due to a reorg, simply reset it ++ s.Log.Warn("Deriver system is resetting", "err", x.Err) ++ s.Emitter.Emit(StepReqEvent{}) ++ s.Emitter.Emit(engine.ResetEngineRequestEvent{}) ++} ++ ++// SyncStep performs the sequence of encapsulated syncing steps. ++// Warning: this sequence will be broken apart as outlined in op-node derivers design doc. ++func (s *SyncDeriver) SyncStep() error { ++ if err := s.Drain(); err != nil { ++ return err ++ } ++ ++ s.Emitter.Emit(engine.TryBackupUnsafeReorgEvent{}) ++ if err := s.Drain(); err != nil { + return err + } +  +- s.AttributesHandler.SetAttributes(attr) ++ s.Emitter.Emit(engine.TryUpdateEngineEvent{}) ++ if err := s.Drain(); err != nil { ++ return err ++ } ++ ++ if s.Engine.IsEngineSyncing() { ++ // The pipeline cannot move forwards if doing EL sync. 
++ return derive.EngineELSyncing ++ } ++ ++ // Any now processed forkchoice updates will trigger CL-sync payload processing, if any payload is queued up. ++ ++ // Since we don't force attributes to be processed at this point, ++ // we cannot safely directly trigger the derivation, as that may generate new attributes that ++ // conflict with what attributes have not been applied yet. ++ // Instead, we request the engine to repeat where its pending-safe head is at. ++ // Upon the pending-safe signal the attributes deriver can then ask the pipeline ++ // to generate new attributes, if no attributes are known already. ++ s.Emitter.Emit(engine.PendingSafeRequestEvent{}) + return nil + } +  +@@ -598,6 +636,10 @@ } + } + } +  ++func (s *Driver) OverrideLeader(ctx context.Context) error { ++ return s.sequencerConductor.OverrideLeader(ctx) ++} ++ + // syncStatus returns the current sync status, and should only be called synchronously with + // the driver event loop to avoid retrieval of an inconsistent status. + func (s *Driver) syncStatus() *eth.SyncStatus { +@@ -636,7 +678,7 @@ wait := make(chan struct{}) + select { + case s.stateReq <- wait: + resp := s.syncStatus() +- ref, err := s.l2.L2BlockRefByNumber(ctx, num) ++ ref, err := s.L2.L2BlockRefByNumber(ctx, num) + <-wait + return ref, resp, err + case <-ctx.Done(): +@@ -658,7 +700,6 @@ func (s *Driver) snapshot(event string) { + s.snapshotLog.Info("Rollup State Snapshot", + "event", event, + "l1Head", deferJSONString{s.l1State.L1Head()}, +- "l1Current", deferJSONString{s.Derivation.Origin()}, + "l2Head", deferJSONString{s.Engine.UnsafeL2Head()}, + "l2Safe", deferJSONString{s.Engine.SafeL2Head()}, + "l2FinalizedHead", deferJSONString{s.Engine.Finalized()})
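The loop comment above states the contract: while event processing is synchronous, all queued-up events must be drained before any new work is scheduled, and SyncStep interleaves Emit and Drain accordingly. A sketch of a queue matching that contract, assuming Emit only enqueues and Drain dispatches to the root deriver until the queue is empty:

```go
// Sketch of the assumed SynchronousEvents behavior; the real
// implementation in op-node's rollup package may differ.
package rollup

import (
	"context"
	"sync"
)

type SynchronousEvents struct {
	mu    sync.Mutex
	queue []Event
	root  Deriver
	ctx   context.Context
}

// Emit only enqueues; nothing is processed until Drain runs.
func (q *SynchronousEvents) Emit(ev Event) {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.queue = append(q.queue, ev)
}

// Drain dispatches events until the queue is empty. Handlers may Emit
// follow-up events, which are processed in the same Drain call.
func (q *SynchronousEvents) Drain() error {
	for {
		if err := q.ctx.Err(); err != nil {
			return err
		}
		q.mu.Lock()
		if len(q.queue) == 0 {
			q.mu.Unlock()
			return nil
		}
		ev := q.queue[0]
		q.queue = q.queue[1:]
		q.mu.Unlock()
		// Dispatch outside the lock so handlers can Emit re-entrantly.
		q.root.OnEvent(ev)
	}
}
```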
+ [new file in CELO: +130 / -0]
diff --git OP/op-node/rollup/driver/steps.go CELO/op-node/rollup/driver/steps.go +new file mode 100644 +index 0000000000000000000000000000000000000000..8f29203adcc34894c0e31bbaf0d439952fa6675d +--- /dev/null ++++ CELO/op-node/rollup/driver/steps.go +@@ -0,0 +1,130 @@ ++package driver ++ ++import ( ++ "time" ++ ++ "github.com/ethereum/go-ethereum/log" ++ ++ "github.com/ethereum-optimism/optimism/op-node/rollup" ++ "github.com/ethereum-optimism/optimism/op-service/retry" ++) ++ ++type ResetStepBackoffEvent struct { ++} ++ ++func (ev ResetStepBackoffEvent) String() string { ++ return "reset-step-backoff" ++} ++ ++type StepReqEvent struct { ++ ResetBackoff bool ++} ++ ++func (ev StepReqEvent) String() string { ++ return "step-req" ++} ++ ++type StepAttemptEvent struct{} ++ ++func (ev StepAttemptEvent) String() string { ++ return "step-attempt" ++} ++ ++type StepEvent struct{} ++ ++func (ev StepEvent) String() string { ++ return "step" ++} ++ ++// StepSchedulingDeriver is a deriver that emits StepEvent events. ++// The deriver can be requested to schedule a step with a StepReqEvent. ++// ++// It is then up to the caller to translate scheduling into StepAttemptEvent emissions, by waiting for ++// NextStep or NextDelayedStep channels (nil if there is nothing to wait for, for channel-merging purposes). ++// ++// Upon StepAttemptEvent the scheduler will then emit a StepEvent, ++// while maintaining backoff state, to not spam steps. ++// ++// Backoff can be reset by sending a request with StepReqEvent.ResetBackoff ++// set to true, or by sending a ResetStepBackoffEvent. ++type StepSchedulingDeriver struct { ++ ++ // keep track of consecutive failed attempts, to adjust the backoff time accordingly ++ stepAttempts int ++ bOffStrategy retry.Strategy ++ ++ // channel, nil by default (not firing), but used to schedule re-attempts with delay ++ delayedStepReq <-chan time.Time ++ ++ // stepReqCh is used to request that the driver attempts to step forward by one L1 block. ++ stepReqCh chan struct{} ++ ++ log log.Logger ++ ++ emitter rollup.EventEmitter ++} ++ ++func NewStepSchedulingDeriver(log log.Logger, emitter rollup.EventEmitter) *StepSchedulingDeriver { ++ return &StepSchedulingDeriver{ ++ stepAttempts: 0, ++ bOffStrategy: retry.Exponential(), ++ stepReqCh: make(chan struct{}, 1), ++ delayedStepReq: nil, ++ log: log, ++ emitter: emitter, ++ } ++} ++ ++// NextStep is a channel to await, and if triggered, ++// the caller should emit a StepAttemptEvent to queue up a step while maintaining backoff. ++func (s *StepSchedulingDeriver) NextStep() <-chan struct{} { ++ return s.stepReqCh ++} ++ ++// NextDelayedStep is a temporary channel to await, and if triggered, ++// the caller should emit a StepAttemptEvent to queue up a step while maintaining backoff. ++// The returned channel may be nil, if there is no requested step with delay scheduled. 
++func (s *StepSchedulingDeriver) NextDelayedStep() <-chan time.Time { ++ return s.delayedStepReq ++} ++ ++func (s *StepSchedulingDeriver) OnEvent(ev rollup.Event) { ++ step := func() { ++ s.delayedStepReq = nil ++ select { ++ case s.stepReqCh <- struct{}{}: ++ // Don't deadlock if the channel is already full ++ default: ++ } ++ } ++ ++ switch x := ev.(type) { ++ case StepReqEvent: ++ if x.ResetBackoff { ++ s.stepAttempts = 0 ++ } ++ if s.stepAttempts > 0 { ++ // if this is not the first attempt, we re-schedule with a backoff, *without blocking other events* ++ if s.delayedStepReq == nil { ++ delay := s.bOffStrategy.Duration(s.stepAttempts) ++ s.log.Debug("scheduling re-attempt with delay", "attempts", s.stepAttempts, "delay", delay) ++ s.delayedStepReq = time.After(delay) ++ } else { ++ s.log.Debug("ignoring step request, already scheduled re-attempt after previous failure", "attempts", s.stepAttempts) ++ } ++ } else { ++ step() ++ } ++ case StepAttemptEvent: ++ // clear the delayed-step channel ++ s.delayedStepReq = nil ++ if s.stepAttempts > 0 { ++ s.log.Debug("Running step retry", "attempts", s.stepAttempts) ++ } ++ // count as attempt by default. We reset to 0 if we are making healthy progress. ++ s.stepAttempts += 1 ++ s.emitter.Emit(StepEvent{}) ++ case ResetStepBackoffEvent: ++ s.stepAttempts = 0 ++ } ++}
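Scheduling and stepping are now decoupled: the deriver only manages backoff state and two channels, and the caller turns channel signals into StepAttemptEvent emissions (as the state.go select loop above does). A hypothetical helper showing the channel-merge the doc comment describes; note a nil NextDelayedStep() channel simply never fires, which is what keeps the select valid when no retry is scheduled:

```go
package driver

import (
	"context"

	"github.com/ethereum-optimism/optimism/op-node/rollup"
)

// runSteps is a made-up helper for illustration; the real merge happens
// inside the driver's main event loop.
func runSteps(ctx context.Context, sched *StepSchedulingDeriver, emitter rollup.EventEmitter) {
	for {
		select {
		case <-sched.NextStep():
			// An immediate (non-backoff) step was requested.
			emitter.Emit(StepAttemptEvent{})
		case <-sched.NextDelayedStep():
			// A backoff timer fired; nil channel blocks forever.
			emitter.Emit(StepAttemptEvent{})
		case <-ctx.Done():
			return
		}
	}
}
```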
+ [new file in CELO: +53 / -0]
diff --git OP/op-node/rollup/driver/steps_test.go CELO/op-node/rollup/driver/steps_test.go +new file mode 100644 +index 0000000000000000000000000000000000000000..5dbba314b6e1cf4170946817a1feff5c16d4dae6 +--- /dev/null ++++ CELO/op-node/rollup/driver/steps_test.go +@@ -0,0 +1,53 @@ ++package driver ++ ++import ( ++ "testing" ++ ++ "github.com/stretchr/testify/require" ++ ++ "github.com/ethereum/go-ethereum/log" ++ ++ "github.com/ethereum-optimism/optimism/op-node/rollup" ++ "github.com/ethereum-optimism/optimism/op-service/testlog" ++) ++ ++func TestStepSchedulingDeriver(t *testing.T) { ++ logger := testlog.Logger(t, log.LevelError) ++ var queued []rollup.Event ++ emitter := rollup.EmitterFunc(func(ev rollup.Event) { ++ queued = append(queued, ev) ++ }) ++ sched := NewStepSchedulingDeriver(logger, emitter) ++ require.Len(t, sched.NextStep(), 0, "start empty") ++ sched.OnEvent(StepReqEvent{}) ++ require.Len(t, sched.NextStep(), 1, "take request") ++ sched.OnEvent(StepReqEvent{}) ++ require.Len(t, sched.NextStep(), 1, "ignore duplicate request") ++ require.Empty(t, queued, "only scheduled so far, no step attempts yet") ++ <-sched.NextStep() ++ sched.OnEvent(StepAttemptEvent{}) ++ require.Equal(t, []rollup.Event{StepEvent{}}, queued, "got step event") ++ require.Nil(t, sched.NextDelayedStep(), "no delayed steps yet") ++ sched.OnEvent(StepReqEvent{}) ++ require.NotNil(t, sched.NextDelayedStep(), "2nd attempt before backoff reset causes delayed step to be scheduled") ++ sched.OnEvent(StepReqEvent{}) ++ require.NotNil(t, sched.NextDelayedStep(), "can continue to request attempts") ++ ++ sched.OnEvent(StepReqEvent{}) ++ require.Len(t, sched.NextStep(), 0, "no step requests accepted without delay if backoff is counting") ++ ++ sched.OnEvent(StepReqEvent{ResetBackoff: true}) ++ require.Len(t, sched.NextStep(), 1, "request accepted if backoff is reset") ++ <-sched.NextStep() ++ ++ sched.OnEvent(StepReqEvent{}) ++ require.Len(t, sched.NextStep(), 1, "no backoff, no attempt has been made yet") ++ <-sched.NextStep() ++ sched.OnEvent(StepAttemptEvent{}) ++ sched.OnEvent(StepReqEvent{}) ++ require.Len(t, sched.NextStep(), 0, "backoff again") ++ ++ sched.OnEvent(ResetStepBackoffEvent{}) ++ sched.OnEvent(StepReqEvent{}) ++ require.Len(t, sched.NextStep(), 1, "reset backoff accepted, was able to schedule non-delayed step") ++}
+ [OP → CELO: +39 / -2]
diff --git OP/op-node/rollup/engine/engine_controller.go CELO/op-node/rollup/engine/engine_controller.go +index 6e382cc367f2a3f186816d9a86270f2c36cba780..8121dc3800aa61aab6a0edf593e5df89cf28fdb7 100644 +--- OP/op-node/rollup/engine/engine_controller.go ++++ CELO/op-node/rollup/engine/engine_controller.go +@@ -54,6 +54,8 @@ rollupCfg *rollup.Config + elStart time.Time + clock clock.Clock +  ++ emitter rollup.EventEmitter ++ + // Block Head State + unsafeHead eth.L2BlockRef + pendingSafeHead eth.L2BlockRef // L2 block processed from the middle of a span batch, but not marked as the safe block yet. +@@ -75,7 +77,8 @@ buildingSafe bool + safeAttrs *derive.AttributesWithParent + } +  +-func NewEngineController(engine ExecEngine, log log.Logger, metrics derive.Metrics, rollupCfg *rollup.Config, syncMode sync.Mode) *EngineController { ++func NewEngineController(engine ExecEngine, log log.Logger, metrics derive.Metrics, ++ rollupCfg *rollup.Config, syncMode sync.Mode, emitter rollup.EventEmitter) *EngineController { + syncStatus := syncStatusCL + if syncMode == sync.ELSync { + syncStatus = syncStatusWillStartEL +@@ -90,6 +93,7 @@ rollupCfg: rollupCfg, + syncMode: syncMode, + syncStatus: syncStatus, + clock: clock.SystemClock, ++ emitter: emitter, + } + } +  +@@ -224,6 +228,11 @@ id, errTyp, err := startPayload(ctx, e.engine, fc, attrs.Attributes) + if err != nil { + return errTyp, err + } ++ e.emitter.Emit(ForkchoiceUpdateEvent{ ++ UnsafeL2Head: parent, ++ SafeL2Head: e.safeHead, ++ FinalizedL2Head: e.finalizedHead, ++ }) +  + e.buildingInfo = eth.PayloadInfo{ID: id, Timestamp: uint64(attrs.Attributes.Timestamp)} + e.buildingSafe = updateSafe +@@ -280,6 +289,11 @@ // Remove backupUnsafeHead because this backup will be never used after consolidation. + e.SetBackupUnsafeL2Head(eth.L2BlockRef{}, false) + } + } ++ e.emitter.Emit(ForkchoiceUpdateEvent{ ++ UnsafeL2Head: e.unsafeHead, ++ SafeL2Head: e.safeHead, ++ FinalizedL2Head: e.finalizedHead, ++ }) +  + e.resetBuildingState() + return envelope, BlockInsertOK, nil +@@ -353,7 +367,7 @@ FinalizedBlockHash: e.finalizedHead.Hash, + } + logFn := e.logSyncProgressMaybe() + defer logFn() +- _, err := e.engine.ForkchoiceUpdate(ctx, &fc, nil) ++ fcRes, err := e.engine.ForkchoiceUpdate(ctx, &fc, nil) + if err != nil { + var inputErr eth.InputError + if errors.As(err, &inputErr) { +@@ -367,6 +381,13 @@ } else { + return derive.NewTemporaryError(fmt.Errorf("failed to sync forkchoice with engine: %w", err)) + } + } ++ if fcRes.PayloadStatus.Status == eth.ExecutionValid { ++ e.emitter.Emit(ForkchoiceUpdateEvent{ ++ UnsafeL2Head: e.unsafeHead, ++ SafeL2Head: e.safeHead, ++ FinalizedL2Head: e.finalizedHead, ++ }) ++ } + e.needFCUCall = false + return nil + } +@@ -392,6 +413,9 @@ // Insert the payload & then call FCU + status, err := e.engine.NewPayload(ctx, envelope.ExecutionPayload, envelope.ParentBeaconBlockRoot) + if err != nil { + return derive.NewTemporaryError(fmt.Errorf("failed to update insert payload: %w", err)) ++ } ++ if status.Status == eth.ExecutionInvalid { ++ e.emitter.Emit(InvalidPayloadEvent{Envelope: envelope}) + } + if !e.checkNewPayloadStatus(status.Status) { + payload := envelope.ExecutionPayload +@@ -440,6 +464,14 @@ e.log.Info("Finished EL sync", "sync_duration", e.clock.Since(e.elStart), "finalized_block", ref.ID().String()) + e.syncStatus = syncStatusFinishedEL + } +  ++ if fcRes.PayloadStatus.Status == eth.ExecutionValid { ++ e.emitter.Emit(ForkchoiceUpdateEvent{ ++ UnsafeL2Head: e.unsafeHead, ++ SafeL2Head: e.safeHead, ++ FinalizedL2Head: 
e.finalizedHead, ++ }) ++ } ++ + return nil + } +  +@@ -501,6 +533,11 @@ return true, derive.NewTemporaryError(fmt.Errorf("failed to sync forkchoice with engine: %w", err)) + } + } + if fcRes.PayloadStatus.Status == eth.ExecutionValid { ++ e.emitter.Emit(ForkchoiceUpdateEvent{ ++ UnsafeL2Head: e.backupUnsafeHead, ++ SafeL2Head: e.safeHead, ++ FinalizedL2Head: e.finalizedHead, ++ }) + // Execution engine accepted the reorg. + e.log.Info("successfully reorged unsafe head using backupUnsafe", "unsafe", e.backupUnsafeHead.ID()) + e.SetUnsafeHead(e.BackupUnsafeL2Head())
+ [OP → CELO: +38 / -56]
diff --git OP/op-node/rollup/engine/engine_reset.go CELO/op-node/rollup/engine/engine_reset.go +index 700e44e11c30185318f6f346121d2c8645a00eb2..c0985d8ddb39347fd6e0c23cf95087b8b2a15de2 100644 +--- OP/op-node/rollup/engine/engine_reset.go ++++ CELO/op-node/rollup/engine/engine_reset.go +@@ -7,72 +7,54 @@ + "github.com/ethereum/go-ethereum/log" +  + "github.com/ethereum-optimism/optimism/op-node/rollup" +- "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-node/rollup/sync" +- "github.com/ethereum-optimism/optimism/op-service/eth" + ) +  +-type ResetL2 interface { +- sync.L2Chain +- derive.SystemConfigL2Fetcher ++// ResetEngineRequestEvent requests the EngineResetDeriver to walk ++// the L2 chain backwards until it finds a plausible unsafe head, ++// and find an L2 safe block that is guaranteed to still be from the L1 chain. ++type ResetEngineRequestEvent struct{} ++ ++func (ev ResetEngineRequestEvent) String() string { ++ return "reset-engine-request" + } +  +-type ResetEngineControl interface { +- SetUnsafeHead(eth.L2BlockRef) +- SetSafeHead(eth.L2BlockRef) +- SetFinalizedHead(eth.L2BlockRef) ++type EngineResetDeriver struct { ++ ctx context.Context ++ log log.Logger ++ cfg *rollup.Config ++ l1 sync.L1Chain ++ l2 sync.L2Chain ++ syncCfg *sync.Config +  +- SetBackupUnsafeL2Head(block eth.L2BlockRef, triggerReorg bool) +- SetPendingSafeL2Head(eth.L2BlockRef) +- +- ResetBuildingState() ++ emitter rollup.EventEmitter + } +  +-// ResetEngine walks the L2 chain backwards until it finds a plausible unsafe head, +-// and an L2 safe block that is guaranteed to still be from the L1 chain. +-func ResetEngine(ctx context.Context, log log.Logger, cfg *rollup.Config, ec ResetEngineControl, l1 sync.L1Chain, l2 ResetL2, syncCfg *sync.Config, safeHeadNotifs rollup.SafeHeadListener) error { +- result, err := sync.FindL2Heads(ctx, cfg, l1, l2, log, syncCfg) +- if err != nil { +- return derive.NewTemporaryError(fmt.Errorf("failed to find the L2 Heads to start from: %w", err)) +- } +- finalized, safe, unsafe := result.Finalized, result.Safe, result.Unsafe +- l1Origin, err := l1.L1BlockRefByHash(ctx, safe.L1Origin.Hash) +- if err != nil { +- return derive.NewTemporaryError(fmt.Errorf("failed to fetch the new L1 progress: origin: %v; err: %w", safe.L1Origin, err)) +- } +- if safe.Time < l1Origin.Time { +- return derive.NewResetError(fmt.Errorf("cannot reset block derivation to start at L2 block %s with time %d older than its L1 origin %s with time %d, time invariant is broken", +- safe, safe.Time, l1Origin, l1Origin.Time)) ++func NewEngineResetDeriver(ctx context.Context, log log.Logger, cfg *rollup.Config, ++ l1 sync.L1Chain, l2 sync.L2Chain, syncCfg *sync.Config, emitter rollup.EventEmitter) *EngineResetDeriver { ++ return &EngineResetDeriver{ ++ ctx: ctx, ++ log: log, ++ cfg: cfg, ++ l1: l1, ++ l2: l2, ++ syncCfg: syncCfg, ++ emitter: emitter, + } +- +- ec.SetUnsafeHead(unsafe) +- ec.SetSafeHead(safe) +- ec.SetPendingSafeL2Head(safe) +- ec.SetFinalizedHead(finalized) +- ec.SetBackupUnsafeL2Head(eth.L2BlockRef{}, false) +- ec.ResetBuildingState() +- +- log.Debug("Reset of Engine is completed", "safeHead", safe, "unsafe", unsafe, "safe_timestamp", safe.Time, +- "unsafe_timestamp", unsafe.Time, "l1Origin", l1Origin) ++} +  +- if safeHeadNotifs != nil { +- if err := safeHeadNotifs.SafeHeadReset(safe); err != nil { +- return err ++func (d *EngineResetDeriver) OnEvent(ev rollup.Event) { ++ switch ev.(type) { ++ case ResetEngineRequestEvent: ++ result, err := 
sync.FindL2Heads(d.ctx, d.cfg, d.l1, d.l2, d.log, d.syncCfg) ++ if err != nil { ++ d.emitter.Emit(rollup.ResetEvent{Err: fmt.Errorf("failed to find the L2 Heads to start from: %w", err)}) ++ return + } +- if safeHeadNotifs.Enabled() && safe.Number == cfg.Genesis.L2.Number && safe.Hash == cfg.Genesis.L2.Hash { +- // The rollup genesis block is always safe by definition. So if the pipeline resets this far back we know +- // we will process all safe head updates and can record genesis as always safe from L1 genesis. +- // Note that it is not safe to use cfg.Genesis.L1 here as it is the block immediately before the L2 genesis +- // but the contracts may have been deployed earlier than that, allowing creating a dispute game +- // with a L1 head prior to cfg.Genesis.L1 +- l1Genesis, err := l1.L1BlockRefByNumber(ctx, 0) +- if err != nil { +- return fmt.Errorf("failed to retrieve L1 genesis: %w", err) +- } +- if err := safeHeadNotifs.SafeHeadUpdated(safe, l1Genesis.ID()); err != nil { +- return err +- } +- } ++ d.emitter.Emit(ForceEngineResetEvent{ ++ Unsafe: result.Unsafe, ++ Safe: result.Safe, ++ Finalized: result.Finalized, ++ }) + } +- return nil + }
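The rewrite above turns the old synchronous ResetEngine call into an event round-trip: a deriver emits ResetEngineRequestEvent, the EngineResetDeriver runs sync.FindL2Heads, and on success emits ForceEngineResetEvent carrying the discovered heads. A minimal sketch of that round-trip, assuming the rollup and engine packages as introduced in these diffs; the stand-in deriver and the zero-valued ForceEngineResetEvent are illustrative only, taking the place of a real EngineResetDeriver and real FindL2Heads results:

package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/log"

	"github.com/ethereum-optimism/optimism/op-node/rollup"
	"github.com/ethereum-optimism/optimism/op-node/rollup/engine"
)

func main() {
	logger := log.Root()
	var queue *rollup.SynchronousEvents

	// Stand-in for EngineResetDeriver: a real one runs sync.FindL2Heads and
	// emits the discovered heads; zero-valued heads illustrate the shape here.
	resetSide := rollup.DeriverFunc(func(ev rollup.Event) {
		switch ev.(type) {
		case engine.ResetEngineRequestEvent:
			queue.Emit(engine.ForceEngineResetEvent{}) // Unsafe/Safe/Finalized elided
		case engine.ForceEngineResetEvent:
			fmt.Println("engine deriver would apply the new heads and confirm")
		}
	})
	queue = rollup.NewSynchronousEvents(logger, context.Background(), resetSide)
	queue.Emit(engine.ResetEngineRequestEvent{})
	_ = queue.Drain() // processes the request, then the forced reset it spawned
}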
(new file: op-node/rollup/engine/events.go, +360 / -0)
diff --git OP/op-node/rollup/engine/events.go CELO/op-node/rollup/engine/events.go +new file mode 100644 +index 0000000000000000000000000000000000000000..4c2320310a86a3fe44aeb0e8ee87cb8289ba7511 +--- /dev/null ++++ CELO/op-node/rollup/engine/events.go +@@ -0,0 +1,360 @@ ++package engine ++ ++import ( ++ "context" ++ "errors" ++ "fmt" ++ "time" ++ ++ "github.com/ethereum/go-ethereum/core/types" ++ "github.com/ethereum/go-ethereum/log" ++ ++ "github.com/ethereum-optimism/optimism/op-node/rollup" ++ "github.com/ethereum-optimism/optimism/op-node/rollup/async" ++ "github.com/ethereum-optimism/optimism/op-node/rollup/conductor" ++ "github.com/ethereum-optimism/optimism/op-node/rollup/derive" ++ "github.com/ethereum-optimism/optimism/op-service/eth" ++) ++ ++type InvalidPayloadEvent struct { ++ Envelope *eth.ExecutionPayloadEnvelope ++} ++ ++func (ev InvalidPayloadEvent) String() string { ++ return "invalid-payload" ++} ++ ++type InvalidPayloadAttributesEvent struct { ++ Attributes *derive.AttributesWithParent ++} ++ ++func (ev InvalidPayloadAttributesEvent) String() string { ++ return "invalid-payload-attributes" ++} ++ ++// ForkchoiceRequestEvent signals to the engine that it should emit an artificial ++// forkchoice-update event, to signal the latest forkchoice to other derivers. ++// This helps decouple derivers from the actual engine state, ++// while also not making the derivers wait for a forkchoice update at random. ++type ForkchoiceRequestEvent struct { ++} ++ ++func (ev ForkchoiceRequestEvent) String() string { ++ return "forkchoice-request" ++} ++ ++type ForkchoiceUpdateEvent struct { ++ UnsafeL2Head, SafeL2Head, FinalizedL2Head eth.L2BlockRef ++} ++ ++func (ev ForkchoiceUpdateEvent) String() string { ++ return "forkchoice-update" ++} ++ ++type PendingSafeUpdateEvent struct { ++ PendingSafe eth.L2BlockRef ++ Unsafe eth.L2BlockRef // tip, added to the signal, to determine if there are existing blocks to consolidate ++} ++ ++func (ev PendingSafeUpdateEvent) String() string { ++ return "pending-safe-update" ++} ++ ++// PromotePendingSafeEvent signals that a block can be marked as pending-safe, and/or safe. 
++type PromotePendingSafeEvent struct { ++ Ref eth.L2BlockRef ++ Safe bool ++ DerivedFrom eth.L1BlockRef ++} ++ ++func (ev PromotePendingSafeEvent) String() string { ++ return "promote-pending-safe" ++} ++ ++// SafeDerivedEvent signals that a block was determined to be safe, and derived from the given L1 block ++type SafeDerivedEvent struct { ++ Safe eth.L2BlockRef ++ DerivedFrom eth.L1BlockRef ++} ++ ++func (ev SafeDerivedEvent) String() string { ++ return "safe-derived" ++} ++ ++type ProcessAttributesEvent struct { ++ Attributes *derive.AttributesWithParent ++} ++ ++func (ev ProcessAttributesEvent) String() string { ++ return "process-attributes" ++} ++ ++type PendingSafeRequestEvent struct { ++} ++ ++func (ev PendingSafeRequestEvent) String() string { ++ return "pending-safe-request" ++} ++ ++type ProcessUnsafePayloadEvent struct { ++ Envelope *eth.ExecutionPayloadEnvelope ++} ++ ++func (ev ProcessUnsafePayloadEvent) String() string { ++ return "process-unsafe-payload" ++} ++ ++type TryBackupUnsafeReorgEvent struct { ++} ++ ++func (ev TryBackupUnsafeReorgEvent) String() string { ++ return "try-backup-unsafe-reorg" ++} ++ ++type TryUpdateEngineEvent struct { ++} ++ ++func (ev TryUpdateEngineEvent) String() string { ++ return "try-update-engine" ++} ++ ++type ForceEngineResetEvent struct { ++ Unsafe, Safe, Finalized eth.L2BlockRef ++} ++ ++func (ev ForceEngineResetEvent) String() string { ++ return "force-engine-reset" ++} ++ ++type EngineResetConfirmedEvent struct { ++ Unsafe, Safe, Finalized eth.L2BlockRef ++} ++ ++func (ev EngineResetConfirmedEvent) String() string { ++ return "engine-reset-confirmed" ++} ++ ++// PromoteFinalizedEvent signals that a block can be marked as finalized. ++type PromoteFinalizedEvent struct { ++ Ref eth.L2BlockRef ++} ++ ++func (ev PromoteFinalizedEvent) String() string { ++ return "promote-finalized" ++} ++ ++type EngDeriver struct { ++ log log.Logger ++ cfg *rollup.Config ++ ec *EngineController ++ ctx context.Context ++ emitter rollup.EventEmitter ++} ++ ++var _ rollup.Deriver = (*EngDeriver)(nil) ++ ++func NewEngDeriver(log log.Logger, ctx context.Context, cfg *rollup.Config, ++ ec *EngineController, emitter rollup.EventEmitter) *EngDeriver { ++ return &EngDeriver{ ++ log: log, ++ cfg: cfg, ++ ec: ec, ++ ctx: ctx, ++ emitter: emitter, ++ } ++} ++ ++func (d *EngDeriver) OnEvent(ev rollup.Event) { ++ switch x := ev.(type) { ++ case TryBackupUnsafeReorgEvent: ++ // If we don't need to call FCU to restore unsafeHead using backupUnsafe, keep going b/c ++ // this was a no-op(except correcting invalid state when backupUnsafe is empty but TryBackupUnsafeReorg called). ++ fcuCalled, err := d.ec.TryBackupUnsafeReorg(d.ctx) ++ // Dealing with legacy here: it used to skip over the error-handling if fcuCalled was false. ++ // But that combination is not actually a code-path in TryBackupUnsafeReorg. ++ // We should drop fcuCalled, and make the function emit events directly, ++ // once there are no more synchronous callers. ++ if !fcuCalled && err != nil { ++ d.log.Crit("unexpected TryBackupUnsafeReorg error after no FCU call", "err", err) ++ } ++ if err != nil { ++ // If we needed to perform a network call, then we should yield even if we did not encounter an error. 
++ if errors.Is(err, derive.ErrReset) { ++ d.emitter.Emit(rollup.ResetEvent{Err: err}) ++ } else if errors.Is(err, derive.ErrTemporary) { ++ d.emitter.Emit(rollup.EngineTemporaryErrorEvent{Err: err}) ++ } else { ++ d.emitter.Emit(rollup.CriticalErrorEvent{Err: fmt.Errorf("unexpected TryBackupUnsafeReorg error type: %w", err)}) ++ } ++ } ++ case TryUpdateEngineEvent: ++ // If we don't need to call FCU, keep going b/c this was a no-op. If we needed to ++ // perform a network call, then we should yield even if we did not encounter an error. ++ if err := d.ec.TryUpdateEngine(d.ctx); err != nil && !errors.Is(err, ErrNoFCUNeeded) { ++ if errors.Is(err, derive.ErrReset) { ++ d.emitter.Emit(rollup.ResetEvent{Err: err}) ++ } else if errors.Is(err, derive.ErrTemporary) { ++ d.emitter.Emit(rollup.EngineTemporaryErrorEvent{Err: err}) ++ } else { ++ d.emitter.Emit(rollup.CriticalErrorEvent{Err: fmt.Errorf("unexpected TryUpdateEngine error type: %w", err)}) ++ } ++ } ++ case ProcessUnsafePayloadEvent: ++ ref, err := derive.PayloadToBlockRef(d.cfg, x.Envelope.ExecutionPayload) ++ if err != nil { ++ d.log.Error("failed to decode L2 block ref from payload", "err", err) ++ return ++ } ++ if err := d.ec.InsertUnsafePayload(d.ctx, x.Envelope, ref); err != nil { ++ d.log.Info("failed to insert payload", "ref", ref, ++ "txs", len(x.Envelope.ExecutionPayload.Transactions), "err", err) ++ // yes, duplicate error-handling. After all derivers are interacting with the engine ++ // through events, we can drop the engine-controller interface: ++ // unify the events handler with the engine-controller, ++ // remove a lot of code, and not do this error translation. ++ if errors.Is(err, derive.ErrReset) { ++ d.emitter.Emit(rollup.ResetEvent{Err: err}) ++ } else if errors.Is(err, derive.ErrTemporary) { ++ d.emitter.Emit(rollup.EngineTemporaryErrorEvent{Err: err}) ++ } else { ++ d.emitter.Emit(rollup.CriticalErrorEvent{Err: fmt.Errorf("unexpected InsertUnsafePayload error type: %w", err)}) ++ } ++ } else { ++ d.log.Info("successfully processed payload", "ref", ref, "txs", len(x.Envelope.ExecutionPayload.Transactions)) ++ } ++ case ForkchoiceRequestEvent: ++ d.emitter.Emit(ForkchoiceUpdateEvent{ ++ UnsafeL2Head: d.ec.UnsafeL2Head(), ++ SafeL2Head: d.ec.SafeL2Head(), ++ FinalizedL2Head: d.ec.Finalized(), ++ }) ++ case ForceEngineResetEvent: ++ ForceEngineReset(d.ec, x) ++ ++ // Time to apply the changes to the underlying engine ++ d.emitter.Emit(TryUpdateEngineEvent{}) ++ ++ log.Debug("Reset of Engine is completed", ++ "safeHead", x.Safe, "unsafe", x.Unsafe, "safe_timestamp", x.Safe.Time, ++ "unsafe_timestamp", x.Unsafe.Time) ++ d.emitter.Emit(EngineResetConfirmedEvent(x)) ++ case ProcessAttributesEvent: ++ d.onForceNextSafeAttributes(x.Attributes) ++ case PendingSafeRequestEvent: ++ d.emitter.Emit(PendingSafeUpdateEvent{ ++ PendingSafe: d.ec.PendingSafeL2Head(), ++ Unsafe: d.ec.UnsafeL2Head(), ++ }) ++ case PromotePendingSafeEvent: ++ // Only promote if not already stale. ++ // Resets/overwrites happen through engine-resets, not through promotion. 
++ if x.Ref.Number > d.ec.PendingSafeL2Head().Number { ++ d.ec.SetPendingSafeL2Head(x.Ref) ++ } ++ if x.Safe && x.Ref.Number > d.ec.SafeL2Head().Number { ++ d.ec.SetSafeHead(x.Ref) ++ d.emitter.Emit(SafeDerivedEvent{Safe: x.Ref, DerivedFrom: x.DerivedFrom}) ++ } ++ case PromoteFinalizedEvent: ++ if x.Ref.Number < d.ec.Finalized().Number { ++ d.log.Error("Cannot rewind finality,", "ref", x.Ref, "finalized", d.ec.Finalized()) ++ return ++ } ++ if x.Ref.Number > d.ec.SafeL2Head().Number { ++ d.log.Error("Block must be safe before it can be finalized", "ref", x.Ref, "safe", d.ec.SafeL2Head()) ++ return ++ } ++ d.ec.SetFinalizedHead(x.Ref) ++ } ++} ++ ++// onForceNextSafeAttributes inserts the provided attributes, reorging away any conflicting unsafe chain. ++func (eq *EngDeriver) onForceNextSafeAttributes(attributes *derive.AttributesWithParent) { ++ ctx, cancel := context.WithTimeout(eq.ctx, time.Second*10) ++ defer cancel() ++ ++ attrs := attributes.Attributes ++ errType, err := eq.ec.StartPayload(ctx, eq.ec.PendingSafeL2Head(), attributes, true) ++ var envelope *eth.ExecutionPayloadEnvelope ++ if err == nil { ++ envelope, errType, err = eq.ec.ConfirmPayload(ctx, async.NoOpGossiper{}, &conductor.NoOpConductor{}) ++ } ++ if err != nil { ++ switch errType { ++ case BlockInsertTemporaryErr: ++ // RPC errors are recoverable, we can retry the buffered payload attributes later. ++ eq.emitter.Emit(rollup.EngineTemporaryErrorEvent{Err: fmt.Errorf("temporarily cannot insert new safe block: %w", err)}) ++ return ++ case BlockInsertPrestateErr: ++ _ = eq.ec.CancelPayload(ctx, true) ++ eq.emitter.Emit(rollup.ResetEvent{Err: fmt.Errorf("need reset to resolve pre-state problem: %w", err)}) ++ return ++ case BlockInsertPayloadErr: ++ if !errors.Is(err, derive.ErrTemporary) { ++ eq.emitter.Emit(InvalidPayloadAttributesEvent{Attributes: attributes}) ++ } ++ _ = eq.ec.CancelPayload(ctx, true) ++ eq.log.Warn("could not process payload derived from L1 data, dropping attributes", "err", err) ++ // Count the number of deposits to see if the tx list is deposit only. ++ depositCount := 0 ++ for _, tx := range attrs.Transactions { ++ if len(tx) > 0 && tx[0] == types.DepositTxType { ++ depositCount += 1 ++ } ++ } ++ // Deposit transaction execution errors are suppressed in the execution engine, but if the ++ // block is somehow invalid, there is nothing we can do to recover & we should exit. ++ if len(attrs.Transactions) == depositCount { ++ eq.log.Error("deposit only block was invalid", "parent", attributes.Parent, "err", err) ++ eq.emitter.Emit(rollup.CriticalErrorEvent{Err: fmt.Errorf("failed to process block with only deposit transactions: %w", err)}) ++ return ++ } ++ // Revert the pending safe head to the safe head. ++ eq.ec.SetPendingSafeL2Head(eq.ec.SafeL2Head()) ++ // suppress the error b/c we want to retry with the next batch from the batch queue ++ // If there is no valid batch the node will eventually force a deposit only block. If ++ // the deposit only block fails, this will return the critical error above. ++ ++ // Try to restore to previous known unsafe chain. 
++ eq.ec.SetBackupUnsafeL2Head(eq.ec.BackupUnsafeL2Head(), true) ++ ++ // drop the payload without inserting it into the engine ++ return ++ default: ++ eq.emitter.Emit(rollup.CriticalErrorEvent{Err: fmt.Errorf("unknown InsertHeadBlock error type %d: %w", errType, err)}) ++ } ++ } ++ ref, err := derive.PayloadToBlockRef(eq.cfg, envelope.ExecutionPayload) ++ if err != nil { ++ eq.emitter.Emit(rollup.ResetEvent{Err: fmt.Errorf("failed to decode L2 block ref from payload: %w", err)}) ++ return ++ } ++ eq.ec.SetPendingSafeL2Head(ref) ++ if attributes.IsLastInSpan { ++ eq.ec.SetSafeHead(ref) ++ eq.emitter.Emit(SafeDerivedEvent{Safe: ref, DerivedFrom: attributes.DerivedFrom}) ++ } ++ eq.emitter.Emit(PendingSafeUpdateEvent{ ++ PendingSafe: eq.ec.PendingSafeL2Head(), ++ Unsafe: eq.ec.UnsafeL2Head(), ++ }) ++} ++ ++type ResetEngineControl interface { ++ SetUnsafeHead(eth.L2BlockRef) ++ SetSafeHead(eth.L2BlockRef) ++ SetFinalizedHead(eth.L2BlockRef) ++ SetBackupUnsafeL2Head(block eth.L2BlockRef, triggerReorg bool) ++ SetPendingSafeL2Head(eth.L2BlockRef) ++ ResetBuildingState() ++} ++ ++// ForceEngineReset is not to be used. The op-program needs it for now, until event processing is adopted there. ++func ForceEngineReset(ec ResetEngineControl, x ForceEngineResetEvent) { ++ ec.SetUnsafeHead(x.Unsafe) ++ ec.SetSafeHead(x.Safe) ++ ec.SetPendingSafeL2Head(x.Safe) ++ ec.SetFinalizedHead(x.Finalized) ++ ec.SetBackupUnsafeL2Head(eth.L2BlockRef{}, false) ++ ec.ResetBuildingState() ++}
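As the ForkchoiceRequestEvent comment above describes, derivers no longer read engine state directly: they emit a request, and the EngDeriver answers with a ForkchoiceUpdateEvent broadcast to all derivers. A sketch of that decoupling, assuming the packages from these diffs; the engine side below is a stand-in with hard-coded heads instead of a real EngineController:

package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/log"

	"github.com/ethereum-optimism/optimism/op-node/rollup"
	"github.com/ethereum-optimism/optimism/op-node/rollup/engine"
	"github.com/ethereum-optimism/optimism/op-service/eth"
)

func main() {
	logger := log.Root()
	var queue *rollup.SynchronousEvents

	// Stand-in for the EngDeriver: answers forkchoice requests with hard-coded
	// heads; the real deriver reads them from its EngineController.
	engineSide := rollup.DeriverFunc(func(ev rollup.Event) {
		if _, ok := ev.(engine.ForkchoiceRequestEvent); ok {
			queue.Emit(engine.ForkchoiceUpdateEvent{
				UnsafeL2Head: eth.L2BlockRef{Number: 42},
			})
		}
	})
	// A consumer deriver that never touches the engine controller.
	consumer := rollup.DeriverFunc(func(ev rollup.Event) {
		if x, ok := ev.(engine.ForkchoiceUpdateEvent); ok {
			fmt.Println("observed unsafe head:", x.UnsafeL2Head.Number)
		}
	})
	root := rollup.SynchronousDerivers{engineSide, consumer}
	queue = rollup.NewSynchronousEvents(logger, context.Background(), &root)
	queue.Emit(engine.ForkchoiceRequestEvent{})
	_ = queue.Drain() // delivers the request, then the broadcast update
}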
(new file: op-node/rollup/events.go, +101 / -0)
diff --git OP/op-node/rollup/events.go CELO/op-node/rollup/events.go +new file mode 100644 +index 0000000000000000000000000000000000000000..29a6acdd14172912d95aa68658529921eac97a41 +--- /dev/null ++++ CELO/op-node/rollup/events.go +@@ -0,0 +1,101 @@ ++package rollup ++ ++import "github.com/ethereum/go-ethereum/log" ++ ++type Event interface { ++ String() string ++} ++ ++type Deriver interface { ++ OnEvent(ev Event) ++} ++ ++type EventEmitter interface { ++ Emit(ev Event) ++} ++ ++type EmitterFunc func(ev Event) ++ ++func (fn EmitterFunc) Emit(ev Event) { ++ fn(ev) ++} ++ ++// L1TemporaryErrorEvent identifies a temporary issue with the L1 data. ++type L1TemporaryErrorEvent struct { ++ Err error ++} ++ ++var _ Event = L1TemporaryErrorEvent{} ++ ++func (ev L1TemporaryErrorEvent) String() string { ++ return "l1-temporary-error" ++} ++ ++// EngineTemporaryErrorEvent identifies a temporary processing issue. ++// It applies to both L1 and L2 data, often inter-related. ++// This scope will be reduced over time, to only capture L2-engine specific temporary errors. ++// See L1TemporaryErrorEvent for L1 related temporary errors. ++type EngineTemporaryErrorEvent struct { ++ Err error ++} ++ ++var _ Event = EngineTemporaryErrorEvent{} ++ ++func (ev EngineTemporaryErrorEvent) String() string { ++ return "engine-temporary-error" ++} ++ ++type ResetEvent struct { ++ Err error ++} ++ ++var _ Event = ResetEvent{} ++ ++func (ev ResetEvent) String() string { ++ return "reset-event" ++} ++ ++type CriticalErrorEvent struct { ++ Err error ++} ++ ++var _ Event = CriticalErrorEvent{} ++ ++func (ev CriticalErrorEvent) String() string { ++ return "critical-error" ++} ++ ++type SynchronousDerivers []Deriver ++ ++func (s *SynchronousDerivers) OnEvent(ev Event) { ++ for _, d := range *s { ++ d.OnEvent(ev) ++ } ++} ++ ++var _ Deriver = (*SynchronousDerivers)(nil) ++ ++type DebugDeriver struct { ++ Log log.Logger ++} ++ ++func (d DebugDeriver) OnEvent(ev Event) { ++ d.Log.Debug("on-event", "event", ev) ++} ++ ++type NoopDeriver struct{} ++ ++func (d NoopDeriver) OnEvent(ev Event) {} ++ ++// DeriverFunc implements the Deriver interface as a function, ++// similar to how the std-lib http HandlerFunc implements a Handler. ++// This can be used for small in-place derivers, test helpers, etc. ++type DeriverFunc func(ev Event) ++ ++func (fn DeriverFunc) OnEvent(ev Event) { ++ fn(ev) ++} ++ ++type NoopEmitter struct{} ++ ++func (e NoopEmitter) Emit(ev Event) {}
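Besides the error events, events.go ships small adapters (DeriverFunc, EmitterFunc, DebugDeriver, NoopDeriver, SynchronousDerivers) that compose into deriver chains. A sketch of tracing every event through DebugDeriver before the real handler sees it; the handler body is illustrative:

package main

import (
	"errors"

	"github.com/ethereum/go-ethereum/log"

	"github.com/ethereum-optimism/optimism/op-node/rollup"
)

func main() {
	logger := log.Root()
	// Fan every event out to a debug logger first, then the real handler.
	chain := rollup.SynchronousDerivers{
		rollup.DebugDeriver{Log: logger},
		rollup.DeriverFunc(func(ev rollup.Event) {
			if x, ok := ev.(rollup.CriticalErrorEvent); ok {
				logger.Error("critical event", "err", x.Err)
			}
		}),
	}
	// EmitterFunc adapts any func(Event) into an EventEmitter.
	var emitter rollup.EventEmitter = rollup.EmitterFunc(chain.OnEvent)
	emitter.Emit(rollup.CriticalErrorEvent{Err: errors.New("example")})
}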
(new file: op-node/rollup/events_test.go, +50 / -0)
diff --git OP/op-node/rollup/events_test.go CELO/op-node/rollup/events_test.go +new file mode 100644 +index 0000000000000000000000000000000000000000..d405883d3cce648691529c237006ce7641f3f9d6 +--- /dev/null ++++ CELO/op-node/rollup/events_test.go +@@ -0,0 +1,50 @@ ++package rollup ++ ++import ( ++ "fmt" ++ "testing" ++ ++ "github.com/stretchr/testify/require" ++) ++ ++type TestEvent struct{} ++ ++func (ev TestEvent) String() string { ++ return "X" ++} ++ ++func TestSynchronousDerivers_OnEvent(t *testing.T) { ++ result := "" ++ a := DeriverFunc(func(ev Event) { ++ result += fmt.Sprintf("A:%s\n", ev) ++ }) ++ b := DeriverFunc(func(ev Event) { ++ result += fmt.Sprintf("B:%s\n", ev) ++ }) ++ c := DeriverFunc(func(ev Event) { ++ result += fmt.Sprintf("C:%s\n", ev) ++ }) ++ ++ x := SynchronousDerivers{} ++ x.OnEvent(TestEvent{}) ++ require.Equal(t, "", result) ++ ++ x = SynchronousDerivers{a} ++ x.OnEvent(TestEvent{}) ++ require.Equal(t, "A:X\n", result) ++ ++ result = "" ++ x = SynchronousDerivers{a, a} ++ x.OnEvent(TestEvent{}) ++ require.Equal(t, "A:X\nA:X\n", result) ++ ++ result = "" ++ x = SynchronousDerivers{a, b} ++ x.OnEvent(TestEvent{}) ++ require.Equal(t, "A:X\nB:X\n", result) ++ ++ result = "" ++ x = SynchronousDerivers{a, b, c} ++ x.OnEvent(TestEvent{}) ++ require.Equal(t, "A:X\nB:X\nC:X\n", result) ++}
(modified: op-node/rollup/finality/finalizer.go, +76 / -32)
diff --git OP/op-node/rollup/finality/finalizer.go CELO/op-node/rollup/finality/finalizer.go +index 100ed96bb520096de9e43e0639eb3f7861dca00f..483110083d467724c5cb125a1c701ae4ed585fd4 100644 +--- OP/op-node/rollup/finality/finalizer.go ++++ CELO/op-node/rollup/finality/finalizer.go +@@ -4,11 +4,13 @@ import ( + "context" + "fmt" + "sync" ++ "time" +  + "github.com/ethereum/go-ethereum/log" +  + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive" ++ "github.com/ethereum-optimism/optimism/op-node/rollup/engine" + "github.com/ethereum-optimism/optimism/op-service/eth" + ) +  +@@ -34,7 +36,7 @@ + // calcFinalityLookback calculates the default finality lookback based on DA challenge window if plasma + // mode is activated or L1 finality lookback. + func calcFinalityLookback(cfg *rollup.Config) uint64 { +- // in plasma mode the longest finality lookback is a commitment is challenged on the last block of ++ // in alt-da mode the longest finality lookback is a commitment is challenged on the last block of + // the challenge window in which case it will be both challenge + resolve window. + if cfg.PlasmaEnabled() { + lkb := cfg.PlasmaConfig.DAChallengeWindow + cfg.PlasmaConfig.DAResolveWindow + 1 +@@ -68,9 +70,16 @@ mu sync.Mutex +  + log log.Logger +  ++ ctx context.Context ++ ++ emitter rollup.EventEmitter ++ + // finalizedL1 is the currently perceived finalized L1 block. + // This may be ahead of the current traversed origin when syncing. + finalizedL1 eth.L1BlockRef ++ ++ // lastFinalizedL2 maintains how far we finalized, so we don't have to emit re-attempts. ++ lastFinalizedL2 eth.L2BlockRef +  + // triedFinalizeAt tracks at which L1 block number we last tried to finalize during sync. + triedFinalizeAt uint64 +@@ -82,20 +91,19 @@ // Maximum amount of L2 blocks to store in finalityData. + finalityLookback uint64 +  + l1Fetcher FinalizerL1Interface +- +- ec FinalizerEngine + } +  +-func NewFinalizer(log log.Logger, cfg *rollup.Config, l1Fetcher FinalizerL1Interface, ec FinalizerEngine) *Finalizer { ++func NewFinalizer(ctx context.Context, log log.Logger, cfg *rollup.Config, l1Fetcher FinalizerL1Interface, emitter rollup.EventEmitter) *Finalizer { + lookback := calcFinalityLookback(cfg) + return &Finalizer{ ++ ctx: ctx, + log: log, + finalizedL1: eth.L1BlockRef{}, + triedFinalizeAt: 0, + finalityData: make([]FinalityData, 0, lookback), + finalityLookback: lookback, + l1Fetcher: l1Fetcher, +- ec: ec, ++ emitter: emitter, + } + } +  +@@ -108,8 +116,39 @@ out = fi.finalizedL1 + return + } +  +-// Finalize applies a L1 finality signal, without any fork-choice or L2 state changes. 
+-func (fi *Finalizer) Finalize(ctx context.Context, l1Origin eth.L1BlockRef) { ++type FinalizeL1Event struct { ++ FinalizedL1 eth.L1BlockRef ++} ++ ++func (ev FinalizeL1Event) String() string { ++ return "finalized-l1" ++} ++ ++type TryFinalizeEvent struct{} ++ ++func (ev TryFinalizeEvent) String() string { ++ return "try-finalize" ++} ++ ++func (fi *Finalizer) OnEvent(ev rollup.Event) { ++ switch x := ev.(type) { ++ case FinalizeL1Event: ++ fi.onL1Finalized(x.FinalizedL1) ++ case engine.SafeDerivedEvent: ++ fi.onDerivedSafeBlock(x.Safe, x.DerivedFrom) ++ case derive.DeriverIdleEvent: ++ fi.onDerivationIdle(x.Origin) ++ case rollup.ResetEvent: ++ fi.onReset() ++ case TryFinalizeEvent: ++ fi.tryFinalize() ++ case engine.ForkchoiceUpdateEvent: ++ fi.lastFinalizedL2 = x.FinalizedL2Head ++ } ++} ++ ++// onL1Finalized applies a L1 finality signal ++func (fi *Finalizer) onL1Finalized(l1Origin eth.L1BlockRef) { + fi.mu.Lock() + defer fi.mu.Unlock() + prevFinalizedL1 := fi.finalizedL1 +@@ -127,13 +166,11 @@ // remember the L1 finalization signal + fi.finalizedL1 = l1Origin + } +  +- // remnant of finality in EngineQueue: the finalization work does not inherit a context from the caller. +- if err := fi.tryFinalize(ctx); err != nil { +- fi.log.Warn("received L1 finalization signal, but was unable to determine and apply L2 finality", "err", err) +- } ++ // when the L1 change we can suggest to try to finalize, as the pre-condition for L2 finality has now changed ++ fi.emitter.Emit(TryFinalizeEvent{}) + } +  +-// OnDerivationL1End is called when a L1 block has been fully exhausted (i.e. no more L2 blocks to derive from). ++// onDerivationIdle is called when the pipeline is exhausted of new data (i.e. no more L2 blocks to derive from). + // + // Since finality applies to all L2 blocks fully derived from the same block, + // it optimal to only check after the derivation from the L1 block has been exhausted. +@@ -141,24 +178,27 @@ // + // This will look at what has been buffered so far, + // sanity-check we are on the finalizing L1 chain, + // and finalize any L2 blocks that were fully derived from known finalized L1 blocks. +-func (fi *Finalizer) OnDerivationL1End(ctx context.Context, derivedFrom eth.L1BlockRef) error { ++func (fi *Finalizer) onDerivationIdle(derivedFrom eth.L1BlockRef) { + fi.mu.Lock() + defer fi.mu.Unlock() + if fi.finalizedL1 == (eth.L1BlockRef{}) { +- return nil // if no L1 information is finalized yet, then skip this ++ return // if no L1 information is finalized yet, then skip this + } + // If we recently tried finalizing, then don't try again just yet, but traverse more of L1 first. + if fi.triedFinalizeAt != 0 && derivedFrom.Number <= fi.triedFinalizeAt+finalityDelay { +- return nil ++ return + } +- fi.log.Info("processing L1 finality information", "l1_finalized", fi.finalizedL1, "derived_from", derivedFrom, "previous", fi.triedFinalizeAt) ++ fi.log.Debug("processing L1 finality information", "l1_finalized", fi.finalizedL1, "derived_from", derivedFrom, "previous", fi.triedFinalizeAt) + fi.triedFinalizeAt = derivedFrom.Number +- return fi.tryFinalize(ctx) ++ fi.emitter.Emit(TryFinalizeEvent{}) + } +  +-func (fi *Finalizer) tryFinalize(ctx context.Context) error { +- // default to keep the same finalized block +- finalizedL2 := fi.ec.Finalized() ++func (fi *Finalizer) tryFinalize() { ++ fi.mu.Lock() ++ defer fi.mu.Unlock() ++ ++ // overwritten if we finalize ++ finalizedL2 := fi.lastFinalizedL2 // may be zeroed if nothing was finalized since startup. 
+ var finalizedDerivedFrom eth.BlockID + // go through the latest inclusion data, and find the last L2 block that was derived from a finalized L1 block + for _, fd := range fi.finalityData { +@@ -169,37 +209,41 @@ // keep iterating, there may be later L2 blocks that can also be finalized + } + } + if finalizedDerivedFrom != (eth.BlockID{}) { ++ ctx, cancel := context.WithTimeout(fi.ctx, time.Second*10) ++ defer cancel() + // Sanity check the finality signal of L1. + // Even though the signal is trusted and we do the below check also, + // the signal itself has to be canonical to proceed. + // TODO(#10724): This check could be removed if the finality signal is fully trusted, and if tests were more flexible for this case. + signalRef, err := fi.l1Fetcher.L1BlockRefByNumber(ctx, fi.finalizedL1.Number) + if err != nil { +- return derive.NewTemporaryError(fmt.Errorf("failed to check if on finalizing L1 chain, could not fetch block %d: %w", fi.finalizedL1.Number, err)) ++ fi.emitter.Emit(rollup.L1TemporaryErrorEvent{Err: fmt.Errorf("failed to check if on finalizing L1 chain, could not fetch block %d: %w", fi.finalizedL1.Number, err)}) ++ return + } + if signalRef.Hash != fi.finalizedL1.Hash { +- return derive.NewResetError(fmt.Errorf("need to reset, we assumed %s is finalized, but canonical chain is %s", fi.finalizedL1, signalRef)) ++ fi.emitter.Emit(rollup.ResetEvent{Err: fmt.Errorf("need to reset, we assumed %s is finalized, but canonical chain is %s", fi.finalizedL1, signalRef)}) ++ return + } +  + // Sanity check we are indeed on the finalizing chain, and not stuck on something else. + // We assume that the block-by-number query is consistent with the previously received finalized chain signal + derivedRef, err := fi.l1Fetcher.L1BlockRefByNumber(ctx, finalizedDerivedFrom.Number) + if err != nil { +- return derive.NewTemporaryError(fmt.Errorf("failed to check if on finalizing L1 chain, could not fetch block %d: %w", finalizedDerivedFrom.Number, err)) ++ fi.emitter.Emit(rollup.L1TemporaryErrorEvent{Err: fmt.Errorf("failed to check if on finalizing L1 chain, could not fetch block %d: %w", finalizedDerivedFrom.Number, err)}) ++ return + } + if derivedRef.Hash != finalizedDerivedFrom.Hash { +- return derive.NewResetError(fmt.Errorf("need to reset, we are on %s, not on the finalizing L1 chain %s (towards %s)", +- finalizedDerivedFrom, derivedRef, fi.finalizedL1)) ++ fi.emitter.Emit(rollup.ResetEvent{Err: fmt.Errorf("need to reset, we are on %s, not on the finalizing L1 chain %s (towards %s)", ++ finalizedDerivedFrom, derivedRef, fi.finalizedL1)}) ++ return + } +- +- fi.ec.SetFinalizedHead(finalizedL2) ++ fi.emitter.Emit(engine.PromoteFinalizedEvent{Ref: finalizedL2}) + } +- return nil + } +  +-// PostProcessSafeL2 buffers the L1 block the safe head was fully derived from, ++// onDerivedSafeBlock buffers the L1 block the safe head was fully derived from, + // to finalize it once the derived-from L1 block, or a later L1 block, finalizes. +-func (fi *Finalizer) PostProcessSafeL2(l2Safe eth.L2BlockRef, derivedFrom eth.L1BlockRef) { ++func (fi *Finalizer) onDerivedSafeBlock(l2Safe eth.L2BlockRef, derivedFrom eth.L1BlockRef) { + fi.mu.Lock() + defer fi.mu.Unlock() + // remember the last L2 block that we fully derived from the given finality data +@@ -225,9 +269,9 @@ } + } + } +  +-// Reset clears the recent history of safe-L2 blocks used for finalization, ++// onReset clears the recent history of safe-L2 blocks used for finalization, + // to avoid finalizing any reorged-out L2 blocks. 
+-func (fi *Finalizer) Reset() { ++func (fi *Finalizer) onReset() { + fi.mu.Lock() + defer fi.mu.Unlock() + fi.finalityData = fi.finalityData[:0]
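With these changes the Finalizer is wired like any other deriver: it consumes events and emits follow-ups (TryFinalizeEvent, PromoteFinalizedEvent) into the same queue that drives it. A minimal wiring sketch, assuming the finality and rollup packages from these diffs; the nil L1 fetcher is safe only because no finality data is buffered, so tryFinalize never reaches its L1 sanity checks:

package main

import (
	"context"

	"github.com/ethereum/go-ethereum/log"

	"github.com/ethereum-optimism/optimism/op-node/rollup"
	"github.com/ethereum-optimism/optimism/op-node/rollup/finality"
	"github.com/ethereum-optimism/optimism/op-service/eth"
)

func main() {
	logger := log.Root()
	ctx := context.Background()
	var derivers rollup.SynchronousDerivers
	queue := rollup.NewSynchronousEvents(logger, ctx, &derivers)
	fin := finality.NewFinalizer(ctx, logger, &rollup.Config{}, nil, queue)
	derivers = append(derivers, fin)
	// An L1 finality signal only schedules an attempt (TryFinalizeEvent)...
	queue.Emit(finality.FinalizeL1Event{FinalizedL1: eth.L1BlockRef{Number: 100}})
	// ...which Drain then executes; with nothing buffered, nothing finalizes.
	_ = queue.Drain()
}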
(modified: op-node/rollup/finality/finalizer_test.go, +170 / -100)
diff --git OP/op-node/rollup/finality/finalizer_test.go CELO/op-node/rollup/finality/finalizer_test.go +index e577efe1952af9c6cf8b980a38fc656b833cf9a1..d4dc76fb4ca55c2fc1577b78ae4e98f0763a1094 100644 +--- OP/op-node/rollup/finality/finalizer_test.go ++++ CELO/op-node/rollup/finality/finalizer_test.go +@@ -13,24 +13,11 @@ "github.com/ethereum/go-ethereum/log" +  + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive" ++ "github.com/ethereum-optimism/optimism/op-node/rollup/engine" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum-optimism/optimism/op-service/testutils" + ) +- +-type fakeEngine struct { +- finalized eth.L2BlockRef +-} +- +-func (f *fakeEngine) Finalized() eth.L2BlockRef { +- return f.finalized +-} +- +-func (f *fakeEngine) SetFinalizedHead(ref eth.L2BlockRef) { +- f.finalized = ref +-} +- +-var _ FinalizerEngine = (*fakeEngine)(nil) +  + func TestEngineQueue_Finalize(t *testing.T) { + rng := rand.New(rand.NewSource(1234)) +@@ -80,12 +67,12 @@ Number: refG.Number + 1, + ParentHash: refG.Hash, + Time: refG.Time + l1Time, + } +- refI := eth.L1BlockRef{ +- Hash: testutils.RandomHash(rng), +- Number: refH.Number + 1, +- ParentHash: refH.Hash, +- Time: refH.Time + l1Time, +- } ++ //refI := eth.L1BlockRef{ ++ // Hash: testutils.RandomHash(rng), ++ // Number: refH.Number + 1, ++ // ParentHash: refH.Hash, ++ // Time: refH.Time + l1Time, ++ //} +  + refA0 := eth.L2BlockRef{ + Hash: testutils.RandomHash(rng), +@@ -203,22 +190,29 @@ defer l1F.AssertExpectations(t) + l1F.ExpectL1BlockRefByNumber(refD.Number, refD, nil) + l1F.ExpectL1BlockRefByNumber(refD.Number, refD, nil) +  +- ec := &fakeEngine{} +- ec.SetFinalizedHead(refA1) +- +- fi := NewFinalizer(logger, &rollup.Config{}, l1F, ec) ++ emitter := &testutils.MockEmitter{} ++ fi := NewFinalizer(context.Background(), logger, &rollup.Config{}, l1F, emitter) +  + // now say C1 was included in D and became the new safe head +- fi.PostProcessSafeL2(refC1, refD) +- require.NoError(t, fi.OnDerivationL1End(context.Background(), refD)) ++ fi.OnEvent(engine.SafeDerivedEvent{Safe: refC1, DerivedFrom: refD}) ++ fi.OnEvent(derive.DeriverIdleEvent{Origin: refD}) ++ emitter.AssertExpectations(t) +  + // now say D0 was included in E and became the new safe head +- fi.PostProcessSafeL2(refD0, refE) +- require.NoError(t, fi.OnDerivationL1End(context.Background(), refE)) ++ fi.OnEvent(engine.SafeDerivedEvent{Safe: refD0, DerivedFrom: refE}) ++ fi.OnEvent(derive.DeriverIdleEvent{Origin: refE}) ++ emitter.AssertExpectations(t) ++ ++ // Let's finalize D from which we fully derived C1, but not D0 ++ // This will trigger an attempt of L2 finalization. 
++ emitter.ExpectOnce(TryFinalizeEvent{}) ++ fi.OnEvent(FinalizeL1Event{FinalizedL1: refD}) ++ emitter.AssertExpectations(t) +  +- // let's finalize D from which we fully derived C1, but not D0 +- fi.Finalize(context.Background(), refD) +- require.Equal(t, refC1, ec.Finalized(), "C1 was included in finalized D, and should now be finalized, as finality signal is instantly picked up") ++ // C1 was included in finalized D, and should now be finalized ++ emitter.ExpectOnce(engine.PromoteFinalizedEvent{Ref: refC1}) ++ fi.OnEvent(TryFinalizeEvent{}) ++ emitter.AssertExpectations(t) + }) +  + // Finality signal is received, but couldn't immediately be checked +@@ -230,25 +224,37 @@ l1F.ExpectL1BlockRefByNumber(refD.Number, refD, errors.New("fake error")) + l1F.ExpectL1BlockRefByNumber(refD.Number, refD, nil) // to check finality signal + l1F.ExpectL1BlockRefByNumber(refD.Number, refD, nil) // to check what was derived from (same in this case) +  +- ec := &fakeEngine{} +- ec.SetFinalizedHead(refA1) +- +- fi := NewFinalizer(logger, &rollup.Config{}, l1F, ec) ++ emitter := &testutils.MockEmitter{} ++ fi := NewFinalizer(context.Background(), logger, &rollup.Config{}, l1F, emitter) +  + // now say C1 was included in D and became the new safe head +- fi.PostProcessSafeL2(refC1, refD) +- require.NoError(t, fi.OnDerivationL1End(context.Background(), refD)) ++ fi.OnEvent(engine.SafeDerivedEvent{Safe: refC1, DerivedFrom: refD}) ++ fi.OnEvent(derive.DeriverIdleEvent{Origin: refD}) ++ emitter.AssertExpectations(t) +  + // now say D0 was included in E and became the new safe head +- fi.PostProcessSafeL2(refD0, refE) +- require.NoError(t, fi.OnDerivationL1End(context.Background(), refE)) ++ fi.OnEvent(engine.SafeDerivedEvent{Safe: refD0, DerivedFrom: refE}) ++ fi.OnEvent(derive.DeriverIdleEvent{Origin: refE}) ++ emitter.AssertExpectations(t) +  + // let's finalize D from which we fully derived C1, but not D0 +- fi.Finalize(context.Background(), refD) +- require.Equal(t, refA1, ec.Finalized(), "C1 was included in finalized D, but finality could not be verified yet, due to temporary test error") ++ emitter.ExpectOnce(TryFinalizeEvent{}) ++ fi.OnEvent(FinalizeL1Event{FinalizedL1: refD}) ++ emitter.AssertExpectations(t) ++ // C1 was included in finalized D, but finality could not be verified yet, due to temporary test error ++ emitter.ExpectOnceType("L1TemporaryErrorEvent") ++ fi.OnEvent(TryFinalizeEvent{}) ++ emitter.AssertExpectations(t) +  +- require.NoError(t, fi.OnDerivationL1End(context.Background(), refF)) +- require.Equal(t, refC1, ec.Finalized(), "C1 was included in finalized D, and should now be finalized, as check can succeed when revisited") ++ // upon the next signal we should schedule a finalization re-attempt ++ emitter.ExpectOnce(TryFinalizeEvent{}) ++ fi.OnEvent(derive.DeriverIdleEvent{Origin: refF}) ++ emitter.AssertExpectations(t) ++ ++ // C1 was included in finalized D, and should now be finalized, as check can succeed when revisited ++ emitter.ExpectOnce(engine.PromoteFinalizedEvent{Ref: refC1}) ++ fi.OnEvent(TryFinalizeEvent{}) ++ emitter.AssertExpectations(t) + }) +  + // Test that finality progression can repeat a few times. 
+@@ -257,43 +263,80 @@ logger := testlog.Logger(t, log.LevelInfo) + l1F := &testutils.MockL1Source{} + defer l1F.AssertExpectations(t) +  ++ emitter := &testutils.MockEmitter{} ++ fi := NewFinalizer(context.Background(), logger, &rollup.Config{}, l1F, emitter) ++ ++ fi.OnEvent(engine.SafeDerivedEvent{Safe: refC1, DerivedFrom: refD}) ++ fi.OnEvent(derive.DeriverIdleEvent{Origin: refD}) ++ emitter.AssertExpectations(t) ++ ++ fi.OnEvent(engine.SafeDerivedEvent{Safe: refD0, DerivedFrom: refE}) ++ fi.OnEvent(derive.DeriverIdleEvent{Origin: refE}) ++ emitter.AssertExpectations(t) ++ ++ // L1 finality signal will trigger L2 finality attempt ++ emitter.ExpectOnce(TryFinalizeEvent{}) ++ fi.OnEvent(FinalizeL1Event{FinalizedL1: refD}) ++ emitter.AssertExpectations(t) ++ ++ // C1 was included in D, and should be finalized now ++ emitter.ExpectOnce(engine.PromoteFinalizedEvent{Ref: refC1}) + l1F.ExpectL1BlockRefByNumber(refD.Number, refD, nil) + l1F.ExpectL1BlockRefByNumber(refD.Number, refD, nil) ++ fi.OnEvent(TryFinalizeEvent{}) ++ emitter.AssertExpectations(t) ++ l1F.AssertExpectations(t) ++ ++ // Another L1 finality event, trigger L2 finality attempt again ++ emitter.ExpectOnce(TryFinalizeEvent{}) ++ fi.OnEvent(FinalizeL1Event{FinalizedL1: refE}) ++ emitter.AssertExpectations(t) ++ ++ // D0 was included in E, and should be finalized now ++ emitter.ExpectOnce(engine.PromoteFinalizedEvent{Ref: refD0}) + l1F.ExpectL1BlockRefByNumber(refE.Number, refE, nil) + l1F.ExpectL1BlockRefByNumber(refE.Number, refE, nil) +- l1F.ExpectL1BlockRefByNumber(refH.Number, refH, nil) +- l1F.ExpectL1BlockRefByNumber(refH.Number, refH, nil) +- +- ec := &fakeEngine{} +- ec.SetFinalizedHead(refA1) ++ fi.OnEvent(TryFinalizeEvent{}) ++ emitter.AssertExpectations(t) ++ l1F.AssertExpectations(t) +  +- fi := NewFinalizer(logger, &rollup.Config{}, l1F, ec) ++ // D0 is still there in the buffer, and may be finalized again, if it were not for the latest forkchoice update. 
++ fi.OnEvent(engine.ForkchoiceUpdateEvent{FinalizedL2Head: refD0}) ++ emitter.AssertExpectations(t) // should trigger no events +  +- fi.PostProcessSafeL2(refC1, refD) +- require.NoError(t, fi.OnDerivationL1End(context.Background(), refD)) ++ // we expect a finality attempt, since we have not idled on something yet ++ emitter.ExpectOnce(TryFinalizeEvent{}) ++ fi.OnEvent(derive.DeriverIdleEvent{Origin: refG}) ++ emitter.AssertExpectations(t) +  +- fi.PostProcessSafeL2(refD0, refE) +- require.NoError(t, fi.OnDerivationL1End(context.Background(), refE)) ++ fi.OnEvent(engine.SafeDerivedEvent{Safe: refD1, DerivedFrom: refH}) ++ fi.OnEvent(engine.SafeDerivedEvent{Safe: refE0, DerivedFrom: refH}) ++ fi.OnEvent(engine.SafeDerivedEvent{Safe: refE1, DerivedFrom: refH}) ++ fi.OnEvent(engine.SafeDerivedEvent{Safe: refF0, DerivedFrom: refH}) ++ fi.OnEvent(engine.SafeDerivedEvent{Safe: refF1, DerivedFrom: refH}) ++ emitter.AssertExpectations(t) // above updates add data, but no attempt is made until idle or L1 signal +  +- fi.Finalize(context.Background(), refD) +- require.NoError(t, fi.OnDerivationL1End(context.Background(), refF)) +- require.Equal(t, refC1, ec.Finalized(), "C1 was included in D, and should be finalized now") ++ // We recently finalized already, and there is no new L1 finality data ++ fi.OnEvent(derive.DeriverIdleEvent{Origin: refH}) ++ emitter.AssertExpectations(t) +  +- fi.Finalize(context.Background(), refE) +- require.NoError(t, fi.OnDerivationL1End(context.Background(), refG)) +- require.Equal(t, refD0, ec.Finalized(), "D0 was included in E, and should be finalized now") ++ // D1-F1 were included in L1 blocks that have not been finalized yet. ++ // D0 is known to be finalized already. ++ fi.OnEvent(TryFinalizeEvent{}) ++ emitter.AssertExpectations(t) +  +- fi.PostProcessSafeL2(refD1, refH) +- fi.PostProcessSafeL2(refE0, refH) +- fi.PostProcessSafeL2(refE1, refH) +- fi.PostProcessSafeL2(refF0, refH) +- fi.PostProcessSafeL2(refF1, refH) +- require.NoError(t, fi.OnDerivationL1End(context.Background(), refH)) +- require.Equal(t, refD0, ec.Finalized(), "D1-F1 were included in L1 blocks that have not been finalized yet") ++ // Now L1 block H is actually finalized, and we can proceed with another attempt ++ emitter.ExpectOnce(TryFinalizeEvent{}) ++ fi.OnEvent(FinalizeL1Event{FinalizedL1: refH}) ++ emitter.AssertExpectations(t) +  +- fi.Finalize(context.Background(), refH) +- require.NoError(t, fi.OnDerivationL1End(context.Background(), refI)) +- require.Equal(t, refF1, ec.Finalized(), "F1 should be finalized now") ++ // F1 should be finalized now, since it was included in H ++ emitter.ExpectOnce(engine.PromoteFinalizedEvent{Ref: refF1}) ++ l1F.ExpectL1BlockRefByNumber(refH.Number, refH, nil) ++ l1F.ExpectL1BlockRefByNumber(refH.Number, refH, nil) ++ fi.OnEvent(TryFinalizeEvent{}) ++ emitter.AssertExpectations(t) ++ l1F.AssertExpectations(t) + }) +  + // In this test the finality signal is for a block more than +@@ -305,22 +348,28 @@ defer l1F.AssertExpectations(t) + l1F.ExpectL1BlockRefByNumber(refD.Number, refD, nil) // check the signal + l1F.ExpectL1BlockRefByNumber(refC.Number, refC, nil) // check what we derived the L2 block from +  +- ec := &fakeEngine{} +- ec.SetFinalizedHead(refA1) +- +- fi := NewFinalizer(logger, &rollup.Config{}, l1F, ec) ++ emitter := &testutils.MockEmitter{} ++ fi := NewFinalizer(context.Background(), logger, &rollup.Config{}, l1F, emitter) +  + // now say B1 was included in C and became the new safe head +- fi.PostProcessSafeL2(refB1, refC) +- 
require.NoError(t, fi.OnDerivationL1End(context.Background(), refC)) ++ fi.OnEvent(engine.SafeDerivedEvent{Safe: refB1, DerivedFrom: refC}) ++ fi.OnEvent(derive.DeriverIdleEvent{Origin: refC}) ++ emitter.AssertExpectations(t) +  + // now say C0 was included in E and became the new safe head +- fi.PostProcessSafeL2(refC0, refE) +- require.NoError(t, fi.OnDerivationL1End(context.Background(), refE)) ++ fi.OnEvent(engine.SafeDerivedEvent{Safe: refC0, DerivedFrom: refE}) ++ fi.OnEvent(derive.DeriverIdleEvent{Origin: refE}) ++ emitter.AssertExpectations(t) +  + // let's finalize D, from which we fully derived B1, but not C0 (referenced L1 origin in L2 block != inclusion of L2 block in L1 chain) +- fi.Finalize(context.Background(), refD) +- require.Equal(t, refB1, ec.Finalized(), "B1 was included in finalized D, and should now be finalized") ++ emitter.ExpectOnce(TryFinalizeEvent{}) ++ fi.OnEvent(FinalizeL1Event{FinalizedL1: refD}) ++ emitter.AssertExpectations(t) ++ ++ // B1 was included in finalized D, and should now be finalized ++ emitter.ExpectOnce(engine.PromoteFinalizedEvent{Ref: refB1}) ++ fi.OnEvent(TryFinalizeEvent{}) ++ emitter.AssertExpectations(t) + }) +  + // Test that reorg race condition is handled. +@@ -335,14 +384,13 @@ l1F.ExpectL1BlockRefByNumber(refD.Number, refD, nil) // shows reorg to OnDerivationL1End attempt + l1F.ExpectL1BlockRefByNumber(refF.Number, refF, nil) // check signal + l1F.ExpectL1BlockRefByNumber(refE.Number, refE, nil) // post-reorg +  +- ec := &fakeEngine{} +- ec.SetFinalizedHead(refA1) +- +- fi := NewFinalizer(logger, &rollup.Config{}, l1F, ec) ++ emitter := &testutils.MockEmitter{} ++ fi := NewFinalizer(context.Background(), logger, &rollup.Config{}, l1F, emitter) +  + // now say B1 was included in C and became the new safe head +- fi.PostProcessSafeL2(refB1, refC) +- require.NoError(t, fi.OnDerivationL1End(context.Background(), refC)) ++ fi.OnEvent(engine.SafeDerivedEvent{Safe: refB1, DerivedFrom: refC}) ++ fi.OnEvent(derive.DeriverIdleEvent{Origin: refC}) ++ emitter.AssertExpectations(t) +  + // temporary fork of the L1, and derived safe L2 blocks from. + refC0Alt := eth.L2BlockRef{ +@@ -367,34 +415,56 @@ Number: refC.Number + 1, + ParentHash: refC.Hash, + Time: refC.Time + l1Time, + } +- fi.PostProcessSafeL2(refC0Alt, refDAlt) +- fi.PostProcessSafeL2(refC1Alt, refDAlt) ++ fi.OnEvent(engine.SafeDerivedEvent{Safe: refC0Alt, DerivedFrom: refDAlt}) ++ fi.OnEvent(engine.SafeDerivedEvent{Safe: refC1Alt, DerivedFrom: refDAlt}) +  + // We get an early finality signal for F, of the chain that did not include refC0Alt and refC1Alt, + // as L1 block F does not build on DAlt. + // The finality signal was for a new chain, while derivation is on an old stale chain. + // It should be detected that C0Alt and C1Alt cannot actually be finalized, + // even though they are older than the latest finality signal. +- fi.Finalize(context.Background(), refF) +- require.Equal(t, refA1, ec.Finalized(), "cannot verify refC0Alt and refC1Alt, and refB1 is older and not checked") ++ emitter.ExpectOnce(TryFinalizeEvent{}) ++ fi.OnEvent(FinalizeL1Event{FinalizedL1: refF}) ++ emitter.AssertExpectations(t) ++ // cannot verify refC0Alt and refC1Alt, and refB1 is older and not checked ++ emitter.ExpectOnceType("ResetEvent") ++ fi.OnEvent(TryFinalizeEvent{}) ++ emitter.AssertExpectations(t) // no change in finality ++ + // And process DAlt, still stuck on old chain. 
+- require.ErrorIs(t, derive.ErrReset, fi.OnDerivationL1End(context.Background(), refDAlt)) +- require.Equal(t, refA1, ec.Finalized(), "no new finalized L2 blocks after early finality signal with stale chain") ++ ++ emitter.ExpectOnce(TryFinalizeEvent{}) ++ fi.OnEvent(derive.DeriverIdleEvent{Origin: refDAlt}) ++ emitter.AssertExpectations(t) ++ // no new finalized L2 blocks after early finality signal with stale chain ++ emitter.ExpectOnceType("ResetEvent") ++ fi.OnEvent(TryFinalizeEvent{}) ++ emitter.AssertExpectations(t) ++ // Now reset, because of the reset error ++ fi.OnEvent(rollup.ResetEvent{}) + require.Equal(t, refF, fi.FinalizedL1(), "remember the new finality signal for later however") +- // Now reset, because of the reset error +- fi.Reset() +  + // And process the canonical chain, with empty block D (no post-processing of canonical C0 blocks yet) +- require.NoError(t, fi.OnDerivationL1End(context.Background(), refD)) ++ emitter.ExpectOnce(TryFinalizeEvent{}) ++ fi.OnEvent(derive.DeriverIdleEvent{Origin: refD}) ++ emitter.AssertExpectations(t) ++ fi.OnEvent(TryFinalizeEvent{}) ++ emitter.AssertExpectations(t) // no new finality +  + // Include C0 in E +- fi.PostProcessSafeL2(refC0, refE) +- require.NoError(t, fi.OnDerivationL1End(context.Background(), refE)) +- // Due to the "finalityDelay" we don't repeat finality checks shortly after one another. +- require.Equal(t, refA1, ec.Finalized()) ++ fi.OnEvent(engine.SafeDerivedEvent{Safe: refC0, DerivedFrom: refE}) ++ fi.OnEvent(derive.DeriverIdleEvent{Origin: refE}) ++ // Due to the "finalityDelay" we don't repeat finality checks shortly after one another, ++ // and don't expect a finality attempt. ++ emitter.AssertExpectations(t) ++ + // if we reset the attempt, then we can finalize however. + fi.triedFinalizeAt = 0 +- require.NoError(t, fi.OnDerivationL1End(context.Background(), refE)) +- require.Equal(t, refC0, ec.Finalized()) ++ emitter.ExpectOnce(TryFinalizeEvent{}) ++ fi.OnEvent(derive.DeriverIdleEvent{Origin: refE}) ++ emitter.AssertExpectations(t) ++ emitter.ExpectOnce(engine.PromoteFinalizedEvent{Ref: refC0}) ++ fi.OnEvent(TryFinalizeEvent{}) ++ emitter.AssertExpectations(t) + }) + }
(modified: op-node/rollup/finality/plasma.go, +12 / -7)
diff --git OP/op-node/rollup/finality/plasma.go CELO/op-node/rollup/finality/plasma.go +index e7826cda71832da86c5793041ac242a27c8a32b8..e62c96169c5ab57a09b9a26ca861ff5b44f58020 100644 +--- OP/op-node/rollup/finality/plasma.go ++++ CELO/op-node/rollup/finality/plasma.go +@@ -26,17 +26,17 @@ *Finalizer + backend PlasmaBackend + } +  +-func NewPlasmaFinalizer(log log.Logger, cfg *rollup.Config, +- l1Fetcher FinalizerL1Interface, ec FinalizerEngine, ++func NewPlasmaFinalizer(ctx context.Context, log log.Logger, cfg *rollup.Config, ++ l1Fetcher FinalizerL1Interface, emitter rollup.EventEmitter, + backend PlasmaBackend) *PlasmaFinalizer { +  +- inner := NewFinalizer(log, cfg, l1Fetcher, ec) ++ inner := NewFinalizer(ctx, log, cfg, l1Fetcher, emitter) +  +- // In plasma mode, the finalization signal is proxied through the plasma manager. ++ // In alt-da mode, the finalization signal is proxied through the plasma manager. + // Finality signal will come from the DA contract or L1 finality whichever is last. + // The plasma module will then call the inner.Finalize function when applicable. + backend.OnFinalizedHeadSignal(func(ref eth.L1BlockRef) { +- inner.Finalize(context.Background(), ref) // plasma backend context passing can be improved ++ inner.OnEvent(FinalizeL1Event{FinalizedL1: ref}) + }) +  + return &PlasmaFinalizer{ +@@ -45,6 +45,11 @@ backend: backend, + } + } +  +-func (fi *PlasmaFinalizer) Finalize(ctx context.Context, l1Origin eth.L1BlockRef) { +- fi.backend.Finalize(l1Origin) ++func (fi *PlasmaFinalizer) OnEvent(ev rollup.Event) { ++ switch x := ev.(type) { ++ case FinalizeL1Event: ++ fi.backend.Finalize(x.FinalizedL1) ++ default: ++ fi.Finalizer.OnEvent(ev) ++ } + }
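The pattern here, handling FinalizeL1Event locally while delegating every other event to the embedded Finalizer, is the event-system analogue of method overriding and works for any wrapper. A generic sketch with hypothetical names (interceptingDeriver is not part of the codebase):

package main

import (
	"fmt"

	"github.com/ethereum-optimism/optimism/op-node/rollup"
	"github.com/ethereum-optimism/optimism/op-node/rollup/finality"
)

// interceptingDeriver is a hypothetical wrapper in the PlasmaFinalizer style:
// it claims one event type and forwards everything else to the inner deriver.
type interceptingDeriver struct {
	inner rollup.Deriver
}

func (d interceptingDeriver) OnEvent(ev rollup.Event) {
	if x, ok := ev.(finality.FinalizeL1Event); ok {
		// PlasmaFinalizer hands this signal to its DA backend instead of
		// applying it immediately; here we only log the interception.
		fmt.Println("deferring L1 finality signal for block", x.FinalizedL1.Number)
		return
	}
	d.inner.OnEvent(ev)
}

func main() {
	d := interceptingDeriver{inner: rollup.NoopDeriver{}}
	d.OnEvent(finality.FinalizeL1Event{})  // intercepted
	d.OnEvent(finality.TryFinalizeEvent{}) // forwarded to the no-op inner deriver
}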
(modified: op-node/rollup/finality/plasma_test.go, +55 / -20)
diff --git OP/op-node/rollup/finality/plasma_test.go CELO/op-node/rollup/finality/plasma_test.go +index 4c1ecc00acccb7c592ee95e4e592a6b30d1407ef..291fa026dcb49771889ae388ef65f9cedad0ff16 100644 +--- OP/op-node/rollup/finality/plasma_test.go ++++ CELO/op-node/rollup/finality/plasma_test.go +@@ -11,6 +11,8 @@ "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" +  + "github.com/ethereum-optimism/optimism/op-node/rollup" ++ "github.com/ethereum-optimism/optimism/op-node/rollup/derive" ++ "github.com/ethereum-optimism/optimism/op-node/rollup/engine" + plasma "github.com/ethereum-optimism/optimism/op-plasma" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/testlog" +@@ -83,9 +85,6 @@ L1Origin: refA.ID(), + SequenceNumber: 1, + } +  +- ec := &fakeEngine{} +- ec.SetFinalizedHead(refA1) +- + // Simulate plasma finality by waiting for the finalized-inclusion + // of a commitment to turn into undisputed finalized data. + commitmentInclusionFinalized := eth.L1BlockRef{} +@@ -95,7 +94,9 @@ commitmentInclusionFinalized = ref + }, + forwardTo: nil, + } +- fi := NewPlasmaFinalizer(logger, cfg, l1F, ec, plasmaBackend) ++ ++ emitter := &testutils.MockEmitter{} ++ fi := NewPlasmaFinalizer(context.Background(), logger, cfg, l1F, emitter, plasmaBackend) + require.NotNil(t, plasmaBackend.forwardTo, "plasma backend must have access to underlying standard finalizer") +  + require.Equal(t, expFinalityLookback, cap(fi.finalityData)) +@@ -107,7 +108,9 @@ // advance over 200 l1 origins each time incrementing new l2 safe heads + // and post processing. + for i := uint64(0); i < 200; i++ { + if i == 10 { // finalize a L1 commitment +- fi.Finalize(context.Background(), l1parent) ++ fi.OnEvent(FinalizeL1Event{FinalizedL1: l1parent}) ++ emitter.AssertExpectations(t) // no events emitted upon L1 finality ++ require.Equal(t, l1parent, commitmentInclusionFinalized, "plasma backend received L1 signal") + } +  + previous := l1parent +@@ -127,24 +130,56 @@ Time: l2parent.Time + cfg.BlockTime, + L1Origin: previous.ID(), // reference previous origin, not the block the batch was included in + SequenceNumber: j, + } +- fi.PostProcessSafeL2(l2parent, l1parent) ++ fi.OnEvent(engine.SafeDerivedEvent{Safe: l2parent, DerivedFrom: l1parent}) ++ emitter.AssertExpectations(t) + } +- require.NoError(t, fi.OnDerivationL1End(context.Background(), l1parent)) ++ // might trigger finalization attempt, if expired finality delay ++ emitter.ExpectMaybeRun(func(ev rollup.Event) { ++ require.IsType(t, TryFinalizeEvent{}, ev) ++ }) ++ fi.OnEvent(derive.DeriverIdleEvent{}) ++ emitter.AssertExpectations(t) ++ // clear expectations ++ emitter.Mock.ExpectedCalls = nil ++ ++ // no L2 finalize event, as no L1 finality signal has been forwarded by plasma backend yet ++ fi.OnEvent(TryFinalizeEvent{}) ++ emitter.AssertExpectations(t) ++ ++ // Pretend to be the plasma backend, ++ // send the original finalization signal to the underlying finalizer, ++ // now that we are sure the commitment itself is not just finalized, ++ // but the referenced data cannot be disputed anymore. + plasmaFinalization := commitmentInclusionFinalized.Number + cfg.PlasmaConfig.DAChallengeWindow +- if i == plasmaFinalization { +- // Pretend to be the plasma backend, +- // send the original finalization signal to the underlying finalizer, +- // now that we are sure the commitment itself is not just finalized, +- // but the referenced data cannot be disputed anymore. 
++ if commitmentInclusionFinalized != (eth.L1BlockRef{}) && l1parent.Number == plasmaFinalization { ++ // When the signal is forwarded, a finalization attempt will be scheduled ++ emitter.ExpectOnce(TryFinalizeEvent{}) + plasmaBackend.forwardTo(commitmentInclusionFinalized) +- } +- // The next time OnDerivationL1End is called, after the finality signal was triggered by plasma backend, +- // we should have a finalized L2 block. +- // The L1 origin of the simulated L2 blocks lags 1 behind the block the L2 block is included in on L1. +- // So to check the L2 finality progress, we check if the next L1 block after the L1 origin +- // of the safe block matches that of the finalized L1 block. +- if i == plasmaFinalization+1 { +- require.Equal(t, plasmaFinalization, ec.Finalized().L1Origin.Number+1) ++ emitter.AssertExpectations(t) ++ require.Equal(t, commitmentInclusionFinalized, fi.finalizedL1, "finality signal now made its way in regular finalizer") ++ ++ // As soon as a finalization attempt is made, after the finality signal was triggered by plasma backend, ++ // we should get an attempt to get a finalized L2 block. ++ // In this test the L1 origin of the simulated L2 blocks lags 1 behind the block the L2 block is included in on L1. ++ // So to check the L2 finality progress, we check if the next L1 block after the L1 origin ++ // of the safe block matches that of the finalized L1 block. ++ l1F.ExpectL1BlockRefByNumber(commitmentInclusionFinalized.Number, commitmentInclusionFinalized, nil) ++ l1F.ExpectL1BlockRefByNumber(commitmentInclusionFinalized.Number, commitmentInclusionFinalized, nil) ++ var finalizedL2 eth.L2BlockRef ++ emitter.ExpectOnceRun(func(ev rollup.Event) { ++ if x, ok := ev.(engine.PromoteFinalizedEvent); ok { ++ finalizedL2 = x.Ref ++ } else { ++ t.Fatalf("expected L2 finalization, but got: %s", ev) ++ } ++ }) ++ fi.OnEvent(TryFinalizeEvent{}) ++ l1F.AssertExpectations(t) ++ emitter.AssertExpectations(t) ++ require.Equal(t, commitmentInclusionFinalized.Number, finalizedL2.L1Origin.Number+1) ++ // Confirm finalization, so there will be no repeats of the PromoteFinalizedEvent ++ fi.OnEvent(engine.ForkchoiceUpdateEvent{FinalizedL2Head: finalizedL2}) ++ emitter.AssertExpectations(t) + } + } +
(new file: op-node/rollup/synchronous.go, +103 / -0)
diff --git OP/op-node/rollup/synchronous.go CELO/op-node/rollup/synchronous.go +new file mode 100644 +index 0000000000000000000000000000000000000000..68943381e24eee966b9cd257aeeb559c457f513d +--- /dev/null ++++ CELO/op-node/rollup/synchronous.go +@@ -0,0 +1,103 @@ ++package rollup ++ ++import ( ++ "context" ++ "io" ++ "sync" ++ ++ "github.com/ethereum/go-ethereum/log" ++) ++ ++// Don't queue up an endless number of events. ++// At some point it's better to drop events and warn something is exploding the number of events. ++const sanityEventLimit = 1000 ++ ++// SynchronousEvents is a rollup.EventEmitter that a rollup.Deriver can emit events to. ++// The events will be queued up, and can then be executed synchronously by calling the Drain function, ++// which will apply all events to the root Deriver. ++// New events may be queued up while events are being processed by the root rollup.Deriver. ++type SynchronousEvents struct { ++ // The lock is no-op in FP execution, if running in synchronous FP-VM. ++ // This lock ensures that all emitted events are merged together correctly, ++ // if this util is used in a concurrent context. ++ evLock sync.Mutex ++ ++ events []Event ++ ++ log log.Logger ++ ++ ctx context.Context ++ ++ root Deriver ++} ++ ++func NewSynchronousEvents(log log.Logger, ctx context.Context, root Deriver) *SynchronousEvents { ++ return &SynchronousEvents{ ++ log: log, ++ ctx: ctx, ++ root: root, ++ } ++} ++ ++func (s *SynchronousEvents) Emit(event Event) { ++ s.evLock.Lock() ++ defer s.evLock.Unlock() ++ ++ if s.ctx.Err() != nil { ++ s.log.Warn("Ignoring emitted event during shutdown", "event", event) ++ return ++ } ++ ++ // sanity limit, never queue too many events ++ if len(s.events) >= sanityEventLimit { ++ s.log.Error("Something is very wrong, queued up too many events! Dropping event", "ev", event) ++ return ++ } ++ s.events = append(s.events, event) ++} ++ ++func (s *SynchronousEvents) Drain() error { ++ for { ++ if s.ctx.Err() != nil { ++ return s.ctx.Err() ++ } ++ if len(s.events) == 0 { ++ return nil ++ } ++ ++ s.evLock.Lock() ++ first := s.events[0] ++ s.events = s.events[1:] ++ s.evLock.Unlock() ++ ++ s.root.OnEvent(first) ++ } ++} ++ ++func (s *SynchronousEvents) DrainUntil(fn func(ev Event) bool, excl bool) error { ++ for { ++ if s.ctx.Err() != nil { ++ return s.ctx.Err() ++ } ++ if len(s.events) == 0 { ++ return io.EOF ++ } ++ ++ s.evLock.Lock() ++ first := s.events[0] ++ stop := fn(first) ++ if excl && stop { ++ s.evLock.Unlock() ++ return nil ++ } ++ s.events = s.events[1:] ++ s.evLock.Unlock() ++ ++ s.root.OnEvent(first) ++ if stop { ++ return nil ++ } ++ } ++} ++ ++var _ EventEmitter = (*SynchronousEvents)(nil)
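Drain is exercised by the test below; DrainUntil is not, so its semantics deserve a sketch: handle queued events until the predicate matches, and with excl=true stop before the matched event, leaving it and everything after it queued. The step/stop event types here are illustrative:

package main

import (
	"context"
	"errors"
	"fmt"
	"io"

	"github.com/ethereum/go-ethereum/log"

	"github.com/ethereum-optimism/optimism/op-node/rollup"
)

type stepEvent struct{ n int }

func (e stepEvent) String() string { return fmt.Sprintf("step-%d", e.n) }

type stopEvent struct{}

func (stopEvent) String() string { return "stop" }

func main() {
	logger := log.Root()
	root := rollup.DeriverFunc(func(ev rollup.Event) {
		fmt.Println("handled:", ev)
	})
	queue := rollup.NewSynchronousEvents(logger, context.Background(), root)
	queue.Emit(stepEvent{n: 1})
	queue.Emit(stopEvent{})
	queue.Emit(stepEvent{n: 2})

	// Handle events up to, but excluding, the stop event: step-1 is handled,
	// stop and step-2 remain queued. io.EOF would mean the queue ran dry
	// before a match.
	err := queue.DrainUntil(func(ev rollup.Event) bool {
		_, ok := ev.(stopEvent)
		return ok
	}, true)
	if err != nil && !errors.Is(err, io.EOF) {
		panic(err)
	}
	_ = queue.Drain() // now handles the remaining stop and step-2 events
}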
(new file: op-node/rollup/synchronous_test.go, +88 / -0)
diff --git OP/op-node/rollup/synchronous_test.go CELO/op-node/rollup/synchronous_test.go +new file mode 100644 +index 0000000000000000000000000000000000000000..191b4f4246a2097d47bc5fb838ae1cd8237cf98d +--- /dev/null ++++ CELO/op-node/rollup/synchronous_test.go +@@ -0,0 +1,88 @@ ++package rollup ++ ++import ( ++ "context" ++ "testing" ++ ++ "github.com/stretchr/testify/require" ++ ++ "github.com/ethereum/go-ethereum/log" ++ ++ "github.com/ethereum-optimism/optimism/op-service/testlog" ++) ++ ++func TestSynchronousEvents(t *testing.T) { ++ logger := testlog.Logger(t, log.LevelError) ++ ctx, cancel := context.WithCancel(context.Background()) ++ count := 0 ++ deriver := DeriverFunc(func(ev Event) { ++ count += 1 ++ }) ++ syncEv := NewSynchronousEvents(logger, ctx, deriver) ++ require.NoError(t, syncEv.Drain(), "can drain, even if empty") ++ ++ syncEv.Emit(TestEvent{}) ++ require.Equal(t, 0, count, "no processing yet, queued event") ++ require.NoError(t, syncEv.Drain()) ++ require.Equal(t, 1, count, "processed event") ++ ++ syncEv.Emit(TestEvent{}) ++ syncEv.Emit(TestEvent{}) ++ require.Equal(t, 1, count, "no processing yet, queued events") ++ require.NoError(t, syncEv.Drain()) ++ require.Equal(t, 3, count, "processed events") ++ ++ cancel() ++ syncEv.Emit(TestEvent{}) ++ require.Equal(t, ctx.Err(), syncEv.Drain(), "no draining after close") ++ require.Equal(t, 3, count, "didn't process event after trigger close") ++} ++ ++func TestSynchronousEventsSanityLimit(t *testing.T) { ++ logger := testlog.Logger(t, log.LevelError) ++ count := 0 ++ deriver := DeriverFunc(func(ev Event) { ++ count += 1 ++ }) ++ syncEv := NewSynchronousEvents(logger, context.Background(), deriver) ++ // emit 1 too many events ++ for i := 0; i < sanityEventLimit+1; i++ { ++ syncEv.Emit(TestEvent{}) ++ } ++ require.NoError(t, syncEv.Drain()) ++ require.Equal(t, sanityEventLimit, count, "processed all non-dropped events") ++ ++ syncEv.Emit(TestEvent{}) ++ require.NoError(t, syncEv.Drain()) ++ require.Equal(t, sanityEventLimit+1, count, "back to normal after drain") ++} ++ ++type CyclicEvent struct { ++ Count int ++} ++ ++func (ev CyclicEvent) String() string { ++ return "cyclic-event" ++} ++ ++func TestSynchronousCyclic(t *testing.T) { ++ logger := testlog.Logger(t, log.LevelError) ++ var emitter EventEmitter ++ result := false ++ deriver := DeriverFunc(func(ev Event) { ++ logger.Info("received event", "event", ev) ++ switch x := ev.(type) { ++ case CyclicEvent: ++ if x.Count < 10 { ++ emitter.Emit(CyclicEvent{Count: x.Count + 1}) ++ } else { ++ result = true ++ } ++ } ++ }) ++ syncEv := NewSynchronousEvents(logger, context.Background(), deriver) ++ emitter = syncEv ++ syncEv.Emit(CyclicEvent{Count: 0}) ++ require.NoError(t, syncEv.Drain()) ++ require.True(t, result, "expecting event processing to fully recurse") ++}
op-node/rollup/types.go (OP → CELO): +10 / -8
diff --git OP/op-node/rollup/types.go CELO/op-node/rollup/types.go +index 816181687567a074ad8aeb4666fa6e4c1b82d372..aa9dc1a3220f1f030753e6a5394effdebb21ac9c 100644 +--- OP/op-node/rollup/types.go ++++ CELO/op-node/rollup/types.go +@@ -54,10 +54,10 @@ // L1 DataAvailabilityChallenge contract proxy address + DAChallengeAddress common.Address `json:"da_challenge_contract_address,omitempty"` + // CommitmentType specifies which commitment type can be used. Defaults to Keccak (type 0) if not present + CommitmentType string `json:"da_commitment_type"` +- // DA challenge window value set on the DAC contract. Used in plasma mode ++ // DA challenge window value set on the DAC contract. Used in alt-da mode + // to compute when a commitment can no longer be challenged. + DAChallengeWindow uint64 `json:"da_challenge_window"` +- // DA resolve window value set on the DAC contract. Used in plasma mode ++ // DA resolve window value set on the DAC contract. Used in alt-da mode + // to compute when a challenge expires and trigger a reorg if needed. + DAResolveWindow uint64 `json:"da_resolve_window"` + } +@@ -92,6 +92,7 @@ // a pre-mainnet Bedrock change that addresses findings of the Sherlock contest related to deposit attributes. + // "Regolith" is the loose deposited rock that sits on top of Bedrock. + // Active if RegolithTime != nil && L2 block timestamp >= *RegolithTime, inactive otherwise. + RegolithTime *uint64 `json:"regolith_time,omitempty"` ++ Cel2Time *uint64 `json:"cel2_time,omitempty"` +  + // CanyonTime sets the activation time of the Canyon network upgrade. + // Active if CanyonTime != nil && L2 block timestamp >= *CanyonTime, inactive otherwise. +@@ -132,15 +133,15 @@ + // L1 DataAvailabilityChallenge contract proxy address + LegacyDAChallengeAddress common.Address `json:"da_challenge_contract_address,omitempty"` +  +- // DA challenge window value set on the DAC contract. Used in plasma mode ++ // DA challenge window value set on the DAC contract. Used in alt-da mode + // to compute when a commitment can no longer be challenged. + LegacyDAChallengeWindow uint64 `json:"da_challenge_window,omitempty"` +  +- // DA resolve window value set on the DAC contract. Used in plasma mode ++ // DA resolve window value set on the DAC contract. Used in alt-da mode + // to compute when a challenge expires and trigger a reorg if needed. + LegacyDAResolveWindow uint64 `json:"da_resolve_window,omitempty"` +  +- // LegacyUsePlasma is activated when the chain is in plasma mode. ++ // LegacyUsePlasma is activated when the chain is in alt-da mode. + LegacyUsePlasma bool `json:"use_plasma,omitempty"` + } +  +@@ -326,7 +327,7 @@ + return nil + } +  +-// validatePlasmaConfig checks the two approaches to configuring plasma mode. ++// validatePlasmaConfig checks the two approaches to configuring alt-da mode. + // If the legacy values are set, they are copied to the new location. If both are set, they are check for consistency. + func validatePlasmaConfig(cfg *Config) error { + if cfg.LegacyUsePlasma && cfg.PlasmaConfig == nil { +@@ -522,7 +523,7 @@ return c.PlasmaConfig != nil + } +  + // SyncLookback computes the number of blocks to walk back in order to find the correct L1 origin. +-// In plasma mode longest possible window is challenge + resolve windows. ++// In alt-da mode longest possible window is challenge + resolve windows. 
+ func (c *Config) SyncLookback() uint64 { + if c.PlasmaEnabled() { + if win := (c.PlasmaConfig.DAChallengeWindow + c.PlasmaConfig.DAResolveWindow); win > c.SeqWindowSize { +@@ -567,7 +568,7 @@ banner += fmt.Sprintf(" - Interop: %s\n", fmtForkTimeOrUnset(c.InteropTime)) + // Report the protocol version + banner += fmt.Sprintf("Node supports up to OP-Stack Protocol Version: %s\n", OPStackSupport) + if c.PlasmaConfig != nil { +- banner += fmt.Sprintf("Node supports Plasma Mode with CommitmentType %v\n", c.PlasmaConfig.CommitmentType) ++ banner += fmt.Sprintf("Node supports Alt-DA Mode with CommitmentType %v\n", c.PlasmaConfig.CommitmentType) + } + return banner + } +@@ -599,6 +600,7 @@ "ecotone_time", fmtForkTimeOrUnset(c.EcotoneTime), + "fjord_time", fmtForkTimeOrUnset(c.FjordTime), + "interop_time", fmtForkTimeOrUnset(c.InteropTime), + "plasma_mode", c.PlasmaConfig != nil, ++ "cel2_time", fmtForkTimeOrUnset(c.Cel2Time), + ) + } +
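The added Cel2Time field follows the same convention as the other fork times in this struct. A hypothetical accessor in that style (not present in the diff) might read:

```go
// Hypothetical helper, not part of the diff: it mirrors the activation
// pattern the comments above describe for Regolith (active if the fork
// time is set and the L2 block timestamp has reached it).
func (c *Config) IsCel2(timestamp uint64) bool {
	return c.Cel2Time != nil && timestamp >= *c.Cel2Time
}
```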
ops-bedrock/Dockerfile.l2 (OP → CELO): +1 / -1
diff --git OP/ops-bedrock/Dockerfile.l2 CELO/ops-bedrock/Dockerfile.l2 +index 976ab27498598d48c61548f63c70958a1d8f3cac..34e545be69a7c17312e723c1b735d01f0580e9a6 100644 +--- OP/ops-bedrock/Dockerfile.l2 ++++ CELO/ops-bedrock/Dockerfile.l2 +@@ -1,4 +1,4 @@ +-FROM us-docker.pkg.dev/oplabs-tools-artifacts/images/op-geth:optimism ++FROM --platform=linux/amd64 us-west1-docker.pkg.dev/blockchaintestsglobaltestnet/dev-images/op-geth@sha256:fab76a990c21271419a40dfe5d28e30905869183b18ee9e6f711fe562365bc8e +  + RUN apk add --no-cache jq +
proxyd/.gitignore (deleted): +0 / -3
diff --git OP/proxyd/.gitignore CELO/proxyd/.gitignore +deleted file mode 100644 +index 65e6a826f682d6b1f4e8b188cdcecfe7e463ada3..0000000000000000000000000000000000000000 +--- OP/proxyd/.gitignore ++++ /dev/null +@@ -1,3 +0,0 @@ +-bin +- +-config.toml
proxyd/CHANGELOG.md (deleted): +0 / -252
diff --git OP/proxyd/CHANGELOG.md CELO/proxyd/CHANGELOG.md +deleted file mode 100644 +index dd78bfe3c8038e86dd00d6992af6f2a2601ffa4b..0000000000000000000000000000000000000000 +--- OP/proxyd/CHANGELOG.md ++++ /dev/null +@@ -1,252 +0,0 @@ +-# @eth-optimism/proxyd +- +-## 3.14.1 +- +-### Patch Changes +- +-- 5602deec7: chore(deps): bump github.com/prometheus/client_golang from 1.11.0 to 1.11.1 in /proxyd +-- 6b3cf2070: Remove useless logging +- +-## 3.14.0 +- +-### Minor Changes +- +-- 9cc39bcfa: Add support for global method override rate limit +-- 30db32862: Include nonce in sender rate limit +- +-### Patch Changes +- +-- b9bb1a98a: proxyd: Add req_id to log +- +-## 3.13.0 +- +-### Minor Changes +- +-- 6de891d3b: Add sender-based rate limiter +- +-## 3.12.0 +- +-### Minor Changes +- +-- e9f2c701: Allow disabling backend rate limiter +-- ca45a85e: Support pattern matching in exempt origins/user agents +-- f4faa44c: adds server.log_level config +- +-## 3.11.0 +- +-### Minor Changes +- +-- b3c5eeec: Fixed JSON-RPC 2.0 specification compliance by adding the optional data field on an RPCError +-- 01ae6625: Adds new Redis rate limiter +- +-## 3.10.2 +- +-### Patch Changes +- +-- 6bb35fd8: Add customizable whitelist error +-- 7121648c: Batch metrics and max batch size +- +-## 3.10.1 +- +-### Patch Changes +- +-- b82a8f48: Add logging for origin and remote IP' +-- 1bf9559c: Carry over custom limit message in batches +- +-## 3.10.0 +- +-### Minor Changes +- +-- 157ccc84: Support per-method rate limiting +- +-## 3.9.1 +- +-### Patch Changes +- +-- dc4f6a06: Add logging/metrics +- +-## 3.9.0 +- +-### Minor Changes +- +-- b6f4bfcf: Add frontend rate limiting +- +-### Patch Changes +- +-- 406a4fce: Unwrap single RPC batches +-- 915f3b28: Parameterize full RPC request logging +- +-## 3.8.9 +- +-### Patch Changes +- +-- 063c55cf: Use canned response for eth_accounts +- +-## 3.8.8 +- +-### Patch Changes +- +-- 58dc7adc: Improve robustness against unexpected JSON-RPC from upstream +-- 552cd641: Fix concurrent write panic in WS +- +-## 3.8.7 +- +-### Patch Changes +- +-- 6f458607: Bump go-ethereum to 1.10.17 +- +-## 3.8.6 +- +-### Patch Changes +- +-- d79d40c4: proxyd: Proxy requests using batch JSON-RPC +- +-## 3.8.5 +- +-### Patch Changes +- +-- 2a062b11: proxyd: Log ssanitized RPC requests +-- d9f058ce: proxyd: Reduced RPC request logging +-- a4bfd9e7: proxyd: Limit the number of concurrent RPCs to backends +- +-## 3.8.4 +- +-### Patch Changes +- +-- 08329ba2: proxyd: Record redis cache operation latency +-- ae112021: proxyd: Request-scoped context for fast batch RPC short-circuiting +- +-## 3.8.3 +- +-### Patch Changes +- +-- 160f4c3d: Update docker image to use golang 1.18.0 +- +-## 3.8.2 +- +-### Patch Changes +- +-- ae18cea1: Don't hit Redis when the out of service interval is zero +- +-## 3.8.1 +- +-### Patch Changes +- +-- acf7dbd5: Update to go-ethereum v1.10.16 +- +-## 3.8.0 +- +-### Minor Changes +- +-- 527448bb: Handle nil responses better +- +-## 3.7.0 +- +-### Minor Changes +- +-- 3c2926b1: Add debug cache status header to proxyd responses +- +-## 3.6.0 +- +-### Minor Changes +- +-- 096c5f20: proxyd: Allow cached RPCs to be evicted by redis +-- 71d64834: Add caching for block-dependent RPCs +-- fd2e1523: proxyd: Cache block-dependent RPCs +-- 1760613c: Add integration tests and batching +- +-## 3.5.0 +- +-### Minor Changes +- +-- 025a3c0d: Add request/response payload size metrics to proxyd +-- daf8db0b: cache immutable RPC responses in proxyd +-- 8aa89bf3: Add X-Forwarded-For header when 
proxying RPCs on proxyd +- +-## 3.4.1 +- +-### Patch Changes +- +-- 415164e1: Force proxyd build +- +-## 3.4.0 +- +-### Minor Changes +- +-- 4b56ed84: Various proxyd fixes +- +-## 3.3.0 +- +-### Minor Changes +- +-- 7b7ffd2e: Allows string RPC ids on proxyd +- +-## 3.2.0 +- +-### Minor Changes +- +-- 73484138: Adds ability to specify env vars in config +- +-## 3.1.2 +- +-### Patch Changes +- +-- 1b79aa62: Release proxyd +- +-## 3.1.1 +- +-### Patch Changes +- +-- b8802054: Trigger release of proxyd +-- 34fcb277: Bump proxyd to test release build workflow +- +-## 3.1.0 +- +-### Minor Changes +- +-- da6138fd: Updated metrics, support local rate limiter +- +-### Patch Changes +- +-- 6c7f483b: Add support for additional SSL certificates in Docker container +- +-## 3.0.0 +- +-### Major Changes +- +-- abe231bf: Make endpoints match Geth, better logging +- +-## 2.0.0 +- +-### Major Changes +- +-- 6c50098b: Update metrics, support WS +-- f827dbda: Brings back the ability to selectively route RPC methods to backend groups +- +-### Minor Changes +- +-- 8cc824e5: Updates proxyd to include additional error metrics. +-- 9ba4c5e0: Update metrics, support authenticated endpoints +-- 78d0f3f0: Put special errors in a dedicated metric, pass along the content-type header +- +-### Patch Changes +- +-- 6e6a55b1: Canary release +- +-## 1.0.2 +- +-### Patch Changes +- +-- b9d2fbee: Trigger releases +- +-## 1.0.1 +- +-### Patch Changes +- +-- 893623c9: Trigger patch releases for dockerhub +- +-## 1.0.0 +- +-### Major Changes +- +-- 28aabc41: Initial release of RPC proxy daemon
proxyd/Dockerfile (deleted): +0 / -32
diff --git OP/proxyd/Dockerfile CELO/proxyd/Dockerfile +deleted file mode 100644 +index b066e0ecafe7e5233477abc065c27e7143ea45bf..0000000000000000000000000000000000000000 +--- OP/proxyd/Dockerfile ++++ /dev/null +@@ -1,32 +0,0 @@ +-FROM golang:1.21.3-alpine3.18 as builder +- +-ARG GITCOMMIT=docker +-ARG GITDATE=docker +-ARG GITVERSION=docker +- +-RUN apk add make jq git gcc musl-dev linux-headers +- +-COPY ./proxyd /app +- +-WORKDIR /app +- +-RUN make proxyd +- +-FROM alpine:3.18 +- +-RUN apk add bind-tools jq curl bash git redis +- +-COPY ./proxyd/entrypoint.sh /bin/entrypoint.sh +- +-RUN apk update && \ +- apk add ca-certificates && \ +- chmod +x /bin/entrypoint.sh +- +-EXPOSE 8080 +- +-VOLUME /etc/proxyd +- +-COPY --from=builder /app/bin/proxyd /bin/proxyd +- +-ENTRYPOINT ["/bin/entrypoint.sh"] +-CMD ["/bin/proxyd", "/etc/proxyd/proxyd.toml"]
proxyd/Dockerfile.ignore (deleted): +0 / -3
diff --git OP/proxyd/Dockerfile.ignore CELO/proxyd/Dockerfile.ignore +deleted file mode 100644 +index eac1d0bc0b269fe849e171627d1534d6ae22a568..0000000000000000000000000000000000000000 +--- OP/proxyd/Dockerfile.ignore ++++ /dev/null +@@ -1,3 +0,0 @@ +-# ignore everything but proxyd, proxyd defines all its dependencies in the go.mod +-* +-!/proxyd
proxyd/Makefile (deleted): +0 / -25
diff --git OP/proxyd/Makefile CELO/proxyd/Makefile +deleted file mode 100644 +index d9ffb5742cd652d0082eecc24ad9eaeb2bb56095..0000000000000000000000000000000000000000 +--- OP/proxyd/Makefile ++++ /dev/null +@@ -1,25 +0,0 @@ +-LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT) +-LDFLAGSSTRING +=-X main.GitDate=$(GITDATE) +-LDFLAGSSTRING +=-X main.GitVersion=$(GITVERSION) +-LDFLAGS := -ldflags "$(LDFLAGSSTRING)" +- +-proxyd: +- go build -v $(LDFLAGS) -o ./bin/proxyd ./cmd/proxyd +-.PHONY: proxyd +- +-fmt: +- go mod tidy +- gofmt -w . +-.PHONY: fmt +- +-test: +- go test -v ./... +-.PHONY: test +- +-lint: +- go vet ./... +-.PHONY: test +- +-test-fallback: +- go test -v ./... -test.run ^TestFallback$ +-.PHONY: test-fallback
proxyd/README.md (OP → CELO): +2 / -146
diff --git OP/proxyd/README.md CELO/proxyd/README.md +index 4a3a84bafc493c61834ae47ac64759fcae800372..f44b815ab2dce959353a80c45e7e919875fad1a4 100644 +--- OP/proxyd/README.md ++++ CELO/proxyd/README.md +@@ -1,146 +1,2 @@ +-# rpc-proxy +- +-This tool implements `proxyd`, an RPC request router and proxy. It does the following things: +- +-1. Whitelists RPC methods. +-2. Routes RPC methods to groups of backend services. +-3. Automatically retries failed backend requests. +-4. Track backend consensus (`latest`, `safe`, `finalized` blocks), peer count and sync state. +-5. Re-write requests and responses to enforce consensus. +-6. Load balance requests across backend services. +-7. Cache immutable responses from backends. +-8. Provides metrics to measure request latency, error rates, and the like. +- +- +-## Usage +- +-Run `make proxyd` to build the binary. No additional dependencies are necessary. +- +-To configure `proxyd` for use, you'll need to create a configuration file to define your proxy backends and routing rules. Check out [example.config.toml](./example.config.toml) for how to do this alongside a full list of all options with commentary. +- +-Once you have a config file, start the daemon via `proxyd <path-to-config>.toml`. +- +- +-## Consensus awareness +- +-Starting on v4.0.0, `proxyd` is aware of the consensus state of its backends. This helps minimize chain reorgs experienced by clients. +- +-To enable this behavior, you must set `consensus_aware` value to `true` in the backend group. +- +-When consensus awareness is enabled, `proxyd` will poll the backends for their states and resolve a consensus group based on: +-* the common ancestor `latest` block, i.e. if a backend is experiencing a fork, the fork won't be visible to the clients +-* the lowest `safe` block +-* the lowest `finalized` block +-* peer count +-* sync state +- +-The backend group then acts as a round-robin load balancer distributing traffic equally across healthy backends in the consensus group, increasing the availability of the proxy. +- +-A backend is considered healthy if it meets the following criteria: +-* not banned +-* avg 1-min moving window error rate ≤ configurable threshold +-* avg 1-min moving window latency ≤ configurable threshold +-* peer count ≥ configurable threshold +-* `latest` block lag ≤ configurable threshold +-* last state update ≤ configurable threshold +-* not currently syncing +- +-When a backend is experiencing inconsistent consensus, high error rates or high latency, +-the backend will be banned for a configurable amount of time (default 5 minutes) +-and won't receive any traffic during this period. +- +- +-## Tag rewrite +- +-When consensus awareness is enabled, `proxyd` will enforce the consensus state transparently for all the clients. +- +-For example, if a client requests the `eth_getBlockByNumber` method with the `latest` tag, +-`proxyd` will rewrite the request to use the resolved latest block from the consensus group +-and forward it to the backend. +- +-The following request methods are rewritten: +-* `eth_getLogs` +-* `eth_newFilter` +-* `eth_getBalance` +-* `eth_getCode` +-* `eth_getTransactionCount` +-* `eth_call` +-* `eth_getStorageAt` +-* `eth_getBlockTransactionCountByNumber` +-* `eth_getUncleCountByBlockNumber` +-* `eth_getBlockByNumber` +-* `eth_getTransactionByBlockNumberAndIndex` +-* `eth_getUncleByBlockNumberAndIndex` +-* `debug_getRawReceipts` +- +-And `eth_blockNumber` response is overridden with current block consensus. 
+- +- +-## Cacheable methods +- +-Cache use Redis and can be enabled for the following immutable methods: +- +-* `eth_chainId` +-* `net_version` +-* `eth_getBlockTransactionCountByHash` +-* `eth_getUncleCountByBlockHash` +-* `eth_getBlockByHash` +-* `eth_getTransactionByBlockHashAndIndex` +-* `eth_getUncleByBlockHashAndIndex` +-* `debug_getRawReceipts` (block hash only) +- +-## Meta method `consensus_getReceipts` +- +-To support backends with different specifications in the same backend group, +-proxyd exposes a convenient method to fetch receipts abstracting away +-what specific backend will serve the request. +- +-Each backend specifies their preferred method to fetch receipts with `consensus_receipts_target` config, +-which will be translated from `consensus_getReceipts`. +- +-This method takes a `blockNumberOrHash` (i.e. `tag|qty|hash`) +-and returns the receipts for all transactions in the block. +- +-Request example +-```json +-{ +- "jsonrpc":"2.0", +- "id": 1, +- "params": ["0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b"] +-} +-``` +- +-It currently supports translation to the following targets: +-* `debug_getRawReceipts(blockOrHash)` (default) +-* `alchemy_getTransactionReceipts(blockOrHash)` +-* `parity_getBlockReceipts(blockOrHash)` +-* `eth_getBlockReceipts(blockOrHash)` +- +-The selected target is returned in the response, in a wrapped result. +- +-Response example +-```json +-{ +- "jsonrpc": "2.0", +- "id": 1, +- "result": { +- "method": "debug_getRawReceipts", +- "result": { +- // the actual raw result from backend +- } +- } +-} +-``` +- +-See [op-node receipt fetcher](https://github.com/ethereum-optimism/optimism/blob/186e46a47647a51a658e699e9ff047d39444c2de/op-node/sources/receipts.go#L186-L253). +- +- +-## Metrics +- +-See `metrics.go` for a list of all available metrics. +- +-The metrics port is configurable via the `metrics.port` and `metrics.host` keys in the config. +- +-## Adding Backend SSL Certificates in Docker +- +-The Docker image runs on Alpine Linux. If you get SSL errors when connecting to a backend within Docker, you may need to add additional certificates to Alpine's certificate store. To do this, bind mount the certificate bundle into a file in `/usr/local/share/ca-certificates`. The `entrypoint.sh` script will then update the store with whatever is in the `ca-certificates` directory prior to starting `proxyd`. ++# ⚠️ Important ++This project has been moved to [ethereum-optimism/infra](https://github.com/ethereum-optimism/infra)
proxyd/backend.go (deleted): +0 / -1272
diff --git OP/proxyd/backend.go CELO/proxyd/backend.go +deleted file mode 100644 +index 802b94ab4da7510dea3d43705995f17329fb43df..0000000000000000000000000000000000000000 +--- OP/proxyd/backend.go ++++ /dev/null +@@ -1,1272 +0,0 @@ +-package proxyd +- +-import ( +- "bytes" +- "context" +- "crypto/tls" +- "encoding/json" +- "errors" +- "fmt" +- "io" +- "math" +- "math/rand" +- "net/http" +- "sort" +- "strconv" +- "strings" +- "sync" +- "time" +- +- sw "github.com/ethereum-optimism/optimism/proxyd/pkg/avg-sliding-window" +- "github.com/ethereum/go-ethereum/common" +- "github.com/ethereum/go-ethereum/log" +- "github.com/ethereum/go-ethereum/rpc" +- "github.com/gorilla/websocket" +- "github.com/prometheus/client_golang/prometheus" +- "github.com/xaionaro-go/weightedshuffle" +- "golang.org/x/sync/semaphore" +-) +- +-const ( +- JSONRPCVersion = "2.0" +- JSONRPCErrorInternal = -32000 +- notFoundRpcError = -32601 +-) +- +-var ( +- ErrParseErr = &RPCErr{ +- Code: -32700, +- Message: "parse error", +- HTTPErrorCode: 400, +- } +- ErrInternal = &RPCErr{ +- Code: JSONRPCErrorInternal, +- Message: "internal error", +- HTTPErrorCode: 500, +- } +- ErrMethodNotWhitelisted = &RPCErr{ +- Code: notFoundRpcError, +- Message: "rpc method is not whitelisted", +- HTTPErrorCode: 403, +- } +- ErrBackendOffline = &RPCErr{ +- Code: JSONRPCErrorInternal - 10, +- Message: "backend offline", +- HTTPErrorCode: 503, +- } +- ErrNoBackends = &RPCErr{ +- Code: JSONRPCErrorInternal - 11, +- Message: "no backends available for method", +- HTTPErrorCode: 503, +- } +- ErrBackendOverCapacity = &RPCErr{ +- Code: JSONRPCErrorInternal - 12, +- Message: "backend is over capacity", +- HTTPErrorCode: 429, +- } +- ErrBackendBadResponse = &RPCErr{ +- Code: JSONRPCErrorInternal - 13, +- Message: "backend returned an invalid response", +- HTTPErrorCode: 500, +- } +- ErrTooManyBatchRequests = &RPCErr{ +- Code: JSONRPCErrorInternal - 14, +- Message: "too many RPC calls in batch request", +- } +- ErrGatewayTimeout = &RPCErr{ +- Code: JSONRPCErrorInternal - 15, +- Message: "gateway timeout", +- HTTPErrorCode: 504, +- } +- ErrOverRateLimit = &RPCErr{ +- Code: JSONRPCErrorInternal - 16, +- Message: "over rate limit", +- HTTPErrorCode: 429, +- } +- ErrOverSenderRateLimit = &RPCErr{ +- Code: JSONRPCErrorInternal - 17, +- Message: "sender is over rate limit", +- HTTPErrorCode: 429, +- } +- ErrNotHealthy = &RPCErr{ +- Code: JSONRPCErrorInternal - 18, +- Message: "backend is currently not healthy to serve traffic", +- HTTPErrorCode: 503, +- } +- ErrBlockOutOfRange = &RPCErr{ +- Code: JSONRPCErrorInternal - 19, +- Message: "block is out of range", +- HTTPErrorCode: 400, +- } +- +- ErrRequestBodyTooLarge = &RPCErr{ +- Code: JSONRPCErrorInternal - 21, +- Message: "request body too large", +- HTTPErrorCode: 413, +- } +- +- ErrBackendResponseTooLarge = &RPCErr{ +- Code: JSONRPCErrorInternal - 20, +- Message: "backend response too large", +- HTTPErrorCode: 500, +- } +- +- ErrBackendUnexpectedJSONRPC = errors.New("backend returned an unexpected JSON-RPC response") +- +- ErrConsensusGetReceiptsCantBeBatched = errors.New("consensus_getReceipts cannot be batched") +- ErrConsensusGetReceiptsInvalidTarget = errors.New("unsupported consensus_receipts_target") +-) +- +-func ErrInvalidRequest(msg string) *RPCErr { +- return &RPCErr{ +- Code: -32600, +- Message: msg, +- HTTPErrorCode: 400, +- } +-} +- +-func ErrInvalidParams(msg string) *RPCErr { +- return &RPCErr{ +- Code: -32602, +- Message: msg, +- HTTPErrorCode: 400, +- } +-} +- +-type Backend struct { +- Name 
string +- rpcURL string +- receiptsTarget string +- wsURL string +- authUsername string +- authPassword string +- headers map[string]string +- client *LimitedHTTPClient +- dialer *websocket.Dialer +- maxRetries int +- maxResponseSize int64 +- maxRPS int +- maxWSConns int +- outOfServiceInterval time.Duration +- stripTrailingXFF bool +- proxydIP string +- +- skipPeerCountCheck bool +- forcedCandidate bool +- +- maxDegradedLatencyThreshold time.Duration +- maxLatencyThreshold time.Duration +- maxErrorRateThreshold float64 +- +- latencySlidingWindow *sw.AvgSlidingWindow +- networkRequestsSlidingWindow *sw.AvgSlidingWindow +- networkErrorsSlidingWindow *sw.AvgSlidingWindow +- +- weight int +-} +- +-type BackendOpt func(b *Backend) +- +-func WithBasicAuth(username, password string) BackendOpt { +- return func(b *Backend) { +- b.authUsername = username +- b.authPassword = password +- } +-} +- +-func WithHeaders(headers map[string]string) BackendOpt { +- return func(b *Backend) { +- b.headers = headers +- } +-} +- +-func WithTimeout(timeout time.Duration) BackendOpt { +- return func(b *Backend) { +- b.client.Timeout = timeout +- } +-} +- +-func WithMaxRetries(retries int) BackendOpt { +- return func(b *Backend) { +- b.maxRetries = retries +- } +-} +- +-func WithMaxResponseSize(size int64) BackendOpt { +- return func(b *Backend) { +- b.maxResponseSize = size +- } +-} +- +-func WithOutOfServiceDuration(interval time.Duration) BackendOpt { +- return func(b *Backend) { +- b.outOfServiceInterval = interval +- } +-} +- +-func WithMaxRPS(maxRPS int) BackendOpt { +- return func(b *Backend) { +- b.maxRPS = maxRPS +- } +-} +- +-func WithMaxWSConns(maxConns int) BackendOpt { +- return func(b *Backend) { +- b.maxWSConns = maxConns +- } +-} +- +-func WithTLSConfig(tlsConfig *tls.Config) BackendOpt { +- return func(b *Backend) { +- if b.client.Transport == nil { +- b.client.Transport = &http.Transport{} +- } +- b.client.Transport.(*http.Transport).TLSClientConfig = tlsConfig +- } +-} +- +-func WithStrippedTrailingXFF() BackendOpt { +- return func(b *Backend) { +- b.stripTrailingXFF = true +- } +-} +- +-func WithProxydIP(ip string) BackendOpt { +- return func(b *Backend) { +- b.proxydIP = ip +- } +-} +- +-func WithConsensusSkipPeerCountCheck(skipPeerCountCheck bool) BackendOpt { +- return func(b *Backend) { +- b.skipPeerCountCheck = skipPeerCountCheck +- } +-} +- +-func WithConsensusForcedCandidate(forcedCandidate bool) BackendOpt { +- return func(b *Backend) { +- b.forcedCandidate = forcedCandidate +- } +-} +- +-func WithWeight(weight int) BackendOpt { +- return func(b *Backend) { +- b.weight = weight +- } +-} +- +-func WithMaxDegradedLatencyThreshold(maxDegradedLatencyThreshold time.Duration) BackendOpt { +- return func(b *Backend) { +- b.maxDegradedLatencyThreshold = maxDegradedLatencyThreshold +- } +-} +- +-func WithMaxLatencyThreshold(maxLatencyThreshold time.Duration) BackendOpt { +- return func(b *Backend) { +- b.maxLatencyThreshold = maxLatencyThreshold +- } +-} +- +-func WithMaxErrorRateThreshold(maxErrorRateThreshold float64) BackendOpt { +- return func(b *Backend) { +- b.maxErrorRateThreshold = maxErrorRateThreshold +- } +-} +- +-func WithConsensusReceiptTarget(receiptsTarget string) BackendOpt { +- return func(b *Backend) { +- b.receiptsTarget = receiptsTarget +- } +-} +- +-type indexedReqRes struct { +- index int +- req *RPCReq +- res *RPCRes +-} +- +-const proxydHealthzMethod = "proxyd_healthz" +- +-const ConsensusGetReceiptsMethod = "consensus_getReceipts" +- +-const 
ReceiptsTargetDebugGetRawReceipts = "debug_getRawReceipts" +-const ReceiptsTargetAlchemyGetTransactionReceipts = "alchemy_getTransactionReceipts" +-const ReceiptsTargetParityGetTransactionReceipts = "parity_getBlockReceipts" +-const ReceiptsTargetEthGetTransactionReceipts = "eth_getBlockReceipts" +- +-type ConsensusGetReceiptsResult struct { +- Method string `json:"method"` +- Result interface{} `json:"result"` +-} +- +-// BlockHashOrNumberParameter is a non-conventional wrapper used by alchemy_getTransactionReceipts +-type BlockHashOrNumberParameter struct { +- BlockHash *common.Hash `json:"blockHash"` +- BlockNumber *rpc.BlockNumber `json:"blockNumber"` +-} +- +-func NewBackend( +- name string, +- rpcURL string, +- wsURL string, +- rpcSemaphore *semaphore.Weighted, +- opts ...BackendOpt, +-) *Backend { +- backend := &Backend{ +- Name: name, +- rpcURL: rpcURL, +- wsURL: wsURL, +- maxResponseSize: math.MaxInt64, +- client: &LimitedHTTPClient{ +- Client: http.Client{Timeout: 5 * time.Second}, +- sem: rpcSemaphore, +- backendName: name, +- }, +- dialer: &websocket.Dialer{}, +- +- maxLatencyThreshold: 10 * time.Second, +- maxDegradedLatencyThreshold: 5 * time.Second, +- maxErrorRateThreshold: 0.5, +- +- latencySlidingWindow: sw.NewSlidingWindow(), +- networkRequestsSlidingWindow: sw.NewSlidingWindow(), +- networkErrorsSlidingWindow: sw.NewSlidingWindow(), +- } +- +- backend.Override(opts...) +- +- if !backend.stripTrailingXFF && backend.proxydIP == "" { +- log.Warn("proxied requests' XFF header will not contain the proxyd ip address") +- } +- +- return backend +-} +- +-func (b *Backend) Override(opts ...BackendOpt) { +- for _, opt := range opts { +- opt(b) +- } +-} +- +-func (b *Backend) Forward(ctx context.Context, reqs []*RPCReq, isBatch bool) ([]*RPCRes, error) { +- var lastError error +- // <= to account for the first attempt not technically being +- // a retry +- for i := 0; i <= b.maxRetries; i++ { +- RecordBatchRPCForward(ctx, b.Name, reqs, RPCRequestSourceHTTP) +- metricLabelMethod := reqs[0].Method +- if isBatch { +- metricLabelMethod = "<batch>" +- } +- timer := prometheus.NewTimer( +- rpcBackendRequestDurationSumm.WithLabelValues( +- b.Name, +- metricLabelMethod, +- strconv.FormatBool(isBatch), +- ), +- ) +- +- res, err := b.doForward(ctx, reqs, isBatch) +- switch err { +- case nil: // do nothing +- case ErrBackendResponseTooLarge: +- log.Warn( +- "backend response too large", +- "name", b.Name, +- "req_id", GetReqID(ctx), +- "max", b.maxResponseSize, +- ) +- RecordBatchRPCError(ctx, b.Name, reqs, err) +- case ErrConsensusGetReceiptsCantBeBatched: +- log.Warn( +- "Received unsupported batch request for consensus_getReceipts", +- "name", b.Name, +- "req_id", GetReqID(ctx), +- "err", err, +- ) +- case ErrConsensusGetReceiptsInvalidTarget: +- log.Error( +- "Unsupported consensus_receipts_target for consensus_getReceipts", +- "name", b.Name, +- "req_id", GetReqID(ctx), +- "err", err, +- ) +- // ErrBackendUnexpectedJSONRPC occurs because infura responds with a single JSON-RPC object +- // to a batch request whenever any Request Object in the batch would induce a partial error. +- // We don't label the backend offline in this case. But the error is still returned to +- // callers so failover can occur if needed. 
+- case ErrBackendUnexpectedJSONRPC: +- log.Debug( +- "Received unexpected JSON-RPC response", +- "name", b.Name, +- "req_id", GetReqID(ctx), +- "err", err, +- ) +- default: +- lastError = err +- log.Warn( +- "backend request failed, trying again", +- "name", b.Name, +- "req_id", GetReqID(ctx), +- "err", err, +- ) +- timer.ObserveDuration() +- RecordBatchRPCError(ctx, b.Name, reqs, err) +- sleepContext(ctx, calcBackoff(i)) +- continue +- } +- timer.ObserveDuration() +- +- MaybeRecordErrorsInRPCRes(ctx, b.Name, reqs, res) +- return res, err +- } +- +- return nil, wrapErr(lastError, "permanent error forwarding request") +-} +- +-func (b *Backend) ProxyWS(clientConn *websocket.Conn, methodWhitelist *StringSet) (*WSProxier, error) { +- backendConn, _, err := b.dialer.Dial(b.wsURL, nil) // nolint:bodyclose +- if err != nil { +- return nil, wrapErr(err, "error dialing backend") +- } +- +- activeBackendWsConnsGauge.WithLabelValues(b.Name).Inc() +- return NewWSProxier(b, clientConn, backendConn, methodWhitelist), nil +-} +- +-// ForwardRPC makes a call directly to a backend and populate the response into `res` +-func (b *Backend) ForwardRPC(ctx context.Context, res *RPCRes, id string, method string, params ...any) error { +- jsonParams, err := json.Marshal(params) +- if err != nil { +- return err +- } +- +- rpcReq := RPCReq{ +- JSONRPC: JSONRPCVersion, +- Method: method, +- Params: jsonParams, +- ID: []byte(id), +- } +- +- slicedRes, err := b.doForward(ctx, []*RPCReq{&rpcReq}, false) +- if err != nil { +- return err +- } +- +- if len(slicedRes) != 1 { +- return fmt.Errorf("unexpected response len for non-batched request (len != 1)") +- } +- if slicedRes[0].IsError() { +- return fmt.Errorf(slicedRes[0].Error.Error()) +- } +- +- *res = *(slicedRes[0]) +- return nil +-} +- +-func (b *Backend) doForward(ctx context.Context, rpcReqs []*RPCReq, isBatch bool) ([]*RPCRes, error) { +- // we are concerned about network error rates, so we record 1 request independently of how many are in the batch +- b.networkRequestsSlidingWindow.Incr() +- +- translatedReqs := make(map[string]*RPCReq, len(rpcReqs)) +- // translate consensus_getReceipts to receipts target +- // right now we only support non-batched +- if isBatch { +- for _, rpcReq := range rpcReqs { +- if rpcReq.Method == ConsensusGetReceiptsMethod { +- return nil, ErrConsensusGetReceiptsCantBeBatched +- } +- } +- } else { +- for _, rpcReq := range rpcReqs { +- if rpcReq.Method == ConsensusGetReceiptsMethod { +- translatedReqs[string(rpcReq.ID)] = rpcReq +- rpcReq.Method = b.receiptsTarget +- var reqParams []rpc.BlockNumberOrHash +- err := json.Unmarshal(rpcReq.Params, &reqParams) +- if err != nil { +- return nil, ErrInvalidRequest("invalid request") +- } +- +- var translatedParams []byte +- switch rpcReq.Method { +- case ReceiptsTargetDebugGetRawReceipts, +- ReceiptsTargetEthGetTransactionReceipts, +- ReceiptsTargetParityGetTransactionReceipts: +- // conventional methods use an array of strings having either block number or block hash +- // i.e. ["0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b"] +- params := make([]string, 1) +- if reqParams[0].BlockNumber != nil { +- params[0] = reqParams[0].BlockNumber.String() +- } else { +- params[0] = reqParams[0].BlockHash.Hex() +- } +- translatedParams = mustMarshalJSON(params) +- case ReceiptsTargetAlchemyGetTransactionReceipts: +- // alchemy uses an array of object with either block number or block hash +- // i.e. 
[{ blockHash: "0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b" }] +- params := make([]BlockHashOrNumberParameter, 1) +- if reqParams[0].BlockNumber != nil { +- params[0].BlockNumber = reqParams[0].BlockNumber +- } else { +- params[0].BlockHash = reqParams[0].BlockHash +- } +- translatedParams = mustMarshalJSON(params) +- default: +- return nil, ErrConsensusGetReceiptsInvalidTarget +- } +- +- rpcReq.Params = translatedParams +- } +- } +- } +- +- isSingleElementBatch := len(rpcReqs) == 1 +- +- // Single element batches are unwrapped before being sent +- // since Alchemy handles single requests better than batches. +- var body []byte +- if isSingleElementBatch { +- body = mustMarshalJSON(rpcReqs[0]) +- } else { +- body = mustMarshalJSON(rpcReqs) +- } +- +- httpReq, err := http.NewRequestWithContext(ctx, "POST", b.rpcURL, bytes.NewReader(body)) +- if err != nil { +- b.networkErrorsSlidingWindow.Incr() +- RecordBackendNetworkErrorRateSlidingWindow(b, b.ErrorRate()) +- return nil, wrapErr(err, "error creating backend request") +- } +- +- if b.authPassword != "" { +- httpReq.SetBasicAuth(b.authUsername, b.authPassword) +- } +- +- xForwardedFor := GetXForwardedFor(ctx) +- if b.stripTrailingXFF { +- xForwardedFor = stripXFF(xForwardedFor) +- } else if b.proxydIP != "" { +- xForwardedFor = fmt.Sprintf("%s, %s", xForwardedFor, b.proxydIP) +- } +- +- httpReq.Header.Set("content-type", "application/json") +- httpReq.Header.Set("X-Forwarded-For", xForwardedFor) +- +- for name, value := range b.headers { +- httpReq.Header.Set(name, value) +- } +- +- start := time.Now() +- httpRes, err := b.client.DoLimited(httpReq) +- if err != nil { +- b.networkErrorsSlidingWindow.Incr() +- RecordBackendNetworkErrorRateSlidingWindow(b, b.ErrorRate()) +- return nil, wrapErr(err, "error in backend request") +- } +- +- metricLabelMethod := rpcReqs[0].Method +- if isBatch { +- metricLabelMethod = "<batch>" +- } +- rpcBackendHTTPResponseCodesTotal.WithLabelValues( +- GetAuthCtx(ctx), +- b.Name, +- metricLabelMethod, +- strconv.Itoa(httpRes.StatusCode), +- strconv.FormatBool(isBatch), +- ).Inc() +- +- // Alchemy returns a 400 on bad JSONs, so handle that case +- if httpRes.StatusCode != 200 && httpRes.StatusCode != 400 { +- b.networkErrorsSlidingWindow.Incr() +- RecordBackendNetworkErrorRateSlidingWindow(b, b.ErrorRate()) +- return nil, fmt.Errorf("response code %d", httpRes.StatusCode) +- } +- +- defer httpRes.Body.Close() +- resB, err := io.ReadAll(LimitReader(httpRes.Body, b.maxResponseSize)) +- if errors.Is(err, ErrLimitReaderOverLimit) { +- return nil, ErrBackendResponseTooLarge +- } +- if err != nil { +- b.networkErrorsSlidingWindow.Incr() +- RecordBackendNetworkErrorRateSlidingWindow(b, b.ErrorRate()) +- return nil, wrapErr(err, "error reading response body") +- } +- +- var rpcRes []*RPCRes +- if isSingleElementBatch { +- var singleRes RPCRes +- if err := json.Unmarshal(resB, &singleRes); err != nil { +- return nil, ErrBackendBadResponse +- } +- rpcRes = []*RPCRes{ +- &singleRes, +- } +- } else { +- if err := json.Unmarshal(resB, &rpcRes); err != nil { +- // Infura may return a single JSON-RPC response if, for example, the batch contains a request for an unsupported method +- if responseIsNotBatched(resB) { +- b.networkErrorsSlidingWindow.Incr() +- RecordBackendNetworkErrorRateSlidingWindow(b, b.ErrorRate()) +- return nil, ErrBackendUnexpectedJSONRPC +- } +- b.networkErrorsSlidingWindow.Incr() +- RecordBackendNetworkErrorRateSlidingWindow(b, b.ErrorRate()) +- return nil, ErrBackendBadResponse +- } 
+- } +- +- if len(rpcReqs) != len(rpcRes) { +- b.networkErrorsSlidingWindow.Incr() +- RecordBackendNetworkErrorRateSlidingWindow(b, b.ErrorRate()) +- return nil, ErrBackendUnexpectedJSONRPC +- } +- +- // capture the HTTP status code in the response. this will only +- // ever be 400 given the status check on line 318 above. +- if httpRes.StatusCode != 200 { +- for _, res := range rpcRes { +- res.Error.HTTPErrorCode = httpRes.StatusCode +- } +- } +- duration := time.Since(start) +- b.latencySlidingWindow.Add(float64(duration)) +- RecordBackendNetworkLatencyAverageSlidingWindow(b, time.Duration(b.latencySlidingWindow.Avg())) +- RecordBackendNetworkErrorRateSlidingWindow(b, b.ErrorRate()) +- +- // enrich the response with the actual request method +- for _, res := range rpcRes { +- translatedReq, exist := translatedReqs[string(res.ID)] +- if exist { +- res.Result = ConsensusGetReceiptsResult{ +- Method: translatedReq.Method, +- Result: res.Result, +- } +- } +- } +- +- sortBatchRPCResponse(rpcReqs, rpcRes) +- +- return rpcRes, nil +-} +- +-// IsHealthy checks if the backend is able to serve traffic, based on dynamic parameters +-func (b *Backend) IsHealthy() bool { +- errorRate := b.ErrorRate() +- avgLatency := time.Duration(b.latencySlidingWindow.Avg()) +- if errorRate >= b.maxErrorRateThreshold { +- return false +- } +- if avgLatency >= b.maxLatencyThreshold { +- return false +- } +- return true +-} +- +-// ErrorRate returns the instant error rate of the backend +-func (b *Backend) ErrorRate() (errorRate float64) { +- // we only really start counting the error rate after a minimum of 10 requests +- // this is to avoid false positives when the backend is just starting up +- if b.networkRequestsSlidingWindow.Sum() >= 10 { +- errorRate = b.networkErrorsSlidingWindow.Sum() / b.networkRequestsSlidingWindow.Sum() +- } +- return errorRate +-} +- +-// IsDegraded checks if the backend is serving traffic in a degraded state (i.e. used as a last resource) +-func (b *Backend) IsDegraded() bool { +- avgLatency := time.Duration(b.latencySlidingWindow.Avg()) +- return avgLatency >= b.maxDegradedLatencyThreshold +-} +- +-func responseIsNotBatched(b []byte) bool { +- var r RPCRes +- return json.Unmarshal(b, &r) == nil +-} +- +-// sortBatchRPCResponse sorts the RPCRes slice according to the position of its corresponding ID in the RPCReq slice +-func sortBatchRPCResponse(req []*RPCReq, res []*RPCRes) { +- pos := make(map[string]int, len(req)) +- for i, r := range req { +- key := string(r.ID) +- if _, ok := pos[key]; ok { +- panic("bug! 
detected requests with duplicate IDs") +- } +- pos[key] = i +- } +- +- sort.Slice(res, func(i, j int) bool { +- l := res[i].ID +- r := res[j].ID +- return pos[string(l)] < pos[string(r)] +- }) +-} +- +-type BackendGroup struct { +- Name string +- Backends []*Backend +- WeightedRouting bool +- Consensus *ConsensusPoller +- FallbackBackends map[string]bool +-} +- +-func (bg *BackendGroup) Fallbacks() []*Backend { +- fallbacks := []*Backend{} +- for _, a := range bg.Backends { +- if fallback, ok := bg.FallbackBackends[a.Name]; ok && fallback { +- fallbacks = append(fallbacks, a) +- } +- } +- return fallbacks +-} +- +-func (bg *BackendGroup) Primaries() []*Backend { +- primaries := []*Backend{} +- for _, a := range bg.Backends { +- fallback, ok := bg.FallbackBackends[a.Name] +- if ok && !fallback { +- primaries = append(primaries, a) +- } +- } +- return primaries +-} +- +-// NOTE: BackendGroup Forward contains the log for balancing with consensus aware +-func (bg *BackendGroup) Forward(ctx context.Context, rpcReqs []*RPCReq, isBatch bool) ([]*RPCRes, string, error) { +- if len(rpcReqs) == 0 { +- return nil, "", nil +- } +- +- backends := bg.orderedBackendsForRequest() +- +- overriddenResponses := make([]*indexedReqRes, 0) +- rewrittenReqs := make([]*RPCReq, 0, len(rpcReqs)) +- +- if bg.Consensus != nil { +- // When `consensus_aware` is set to `true`, the backend group acts as a load balancer +- // serving traffic from any backend that agrees in the consensus group +- +- // We also rewrite block tags to enforce compliance with consensus +- rctx := RewriteContext{ +- latest: bg.Consensus.GetLatestBlockNumber(), +- safe: bg.Consensus.GetSafeBlockNumber(), +- finalized: bg.Consensus.GetFinalizedBlockNumber(), +- maxBlockRange: bg.Consensus.maxBlockRange, +- } +- +- for i, req := range rpcReqs { +- res := RPCRes{JSONRPC: JSONRPCVersion, ID: req.ID} +- result, err := RewriteTags(rctx, req, &res) +- switch result { +- case RewriteOverrideError: +- overriddenResponses = append(overriddenResponses, &indexedReqRes{ +- index: i, +- req: req, +- res: &res, +- }) +- if errors.Is(err, ErrRewriteBlockOutOfRange) { +- res.Error = ErrBlockOutOfRange +- } else if errors.Is(err, ErrRewriteRangeTooLarge) { +- res.Error = ErrInvalidParams( +- fmt.Sprintf("block range greater than %d max", rctx.maxBlockRange), +- ) +- } else { +- res.Error = ErrParseErr +- } +- case RewriteOverrideResponse: +- overriddenResponses = append(overriddenResponses, &indexedReqRes{ +- index: i, +- req: req, +- res: &res, +- }) +- case RewriteOverrideRequest, RewriteNone: +- rewrittenReqs = append(rewrittenReqs, req) +- } +- } +- rpcReqs = rewrittenReqs +- } +- +- rpcRequestsTotal.Inc() +- +- for _, back := range backends { +- res := make([]*RPCRes, 0) +- var err error +- +- servedBy := fmt.Sprintf("%s/%s", bg.Name, back.Name) +- +- if len(rpcReqs) > 0 { +- res, err = back.Forward(ctx, rpcReqs, isBatch) +- if errors.Is(err, ErrConsensusGetReceiptsCantBeBatched) || +- errors.Is(err, ErrConsensusGetReceiptsInvalidTarget) || +- errors.Is(err, ErrMethodNotWhitelisted) { +- return nil, "", err +- } +- if errors.Is(err, ErrBackendResponseTooLarge) { +- return nil, servedBy, err +- } +- if errors.Is(err, ErrBackendOffline) { +- log.Warn( +- "skipping offline backend", +- "name", back.Name, +- "auth", GetAuthCtx(ctx), +- "req_id", GetReqID(ctx), +- ) +- continue +- } +- if errors.Is(err, ErrBackendOverCapacity) { +- log.Warn( +- "skipping over-capacity backend", +- "name", back.Name, +- "auth", GetAuthCtx(ctx), +- "req_id", GetReqID(ctx), +- ) +- 
continue +- } +- if err != nil { +- log.Error( +- "error forwarding request to backend", +- "name", back.Name, +- "req_id", GetReqID(ctx), +- "auth", GetAuthCtx(ctx), +- "err", err, +- ) +- continue +- } +- } +- +- // re-apply overridden responses +- for _, ov := range overriddenResponses { +- if len(res) > 0 { +- // insert ov.res at position ov.index +- res = append(res[:ov.index], append([]*RPCRes{ov.res}, res[ov.index:]...)...) +- } else { +- res = append(res, ov.res) +- } +- } +- +- return res, servedBy, nil +- } +- +- RecordUnserviceableRequest(ctx, RPCRequestSourceHTTP) +- return nil, "", ErrNoBackends +-} +- +-func (bg *BackendGroup) ProxyWS(ctx context.Context, clientConn *websocket.Conn, methodWhitelist *StringSet) (*WSProxier, error) { +- for _, back := range bg.Backends { +- proxier, err := back.ProxyWS(clientConn, methodWhitelist) +- if errors.Is(err, ErrBackendOffline) { +- log.Warn( +- "skipping offline backend", +- "name", back.Name, +- "req_id", GetReqID(ctx), +- "auth", GetAuthCtx(ctx), +- ) +- continue +- } +- if errors.Is(err, ErrBackendOverCapacity) { +- log.Warn( +- "skipping over-capacity backend", +- "name", back.Name, +- "req_id", GetReqID(ctx), +- "auth", GetAuthCtx(ctx), +- ) +- continue +- } +- if err != nil { +- log.Warn( +- "error dialing ws backend", +- "name", back.Name, +- "req_id", GetReqID(ctx), +- "auth", GetAuthCtx(ctx), +- "err", err, +- ) +- continue +- } +- return proxier, nil +- } +- +- return nil, ErrNoBackends +-} +- +-func weightedShuffle(backends []*Backend) { +- weight := func(i int) float64 { +- return float64(backends[i].weight) +- } +- +- weightedshuffle.ShuffleInplace(backends, weight, nil) +-} +- +-func (bg *BackendGroup) orderedBackendsForRequest() []*Backend { +- if bg.Consensus != nil { +- return bg.loadBalancedConsensusGroup() +- } else if bg.WeightedRouting { +- result := make([]*Backend, len(bg.Backends)) +- copy(result, bg.Backends) +- weightedShuffle(result) +- return result +- } else { +- return bg.Backends +- } +-} +- +-func (bg *BackendGroup) loadBalancedConsensusGroup() []*Backend { +- cg := bg.Consensus.GetConsensusGroup() +- +- backendsHealthy := make([]*Backend, 0, len(cg)) +- backendsDegraded := make([]*Backend, 0, len(cg)) +- // separate into healthy, degraded and unhealthy backends +- for _, be := range cg { +- // unhealthy are filtered out and not attempted +- if !be.IsHealthy() { +- continue +- } +- if be.IsDegraded() { +- backendsDegraded = append(backendsDegraded, be) +- continue +- } +- backendsHealthy = append(backendsHealthy, be) +- } +- +- // shuffle both slices +- r := rand.New(rand.NewSource(time.Now().UnixNano())) +- r.Shuffle(len(backendsHealthy), func(i, j int) { +- backendsHealthy[i], backendsHealthy[j] = backendsHealthy[j], backendsHealthy[i] +- }) +- r.Shuffle(len(backendsDegraded), func(i, j int) { +- backendsDegraded[i], backendsDegraded[j] = backendsDegraded[j], backendsDegraded[i] +- }) +- +- if bg.WeightedRouting { +- weightedShuffle(backendsHealthy) +- } +- +- // healthy are put into a priority position +- // degraded backends are used as fallback +- backendsHealthy = append(backendsHealthy, backendsDegraded...) 
+- +- return backendsHealthy +-} +- +-func (bg *BackendGroup) Shutdown() { +- if bg.Consensus != nil { +- bg.Consensus.Shutdown() +- } +-} +- +-func calcBackoff(i int) time.Duration { +- jitter := float64(rand.Int63n(250)) +- ms := math.Min(math.Pow(2, float64(i))*1000+jitter, 3000) +- return time.Duration(ms) * time.Millisecond +-} +- +-type WSProxier struct { +- backend *Backend +- clientConn *websocket.Conn +- clientConnMu sync.Mutex +- backendConn *websocket.Conn +- backendConnMu sync.Mutex +- methodWhitelist *StringSet +- readTimeout time.Duration +- writeTimeout time.Duration +-} +- +-func NewWSProxier(backend *Backend, clientConn, backendConn *websocket.Conn, methodWhitelist *StringSet) *WSProxier { +- return &WSProxier{ +- backend: backend, +- clientConn: clientConn, +- backendConn: backendConn, +- methodWhitelist: methodWhitelist, +- readTimeout: defaultWSReadTimeout, +- writeTimeout: defaultWSWriteTimeout, +- } +-} +- +-func (w *WSProxier) Proxy(ctx context.Context) error { +- errC := make(chan error, 2) +- go w.clientPump(ctx, errC) +- go w.backendPump(ctx, errC) +- err := <-errC +- w.close() +- return err +-} +- +-func (w *WSProxier) clientPump(ctx context.Context, errC chan error) { +- for { +- // Block until we get a message. +- msgType, msg, err := w.clientConn.ReadMessage() +- if err != nil { +- if err := w.writeBackendConn(websocket.CloseMessage, formatWSError(err)); err != nil { +- log.Error("error writing backendConn message", "err", err) +- errC <- err +- return +- } +- } +- +- RecordWSMessage(ctx, w.backend.Name, SourceClient) +- +- // Route control messages to the backend. These don't +- // count towards the total RPC requests count. +- if msgType != websocket.TextMessage && msgType != websocket.BinaryMessage { +- err := w.writeBackendConn(msgType, msg) +- if err != nil { +- errC <- err +- return +- } +- continue +- } +- +- rpcRequestsTotal.Inc() +- +- // Don't bother sending invalid requests to the backend, +- // just handle them here. +- req, err := w.prepareClientMsg(msg) +- if err != nil { +- var id json.RawMessage +- method := MethodUnknown +- if req != nil { +- id = req.ID +- method = req.Method +- } +- log.Info( +- "error preparing client message", +- "auth", GetAuthCtx(ctx), +- "req_id", GetReqID(ctx), +- "err", err, +- ) +- msg = mustMarshalJSON(NewRPCErrorRes(id, err)) +- RecordRPCError(ctx, BackendProxyd, method, err) +- +- // Send error response to client +- err = w.writeClientConn(msgType, msg) +- if err != nil { +- errC <- err +- return +- } +- continue +- } +- +- // Send eth_accounts requests directly to the client +- if req.Method == "eth_accounts" { +- msg = mustMarshalJSON(NewRPCRes(req.ID, emptyArrayResponse)) +- RecordRPCForward(ctx, BackendProxyd, "eth_accounts", RPCRequestSourceWS) +- err = w.writeClientConn(msgType, msg) +- if err != nil { +- errC <- err +- return +- } +- continue +- } +- +- RecordRPCForward(ctx, w.backend.Name, req.Method, RPCRequestSourceWS) +- log.Info( +- "forwarded WS message to backend", +- "method", req.Method, +- "auth", GetAuthCtx(ctx), +- "req_id", GetReqID(ctx), +- ) +- +- err = w.writeBackendConn(msgType, msg) +- if err != nil { +- errC <- err +- return +- } +- } +-} +- +-func (w *WSProxier) backendPump(ctx context.Context, errC chan error) { +- for { +- // Block until we get a message. 
+- msgType, msg, err := w.backendConn.ReadMessage() +- if err != nil { +- if err := w.writeClientConn(websocket.CloseMessage, formatWSError(err)); err != nil { +- log.Error("error writing clientConn message", "err", err) +- errC <- err +- return +- } +- } +- +- RecordWSMessage(ctx, w.backend.Name, SourceBackend) +- +- // Route control messages directly to the client. +- if msgType != websocket.TextMessage && msgType != websocket.BinaryMessage { +- err := w.writeClientConn(msgType, msg) +- if err != nil { +- errC <- err +- return +- } +- continue +- } +- +- res, err := w.parseBackendMsg(msg) +- if err != nil { +- var id json.RawMessage +- if res != nil { +- id = res.ID +- } +- msg = mustMarshalJSON(NewRPCErrorRes(id, err)) +- log.Info("backend responded with error", "err", err) +- } else { +- if res.IsError() { +- log.Info( +- "backend responded with RPC error", +- "code", res.Error.Code, +- "msg", res.Error.Message, +- "source", "ws", +- "auth", GetAuthCtx(ctx), +- "req_id", GetReqID(ctx), +- ) +- RecordRPCError(ctx, w.backend.Name, MethodUnknown, res.Error) +- } else { +- log.Info( +- "forwarded WS message to client", +- "auth", GetAuthCtx(ctx), +- "req_id", GetReqID(ctx), +- ) +- } +- } +- +- err = w.writeClientConn(msgType, msg) +- if err != nil { +- errC <- err +- return +- } +- } +-} +- +-func (w *WSProxier) close() { +- w.clientConn.Close() +- w.backendConn.Close() +- activeBackendWsConnsGauge.WithLabelValues(w.backend.Name).Dec() +-} +- +-func (w *WSProxier) prepareClientMsg(msg []byte) (*RPCReq, error) { +- req, err := ParseRPCReq(msg) +- if err != nil { +- return nil, err +- } +- +- if !w.methodWhitelist.Has(req.Method) { +- return req, ErrMethodNotWhitelisted +- } +- +- return req, nil +-} +- +-func (w *WSProxier) parseBackendMsg(msg []byte) (*RPCRes, error) { +- res, err := ParseRPCRes(bytes.NewReader(msg)) +- if err != nil { +- log.Warn("error parsing RPC response", "source", "ws", "err", err) +- return res, ErrBackendBadResponse +- } +- return res, nil +-} +- +-func (w *WSProxier) writeClientConn(msgType int, msg []byte) error { +- w.clientConnMu.Lock() +- defer w.clientConnMu.Unlock() +- if err := w.clientConn.SetWriteDeadline(time.Now().Add(w.writeTimeout)); err != nil { +- log.Error("ws client write timeout", "err", err) +- return err +- } +- err := w.clientConn.WriteMessage(msgType, msg) +- return err +-} +- +-func (w *WSProxier) writeBackendConn(msgType int, msg []byte) error { +- w.backendConnMu.Lock() +- defer w.backendConnMu.Unlock() +- if err := w.backendConn.SetWriteDeadline(time.Now().Add(w.writeTimeout)); err != nil { +- log.Error("ws backend write timeout", "err", err) +- return err +- } +- err := w.backendConn.WriteMessage(msgType, msg) +- return err +-} +- +-func mustMarshalJSON(in interface{}) []byte { +- out, err := json.Marshal(in) +- if err != nil { +- panic(err) +- } +- return out +-} +- +-func formatWSError(err error) []byte { +- m := websocket.FormatCloseMessage(websocket.CloseNormalClosure, fmt.Sprintf("%v", err)) +- if e, ok := err.(*websocket.CloseError); ok { +- if e.Code != websocket.CloseNoStatusReceived { +- m = websocket.FormatCloseMessage(e.Code, e.Text) +- } +- } +- return m +-} +- +-func sleepContext(ctx context.Context, duration time.Duration) { +- select { +- case <-ctx.Done(): +- case <-time.After(duration): +- } +-} +- +-type LimitedHTTPClient struct { +- http.Client +- sem *semaphore.Weighted +- backendName string +-} +- +-func (c *LimitedHTTPClient) DoLimited(req *http.Request) (*http.Response, error) { +- if err := 
c.sem.Acquire(req.Context(), 1); err != nil { +- tooManyRequestErrorsTotal.WithLabelValues(c.backendName).Inc() +- return nil, wrapErr(err, "too many requests") +- } +- defer c.sem.Release(1) +- return c.Do(req) +-} +- +-func RecordBatchRPCError(ctx context.Context, backendName string, reqs []*RPCReq, err error) { +- for _, req := range reqs { +- RecordRPCError(ctx, backendName, req.Method, err) +- } +-} +- +-func MaybeRecordErrorsInRPCRes(ctx context.Context, backendName string, reqs []*RPCReq, resBatch []*RPCRes) { +- log.Info("forwarded RPC request", +- "backend", backendName, +- "auth", GetAuthCtx(ctx), +- "req_id", GetReqID(ctx), +- "batch_size", len(reqs), +- ) +- +- var lastError *RPCErr +- for i, res := range resBatch { +- if res.IsError() { +- lastError = res.Error +- RecordRPCError(ctx, backendName, reqs[i].Method, res.Error) +- } +- } +- +- if lastError != nil { +- log.Info( +- "backend responded with RPC error", +- "backend", backendName, +- "last_error_code", lastError.Code, +- "last_error_msg", lastError.Message, +- "req_id", GetReqID(ctx), +- "source", "rpc", +- "auth", GetAuthCtx(ctx), +- ) +- } +-} +- +-func RecordBatchRPCForward(ctx context.Context, backendName string, reqs []*RPCReq, source string) { +- for _, req := range reqs { +- RecordRPCForward(ctx, backendName, req.Method, source) +- } +-} +- +-func stripXFF(xff string) string { +- ipList := strings.Split(xff, ",") +- return strings.TrimSpace(ipList[0]) +-}
proxyd/backend_test.go (deleted): +0 / -21
diff --git OP/proxyd/backend_test.go CELO/proxyd/backend_test.go +deleted file mode 100644 +index 7be23bfed7bc563174d4b7084bdb70a6b4d1c7db..0000000000000000000000000000000000000000 +--- OP/proxyd/backend_test.go ++++ /dev/null +@@ -1,21 +0,0 @@ +-package proxyd +- +-import ( +- "github.com/stretchr/testify/assert" +- "testing" +-) +- +-func TestStripXFF(t *testing.T) { +- tests := []struct { +- in, out string +- }{ +- {"1.2.3, 4.5.6, 7.8.9", "1.2.3"}, +- {"1.2.3,4.5.6", "1.2.3"}, +- {" 1.2.3 , 4.5.6 ", "1.2.3"}, +- } +- +- for _, test := range tests { +- actual := stripXFF(test.in) +- assert.Equal(t, test.out, actual) +- } +-}
proxyd/cache.go (deleted): +0 / -192
diff --git OP/proxyd/cache.go CELO/proxyd/cache.go +deleted file mode 100644 +index 5add4f23627ea7dea881c3a2a766450c78ff2d8e..0000000000000000000000000000000000000000 +--- OP/proxyd/cache.go ++++ /dev/null +@@ -1,192 +0,0 @@ +-package proxyd +- +-import ( +- "context" +- "encoding/json" +- "strings" +- "time" +- +- "github.com/ethereum/go-ethereum/rpc" +- "github.com/redis/go-redis/v9" +- +- "github.com/golang/snappy" +- lru "github.com/hashicorp/golang-lru" +-) +- +-type Cache interface { +- Get(ctx context.Context, key string) (string, error) +- Put(ctx context.Context, key string, value string) error +-} +- +-const ( +- // assuming an average RPCRes size of 3 KB +- memoryCacheLimit = 4096 +-) +- +-type cache struct { +- lru *lru.Cache +-} +- +-func newMemoryCache() *cache { +- rep, _ := lru.New(memoryCacheLimit) +- return &cache{rep} +-} +- +-func (c *cache) Get(ctx context.Context, key string) (string, error) { +- if val, ok := c.lru.Get(key); ok { +- return val.(string), nil +- } +- return "", nil +-} +- +-func (c *cache) Put(ctx context.Context, key string, value string) error { +- c.lru.Add(key, value) +- return nil +-} +- +-type redisCache struct { +- rdb *redis.Client +- prefix string +- ttl time.Duration +-} +- +-func newRedisCache(rdb *redis.Client, prefix string, ttl time.Duration) *redisCache { +- return &redisCache{rdb, prefix, ttl} +-} +- +-func (c *redisCache) namespaced(key string) string { +- if c.prefix == "" { +- return key +- } +- return strings.Join([]string{c.prefix, key}, ":") +-} +- +-func (c *redisCache) Get(ctx context.Context, key string) (string, error) { +- start := time.Now() +- val, err := c.rdb.Get(ctx, c.namespaced(key)).Result() +- redisCacheDurationSumm.WithLabelValues("GET").Observe(float64(time.Since(start).Milliseconds())) +- +- if err == redis.Nil { +- return "", nil +- } else if err != nil { +- RecordRedisError("CacheGet") +- return "", err +- } +- return val, nil +-} +- +-func (c *redisCache) Put(ctx context.Context, key string, value string) error { +- start := time.Now() +- err := c.rdb.SetEx(ctx, c.namespaced(key), value, c.ttl).Err() +- redisCacheDurationSumm.WithLabelValues("SETEX").Observe(float64(time.Since(start).Milliseconds())) +- +- if err != nil { +- RecordRedisError("CacheSet") +- } +- return err +-} +- +-type cacheWithCompression struct { +- cache Cache +-} +- +-func newCacheWithCompression(cache Cache) *cacheWithCompression { +- return &cacheWithCompression{cache} +-} +- +-func (c *cacheWithCompression) Get(ctx context.Context, key string) (string, error) { +- encodedVal, err := c.cache.Get(ctx, key) +- if err != nil { +- return "", err +- } +- if encodedVal == "" { +- return "", nil +- } +- val, err := snappy.Decode(nil, []byte(encodedVal)) +- if err != nil { +- return "", err +- } +- return string(val), nil +-} +- +-func (c *cacheWithCompression) Put(ctx context.Context, key string, value string) error { +- encodedVal := snappy.Encode(nil, []byte(value)) +- return c.cache.Put(ctx, key, string(encodedVal)) +-} +- +-type RPCCache interface { +- GetRPC(ctx context.Context, req *RPCReq) (*RPCRes, error) +- PutRPC(ctx context.Context, req *RPCReq, res *RPCRes) error +-} +- +-type rpcCache struct { +- cache Cache +- handlers map[string]RPCMethodHandler +-} +- +-func newRPCCache(cache Cache) RPCCache { +- staticHandler := &StaticMethodHandler{cache: cache} +- debugGetRawReceiptsHandler := &StaticMethodHandler{cache: cache, +- filterGet: func(req *RPCReq) bool { +- // cache only if the request is for a block hash +- +- var p 
[]rpc.BlockNumberOrHash +- err := json.Unmarshal(req.Params, &p) +- if err != nil { +- return false +- } +- if len(p) != 1 { +- return false +- } +- return p[0].BlockHash != nil +- }, +- filterPut: func(req *RPCReq, res *RPCRes) bool { +- // don't cache if response contains 0 receipts +- rawReceipts, ok := res.Result.([]interface{}) +- if !ok { +- return false +- } +- return len(rawReceipts) > 0 +- }, +- } +- handlers := map[string]RPCMethodHandler{ +- "eth_chainId": staticHandler, +- "net_version": staticHandler, +- "eth_getBlockTransactionCountByHash": staticHandler, +- "eth_getUncleCountByBlockHash": staticHandler, +- "eth_getBlockByHash": staticHandler, +- "eth_getTransactionByBlockHashAndIndex": staticHandler, +- "eth_getUncleByBlockHashAndIndex": staticHandler, +- "debug_getRawReceipts": debugGetRawReceiptsHandler, +- } +- return &rpcCache{ +- cache: cache, +- handlers: handlers, +- } +-} +- +-func (c *rpcCache) GetRPC(ctx context.Context, req *RPCReq) (*RPCRes, error) { +- handler := c.handlers[req.Method] +- if handler == nil { +- return nil, nil +- } +- res, err := handler.GetRPCMethod(ctx, req) +- if err != nil { +- RecordCacheError(req.Method) +- return nil, err +- } +- if res == nil { +- RecordCacheMiss(req.Method) +- } else { +- RecordCacheHit(req.Method) +- } +- return res, nil +-} +- +-func (c *rpcCache) PutRPC(ctx context.Context, req *RPCReq, res *RPCRes) error { +- handler := c.handlers[req.Method] +- if handler == nil { +- return nil +- } +- return handler.PutRPCMethod(ctx, req, res) +-}
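The deleted cache.go stacks its layers by composition: an LRU or Redis store satisfies the two-method Cache interface, cacheWithCompression wraps any Cache with snappy, and rpcCache adds the per-method policy on top. Below is a minimal sketch of that decorator layering; the mapCache stand-in and the main function are illustrative, not part of proxyd.

package main

import (
	"context"
	"fmt"

	"github.com/golang/snappy"
)

// Cache mirrors the two-method interface from the deleted proxyd/cache.go.
type Cache interface {
	Get(ctx context.Context, key string) (string, error)
	Put(ctx context.Context, key string, value string) error
}

// mapCache is an illustrative stand-in for the LRU-backed memory cache.
type mapCache map[string]string

func (m mapCache) Get(_ context.Context, key string) (string, error) { return m[key], nil }

func (m mapCache) Put(_ context.Context, key, value string) error { m[key] = value; return nil }

// compressed decorates any Cache with snappy, as cacheWithCompression did.
type compressed struct{ inner Cache }

func (c compressed) Put(ctx context.Context, key, value string) error {
	return c.inner.Put(ctx, key, string(snappy.Encode(nil, []byte(value))))
}

func (c compressed) Get(ctx context.Context, key string) (string, error) {
	enc, err := c.inner.Get(ctx, key)
	if err != nil || enc == "" {
		return "", err
	}
	dec, err := snappy.Decode(nil, []byte(enc))
	return string(dec), err
}

func main() {
	var c Cache = compressed{inner: mapCache{}}
	ctx := context.Background()
	_ = c.Put(ctx, "eth_chainId", `{"jsonrpc":"2.0","result":"0xa4ec"}`)
	v, _ := c.Get(ctx, "eth_chainId")
	fmt.Println(v) // the snappy round-trip is invisible to callers
}

Because the compression layer sits behind the same interface, the memory and Redis stores pick it up without any changes of their own.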
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + (deleted) + +
+
+
+ +
+ +
+ +
+0
+
-213
+ +
+ +
+
+
diff --git OP/proxyd/cache_test.go CELO/proxyd/cache_test.go +deleted file mode 100644 +index 1a5d543227ae99f2f705355af16f467cdf2d307d..0000000000000000000000000000000000000000 +--- OP/proxyd/cache_test.go ++++ /dev/null +@@ -1,213 +0,0 @@ +-package proxyd +- +-import ( +- "context" +- "strconv" +- "testing" +- +- "github.com/stretchr/testify/require" +-) +- +-func TestRPCCacheImmutableRPCs(t *testing.T) { +- ctx := context.Background() +- +- cache := newRPCCache(newMemoryCache()) +- ID := []byte(strconv.Itoa(1)) +- +- rpcs := []struct { +- req *RPCReq +- res *RPCRes +- name string +- }{ +- { +- req: &RPCReq{ +- JSONRPC: "2.0", +- Method: "eth_chainId", +- ID: ID, +- }, +- res: &RPCRes{ +- JSONRPC: "2.0", +- Result: "0xff", +- ID: ID, +- }, +- name: "eth_chainId", +- }, +- { +- req: &RPCReq{ +- JSONRPC: "2.0", +- Method: "net_version", +- ID: ID, +- }, +- res: &RPCRes{ +- JSONRPC: "2.0", +- Result: "9999", +- ID: ID, +- }, +- name: "net_version", +- }, +- { +- req: &RPCReq{ +- JSONRPC: "2.0", +- Method: "eth_getBlockTransactionCountByHash", +- Params: mustMarshalJSON([]string{"0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238"}), +- ID: ID, +- }, +- res: &RPCRes{ +- JSONRPC: "2.0", +- Result: `{"eth_getBlockTransactionCountByHash":"!"}`, +- ID: ID, +- }, +- name: "eth_getBlockTransactionCountByHash", +- }, +- { +- req: &RPCReq{ +- JSONRPC: "2.0", +- Method: "eth_getUncleCountByBlockHash", +- Params: mustMarshalJSON([]string{"0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238"}), +- ID: ID, +- }, +- res: &RPCRes{ +- JSONRPC: "2.0", +- Result: `{"eth_getUncleCountByBlockHash":"!"}`, +- ID: ID, +- }, +- name: "eth_getUncleCountByBlockHash", +- }, +- { +- req: &RPCReq{ +- JSONRPC: "2.0", +- Method: "eth_getBlockByHash", +- Params: mustMarshalJSON([]string{"0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b", "false"}), +- ID: ID, +- }, +- res: &RPCRes{ +- JSONRPC: "2.0", +- Result: `{"eth_getBlockByHash":"!"}`, +- ID: ID, +- }, +- name: "eth_getBlockByHash", +- }, +- { +- req: &RPCReq{ +- JSONRPC: "2.0", +- Method: "eth_getUncleByBlockHashAndIndex", +- Params: mustMarshalJSON([]string{"0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238", "0x90"}), +- ID: ID, +- }, +- res: &RPCRes{ +- JSONRPC: "2.0", +- Result: `{"eth_getUncleByBlockHashAndIndex":"!"}`, +- ID: ID, +- }, +- name: "eth_getUncleByBlockHashAndIndex", +- }, +- { +- req: &RPCReq{ +- JSONRPC: "2.0", +- Method: "debug_getRawReceipts", +- Params: mustMarshalJSON([]string{"0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b"}), +- ID: ID, +- }, +- res: &RPCRes{ +- JSONRPC: "2.0", +- Result: []interface{}{"a"}, +- ID: ID, +- }, +- name: "debug_getRawReceipts", +- }, +- } +- +- for _, rpc := range rpcs { +- t.Run(rpc.name, func(t *testing.T) { +- err := cache.PutRPC(ctx, rpc.req, rpc.res) +- require.NoError(t, err) +- +- cachedRes, err := cache.GetRPC(ctx, rpc.req) +- require.NoError(t, err) +- require.Equal(t, rpc.res, cachedRes) +- }) +- } +-} +- +-func TestRPCCacheUnsupportedMethod(t *testing.T) { +- ctx := context.Background() +- +- cache := newRPCCache(newMemoryCache()) +- ID := []byte(strconv.Itoa(1)) +- +- rpcs := []struct { +- req *RPCReq +- name string +- }{ +- { +- name: "eth_syncing", +- req: &RPCReq{ +- JSONRPC: "2.0", +- Method: "eth_syncing", +- ID: ID, +- }, +- }, +- { +- name: "eth_blockNumber", +- req: &RPCReq{ +- JSONRPC: "2.0", +- Method: "eth_blockNumber", +- ID: ID, +- }, +- }, +- { +- name: "eth_getBlockByNumber", +- req: &RPCReq{ +- 
JSONRPC: "2.0", +- Method: "eth_getBlockByNumber", +- ID: ID, +- }, +- }, +- { +- name: "eth_getBlockRange", +- req: &RPCReq{ +- JSONRPC: "2.0", +- Method: "eth_getBlockRange", +- ID: ID, +- }, +- }, +- { +- name: "eth_gasPrice", +- req: &RPCReq{ +- JSONRPC: "2.0", +- Method: "eth_gasPrice", +- ID: ID, +- }, +- }, +- { +- name: "eth_call", +- req: &RPCReq{ +- JSONRPC: "2.0", +- Method: "eth_call", +- ID: ID, +- }, +- }, +- { +- req: &RPCReq{ +- JSONRPC: "2.0", +- Method: "debug_getRawReceipts", +- Params: mustMarshalJSON([]string{"0x100"}), +- ID: ID, +- }, +- name: "debug_getRawReceipts", +- }, +- } +- +- for _, rpc := range rpcs { +- t.Run(rpc.name, func(t *testing.T) { +- fakeval := mustMarshalJSON([]string{rpc.name}) +- err := cache.PutRPC(ctx, rpc.req, &RPCRes{Result: fakeval}) +- require.NoError(t, err) +- +- cachedRes, err := cache.GetRPC(ctx, rpc.req) +- require.NoError(t, err) +- require.Nil(t, cachedRes) +- }) +- } +- +-}
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + (deleted) + +
+
+
+ +
+ +
+ +
+0
+
-184
+ +
+ +
+
+
diff --git OP/proxyd/config.go CELO/proxyd/config.go +deleted file mode 100644 +index 4719a55f85c1d3fe9ca841a92fff2e8b53bfc59a..0000000000000000000000000000000000000000 +--- OP/proxyd/config.go ++++ /dev/null +@@ -1,184 +0,0 @@ +-package proxyd +- +-import ( +- "fmt" +- "math/big" +- "os" +- "strings" +- "time" +-) +- +-type ServerConfig struct { +- RPCHost string `toml:"rpc_host"` +- RPCPort int `toml:"rpc_port"` +- WSHost string `toml:"ws_host"` +- WSPort int `toml:"ws_port"` +- MaxBodySizeBytes int64 `toml:"max_body_size_bytes"` +- MaxConcurrentRPCs int64 `toml:"max_concurrent_rpcs"` +- LogLevel string `toml:"log_level"` +- +- // TimeoutSeconds specifies the maximum time spent serving an HTTP request. Note that isn't used for websocket connections +- TimeoutSeconds int `toml:"timeout_seconds"` +- +- MaxUpstreamBatchSize int `toml:"max_upstream_batch_size"` +- +- EnableRequestLog bool `toml:"enable_request_log"` +- MaxRequestBodyLogLen int `toml:"max_request_body_log_len"` +- EnablePprof bool `toml:"enable_pprof"` +- EnableXServedByHeader bool `toml:"enable_served_by_header"` +- AllowAllOrigins bool `toml:"allow_all_origins"` +-} +- +-type CacheConfig struct { +- Enabled bool `toml:"enabled"` +- TTL TOMLDuration `toml:"ttl"` +-} +- +-type RedisConfig struct { +- URL string `toml:"url"` +- Namespace string `toml:"namespace"` +-} +- +-type MetricsConfig struct { +- Enabled bool `toml:"enabled"` +- Host string `toml:"host"` +- Port int `toml:"port"` +-} +- +-type RateLimitConfig struct { +- UseRedis bool `toml:"use_redis"` +- BaseRate int `toml:"base_rate"` +- BaseInterval TOMLDuration `toml:"base_interval"` +- ExemptOrigins []string `toml:"exempt_origins"` +- ExemptUserAgents []string `toml:"exempt_user_agents"` +- ErrorMessage string `toml:"error_message"` +- MethodOverrides map[string]*RateLimitMethodOverride `toml:"method_overrides"` +- IPHeaderOverride string `toml:"ip_header_override"` +-} +- +-type RateLimitMethodOverride struct { +- Limit int `toml:"limit"` +- Interval TOMLDuration `toml:"interval"` +- Global bool `toml:"global"` +-} +- +-type TOMLDuration time.Duration +- +-func (t *TOMLDuration) UnmarshalText(b []byte) error { +- d, err := time.ParseDuration(string(b)) +- if err != nil { +- return err +- } +- +- *t = TOMLDuration(d) +- return nil +-} +- +-type BackendOptions struct { +- ResponseTimeoutSeconds int `toml:"response_timeout_seconds"` +- MaxResponseSizeBytes int64 `toml:"max_response_size_bytes"` +- MaxRetries int `toml:"max_retries"` +- OutOfServiceSeconds int `toml:"out_of_service_seconds"` +- MaxDegradedLatencyThreshold TOMLDuration `toml:"max_degraded_latency_threshold"` +- MaxLatencyThreshold TOMLDuration `toml:"max_latency_threshold"` +- MaxErrorRateThreshold float64 `toml:"max_error_rate_threshold"` +-} +- +-type BackendConfig struct { +- Username string `toml:"username"` +- Password string `toml:"password"` +- RPCURL string `toml:"rpc_url"` +- WSURL string `toml:"ws_url"` +- WSPort int `toml:"ws_port"` +- MaxRPS int `toml:"max_rps"` +- MaxWSConns int `toml:"max_ws_conns"` +- CAFile string `toml:"ca_file"` +- ClientCertFile string `toml:"client_cert_file"` +- ClientKeyFile string `toml:"client_key_file"` +- StripTrailingXFF bool `toml:"strip_trailing_xff"` +- Headers map[string]string `toml:"headers"` +- +- Weight int `toml:"weight"` +- +- ConsensusSkipPeerCountCheck bool `toml:"consensus_skip_peer_count"` +- ConsensusForcedCandidate bool `toml:"consensus_forced_candidate"` +- ConsensusReceiptsTarget string `toml:"consensus_receipts_target"` +-} +- +-type 
BackendsConfig map[string]*BackendConfig +- +-type BackendGroupConfig struct { +- Backends []string `toml:"backends"` +- +- WeightedRouting bool `toml:"weighted_routing"` +- +- ConsensusAware bool `toml:"consensus_aware"` +- ConsensusAsyncHandler string `toml:"consensus_handler"` +- ConsensusPollerInterval TOMLDuration `toml:"consensus_poller_interval"` +- +- ConsensusBanPeriod TOMLDuration `toml:"consensus_ban_period"` +- ConsensusMaxUpdateThreshold TOMLDuration `toml:"consensus_max_update_threshold"` +- ConsensusMaxBlockLag uint64 `toml:"consensus_max_block_lag"` +- ConsensusMaxBlockRange uint64 `toml:"consensus_max_block_range"` +- ConsensusMinPeerCount int `toml:"consensus_min_peer_count"` +- +- ConsensusHA bool `toml:"consensus_ha"` +- ConsensusHAHeartbeatInterval TOMLDuration `toml:"consensus_ha_heartbeat_interval"` +- ConsensusHALockPeriod TOMLDuration `toml:"consensus_ha_lock_period"` +- ConsensusHARedis RedisConfig `toml:"consensus_ha_redis"` +- +- Fallbacks []string `toml:"fallbacks"` +-} +- +-type BackendGroupsConfig map[string]*BackendGroupConfig +- +-type MethodMappingsConfig map[string]string +- +-type BatchConfig struct { +- MaxSize int `toml:"max_size"` +- ErrorMessage string `toml:"error_message"` +-} +- +-// SenderRateLimitConfig configures the sender-based rate limiter +-// for eth_sendRawTransaction requests. +-// To enable pre-eip155 transactions, add '0' to allowed_chain_ids. +-type SenderRateLimitConfig struct { +- Enabled bool +- Interval TOMLDuration +- Limit int +- AllowedChainIds []*big.Int `toml:"allowed_chain_ids"` +-} +- +-type Config struct { +- WSBackendGroup string `toml:"ws_backend_group"` +- Server ServerConfig `toml:"server"` +- Cache CacheConfig `toml:"cache"` +- Redis RedisConfig `toml:"redis"` +- Metrics MetricsConfig `toml:"metrics"` +- RateLimit RateLimitConfig `toml:"rate_limit"` +- BackendOptions BackendOptions `toml:"backend"` +- Backends BackendsConfig `toml:"backends"` +- BatchConfig BatchConfig `toml:"batch"` +- Authentication map[string]string `toml:"authentication"` +- BackendGroups BackendGroupsConfig `toml:"backend_groups"` +- RPCMethodMappings map[string]string `toml:"rpc_method_mappings"` +- WSMethodWhitelist []string `toml:"ws_method_whitelist"` +- WhitelistErrorMessage string `toml:"whitelist_error_message"` +- SenderRateLimit SenderRateLimitConfig `toml:"sender_rate_limit"` +-} +- +-func ReadFromEnvOrConfig(value string) (string, error) { +- if strings.HasPrefix(value, "$") { +- envValue := os.Getenv(strings.TrimPrefix(value, "$")) +- if envValue == "" { +- return "", fmt.Errorf("config env var %s not found", value) +- } +- return envValue, nil +- } +- +- if strings.HasPrefix(value, "\\") { +- return strings.TrimPrefix(value, "\\"), nil +- } +- +- return value, nil +-}
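Two conventions in the deleted config.go carry most of the ergonomics: TOMLDuration lets durations be written as human-readable strings, and ReadFromEnvOrConfig lets any sensitive value be written as a "$VAR" reference resolved from the environment. A runnable sketch of both; the cacheConfig subset and the lower-case helper are illustrative copies, with the backslash-escape branch folded into a single TrimPrefix.

package main

import (
	"fmt"
	"os"
	"strings"
	"time"

	"github.com/BurntSushi/toml"
)

// TOMLDuration mirrors the deleted type: durations are written as strings
// such as "30s" or "5m" and parsed with time.ParseDuration.
type TOMLDuration time.Duration

func (t *TOMLDuration) UnmarshalText(b []byte) error {
	d, err := time.ParseDuration(string(b))
	if err != nil {
		return err
	}
	*t = TOMLDuration(d)
	return nil
}

type cacheConfig struct {
	Enabled bool         `toml:"enabled"`
	TTL     TOMLDuration `toml:"ttl"`
}

// readFromEnvOrConfig mirrors the deleted helper: "$NAME" resolves from the
// environment, a leading backslash escapes a literal dollar sign, and the
// TrimPrefix is a no-op for plain values.
func readFromEnvOrConfig(value string) (string, error) {
	if strings.HasPrefix(value, "$") {
		if env := os.Getenv(strings.TrimPrefix(value, "$")); env != "" {
			return env, nil
		}
		return "", fmt.Errorf("config env var %s not found", value)
	}
	return strings.TrimPrefix(value, "\\"), nil
}

func main() {
	var cfg cacheConfig
	if _, err := toml.Decode("enabled = true\nttl = \"90s\"", &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Enabled, time.Duration(cfg.TTL)) // true 1m30s

	os.Setenv("RPC_URL", "https://example.invalid")
	url, _ := readFromEnvOrConfig("$RPC_URL")
	fmt.Println(url)
}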
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + (deleted) + +
+
+
+ +
+ +
+ +
+0
+
-746
+ +
+ +
+
+
diff --git OP/proxyd/consensus_poller.go CELO/proxyd/consensus_poller.go +deleted file mode 100644 +index 90af41db7067c106c812a92f3510c44b55ebdd80..0000000000000000000000000000000000000000 +--- OP/proxyd/consensus_poller.go ++++ /dev/null +@@ -1,746 +0,0 @@ +-package proxyd +- +-import ( +- "context" +- "fmt" +- "strconv" +- "strings" +- "sync" +- "time" +- +- "github.com/ethereum/go-ethereum/common/hexutil" +- +- "github.com/ethereum/go-ethereum/log" +-) +- +-const ( +- DefaultPollerInterval = 1 * time.Second +-) +- +-type OnConsensusBroken func() +- +-// ConsensusPoller checks the consensus state for each member of a BackendGroup +-// resolves the highest common block for multiple nodes, and reconciles the consensus +-// in case of block hash divergence to minimize re-orgs +-type ConsensusPoller struct { +- ctx context.Context +- cancelFunc context.CancelFunc +- listeners []OnConsensusBroken +- +- backendGroup *BackendGroup +- backendState map[*Backend]*backendState +- consensusGroupMux sync.Mutex +- consensusGroup []*Backend +- +- tracker ConsensusTracker +- asyncHandler ConsensusAsyncHandler +- +- minPeerCount uint64 +- banPeriod time.Duration +- maxUpdateThreshold time.Duration +- maxBlockLag uint64 +- maxBlockRange uint64 +- interval time.Duration +-} +- +-type backendState struct { +- backendStateMux sync.Mutex +- +- latestBlockNumber hexutil.Uint64 +- latestBlockHash string +- safeBlockNumber hexutil.Uint64 +- finalizedBlockNumber hexutil.Uint64 +- +- peerCount uint64 +- inSync bool +- +- lastUpdate time.Time +- +- bannedUntil time.Time +-} +- +-func (bs *backendState) IsBanned() bool { +- return time.Now().Before(bs.bannedUntil) +-} +- +-// GetConsensusGroup returns the backend members that are agreeing in a consensus +-func (cp *ConsensusPoller) GetConsensusGroup() []*Backend { +- defer cp.consensusGroupMux.Unlock() +- cp.consensusGroupMux.Lock() +- +- g := make([]*Backend, len(cp.consensusGroup)) +- copy(g, cp.consensusGroup) +- +- return g +-} +- +-// GetLatestBlockNumber returns the `latest` agreed block number in a consensus +-func (ct *ConsensusPoller) GetLatestBlockNumber() hexutil.Uint64 { +- return ct.tracker.GetLatestBlockNumber() +-} +- +-// GetSafeBlockNumber returns the `safe` agreed block number in a consensus +-func (ct *ConsensusPoller) GetSafeBlockNumber() hexutil.Uint64 { +- return ct.tracker.GetSafeBlockNumber() +-} +- +-// GetFinalizedBlockNumber returns the `finalized` agreed block number in a consensus +-func (ct *ConsensusPoller) GetFinalizedBlockNumber() hexutil.Uint64 { +- return ct.tracker.GetFinalizedBlockNumber() +-} +- +-func (cp *ConsensusPoller) Shutdown() { +- cp.asyncHandler.Shutdown() +-} +- +-// ConsensusAsyncHandler controls the asynchronous polling mechanism, interval and shutdown +-type ConsensusAsyncHandler interface { +- Init() +- Shutdown() +-} +- +-// NoopAsyncHandler allows fine control updating the consensus +-type NoopAsyncHandler struct{} +- +-func NewNoopAsyncHandler() ConsensusAsyncHandler { +- log.Warn("using NewNoopAsyncHandler") +- return &NoopAsyncHandler{} +-} +-func (ah *NoopAsyncHandler) Init() {} +-func (ah *NoopAsyncHandler) Shutdown() {} +- +-// PollerAsyncHandler asynchronously updates each individual backend and the group consensus +-type PollerAsyncHandler struct { +- ctx context.Context +- cp *ConsensusPoller +-} +- +-func NewPollerAsyncHandler(ctx context.Context, cp *ConsensusPoller) ConsensusAsyncHandler { +- return &PollerAsyncHandler{ +- ctx: ctx, +- cp: cp, +- } +-} +-func (ah *PollerAsyncHandler) Init() { +- // 
create the individual backend pollers. +- log.Info("total number of primary candidates", "primaries", len(ah.cp.backendGroup.Primaries())) +- log.Info("total number of fallback candidates", "fallbacks", len(ah.cp.backendGroup.Fallbacks())) +- +- for _, be := range ah.cp.backendGroup.Primaries() { +- go func(be *Backend) { +- for { +- timer := time.NewTimer(ah.cp.interval) +- ah.cp.UpdateBackend(ah.ctx, be) +- select { +- case <-timer.C: +- case <-ah.ctx.Done(): +- timer.Stop() +- return +- } +- } +- }(be) +- } +- +- for _, be := range ah.cp.backendGroup.Fallbacks() { +- go func(be *Backend) { +- for { +- timer := time.NewTimer(ah.cp.interval) +- +- healthyCandidates := ah.cp.FilterCandidates(ah.cp.backendGroup.Primaries()) +- +- log.Info("number of healthy primary candidates", "healthy_candidates", len(healthyCandidates)) +- if len(healthyCandidates) == 0 { +- log.Debug("zero healthy candidates, querying fallback backend", +- "backend_name", be.Name) +- ah.cp.UpdateBackend(ah.ctx, be) +- } +- +- select { +- case <-timer.C: +- case <-ah.ctx.Done(): +- timer.Stop() +- return +- } +- } +- }(be) +- } +- +- // create the group consensus poller +- go func() { +- for { +- timer := time.NewTimer(ah.cp.interval) +- log.Info("updating backend group consensus") +- ah.cp.UpdateBackendGroupConsensus(ah.ctx) +- +- select { +- case <-timer.C: +- case <-ah.ctx.Done(): +- timer.Stop() +- return +- } +- } +- }() +-} +-func (ah *PollerAsyncHandler) Shutdown() { +- ah.cp.cancelFunc() +-} +- +-type ConsensusOpt func(cp *ConsensusPoller) +- +-func WithTracker(tracker ConsensusTracker) ConsensusOpt { +- return func(cp *ConsensusPoller) { +- cp.tracker = tracker +- } +-} +- +-func WithAsyncHandler(asyncHandler ConsensusAsyncHandler) ConsensusOpt { +- return func(cp *ConsensusPoller) { +- cp.asyncHandler = asyncHandler +- } +-} +- +-func WithListener(listener OnConsensusBroken) ConsensusOpt { +- return func(cp *ConsensusPoller) { +- cp.AddListener(listener) +- } +-} +- +-func (cp *ConsensusPoller) AddListener(listener OnConsensusBroken) { +- cp.listeners = append(cp.listeners, listener) +-} +- +-func (cp *ConsensusPoller) ClearListeners() { +- cp.listeners = []OnConsensusBroken{} +-} +- +-func WithBanPeriod(banPeriod time.Duration) ConsensusOpt { +- return func(cp *ConsensusPoller) { +- cp.banPeriod = banPeriod +- } +-} +- +-func WithMaxUpdateThreshold(maxUpdateThreshold time.Duration) ConsensusOpt { +- return func(cp *ConsensusPoller) { +- cp.maxUpdateThreshold = maxUpdateThreshold +- } +-} +- +-func WithMaxBlockLag(maxBlockLag uint64) ConsensusOpt { +- return func(cp *ConsensusPoller) { +- cp.maxBlockLag = maxBlockLag +- } +-} +- +-func WithMaxBlockRange(maxBlockRange uint64) ConsensusOpt { +- return func(cp *ConsensusPoller) { +- cp.maxBlockRange = maxBlockRange +- } +-} +- +-func WithMinPeerCount(minPeerCount uint64) ConsensusOpt { +- return func(cp *ConsensusPoller) { +- cp.minPeerCount = minPeerCount +- } +-} +- +-func WithPollerInterval(interval time.Duration) ConsensusOpt { +- return func(cp *ConsensusPoller) { +- cp.interval = interval +- } +-} +- +-func NewConsensusPoller(bg *BackendGroup, opts ...ConsensusOpt) *ConsensusPoller { +- ctx, cancelFunc := context.WithCancel(context.Background()) +- +- state := make(map[*Backend]*backendState, len(bg.Backends)) +- +- cp := &ConsensusPoller{ +- ctx: ctx, +- cancelFunc: cancelFunc, +- backendGroup: bg, +- backendState: state, +- +- banPeriod: 5 * time.Minute, +- maxUpdateThreshold: 30 * time.Second, +- maxBlockLag: 8, // 8*12 seconds = 96 seconds ~ 1.6 minutes 
+- minPeerCount: 3, +- interval: DefaultPollerInterval, +- } +- +- for _, opt := range opts { +- opt(cp) +- } +- +- if cp.tracker == nil { +- cp.tracker = NewInMemoryConsensusTracker() +- } +- +- if cp.asyncHandler == nil { +- cp.asyncHandler = NewPollerAsyncHandler(ctx, cp) +- } +- +- cp.Reset() +- cp.asyncHandler.Init() +- +- return cp +-} +- +-// UpdateBackend refreshes the consensus state of a single backend +-func (cp *ConsensusPoller) UpdateBackend(ctx context.Context, be *Backend) { +- bs := cp.getBackendState(be) +- RecordConsensusBackendBanned(be, bs.IsBanned()) +- +- if bs.IsBanned() { +- log.Debug("skipping backend - banned", "backend", be.Name) +- return +- } +- +- // if backend is not healthy state we'll only resume checking it after ban +- if !be.IsHealthy() && !be.forcedCandidate { +- log.Warn("backend banned - not healthy", "backend", be.Name) +- cp.Ban(be) +- return +- } +- +- inSync, err := cp.isInSync(ctx, be) +- RecordConsensusBackendInSync(be, err == nil && inSync) +- if err != nil { +- log.Warn("error updating backend sync state", "name", be.Name, "err", err) +- } +- +- var peerCount uint64 +- if !be.skipPeerCountCheck { +- peerCount, err = cp.getPeerCount(ctx, be) +- if err != nil { +- log.Warn("error updating backend peer count", "name", be.Name, "err", err) +- } +- RecordConsensusBackendPeerCount(be, peerCount) +- } +- +- latestBlockNumber, latestBlockHash, err := cp.fetchBlock(ctx, be, "latest") +- if err != nil { +- log.Warn("error updating backend - latest block", "name", be.Name, "err", err) +- } +- +- safeBlockNumber, _, err := cp.fetchBlock(ctx, be, "safe") +- if err != nil { +- log.Warn("error updating backend - safe block", "name", be.Name, "err", err) +- } +- +- finalizedBlockNumber, _, err := cp.fetchBlock(ctx, be, "finalized") +- if err != nil { +- log.Warn("error updating backend - finalized block", "name", be.Name, "err", err) +- } +- +- RecordConsensusBackendUpdateDelay(be, bs.lastUpdate) +- +- changed := cp.setBackendState(be, peerCount, inSync, +- latestBlockNumber, latestBlockHash, +- safeBlockNumber, finalizedBlockNumber) +- +- RecordBackendLatestBlock(be, latestBlockNumber) +- RecordBackendSafeBlock(be, safeBlockNumber) +- RecordBackendFinalizedBlock(be, finalizedBlockNumber) +- +- if changed { +- log.Debug("backend state updated", +- "name", be.Name, +- "peerCount", peerCount, +- "inSync", inSync, +- "latestBlockNumber", latestBlockNumber, +- "latestBlockHash", latestBlockHash, +- "safeBlockNumber", safeBlockNumber, +- "finalizedBlockNumber", finalizedBlockNumber, +- "lastUpdate", bs.lastUpdate) +- } +- +- // sanity check for latest, safe and finalized block tags +- expectedBlockTags := cp.checkExpectedBlockTags( +- latestBlockNumber, +- bs.safeBlockNumber, safeBlockNumber, +- bs.finalizedBlockNumber, finalizedBlockNumber) +- +- RecordBackendUnexpectedBlockTags(be, !expectedBlockTags) +- +- if !expectedBlockTags && !be.forcedCandidate { +- log.Warn("backend banned - unexpected block tags", +- "backend", be.Name, +- "oldFinalized", bs.finalizedBlockNumber, +- "finalizedBlockNumber", finalizedBlockNumber, +- "oldSafe", bs.safeBlockNumber, +- "safeBlockNumber", safeBlockNumber, +- "latestBlockNumber", latestBlockNumber, +- ) +- cp.Ban(be) +- } +-} +- +-// checkExpectedBlockTags for unexpected conditions on block tags +-// - finalized block number should never decrease +-// - safe block number should never decrease +-// - finalized block should be <= safe block <= latest block +-func (cp *ConsensusPoller) checkExpectedBlockTags( +- currentLatest 
hexutil.Uint64, +- oldSafe hexutil.Uint64, currentSafe hexutil.Uint64, +- oldFinalized hexutil.Uint64, currentFinalized hexutil.Uint64) bool { +- return currentFinalized >= oldFinalized && +- currentSafe >= oldSafe && +- currentFinalized <= currentSafe && +- currentSafe <= currentLatest +-} +- +-// UpdateBackendGroupConsensus resolves the current group consensus based on the state of the backends +-func (cp *ConsensusPoller) UpdateBackendGroupConsensus(ctx context.Context) { +- // get the latest block number from the tracker +- currentConsensusBlockNumber := cp.GetLatestBlockNumber() +- +- // get the candidates for the consensus group +- candidates := cp.getConsensusCandidates() +- +- // update the lowest latest block number and hash +- // the lowest safe block number +- // the lowest finalized block number +- var lowestLatestBlock hexutil.Uint64 +- var lowestLatestBlockHash string +- var lowestFinalizedBlock hexutil.Uint64 +- var lowestSafeBlock hexutil.Uint64 +- for _, bs := range candidates { +- if lowestLatestBlock == 0 || bs.latestBlockNumber < lowestLatestBlock { +- lowestLatestBlock = bs.latestBlockNumber +- lowestLatestBlockHash = bs.latestBlockHash +- } +- if lowestFinalizedBlock == 0 || bs.finalizedBlockNumber < lowestFinalizedBlock { +- lowestFinalizedBlock = bs.finalizedBlockNumber +- } +- if lowestSafeBlock == 0 || bs.safeBlockNumber < lowestSafeBlock { +- lowestSafeBlock = bs.safeBlockNumber +- } +- } +- +- // find the proposed block among the candidates +- // the proposed block needs have the same hash in the entire consensus group +- proposedBlock := lowestLatestBlock +- proposedBlockHash := lowestLatestBlockHash +- hasConsensus := false +- broken := false +- +- if lowestLatestBlock > currentConsensusBlockNumber { +- log.Debug("validating consensus on block", "lowestLatestBlock", lowestLatestBlock) +- } +- +- // if there is a block to propose, check if it is the same in all backends +- if proposedBlock > 0 { +- for !hasConsensus { +- allAgreed := true +- for be := range candidates { +- actualBlockNumber, actualBlockHash, err := cp.fetchBlock(ctx, be, proposedBlock.String()) +- if err != nil { +- log.Warn("error updating backend", "name", be.Name, "err", err) +- continue +- } +- if proposedBlockHash == "" { +- proposedBlockHash = actualBlockHash +- } +- blocksDontMatch := (actualBlockNumber != proposedBlock) || (actualBlockHash != proposedBlockHash) +- if blocksDontMatch { +- if currentConsensusBlockNumber >= actualBlockNumber { +- log.Warn("backend broke consensus", +- "name", be.Name, +- "actualBlockNumber", actualBlockNumber, +- "actualBlockHash", actualBlockHash, +- "proposedBlock", proposedBlock, +- "proposedBlockHash", proposedBlockHash) +- broken = true +- } +- allAgreed = false +- break +- } +- } +- if allAgreed { +- hasConsensus = true +- } else { +- // walk one block behind and try again +- proposedBlock -= 1 +- proposedBlockHash = "" +- log.Debug("no consensus, now trying", "block:", proposedBlock) +- } +- } +- } +- +- if broken { +- // propagate event to other interested parts, such as cache invalidator +- for _, l := range cp.listeners { +- l() +- } +- log.Info("consensus broken", +- "currentConsensusBlockNumber", currentConsensusBlockNumber, +- "proposedBlock", proposedBlock, +- "proposedBlockHash", proposedBlockHash) +- } +- +- // update tracker +- cp.tracker.SetLatestBlockNumber(proposedBlock) +- cp.tracker.SetSafeBlockNumber(lowestSafeBlock) +- cp.tracker.SetFinalizedBlockNumber(lowestFinalizedBlock) +- +- // update consensus group +- group := 
make([]*Backend, 0, len(candidates)) +- consensusBackendsNames := make([]string, 0, len(candidates)) +- filteredBackendsNames := make([]string, 0, len(cp.backendGroup.Backends)) +- for _, be := range cp.backendGroup.Backends { +- _, exist := candidates[be] +- if exist { +- group = append(group, be) +- consensusBackendsNames = append(consensusBackendsNames, be.Name) +- } else { +- filteredBackendsNames = append(filteredBackendsNames, be.Name) +- } +- } +- +- cp.consensusGroupMux.Lock() +- cp.consensusGroup = group +- cp.consensusGroupMux.Unlock() +- +- RecordGroupConsensusLatestBlock(cp.backendGroup, proposedBlock) +- RecordGroupConsensusSafeBlock(cp.backendGroup, lowestSafeBlock) +- RecordGroupConsensusFinalizedBlock(cp.backendGroup, lowestFinalizedBlock) +- +- RecordGroupConsensusCount(cp.backendGroup, len(group)) +- RecordGroupConsensusFilteredCount(cp.backendGroup, len(filteredBackendsNames)) +- RecordGroupTotalCount(cp.backendGroup, len(cp.backendGroup.Backends)) +- +- log.Debug("group state", +- "proposedBlock", proposedBlock, +- "consensusBackends", strings.Join(consensusBackendsNames, ", "), +- "filteredBackends", strings.Join(filteredBackendsNames, ", ")) +-} +- +-// IsBanned checks if a specific backend is banned +-func (cp *ConsensusPoller) IsBanned(be *Backend) bool { +- bs := cp.backendState[be] +- defer bs.backendStateMux.Unlock() +- bs.backendStateMux.Lock() +- return bs.IsBanned() +-} +- +-// Ban bans a specific backend +-func (cp *ConsensusPoller) Ban(be *Backend) { +- if be.forcedCandidate { +- return +- } +- +- bs := cp.backendState[be] +- defer bs.backendStateMux.Unlock() +- bs.backendStateMux.Lock() +- bs.bannedUntil = time.Now().Add(cp.banPeriod) +- +- // when we ban a node, we give it the chance to start from any block when it is back +- bs.latestBlockNumber = 0 +- bs.safeBlockNumber = 0 +- bs.finalizedBlockNumber = 0 +-} +- +-// Unban removes any bans from the backends +-func (cp *ConsensusPoller) Unban(be *Backend) { +- bs := cp.backendState[be] +- defer bs.backendStateMux.Unlock() +- bs.backendStateMux.Lock() +- bs.bannedUntil = time.Now().Add(-10 * time.Hour) +-} +- +-// Reset reset all backend states +-func (cp *ConsensusPoller) Reset() { +- for _, be := range cp.backendGroup.Backends { +- cp.backendState[be] = &backendState{} +- } +-} +- +-// fetchBlock is a convenient wrapper to make a request to get a block directly from the backend +-func (cp *ConsensusPoller) fetchBlock(ctx context.Context, be *Backend, block string) (blockNumber hexutil.Uint64, blockHash string, err error) { +- var rpcRes RPCRes +- err = be.ForwardRPC(ctx, &rpcRes, "67", "eth_getBlockByNumber", block, false) +- if err != nil { +- return 0, "", err +- } +- +- jsonMap, ok := rpcRes.Result.(map[string]interface{}) +- if !ok { +- return 0, "", fmt.Errorf("unexpected response to eth_getBlockByNumber on backend %s", be.Name) +- } +- blockNumber = hexutil.Uint64(hexutil.MustDecodeUint64(jsonMap["number"].(string))) +- blockHash = jsonMap["hash"].(string) +- +- return +-} +- +-// getPeerCount is a convenient wrapper to retrieve the current peer count from the backend +-func (cp *ConsensusPoller) getPeerCount(ctx context.Context, be *Backend) (count uint64, err error) { +- var rpcRes RPCRes +- err = be.ForwardRPC(ctx, &rpcRes, "67", "net_peerCount") +- if err != nil { +- return 0, err +- } +- +- jsonMap, ok := rpcRes.Result.(string) +- if !ok { +- return 0, fmt.Errorf("unexpected response to net_peerCount on backend %s", be.Name) +- } +- +- count = hexutil.MustDecodeUint64(jsonMap) +- +- return 
count, nil +-} +- +-// isInSync is a convenient wrapper to check if the backend is in sync from the network +-func (cp *ConsensusPoller) isInSync(ctx context.Context, be *Backend) (result bool, err error) { +- var rpcRes RPCRes +- err = be.ForwardRPC(ctx, &rpcRes, "67", "eth_syncing") +- if err != nil { +- return false, err +- } +- +- var res bool +- switch typed := rpcRes.Result.(type) { +- case bool: +- syncing := typed +- res = !syncing +- case string: +- syncing, err := strconv.ParseBool(typed) +- if err != nil { +- return false, err +- } +- res = !syncing +- default: +- // result is a json when not in sync +- res = false +- } +- +- return res, nil +-} +- +-// getBackendState creates a copy of backend state so that the caller can use it without locking +-func (cp *ConsensusPoller) getBackendState(be *Backend) *backendState { +- bs := cp.backendState[be] +- defer bs.backendStateMux.Unlock() +- bs.backendStateMux.Lock() +- +- return &backendState{ +- latestBlockNumber: bs.latestBlockNumber, +- latestBlockHash: bs.latestBlockHash, +- safeBlockNumber: bs.safeBlockNumber, +- finalizedBlockNumber: bs.finalizedBlockNumber, +- peerCount: bs.peerCount, +- inSync: bs.inSync, +- lastUpdate: bs.lastUpdate, +- bannedUntil: bs.bannedUntil, +- } +-} +- +-func (cp *ConsensusPoller) GetLastUpdate(be *Backend) time.Time { +- bs := cp.backendState[be] +- defer bs.backendStateMux.Unlock() +- bs.backendStateMux.Lock() +- return bs.lastUpdate +-} +- +-func (cp *ConsensusPoller) setBackendState(be *Backend, peerCount uint64, inSync bool, +- latestBlockNumber hexutil.Uint64, latestBlockHash string, +- safeBlockNumber hexutil.Uint64, +- finalizedBlockNumber hexutil.Uint64) bool { +- bs := cp.backendState[be] +- bs.backendStateMux.Lock() +- changed := bs.latestBlockHash != latestBlockHash +- bs.peerCount = peerCount +- bs.inSync = inSync +- bs.latestBlockNumber = latestBlockNumber +- bs.latestBlockHash = latestBlockHash +- bs.finalizedBlockNumber = finalizedBlockNumber +- bs.safeBlockNumber = safeBlockNumber +- bs.lastUpdate = time.Now() +- bs.backendStateMux.Unlock() +- return changed +-} +- +-// getConsensusCandidates will search for candidates in the primary group, +-// if there are none it will search for candidates in he fallback group +-func (cp *ConsensusPoller) getConsensusCandidates() map[*Backend]*backendState { +- +- healthyPrimaries := cp.FilterCandidates(cp.backendGroup.Primaries()) +- +- RecordHealthyCandidates(cp.backendGroup, len(healthyPrimaries)) +- if len(healthyPrimaries) > 0 { +- return healthyPrimaries +- } +- +- return cp.FilterCandidates(cp.backendGroup.Fallbacks()) +-} +- +-// filterCandidates find out what backends are the candidates to be in the consensus group +-// and create a copy of current their state +-// +-// a candidate is a serving node within the following conditions: +-// - not banned +-// - healthy (network latency and error rate) +-// - with minimum peer count +-// - in sync +-// - updated recently +-// - not lagging latest block +-func (cp *ConsensusPoller) FilterCandidates(backends []*Backend) map[*Backend]*backendState { +- +- candidates := make(map[*Backend]*backendState, len(cp.backendGroup.Backends)) +- +- for _, be := range backends { +- +- bs := cp.getBackendState(be) +- if be.forcedCandidate { +- candidates[be] = bs +- continue +- } +- if bs.IsBanned() { +- continue +- } +- if !be.IsHealthy() { +- continue +- } +- if !be.skipPeerCountCheck && bs.peerCount < cp.minPeerCount { +- log.Debug("backend peer count too low for inclusion in consensus", +- "backend_name", 
be.Name, +- "peer_count", bs.peerCount, +- "min_peer_count", cp.minPeerCount, +- ) +- continue +- } +- if !bs.inSync { +- continue +- } +- if bs.lastUpdate.Add(cp.maxUpdateThreshold).Before(time.Now()) { +- continue +- } +- +- candidates[be] = bs +- } +- +- // find the highest block, in order to use it defining the highest non-lagging ancestor block +- var highestLatestBlock hexutil.Uint64 +- for _, bs := range candidates { +- if bs.latestBlockNumber > highestLatestBlock { +- highestLatestBlock = bs.latestBlockNumber +- } +- } +- +- // find the highest common ancestor block +- lagging := make([]*Backend, 0, len(candidates)) +- for be, bs := range candidates { +- // check if backend is lagging behind the highest block +- if uint64(highestLatestBlock-bs.latestBlockNumber) > cp.maxBlockLag { +- lagging = append(lagging, be) +- } +- } +- +- // remove lagging backends from the candidates +- for _, be := range lagging { +- delete(candidates, be) +- } +- +- return candidates +-}
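The heart of UpdateBackendGroupConsensus is its walk-back loop: propose the lowest "latest" block among the candidates, then step back one block at a time until every candidate reports the same hash for that height, which minimizes serving re-orged data. Distilled into a standalone sketch; node and findConsensusBlock are illustrative stand-ins for Backend and the inline loop.

package main

import "fmt"

// node stands in for a Backend: it maps a block height to the hash that
// backend reports (in proxyd this is an eth_getBlockByNumber call).
type node map[uint64]string

// findConsensusBlock walks back from the starting height until all
// candidates agree on the hash at some height.
func findConsensusBlock(candidates []node, start uint64) (uint64, string) {
	for b := start; b > 0; b-- {
		hash, agreed := "", true
		for _, n := range candidates {
			h, ok := n[b]
			if !ok {
				agreed = false // this candidate cannot serve the height
				break
			}
			if hash == "" {
				hash = h
			}
			if h != hash {
				agreed = false
				break
			}
		}
		if agreed {
			return b, hash
		}
	}
	return 0, ""
}

func main() {
	a := node{100: "0xaa", 101: "0xb1"} // the two nodes diverge at 101
	b := node{100: "0xaa", 101: "0xb2"}
	fmt.Println(findConsensusBlock([]node{a, b}, 101)) // 100 0xaa
}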
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + (deleted) + +
+
+
+ +
+ +
+ +
+0
+
-356
+ +
+ +
+
+
diff --git OP/proxyd/consensus_tracker.go CELO/proxyd/consensus_tracker.go +deleted file mode 100644 +index 77e0fdba99125b1686de6cd83162be6cedfa7936..0000000000000000000000000000000000000000 +--- OP/proxyd/consensus_tracker.go ++++ /dev/null +@@ -1,356 +0,0 @@ +-package proxyd +- +-import ( +- "context" +- "encoding/json" +- "fmt" +- "os" +- "sync" +- "time" +- +- "github.com/ethereum/go-ethereum/common/hexutil" +- "github.com/ethereum/go-ethereum/log" +- "github.com/go-redsync/redsync/v4" +- "github.com/go-redsync/redsync/v4/redis/goredis/v9" +- "github.com/redis/go-redis/v9" +-) +- +-// ConsensusTracker abstracts how we store and retrieve the current consensus +-// allowing it to be stored locally in-memory or in a shared Redis cluster +-type ConsensusTracker interface { +- GetLatestBlockNumber() hexutil.Uint64 +- SetLatestBlockNumber(blockNumber hexutil.Uint64) +- GetSafeBlockNumber() hexutil.Uint64 +- SetSafeBlockNumber(blockNumber hexutil.Uint64) +- GetFinalizedBlockNumber() hexutil.Uint64 +- SetFinalizedBlockNumber(blockNumber hexutil.Uint64) +-} +- +-// DTO to hold the current consensus state +-type ConsensusTrackerState struct { +- Latest hexutil.Uint64 `json:"latest"` +- Safe hexutil.Uint64 `json:"safe"` +- Finalized hexutil.Uint64 `json:"finalized"` +-} +- +-func (ct *InMemoryConsensusTracker) update(o *ConsensusTrackerState) { +- ct.mutex.Lock() +- defer ct.mutex.Unlock() +- +- ct.state.Latest = o.Latest +- ct.state.Safe = o.Safe +- ct.state.Finalized = o.Finalized +-} +- +-// InMemoryConsensusTracker store and retrieve in memory, async-safe +-type InMemoryConsensusTracker struct { +- mutex sync.Mutex +- state *ConsensusTrackerState +-} +- +-func NewInMemoryConsensusTracker() ConsensusTracker { +- return &InMemoryConsensusTracker{ +- mutex: sync.Mutex{}, +- state: &ConsensusTrackerState{}, +- } +-} +- +-func (ct *InMemoryConsensusTracker) Valid() bool { +- return ct.GetLatestBlockNumber() > 0 && +- ct.GetSafeBlockNumber() > 0 && +- ct.GetFinalizedBlockNumber() > 0 +-} +- +-func (ct *InMemoryConsensusTracker) Behind(other *InMemoryConsensusTracker) bool { +- return ct.GetLatestBlockNumber() < other.GetLatestBlockNumber() || +- ct.GetSafeBlockNumber() < other.GetSafeBlockNumber() || +- ct.GetFinalizedBlockNumber() < other.GetFinalizedBlockNumber() +-} +- +-func (ct *InMemoryConsensusTracker) GetLatestBlockNumber() hexutil.Uint64 { +- defer ct.mutex.Unlock() +- ct.mutex.Lock() +- +- return ct.state.Latest +-} +- +-func (ct *InMemoryConsensusTracker) SetLatestBlockNumber(blockNumber hexutil.Uint64) { +- defer ct.mutex.Unlock() +- ct.mutex.Lock() +- +- ct.state.Latest = blockNumber +-} +- +-func (ct *InMemoryConsensusTracker) GetSafeBlockNumber() hexutil.Uint64 { +- defer ct.mutex.Unlock() +- ct.mutex.Lock() +- +- return ct.state.Safe +-} +- +-func (ct *InMemoryConsensusTracker) SetSafeBlockNumber(blockNumber hexutil.Uint64) { +- defer ct.mutex.Unlock() +- ct.mutex.Lock() +- +- ct.state.Safe = blockNumber +-} +- +-func (ct *InMemoryConsensusTracker) GetFinalizedBlockNumber() hexutil.Uint64 { +- defer ct.mutex.Unlock() +- ct.mutex.Lock() +- +- return ct.state.Finalized +-} +- +-func (ct *InMemoryConsensusTracker) SetFinalizedBlockNumber(blockNumber hexutil.Uint64) { +- defer ct.mutex.Unlock() +- ct.mutex.Lock() +- +- ct.state.Finalized = blockNumber +-} +- +-// RedisConsensusTracker store and retrieve in a shared Redis cluster, with leader election +-type RedisConsensusTracker struct { +- ctx context.Context +- client *redis.Client +- namespace string +- backendGroup *BackendGroup +- 
+- redlock *redsync.Mutex +- lockPeriod time.Duration +- heartbeatInterval time.Duration +- +- leader bool +- leaderName string +- +- // holds the state collected by local pollers +- local *InMemoryConsensusTracker +- +- // holds a copy of the remote shared state +- // when leader, updates the remote with the local state +- remote *InMemoryConsensusTracker +-} +- +-type RedisConsensusTrackerOpt func(cp *RedisConsensusTracker) +- +-func WithLockPeriod(lockPeriod time.Duration) RedisConsensusTrackerOpt { +- return func(ct *RedisConsensusTracker) { +- ct.lockPeriod = lockPeriod +- } +-} +- +-func WithHeartbeatInterval(heartbeatInterval time.Duration) RedisConsensusTrackerOpt { +- return func(ct *RedisConsensusTracker) { +- ct.heartbeatInterval = heartbeatInterval +- } +-} +-func NewRedisConsensusTracker(ctx context.Context, +- redisClient *redis.Client, +- bg *BackendGroup, +- namespace string, +- opts ...RedisConsensusTrackerOpt) ConsensusTracker { +- +- tracker := &RedisConsensusTracker{ +- ctx: ctx, +- client: redisClient, +- backendGroup: bg, +- namespace: namespace, +- +- lockPeriod: 30 * time.Second, +- heartbeatInterval: 2 * time.Second, +- local: NewInMemoryConsensusTracker().(*InMemoryConsensusTracker), +- remote: NewInMemoryConsensusTracker().(*InMemoryConsensusTracker), +- } +- +- for _, opt := range opts { +- opt(tracker) +- } +- +- return tracker +-} +- +-func (ct *RedisConsensusTracker) Init() { +- go func() { +- for { +- timer := time.NewTimer(ct.heartbeatInterval) +- ct.stateHeartbeat() +- +- select { +- case <-timer.C: +- continue +- case <-ct.ctx.Done(): +- timer.Stop() +- return +- } +- } +- }() +-} +- +-func (ct *RedisConsensusTracker) stateHeartbeat() { +- pool := goredis.NewPool(ct.client) +- rs := redsync.New(pool) +- key := ct.key("mutex") +- +- val, err := ct.client.Get(ct.ctx, key).Result() +- if err != nil && err != redis.Nil { +- log.Error("failed to read the lock", "err", err) +- RecordGroupConsensusError(ct.backendGroup, "read_lock", err) +- if ct.leader { +- ok, err := ct.redlock.Unlock() +- if err != nil || !ok { +- log.Error("failed to release the lock after error", "err", err) +- RecordGroupConsensusError(ct.backendGroup, "leader_release_lock", err) +- return +- } +- ct.leader = false +- } +- return +- } +- if val != "" { +- if ct.leader { +- log.Debug("extending lock") +- ok, err := ct.redlock.Extend() +- if err != nil || !ok { +- log.Error("failed to extend lock", "err", err, "mutex", ct.redlock.Name(), "val", ct.redlock.Value()) +- RecordGroupConsensusError(ct.backendGroup, "leader_extend_lock", err) +- ok, err := ct.redlock.Unlock() +- if err != nil || !ok { +- log.Error("failed to release the lock after error", "err", err) +- RecordGroupConsensusError(ct.backendGroup, "leader_release_lock", err) +- return +- } +- ct.leader = false +- return +- } +- ct.postPayload(val) +- } else { +- // retrieve current leader +- leaderName, err := ct.client.Get(ct.ctx, ct.key(fmt.Sprintf("leader:%s", val))).Result() +- if err != nil && err != redis.Nil { +- log.Error("failed to read the remote leader", "err", err) +- RecordGroupConsensusError(ct.backendGroup, "read_leader", err) +- return +- } +- ct.leaderName = leaderName +- log.Debug("following", "val", val, "leader", leaderName) +- // retrieve payload +- val, err := ct.client.Get(ct.ctx, ct.key(fmt.Sprintf("state:%s", val))).Result() +- if err != nil && err != redis.Nil { +- log.Error("failed to read the remote state", "err", err) +- RecordGroupConsensusError(ct.backendGroup, "read_state", err) +- return +- } +- if 
val == "" { +- log.Error("remote state is missing (recent leader election maybe?)") +- RecordGroupConsensusError(ct.backendGroup, "read_state_missing", err) +- return +- } +- state := &ConsensusTrackerState{} +- err = json.Unmarshal([]byte(val), state) +- if err != nil { +- log.Error("failed to unmarshal the remote state", "err", err) +- RecordGroupConsensusError(ct.backendGroup, "read_unmarshal_state", err) +- return +- } +- +- ct.remote.update(state) +- log.Debug("updated state from remote", "state", val, "leader", leaderName) +- +- RecordGroupConsensusHALatestBlock(ct.backendGroup, leaderName, ct.remote.state.Latest) +- RecordGroupConsensusHASafeBlock(ct.backendGroup, leaderName, ct.remote.state.Safe) +- RecordGroupConsensusHAFinalizedBlock(ct.backendGroup, leaderName, ct.remote.state.Finalized) +- } +- } else { +- if !ct.local.Valid() { +- log.Warn("local state is not valid or behind remote, skipping") +- return +- } +- if ct.remote.Valid() && ct.local.Behind(ct.remote) { +- log.Warn("local state is behind remote, skipping") +- return +- } +- +- log.Info("lock not found, creating a new one") +- +- mutex := rs.NewMutex(key, +- redsync.WithExpiry(ct.lockPeriod), +- redsync.WithFailFast(true), +- redsync.WithTries(1)) +- +- // nosemgrep: missing-unlock-before-return +- // this lock is hold indefinitely, and it is extended until the leader dies +- if err := mutex.Lock(); err != nil { +- log.Debug("failed to obtain lock", "err", err) +- ct.leader = false +- return +- } +- +- log.Info("lock acquired", "mutex", mutex.Name(), "val", mutex.Value()) +- ct.redlock = mutex +- ct.leader = true +- ct.postPayload(mutex.Value()) +- } +-} +- +-func (ct *RedisConsensusTracker) key(tag string) string { +- return fmt.Sprintf("consensus:%s:%s", ct.namespace, tag) +-} +- +-func (ct *RedisConsensusTracker) GetLatestBlockNumber() hexutil.Uint64 { +- return ct.remote.GetLatestBlockNumber() +-} +- +-func (ct *RedisConsensusTracker) SetLatestBlockNumber(blockNumber hexutil.Uint64) { +- ct.local.SetLatestBlockNumber(blockNumber) +-} +- +-func (ct *RedisConsensusTracker) GetSafeBlockNumber() hexutil.Uint64 { +- return ct.remote.GetSafeBlockNumber() +-} +- +-func (ct *RedisConsensusTracker) SetSafeBlockNumber(blockNumber hexutil.Uint64) { +- ct.local.SetSafeBlockNumber(blockNumber) +-} +- +-func (ct *RedisConsensusTracker) GetFinalizedBlockNumber() hexutil.Uint64 { +- return ct.remote.GetFinalizedBlockNumber() +-} +- +-func (ct *RedisConsensusTracker) SetFinalizedBlockNumber(blockNumber hexutil.Uint64) { +- ct.local.SetFinalizedBlockNumber(blockNumber) +-} +- +-func (ct *RedisConsensusTracker) postPayload(mutexVal string) { +- jsonState, err := json.Marshal(ct.local.state) +- if err != nil { +- log.Error("failed to marshal local", "err", err) +- RecordGroupConsensusError(ct.backendGroup, "leader_marshal_local_state", err) +- ct.leader = false +- return +- } +- err = ct.client.Set(ct.ctx, ct.key(fmt.Sprintf("state:%s", mutexVal)), jsonState, ct.lockPeriod).Err() +- if err != nil { +- log.Error("failed to post the state", "err", err) +- RecordGroupConsensusError(ct.backendGroup, "leader_post_state", err) +- ct.leader = false +- return +- } +- +- leader, _ := os.LookupEnv("HOSTNAME") +- err = ct.client.Set(ct.ctx, ct.key(fmt.Sprintf("leader:%s", mutexVal)), leader, ct.lockPeriod).Err() +- if err != nil { +- log.Error("failed to post the leader", "err", err) +- RecordGroupConsensusError(ct.backendGroup, "leader_post_leader", err) +- ct.leader = false +- return +- } +- +- log.Debug("posted state", "state", 
string(jsonState), "leader", leader) +- +- ct.leaderName = leader +- ct.remote.update(ct.local.state) +- +- RecordGroupConsensusHALatestBlock(ct.backendGroup, leader, ct.remote.state.Latest) +- RecordGroupConsensusHASafeBlock(ct.backendGroup, leader, ct.remote.state.Safe) +- RecordGroupConsensusHAFinalizedBlock(ct.backendGroup, leader, ct.remote.state.Finalized) +-}
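The Redis tracker keeps two in-memory trackers and makes the data flow one-directional: pollers write into the local copy, reads are always served from the remote copy, and only the instance holding the redsync lock publishes local to remote on each heartbeat. A sketch of that split with the Redis hop elided; all names here are illustrative.

package main

import (
	"fmt"
	"sync"
)

// state mirrors ConsensusTrackerState: the three consensus heights.
type state struct{ latest, safe, finalized uint64 }

// tracker sketches the local/remote split of RedisConsensusTracker. In
// proxyd the publish step goes through a Redis key guarded by a redsync
// lock; here the leader copies directly.
type tracker struct {
	mu            sync.Mutex
	local, remote state
	leader        bool
}

func (t *tracker) SetLatestBlockNumber(n uint64) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.local.latest = n
}

func (t *tracker) GetLatestBlockNumber() uint64 {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.remote.latest
}

// heartbeat is the happy path of stateHeartbeat while holding the lock.
func (t *tracker) heartbeat() {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.leader {
		t.remote = t.local
	}
}

func main() {
	t := &tracker{leader: true}
	t.SetLatestBlockNumber(123)
	fmt.Println(t.GetLatestBlockNumber()) // 0: written but not yet published
	t.heartbeat()
	fmt.Println(t.GetLatestBlockNumber()) // 123
}

Followers never write to remote at all; they only refresh it from the leader's published state, so every proxyd instance serves the same consensus heights.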
@@ -6978,13 +32563,13 @@
- (new) + OP
- CELO + (deleted)
@@ -6994,122 +32579,442 @@
-
+92
-
-0
+
+0
+
-5
-
diff --git OP/packages/contracts-bedrock/snapshots/abi/UsingRegistry.json CELO/packages/contracts-bedrock/snapshots/abi/UsingRegistry.json -new file mode 100644 -index 0000000000000000000000000000000000000000..dc8fa7e0f21ca37add36efa01627337c9521293c ---- /dev/null -+++ CELO/packages/contracts-bedrock/snapshots/abi/UsingRegistry.json -@@ -0,0 +1,93 @@ -+[ -+ { -+ "inputs": [], -+ "name": "owner", -+ "outputs": [ -+ { -+ "internalType": "address", -+ "name": "", -+ "type": "address" -+ } -+ ], -+ "stateMutability": "view", -+ "type": "function" -+ }, -+ { -+ "inputs": [], -+ "name": "registry", -+ "outputs": [ -+ { -+ "internalType": "contract ICeloRegistry", -+ "name": "", -+ "type": "address" -+ } -+ ], -+ "stateMutability": "view", -+ "type": "function" -+ }, -+ { -+ "inputs": [], -+ "name": "renounceOwnership", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" -+ }, -+ { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "registryAddress", -+ "type": "address" -+ } -+ ], -+ "name": "setRegistry", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" -+ }, -+ { -+ "inputs": [ -+ { -+ "internalType": "address", -+ "name": "newOwner", -+ "type": "address" -+ } -+ ], -+ "name": "transferOwnership", -+ "outputs": [], -+ "stateMutability": "nonpayable", -+ "type": "function" -+ }, -+ { -+ "anonymous": false, -+ "inputs": [ -+ { -+ "indexed": true, -+ "internalType": "address", -+ "name": "previousOwner", -+ "type": "address" -+ }, -+ { -+ "indexed": true, -+ "internalType": "address", -+ "name": "newOwner", -+ "type": "address" -+ } -+ ], -+ "name": "OwnershipTransferred", -+ "type": "event" -+ }, -+ { -+ "anonymous": false, -+ "inputs": [ -+ { -+ "indexed": true, -+ "internalType": "address", -+ "name": "registryAddress", -+ "type": "address" -+ } -+ ], -+ "name": "RegistrySet", -+ "type": "event" -+ } -+] -\ No newline at end of file
+
diff --git OP/proxyd/entrypoint.sh CELO/proxyd/entrypoint.sh +deleted file mode 100644 +index ef83fa8e47d4f3383e698c9faf2b99d1d7c30f01..0000000000000000000000000000000000000000 +--- OP/proxyd/entrypoint.sh ++++ /dev/null +@@ -1,6 +0,0 @@ +-#!/bin/sh +- +-echo "Updating CA certificates." +-update-ca-certificates +-echo "Running CMD." +-exec "$@" +\ No newline at end of file
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + (deleted) + +
+
+
+ +
+ +
+ +
+0
+
-7
+ +
+ +
+
+
diff --git OP/proxyd/errors.go CELO/proxyd/errors.go +deleted file mode 100644 +index 51f8df6ddebb137aa5cfdd57611df6677101fab6..0000000000000000000000000000000000000000 +--- OP/proxyd/errors.go ++++ /dev/null +@@ -1,7 +0,0 @@ +-package proxyd +- +-import "fmt" +- +-func wrapErr(err error, msg string) error { +- return fmt.Errorf("%s %w", msg, err) +-}
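The whole deleted errors.go is the single helper below; because it wraps with %w, the original cause stays matchable through the error chain. A self-contained example (the main function is illustrative):

package main

import (
	"errors"
	"fmt"
	"io"
)

// wrapErr prefixes a message while keeping the wrapped error visible to
// errors.Is and errors.As.
func wrapErr(err error, msg string) error {
	return fmt.Errorf("%s %w", msg, err)
}

func main() {
	err := wrapErr(io.EOF, "error reading body:")
	fmt.Println(err)                    // error reading body: EOF
	fmt.Println(errors.Is(err, io.EOF)) // true: the chain survives wrapping
}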
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + (deleted) + +
+
+
+ +
+ +
+ +
+0
+
-123
+ +
+ +
+
+
diff --git OP/proxyd/example.config.toml CELO/proxyd/example.config.toml +deleted file mode 100644 +index b54b342f5189b944c1794d1595cfb58a7410eed7..0000000000000000000000000000000000000000 +--- OP/proxyd/example.config.toml ++++ /dev/null +@@ -1,123 +0,0 @@ +-# List of WS methods to whitelist. +-ws_method_whitelist = [ +- "eth_subscribe", +- "eth_call", +- "eth_chainId" +-] +-# Enable WS on this backend group. There can only be one WS-enabled backend group. +-ws_backend_group = "main" +- +-[server] +-# Host for the proxyd RPC server to listen on. +-rpc_host = "0.0.0.0" +-# Port for the above. +-rpc_port = 8080 +-# Host for the proxyd WS server to listen on. +-ws_host = "0.0.0.0" +-# Port for the above +-# Set the ws_port to 0 to disable WS +-ws_port = 8085 +-# Maximum client body size, in bytes, that the server will accept. +-max_body_size_bytes = 10485760 +-max_concurrent_rpcs = 1000 +-# Server log level +-log_level = "info" +- +-[redis] +-# URL to a Redis instance. +-url = "redis://localhost:6379" +- +-[metrics] +-# Whether or not to enable Prometheus metrics. +-enabled = true +-# Host for the Prometheus metrics endpoint to listen on. +-host = "0.0.0.0" +-# Port for the above. +-port = 9761 +- +-[backend] +-# How long proxyd should wait for a backend response before timing out. +-response_timeout_seconds = 5 +-# Maximum response size, in bytes, that proxyd will accept from a backend. +-max_response_size_bytes = 5242880 +-# Maximum number of times proxyd will try a backend before giving up. +-max_retries = 3 +-# Number of seconds to wait before trying an unhealthy backend again. +-out_of_service_seconds = 600 +-# Maximum latency accepted to serve requests, default 10s +-max_latency_threshold = "30s" +-# Maximum latency accepted to serve requests before degraded, default 5s +-max_degraded_latency_threshold = "10s" +-# Maximum error rate accepted to serve requests, default 0.5 (i.e. 50%) +-max_error_rate_threshold = 0.3 +- +-[backends] +-# A map of backends by name. +-[backends.infura] +-# The URL to contact the backend at. Will be read from the environment +-# if an environment variable prefixed with $ is provided. +-rpc_url = "" +-# The WS URL to contact the backend at. Will be read from the environment +-# if an environment variable prefixed with $ is provided. +-ws_url = "" +-username = "" +-# An HTTP Basic password to authenticate with the backend. Will be read from +-# the environment if an environment variable prefixed with $ is provided. +-password = "" +-max_rps = 3 +-max_ws_conns = 1 +-# Path to a custom root CA. +-ca_file = "" +-# Path to a custom client cert file. +-client_cert_file = "" +-# Path to a custom client key file. 
+-client_key_file = "" +-# Allows backends to skip peer count checking, default false +-# consensus_skip_peer_count = true +-# Specified the target method to get receipts, default "debug_getRawReceipts" +-# See https://github.com/ethereum-optimism/optimism/blob/186e46a47647a51a658e699e9ff047d39444c2de/op-node/sources/receipts.go#L186-L253 +-consensus_receipts_target = "eth_getBlockReceipts" +- +-[backends.alchemy] +-rpc_url = "" +-ws_url = "" +-username = "" +-password = "" +-max_rps = 3 +-max_ws_conns = 1 +-consensus_receipts_target = "alchemy_getTransactionReceipts" +- +-[backend_groups] +-[backend_groups.main] +-backends = ["infura"] +-# Enable consensus awareness for backend group, making it act as a load balancer, default false +-# consensus_aware = true +-# Period in which the backend wont serve requests if banned, default 5m +-# consensus_ban_period = "1m" +-# Maximum delay for update the backend, default 30s +-# consensus_max_update_threshold = "20s" +-# Maximum block lag, default 8 +-# consensus_max_block_lag = 16 +-# Maximum block range (for eth_getLogs method), no default +-# consensus_max_block_range = 20000 +-# Minimum peer count, default 3 +-# consensus_min_peer_count = 4 +- +-[backend_groups.alchemy] +-backends = ["alchemy"] +- +-# If the authentication group below is in the config, +-# proxyd will only accept authenticated requests. +-[authentication] +-# Mapping of auth key to alias. The alias is used to provide a human- +-# readable name for the auth key in monitoring. The auth key will be +-# read from the environment if an environment variable prefixed with $ +-# is provided. Note that you will need to quote the environment variable +-# in order for it to be value TOML, e.g. "$FOO_AUTH_KEY" = "foo_alias". +-secret = "test" +- +-# Mapping of methods to backend groups. +-[rpc_method_mappings] +-eth_call = "main" +-eth_chainId = "main" +-eth_blockNumber = "alchemy"
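This example config maps one-to-one onto the structs in the deleted config.go, so a few lines of BurntSushi/toml are enough to load it. A sketch decoding just the routing-related subset; the trimmed config struct here is illustrative, not the full schema.

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type backendGroup struct {
	Backends []string `toml:"backends"`
}

// config is a trimmed, illustrative subset of the deleted schema.
type config struct {
	WSBackendGroup    string                  `toml:"ws_backend_group"`
	BackendGroups     map[string]backendGroup `toml:"backend_groups"`
	RPCMethodMappings map[string]string       `toml:"rpc_method_mappings"`
}

const doc = `
ws_backend_group = "main"

[backend_groups.main]
backends = ["infura"]

[rpc_method_mappings]
eth_chainId = "main"
eth_blockNumber = "alchemy"
`

func main() {
	var cfg config
	if _, err := toml.Decode(doc, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.BackendGroups["main"].Backends)   // [infura]
	fmt.Println(cfg.RPCMethodMappings["eth_chainId"]) // main
}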
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + (deleted) + +
+
+
+ +
+ +
+ +
+0
+
-139
+ +
+ +
+
+
diff --git OP/proxyd/frontend_rate_limiter.go CELO/proxyd/frontend_rate_limiter.go +deleted file mode 100644 +index d0590f0561da160d7a1846980d19ffc247056a01..0000000000000000000000000000000000000000 +--- OP/proxyd/frontend_rate_limiter.go ++++ /dev/null +@@ -1,139 +0,0 @@ +-package proxyd +- +-import ( +- "context" +- "fmt" +- "sync" +- "time" +- +- "github.com/redis/go-redis/v9" +-) +- +-type FrontendRateLimiter interface { +- // Take consumes a key, and a maximum number of requests +- // per time interval. It returns a boolean denoting if +- // the limit could be taken, or an error if a failure +- // occurred in the backing rate limit implementation. +- // +- // No error will be returned if the limit could not be taken +- // as a result of the requestor being over the limit. +- Take(ctx context.Context, key string) (bool, error) +-} +- +-// limitedKeys is a wrapper around a map that stores a truncated +-// timestamp and a mutex. The map is used to keep track of rate +-// limit keys, and their used limits. +-type limitedKeys struct { +- truncTS int64 +- keys map[string]int +- mtx sync.Mutex +-} +- +-func newLimitedKeys(t int64) *limitedKeys { +- return &limitedKeys{ +- truncTS: t, +- keys: make(map[string]int), +- } +-} +- +-func (l *limitedKeys) Take(key string, max int) bool { +- l.mtx.Lock() +- defer l.mtx.Unlock() +- val, ok := l.keys[key] +- if !ok { +- l.keys[key] = 0 +- val = 0 +- } +- l.keys[key] = val + 1 +- return val < max +-} +- +-// MemoryFrontendRateLimiter is a rate limiter that stores +-// all rate limiting information in local memory. It works +-// by storing a limitedKeys struct that references the +-// truncated timestamp at which the struct was created. If +-// the current truncated timestamp doesn't match what's +-// referenced, the limit is reset. Otherwise, values in +-// a map are incremented to represent the limit. +-type MemoryFrontendRateLimiter struct { +- currGeneration *limitedKeys +- dur time.Duration +- max int +- mtx sync.Mutex +-} +- +-func NewMemoryFrontendRateLimit(dur time.Duration, max int) FrontendRateLimiter { +- return &MemoryFrontendRateLimiter{ +- dur: dur, +- max: max, +- } +-} +- +-func (m *MemoryFrontendRateLimiter) Take(ctx context.Context, key string) (bool, error) { +- m.mtx.Lock() +- // Create truncated timestamp +- truncTS := truncateNow(m.dur) +- +- // If there is no current rate limit map or the rate limit map reference +- // a different timestamp, reset limits. +- if m.currGeneration == nil || m.currGeneration.truncTS != truncTS { +- m.currGeneration = newLimitedKeys(truncTS) +- } +- +- // Pull out the limiter so we can unlock before incrementing the limit. +- limiter := m.currGeneration +- +- m.mtx.Unlock() +- +- return limiter.Take(key, m.max), nil +-} +- +-// RedisFrontendRateLimiter is a rate limiter that stores data in Redis. +-// It uses the basic rate limiter pattern described on the Redis best +-// practices website: https://redis.com/redis-best-practices/basic-rate-limiting/. 
+-type RedisFrontendRateLimiter struct { +- r *redis.Client +- dur time.Duration +- max int +- prefix string +-} +- +-func NewRedisFrontendRateLimiter(r *redis.Client, dur time.Duration, max int, prefix string) FrontendRateLimiter { +- return &RedisFrontendRateLimiter{ +- r: r, +- dur: dur, +- max: max, +- prefix: prefix, +- } +-} +- +-func (r *RedisFrontendRateLimiter) Take(ctx context.Context, key string) (bool, error) { +- var incr *redis.IntCmd +- truncTS := truncateNow(r.dur) +- fullKey := fmt.Sprintf("rate_limit:%s:%s:%d", r.prefix, key, truncTS) +- _, err := r.r.Pipelined(ctx, func(pipe redis.Pipeliner) error { +- incr = pipe.Incr(ctx, fullKey) +- pipe.PExpire(ctx, fullKey, r.dur-time.Millisecond) +- return nil +- }) +- if err != nil { +- frontendRateLimitTakeErrors.Inc() +- return false, err +- } +- +- return incr.Val()-1 < int64(r.max), nil +-} +- +-type noopFrontendRateLimiter struct{} +- +-var NoopFrontendRateLimiter = &noopFrontendRateLimiter{} +- +-func (n *noopFrontendRateLimiter) Take(ctx context.Context, key string) (bool, error) { +- return true, nil +-} +- +-// truncateNow truncates the current timestamp +-// to the specified duration. +-func truncateNow(dur time.Duration) int64 { +- return time.Now().Truncate(dur).Unix() +-}
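Both limiter implementations are fixed-window counters: truncate the current time to the window size, count per (window, key) pair, and let the window roll over to reset every counter at once. The memory variant boiled down to its core, minus the two mutexes the original needs for concurrent use; limiter and take are illustrative names.

package main

import (
	"fmt"
	"time"
)

// truncateNow matches the deleted helper: every request inside the same
// dur-sized window maps to one timestamp, so counters reset at window edges.
func truncateNow(dur time.Duration) int64 {
	return time.Now().Truncate(dur).Unix()
}

// limiter keeps one counter per key for the current window only.
type limiter struct {
	dur    time.Duration
	max    int
	window int64
	counts map[string]int
}

func (l *limiter) take(key string) bool {
	if w := truncateNow(l.dur); w != l.window {
		l.window, l.counts = w, map[string]int{} // new window: all limits reset
	}
	l.counts[key]++
	return l.counts[key] <= l.max
}

func main() {
	l := &limiter{dur: time.Second, max: 2, counts: map[string]int{}}
	for i := 0; i < 3; i++ {
		fmt.Println(l.take("1.2.3.4")) // true, true, false
	}
}

The Redis variant applies the same idea with INCR on a per-window key plus a PExpire just under the window length, so stale counters clean themselves up.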
@@ -7118,13 +33023,13 @@
-(new)
+OP
-CELO
+(deleted)
@@ -7135,29 +33040,80 @@
 +0
--0
+-53
diff --git OP/packages/contracts-bedrock/snapshots/storageLayout/CalledByVm.json CELO/packages/contracts-bedrock/snapshots/storageLayout/CalledByVm.json -new file mode 100644 -index 0000000000000000000000000000000000000000..0637a088a01e8ddab3bf3fa98dbe804cbde1a0dc ---- /dev/null -+++ CELO/packages/contracts-bedrock/snapshots/storageLayout/CalledByVm.json -@@ -0,0 +1 @@ -+[] -\ No newline at end of file
+
diff --git OP/proxyd/frontend_rate_limiter_test.go CELO/proxyd/frontend_rate_limiter_test.go +deleted file mode 100644 +index fb5f808bb5ecaf85727398968faa6df79b848d5d..0000000000000000000000000000000000000000 +--- OP/proxyd/frontend_rate_limiter_test.go ++++ /dev/null +@@ -1,53 +0,0 @@ +-package proxyd +- +-import ( +- "context" +- "fmt" +- "testing" +- "time" +- +- "github.com/alicebob/miniredis" +- "github.com/redis/go-redis/v9" +- "github.com/stretchr/testify/require" +-) +- +-func TestFrontendRateLimiter(t *testing.T) { +- redisServer, err := miniredis.Run() +- require.NoError(t, err) +- defer redisServer.Close() +- +- redisClient := redis.NewClient(&redis.Options{ +- Addr: fmt.Sprintf("127.0.0.1:%s", redisServer.Port()), +- }) +- +- max := 2 +- lims := []struct { +- name string +- frl FrontendRateLimiter +- }{ +- {"memory", NewMemoryFrontendRateLimit(2*time.Second, max)}, +- {"redis", NewRedisFrontendRateLimiter(redisClient, 2*time.Second, max, "")}, +- } +- +- for _, cfg := range lims { +- frl := cfg.frl +- ctx := context.Background() +- t.Run(cfg.name, func(t *testing.T) { +- for i := 0; i < 4; i++ { +- ok, err := frl.Take(ctx, "foo") +- require.NoError(t, err) +- require.Equal(t, i < max, ok) +- ok, err = frl.Take(ctx, "bar") +- require.NoError(t, err) +- require.Equal(t, i < max, ok) +- } +- time.Sleep(2 * time.Second) +- for i := 0; i < 4; i++ { +- ok, _ := frl.Take(ctx, "foo") +- require.Equal(t, i < max, ok) +- ok, _ = frl.Take(ctx, "bar") +- require.Equal(t, i < max, ok) +- } +- }) +- } +-}
@@ -7166,13 +33122,13 @@
-(new)
+OP
-CELO
+(deleted)
@@ -7182,52 +33138,114 @@
-+22
--0
++0
+-86
diff --git OP/packages/contracts-bedrock/snapshots/storageLayout/CeloRegistry.json CELO/packages/contracts-bedrock/snapshots/storageLayout/CeloRegistry.json -new file mode 100644 -index 0000000000000000000000000000000000000000..17b0df2bd7f9e8254e7ac4730d34917b70b3063b ---- /dev/null -+++ CELO/packages/contracts-bedrock/snapshots/storageLayout/CeloRegistry.json -@@ -0,0 +1,23 @@ -+[ -+ { -+ "bytes": "20", -+ "label": "_owner", -+ "offset": 0, -+ "slot": "0", -+ "type": "address" -+ }, -+ { -+ "bytes": "1", -+ "label": "initialized", -+ "offset": 20, -+ "slot": "0", -+ "type": "bool" -+ }, -+ { -+ "bytes": "32", -+ "label": "registry", -+ "offset": 0, -+ "slot": "1", -+ "type": "mapping(bytes32 => address)" -+ } -+] -\ No newline at end of file
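Per this layout, the registry mapping occupies slot 1, so under Solidity's storage rules the entry for a given bytes32 identifier lives at keccak256(key ++ slot). A sketch of that slot computation using go-ethereum helpers (the "GoldToken" key is purely illustrative):

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Registry identifiers are bytes32 keys; keccak256("GoldToken")
	// is purely illustrative here.
	key := crypto.Keccak256Hash([]byte("GoldToken"))

	// Solidity stores mapping(bytes32 => address) entries at
	// keccak256(key ++ slot), with slot = 1 per the layout above.
	slot := common.LeftPadBytes(big.NewInt(1).Bytes(), 32)
	entrySlot := crypto.Keccak256Hash(key.Bytes(), slot)

	fmt.Println("storage slot of registry[key]:", entrySlot.Hex())
}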
+
diff --git OP/proxyd/go.mod CELO/proxyd/go.mod +deleted file mode 100644 +index 088bf9bc9ed3e2dcd41ed5b4c4eae85881b01925..0000000000000000000000000000000000000000 +--- OP/proxyd/go.mod ++++ /dev/null +@@ -1,86 +0,0 @@ +-module github.com/ethereum-optimism/optimism/proxyd +- +-go 1.21 +- +-require ( +- github.com/BurntSushi/toml v1.3.2 +- github.com/alicebob/miniredis v2.5.0+incompatible +- github.com/emirpasic/gods v1.18.1 +- github.com/ethereum/go-ethereum v1.13.15 +- github.com/go-redsync/redsync/v4 v4.10.0 +- github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb +- github.com/gorilla/mux v1.8.0 +- github.com/gorilla/websocket v1.5.0 +- github.com/hashicorp/golang-lru v1.0.2 +- github.com/pkg/errors v0.9.1 +- github.com/prometheus/client_golang v1.17.0 +- github.com/redis/go-redis/v9 v9.2.1 +- github.com/rs/cors v1.10.1 +- github.com/stretchr/testify v1.8.4 +- github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 +- github.com/xaionaro-go/weightedshuffle v0.0.0-20211213010739-6a74fbc7d24a +- golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa +- golang.org/x/sync v0.5.0 +- gopkg.in/yaml.v3 v3.0.1 +-) +- +-require ( +- github.com/DataDog/zstd v1.5.5 // indirect +- github.com/Microsoft/go-winio v0.6.1 // indirect +- github.com/VictoriaMetrics/fastcache v1.12.1 // indirect +- github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 // indirect +- github.com/beorn7/perks v1.0.1 // indirect +- github.com/bits-and-blooms/bitset v1.10.0 // indirect +- github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect +- github.com/cespare/xxhash/v2 v2.2.0 // indirect +- github.com/cockroachdb/errors v1.11.1 // indirect +- github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect +- github.com/cockroachdb/pebble v0.0.0-20231020221949-babd592d2360 // indirect +- github.com/cockroachdb/redact v1.1.5 // indirect +- github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect +- github.com/consensys/bavard v0.1.13 // indirect +- github.com/consensys/gnark-crypto v0.12.1 // indirect +- github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 // indirect +- github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect +- github.com/davecgh/go-spew v1.1.1 // indirect +- github.com/deckarep/golang-set/v2 v2.3.1 // indirect +- github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect +- github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect +- github.com/ethereum/c-kzg-4844 v0.4.0 // indirect +- github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 // indirect +- github.com/getsentry/sentry-go v0.25.0 // indirect +- github.com/go-ole/go-ole v1.3.0 // indirect +- github.com/gofrs/flock v0.8.1 // indirect +- github.com/gogo/protobuf v1.3.2 // indirect +- github.com/gomodule/redigo v1.8.9 // indirect +- github.com/hashicorp/errwrap v1.1.0 // indirect +- github.com/hashicorp/go-multierror v1.1.1 // indirect +- github.com/holiman/bloomfilter/v2 v2.0.3 // indirect +- github.com/holiman/uint256 v1.2.4 // indirect +- github.com/klauspost/compress v1.17.1 // indirect +- github.com/kr/pretty v0.3.1 // indirect +- github.com/kr/text v0.2.0 // indirect +- github.com/mattn/go-runewidth v0.0.15 // indirect +- github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect +- github.com/mmcloughlin/addchain v0.4.0 // indirect +- github.com/olekukonko/tablewriter v0.0.5 // indirect +- github.com/pmezard/go-difflib v1.0.0 // indirect +- github.com/prometheus/client_model v0.5.0 // indirect +- 
github.com/prometheus/common v0.45.0 // indirect +- github.com/prometheus/procfs v0.12.0 // indirect +- github.com/rivo/uniseg v0.4.4 // indirect +- github.com/rogpeppe/go-internal v1.11.0 // indirect +- github.com/shirou/gopsutil v3.21.11+incompatible // indirect +- github.com/supranational/blst v0.3.11 // indirect +- github.com/tklauser/go-sysconf v0.3.12 // indirect +- github.com/tklauser/numcpus v0.6.1 // indirect +- github.com/yuin/gopher-lua v1.1.0 // indirect +- github.com/yusufpapurcu/wmi v1.2.3 // indirect +- golang.org/x/crypto v0.17.0 // indirect +- golang.org/x/mod v0.14.0 // indirect +- golang.org/x/sys v0.16.0 // indirect +- golang.org/x/text v0.14.0 // indirect +- golang.org/x/tools v0.15.0 // indirect +- google.golang.org/protobuf v1.33.0 // indirect +- rsc.io/tmplfunc v0.0.3 // indirect +-)
@@ -7236,13 +33254,13 @@
-(new)
+OP
-CELO
+(deleted)
@@ -7252,59 +33270,318 @@
-+29
--0
++0
+-290
diff --git OP/packages/contracts-bedrock/snapshots/storageLayout/FeeCurrencyDirectory.json CELO/packages/contracts-bedrock/snapshots/storageLayout/FeeCurrencyDirectory.json -new file mode 100644 -index 0000000000000000000000000000000000000000..61ccdc5fb15116df778992284198adbb9aeaa26b ---- /dev/null -+++ CELO/packages/contracts-bedrock/snapshots/storageLayout/FeeCurrencyDirectory.json -@@ -0,0 +1,30 @@ -+[ -+ { -+ "bytes": "1", -+ "label": "initialized", -+ "offset": 0, -+ "slot": "0", -+ "type": "bool" -+ }, -+ { -+ "bytes": "20", -+ "label": "_owner", -+ "offset": 1, -+ "slot": "0", -+ "type": "address" -+ }, -+ { -+ "bytes": "32", -+ "label": "currencies", -+ "offset": 0, -+ "slot": "1", -+ "type": "mapping(address => struct IFeeCurrencyDirectory.CurrencyConfig)" -+ }, -+ { -+ "bytes": "32", -+ "label": "currencyList", -+ "offset": 0, -+ "slot": "2", -+ "type": "address[]" -+ } -+] -\ No newline at end of file
+
diff --git OP/proxyd/go.sum CELO/proxyd/go.sum +deleted file mode 100644 +index 11a684f0e398d0d70ed4bd4827b86777e06bb271..0000000000000000000000000000000000000000 +--- OP/proxyd/go.sum ++++ /dev/null +@@ -1,290 +0,0 @@ +-github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +-github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +-github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= +-github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +-github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +-github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +-github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= +-github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= +-github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 h1:uvdUDbHQHO85qeSydJtItA4T55Pw6BtAejd0APRJOCE= +-github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= +-github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI= +-github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk= +-github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= +-github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= +-github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +-github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +-github.com/bits-and-blooms/bitset v1.10.0 h1:ePXTeiPEazB5+opbv5fr8umg2R/1NlzgDsyepwsSr88= +-github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +-github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= +-github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= +-github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= +-github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= +-github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= +-github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= +-github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= +-github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +-github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +-github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +-github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= +-github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +-github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8= +-github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw= +-github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +-github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod 
h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +-github.com/cockroachdb/pebble v0.0.0-20231020221949-babd592d2360 h1:x1dzGu9e1FYmkG8mL9emtdWD1EzH/17SijnoLvKvPiM= +-github.com/cockroachdb/pebble v0.0.0-20231020221949-babd592d2360/go.mod h1:sEHm5NOXxyiAoKWhoFxT8xMgd/f3RA6qUqQ1BXKrh2E= +-github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= +-github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +-github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= +-github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= +-github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= +-github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= +-github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M= +-github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= +-github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 h1:d28BXYi+wUpz1KBmiF9bWrjEMacUEREV6MBi2ODnrfQ= +-github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs= +-github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA= +-github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc= +-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +-github.com/deckarep/golang-set/v2 v2.3.1 h1:vjmkvJt/IV27WXPyYQpAh4bRyWJc5Y435D17XQ9QU5A= +-github.com/deckarep/golang-set/v2 v2.3.1/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= +-github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= +-github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= +-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= +-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +-github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +-github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +-github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= +-github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= +-github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY= +-github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= +-github.com/ethereum/go-ethereum v1.13.15 h1:U7sSGYGo4SPjP6iNIifNoyIAiNjrmQkz6EwQG+/EZWo= +-github.com/ethereum/go-ethereum v1.13.15/go.mod h1:TN8ZiHrdJwSe8Cb6x+p0hs5CxhJZPbqB7hHkaUXcmIU= +-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +-github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +-github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +-github.com/fsnotify/fsnotify 
v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +-github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 h1:BAIP2GihuqhwdILrV+7GJel5lyPV3u1+PgzrWLc0TkE= +-github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46/go.mod h1:QNpY22eby74jVhqH4WhDLDwxc/vqsern6pW+u2kbkpc= +-github.com/getsentry/sentry-go v0.25.0 h1:q6Eo+hS+yoJlTO3uu/azhQadsD8V+jQn2D8VvX1eOyI= +-github.com/getsentry/sentry-go v0.25.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +-github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +-github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +-github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +-github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= +-github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= +-github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= +-github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= +-github.com/go-redis/redis/v7 v7.4.0 h1:7obg6wUoj05T0EpY0o8B59S9w5yeMWql7sw2kwNW1x4= +-github.com/go-redis/redis/v7 v7.4.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg= +-github.com/go-redis/redis/v8 v8.11.4 h1:kHoYkfZP6+pe04aFTnhDH6GDROa5yJdHJVNxV3F46Tg= +-github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w= +-github.com/go-redsync/redsync/v4 v4.10.0 h1:hTeAak4C73mNBQSTq6KCKDFaiIlfC+z5yTTl8fCJuBs= +-github.com/go-redsync/redsync/v4 v4.10.0/go.mod h1:ZfayzutkgeBmEmBlUR3j+rF6kN44UUGtEdfzhBFZTPc= +-github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +-github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +-github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +-github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +-github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +-github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= +-github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +-github.com/gomodule/redigo v1.8.9 h1:Sl3u+2BI/kk+VEatbj0scLdrFhjPmbxOc1myhDP41ws= +-github.com/gomodule/redigo v1.8.9/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs06a1uzZE= +-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-cmp v0.5.9 
h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +-github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= +-github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +-github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +-github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +-github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +-github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +-github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +-github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +-github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +-github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +-github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +-github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= +-github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= +-github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU= +-github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= +-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +-github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +-github.com/klauspost/compress v1.17.1 h1:NE3C767s2ak2bweCZo3+rdP4U/HoyVXLv/X9f2gPS5g= +-github.com/klauspost/compress v1.17.1/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +-github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +-github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +-github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +-github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +-github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +-github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +-github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= +-github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= +-github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +-github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= +-github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +-github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +-github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= +-github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= +-github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= +-github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= +-github.com/nxadm/tail v1.4.4 
h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +-github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +-github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +-github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +-github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +-github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= +-github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +-github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +-github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= +-github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +-github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +-github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +-github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +-github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +-github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +-github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +-github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +-github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +-github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +-github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +-github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +-github.com/redis/go-redis/v9 v9.2.1 h1:WlYJg71ODF0dVspZZCpYmoF1+U1Jjk9Rwd7pq6QmlCg= +-github.com/redis/go-redis/v9 v9.2.1/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M= +-github.com/redis/rueidis v1.0.19 h1:s65oWtotzlIFN8eMPhyYwxlwLR1lUdhza2KtWprKYSo= +-github.com/redis/rueidis v1.0.19/go.mod h1:8B+r5wdnjwK3lTFml5VtxjzGOQAC+5UmujoD12pDrEo= +-github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +-github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= +-github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +-github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +-github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +-github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +-github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= +-github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +-github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= +-github.com/shirou/gopsutil v3.21.11+incompatible/go.mod 
h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +-github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +-github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203 h1:QVqDTf3h2WHt08YuiTGPZLls0Wq99X9bWd0Q5ZSBesM= +-github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203/go.mod h1:oqN97ltKNihBbwlX8dLpwxCl3+HnXKV/R0e+sRLd9C8= +-github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= +-github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +-github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= +-github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= +-github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +-github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +-github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +-github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +-github.com/xaionaro-go/weightedshuffle v0.0.0-20211213010739-6a74fbc7d24a h1:WS5nQycV+82Ndezq0UcMcGVG416PZgcJPqI/bLM824A= +-github.com/xaionaro-go/weightedshuffle v0.0.0-20211213010739-6a74fbc7d24a/go.mod h1:0KAUfC65le2kMu4fnBxm7Xj3PkQ3MBpJbF5oMmqufBc= +-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +-github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +-github.com/yuin/gopher-lua v1.1.0 h1:BojcDhfyDWgU2f2TOzYK/g5p2gxMrku8oupLDqlnSqE= +-github.com/yuin/gopher-lua v1.1.0/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= +-github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= +-github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +-golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +-golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +-golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= +-golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= +-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +-golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +-golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 
+-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +-golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +-golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +-golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +-golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg= +-golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= +-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +-golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +-golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +-golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +-golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +-golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +-golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +-golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +-golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +-golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +-golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +-golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +-golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +-golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +-golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +-golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +-golang.org/x/tools 
v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +-golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +-golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +-golang.org/x/tools v0.15.0 h1:zdAyfUGbYmuVokhzVmghFl2ZJh5QhcfebBgmVPFYA+8= +-golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk= +-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +-google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +-google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +-gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +-gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +-gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +-rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= +-rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=
@@ -7313,13 +33590,13 @@
-(new)
+OP
-CELO
+(deleted)
@@ -7329,52 +33606,767 @@
-+22
--0
++0
+-92
diff --git OP/packages/contracts-bedrock/snapshots/storageLayout/FeeCurrencyWhitelist.json CELO/packages/contracts-bedrock/snapshots/storageLayout/FeeCurrencyWhitelist.json -new file mode 100644 -index 0000000000000000000000000000000000000000..fed27094a71bd0794f119b2f272d22847d0e6b2d ---- /dev/null -+++ CELO/packages/contracts-bedrock/snapshots/storageLayout/FeeCurrencyWhitelist.json -@@ -0,0 +1,23 @@ -+[ -+ { -+ "bytes": "20", -+ "label": "_owner", -+ "offset": 0, -+ "slot": "0", -+ "type": "address" -+ }, -+ { -+ "bytes": "1", -+ "label": "initialized", -+ "offset": 20, -+ "slot": "0", -+ "type": "bool" -+ }, -+ { -+ "bytes": "32", -+ "label": "whitelist", -+ "offset": 0, -+ "slot": "1", -+ "type": "address[]" -+ } -+] -\ No newline at end of file
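The whitelist address array is declared at slot 1, so by Solidity's rules its length sits in slot 1 itself and element i sits at keccak256(slot) + i. A hedged sketch of that arithmetic (hypothetical helper name):

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// arrayElemSlot returns the storage slot of element i of a dynamic
// array rooted at baseSlot (1 for whitelist per the layout above).
func arrayElemSlot(baseSlot, i int64) common.Hash {
	base := crypto.Keccak256Hash(common.LeftPadBytes(big.NewInt(baseSlot).Bytes(), 32))
	return common.BigToHash(new(big.Int).Add(base.Big(), big.NewInt(i)))
}

func main() {
	fmt.Println("whitelist[0] slot:", arrayElemSlot(1, 0).Hex())
	fmt.Println("whitelist[3] slot:", arrayElemSlot(1, 3).Hex())
}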
+
diff --git OP/proxyd/methods.go CELO/proxyd/methods.go +deleted file mode 100644 +index 08ea773288ab2fb08d25351db7efc8f8f0bd8169..0000000000000000000000000000000000000000 +--- OP/proxyd/methods.go ++++ /dev/null +@@ -1,92 +0,0 @@ +-package proxyd +- +-import ( +- "context" +- "crypto/sha256" +- "encoding/json" +- "fmt" +- "strings" +- "sync" +- +- "github.com/ethereum/go-ethereum/log" +-) +- +-type RPCMethodHandler interface { +- GetRPCMethod(context.Context, *RPCReq) (*RPCRes, error) +- PutRPCMethod(context.Context, *RPCReq, *RPCRes) error +-} +- +-type StaticMethodHandler struct { +- cache Cache +- m sync.RWMutex +- filterGet func(*RPCReq) bool +- filterPut func(*RPCReq, *RPCRes) bool +-} +- +-func (e *StaticMethodHandler) key(req *RPCReq) string { +- // signature is the hashed json.RawMessage param contents +- h := sha256.New() +- h.Write(req.Params) +- signature := fmt.Sprintf("%x", h.Sum(nil)) +- return strings.Join([]string{"cache", req.Method, signature}, ":") +-} +- +-func (e *StaticMethodHandler) GetRPCMethod(ctx context.Context, req *RPCReq) (*RPCRes, error) { +- if e.cache == nil { +- return nil, nil +- } +- if e.filterGet != nil && !e.filterGet(req) { +- return nil, nil +- } +- +- e.m.RLock() +- defer e.m.RUnlock() +- +- key := e.key(req) +- val, err := e.cache.Get(ctx, key) +- if err != nil { +- log.Error("error reading from cache", "key", key, "method", req.Method, "err", err) +- return nil, err +- } +- if val == "" { +- return nil, nil +- } +- +- var result interface{} +- if err := json.Unmarshal([]byte(val), &result); err != nil { +- log.Error("error unmarshalling value from cache", "key", key, "method", req.Method, "err", err) +- return nil, err +- } +- return &RPCRes{ +- JSONRPC: req.JSONRPC, +- Result: result, +- ID: req.ID, +- }, nil +-} +- +-func (e *StaticMethodHandler) PutRPCMethod(ctx context.Context, req *RPCReq, res *RPCRes) error { +- if e.cache == nil { +- return nil +- } +- // if there is a filter on get, we don't want to cache it because its irretrievable +- if e.filterGet != nil && !e.filterGet(req) { +- return nil +- } +- // response filter +- if e.filterPut != nil && !e.filterPut(req, res) { +- return nil +- } +- +- e.m.Lock() +- defer e.m.Unlock() +- +- key := e.key(req) +- value := mustMarshalJSON(res.Result) +- +- err := e.cache.Put(ctx, key, string(value)) +- if err != nil { +- log.Error("error putting into cache", "key", key, "method", req.Method, "err", err) +- return err +- } +- return nil +-}
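The deleted StaticMethodHandler derives its cache key from a SHA-256 of the raw JSON-RPC params, so two requests for the same method with byte-identical params share one cache entry. A standalone sketch of that key scheme (hypothetical helper name):

package main

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
	"strings"
)

// cacheKey mirrors the deleted handler's scheme:
// "cache:<method>:<sha256(params)>".
func cacheKey(method string, params json.RawMessage) string {
	h := sha256.Sum256(params)
	return strings.Join([]string{"cache", method, fmt.Sprintf("%x", h)}, ":")
}

func main() {
	params := json.RawMessage(`["0x1",false]`)
	fmt.Println(cacheKey("eth_getBlockByNumber", params))
}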
+OP
+(deleted)
++0
+-601
diff --git OP/proxyd/metrics.go CELO/proxyd/metrics.go +deleted file mode 100644 +index 4046af031c9f2da8515ee8a07ee7b60bdd226134..0000000000000000000000000000000000000000 +--- OP/proxyd/metrics.go ++++ /dev/null +@@ -1,601 +0,0 @@ +-package proxyd +- +-import ( +- "context" +- "fmt" +- "regexp" +- "strconv" +- "strings" +- "time" +- +- "github.com/ethereum/go-ethereum/common/hexutil" +- +- "github.com/prometheus/client_golang/prometheus" +- "github.com/prometheus/client_golang/prometheus/promauto" +-) +- +-const ( +- MetricsNamespace = "proxyd" +- +- RPCRequestSourceHTTP = "http" +- RPCRequestSourceWS = "ws" +- +- BackendProxyd = "proxyd" +- SourceClient = "client" +- SourceBackend = "backend" +- MethodUnknown = "unknown" +-) +- +-var PayloadSizeBuckets = []float64{10, 50, 100, 500, 1000, 5000, 10000, 100000, 1000000} +-var MillisecondDurationBuckets = []float64{1, 10, 50, 100, 500, 1000, 5000, 10000, 100000} +- +-var ( +- rpcRequestsTotal = promauto.NewCounter(prometheus.CounterOpts{ +- Namespace: MetricsNamespace, +- Name: "rpc_requests_total", +- Help: "Count of total client RPC requests.", +- }) +- +- rpcForwardsTotal = promauto.NewCounterVec(prometheus.CounterOpts{ +- Namespace: MetricsNamespace, +- Name: "rpc_forwards_total", +- Help: "Count of total RPC requests forwarded to each backend.", +- }, []string{ +- "auth", +- "backend_name", +- "method_name", +- "source", +- }) +- +- rpcBackendHTTPResponseCodesTotal = promauto.NewCounterVec(prometheus.CounterOpts{ +- Namespace: MetricsNamespace, +- Name: "rpc_backend_http_response_codes_total", +- Help: "Count of total backend responses by HTTP status code.", +- }, []string{ +- "auth", +- "backend_name", +- "method_name", +- "status_code", +- "batched", +- }) +- +- rpcErrorsTotal = promauto.NewCounterVec(prometheus.CounterOpts{ +- Namespace: MetricsNamespace, +- Name: "rpc_errors_total", +- Help: "Count of total RPC errors.", +- }, []string{ +- "auth", +- "backend_name", +- "method_name", +- "error_code", +- }) +- +- rpcSpecialErrorsTotal = promauto.NewCounterVec(prometheus.CounterOpts{ +- Namespace: MetricsNamespace, +- Name: "rpc_special_errors_total", +- Help: "Count of total special RPC errors.", +- }, []string{ +- "auth", +- "backend_name", +- "method_name", +- "error_type", +- }) +- +- rpcBackendRequestDurationSumm = promauto.NewSummaryVec(prometheus.SummaryOpts{ +- Namespace: MetricsNamespace, +- Name: "rpc_backend_request_duration_seconds", +- Help: "Summary of backend response times broken down by backend and method name.", +- Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.95: 0.005, 0.99: 0.001}, +- }, []string{ +- "backend_name", +- "method_name", +- "batched", +- }) +- +- activeClientWsConnsGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{ +- Namespace: MetricsNamespace, +- Name: "active_client_ws_conns", +- Help: "Gauge of active client WS connections.", +- }, []string{ +- "auth", +- }) +- +- activeBackendWsConnsGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{ +- Namespace: MetricsNamespace, +- Name: "active_backend_ws_conns", +- Help: "Gauge of active backend WS connections.", +- }, []string{ +- "backend_name", +- }) +- +- unserviceableRequestsTotal = promauto.NewCounterVec(prometheus.CounterOpts{ +- Namespace: MetricsNamespace, +- Name: "unserviceable_requests_total", +- Help: "Count of total requests that were rejected due to no backends being available.", +- }, []string{ +- "auth", +- "request_source", +- }) +- +- httpResponseCodesTotal = promauto.NewCounterVec(prometheus.CounterOpts{ +- Namespace: 
MetricsNamespace, +- Name: "http_response_codes_total", +- Help: "Count of total HTTP response codes.", +- }, []string{ +- "status_code", +- }) +- +- httpRequestDurationSumm = promauto.NewSummary(prometheus.SummaryOpts{ +- Namespace: MetricsNamespace, +- Name: "http_request_duration_seconds", +- Help: "Summary of HTTP request durations, in seconds.", +- Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.95: 0.005, 0.99: 0.001}, +- }) +- +- wsMessagesTotal = promauto.NewCounterVec(prometheus.CounterOpts{ +- Namespace: MetricsNamespace, +- Name: "ws_messages_total", +- Help: "Count of total websocket messages including protocol control.", +- }, []string{ +- "auth", +- "backend_name", +- "source", +- }) +- +- redisErrorsTotal = promauto.NewCounterVec(prometheus.CounterOpts{ +- Namespace: MetricsNamespace, +- Name: "redis_errors_total", +- Help: "Count of total Redis errors.", +- }, []string{ +- "source", +- }) +- +- requestPayloadSizesGauge = promauto.NewHistogramVec(prometheus.HistogramOpts{ +- Namespace: MetricsNamespace, +- Name: "request_payload_sizes", +- Help: "Histogram of client request payload sizes.", +- Buckets: PayloadSizeBuckets, +- }, []string{ +- "auth", +- }) +- +- responsePayloadSizesGauge = promauto.NewHistogramVec(prometheus.HistogramOpts{ +- Namespace: MetricsNamespace, +- Name: "response_payload_sizes", +- Help: "Histogram of client response payload sizes.", +- Buckets: PayloadSizeBuckets, +- }, []string{ +- "auth", +- }) +- +- cacheHitsTotal = promauto.NewCounterVec(prometheus.CounterOpts{ +- Namespace: MetricsNamespace, +- Name: "cache_hits_total", +- Help: "Number of cache hits.", +- }, []string{ +- "method", +- }) +- +- cacheMissesTotal = promauto.NewCounterVec(prometheus.CounterOpts{ +- Namespace: MetricsNamespace, +- Name: "cache_misses_total", +- Help: "Number of cache misses.", +- }, []string{ +- "method", +- }) +- +- cacheErrorsTotal = promauto.NewCounterVec(prometheus.CounterOpts{ +- Namespace: MetricsNamespace, +- Name: "cache_errors_total", +- Help: "Number of cache errors.", +- }, []string{ +- "method", +- }) +- +- batchRPCShortCircuitsTotal = promauto.NewCounter(prometheus.CounterOpts{ +- Namespace: MetricsNamespace, +- Name: "batch_rpc_short_circuits_total", +- Help: "Count of total batch RPC short-circuits.", +- }) +- +- rpcSpecialErrors = []string{ +- "nonce too low", +- "gas price too high", +- "gas price too low", +- "invalid parameters", +- } +- +- redisCacheDurationSumm = promauto.NewHistogramVec(prometheus.HistogramOpts{ +- Namespace: MetricsNamespace, +- Name: "redis_cache_duration_milliseconds", +- Help: "Histogram of Redis command durations, in milliseconds.", +- Buckets: MillisecondDurationBuckets, +- }, []string{"command"}) +- +- tooManyRequestErrorsTotal = promauto.NewCounterVec(prometheus.CounterOpts{ +- Namespace: MetricsNamespace, +- Name: "too_many_request_errors_total", +- Help: "Count of request timeouts due to too many concurrent RPCs.", +- }, []string{ +- "backend_name", +- }) +- +- batchSizeHistogram = promauto.NewHistogram(prometheus.HistogramOpts{ +- Namespace: MetricsNamespace, +- Name: "batch_size_summary", +- Help: "Summary of batch sizes", +- Buckets: []float64{ +- 1, +- 5, +- 10, +- 25, +- 50, +- 100, +- }, +- }) +- +- frontendRateLimitTakeErrors = promauto.NewCounter(prometheus.CounterOpts{ +- Namespace: MetricsNamespace, +- Name: "rate_limit_take_errors", +- Help: "Count of errors taking frontend rate limits", +- }) +- +- consensusLatestBlock = promauto.NewGaugeVec(prometheus.GaugeOpts{ +- Namespace: MetricsNamespace, +- 
Name: "group_consensus_latest_block", +- Help: "Consensus latest block", +- }, []string{ +- "backend_group_name", +- }) +- +- consensusSafeBlock = promauto.NewGaugeVec(prometheus.GaugeOpts{ +- Namespace: MetricsNamespace, +- Name: "group_consensus_safe_block", +- Help: "Consensus safe block", +- }, []string{ +- "backend_group_name", +- }) +- +- consensusFinalizedBlock = promauto.NewGaugeVec(prometheus.GaugeOpts{ +- Namespace: MetricsNamespace, +- Name: "group_consensus_finalized_block", +- Help: "Consensus finalized block", +- }, []string{ +- "backend_group_name", +- }) +- +- consensusHAError = promauto.NewCounterVec(prometheus.CounterOpts{ +- Namespace: MetricsNamespace, +- Name: "group_consensus_ha_error", +- Help: "Consensus HA error count", +- }, []string{ +- "error", +- }) +- +- consensusHALatestBlock = promauto.NewGaugeVec(prometheus.GaugeOpts{ +- Namespace: MetricsNamespace, +- Name: "group_consensus_ha_latest_block", +- Help: "Consensus HA latest block", +- }, []string{ +- "backend_group_name", +- "leader", +- }) +- +- consensusHASafeBlock = promauto.NewGaugeVec(prometheus.GaugeOpts{ +- Namespace: MetricsNamespace, +- Name: "group_consensus_ha_safe_block", +- Help: "Consensus HA safe block", +- }, []string{ +- "backend_group_name", +- "leader", +- }) +- +- consensusHAFinalizedBlock = promauto.NewGaugeVec(prometheus.GaugeOpts{ +- Namespace: MetricsNamespace, +- Name: "group_consensus_ha_finalized_block", +- Help: "Consensus HA finalized block", +- }, []string{ +- "backend_group_name", +- "leader", +- }) +- +- backendLatestBlockBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{ +- Namespace: MetricsNamespace, +- Name: "backend_latest_block", +- Help: "Current latest block observed per backend", +- }, []string{ +- "backend_name", +- }) +- +- backendSafeBlockBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{ +- Namespace: MetricsNamespace, +- Name: "backend_safe_block", +- Help: "Current safe block observed per backend", +- }, []string{ +- "backend_name", +- }) +- +- backendFinalizedBlockBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{ +- Namespace: MetricsNamespace, +- Name: "backend_finalized_block", +- Help: "Current finalized block observed per backend", +- }, []string{ +- "backend_name", +- }) +- +- backendUnexpectedBlockTagsBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{ +- Namespace: MetricsNamespace, +- Name: "backend_unexpected_block_tags", +- Help: "Bool gauge for unexpected block tags", +- }, []string{ +- "backend_name", +- }) +- +- consensusGroupCount = promauto.NewGaugeVec(prometheus.GaugeOpts{ +- Namespace: MetricsNamespace, +- Name: "group_consensus_count", +- Help: "Consensus group serving traffic count", +- }, []string{ +- "backend_group_name", +- }) +- +- consensusGroupFilteredCount = promauto.NewGaugeVec(prometheus.GaugeOpts{ +- Namespace: MetricsNamespace, +- Name: "group_consensus_filtered_count", +- Help: "Consensus group filtered out from serving traffic count", +- }, []string{ +- "backend_group_name", +- }) +- +- consensusGroupTotalCount = promauto.NewGaugeVec(prometheus.GaugeOpts{ +- Namespace: MetricsNamespace, +- Name: "group_consensus_total_count", +- Help: "Total count of candidates to be part of consensus group", +- }, []string{ +- "backend_group_name", +- }) +- +- consensusBannedBackends = promauto.NewGaugeVec(prometheus.GaugeOpts{ +- Namespace: MetricsNamespace, +- Name: "consensus_backend_banned", +- Help: "Bool gauge for banned backends", +- }, []string{ +- "backend_name", +- }) +- +- consensusPeerCountBackend = 
promauto.NewGaugeVec(prometheus.GaugeOpts{ +- Namespace: MetricsNamespace, +- Name: "consensus_backend_peer_count", +- Help: "Peer count", +- }, []string{ +- "backend_name", +- }) +- +- consensusInSyncBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{ +- Namespace: MetricsNamespace, +- Name: "consensus_backend_in_sync", +- Help: "Bool gauge for backends in sync", +- }, []string{ +- "backend_name", +- }) +- +- consensusUpdateDelayBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{ +- Namespace: MetricsNamespace, +- Name: "consensus_backend_update_delay", +- Help: "Delay (ms) for backend update", +- }, []string{ +- "backend_name", +- }) +- +- avgLatencyBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{ +- Namespace: MetricsNamespace, +- Name: "backend_avg_latency", +- Help: "Average latency per backend", +- }, []string{ +- "backend_name", +- }) +- +- degradedBackends = promauto.NewGaugeVec(prometheus.GaugeOpts{ +- Namespace: MetricsNamespace, +- Name: "backend_degraded", +- Help: "Bool gauge for degraded backends", +- }, []string{ +- "backend_name", +- }) +- +- networkErrorRateBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{ +- Namespace: MetricsNamespace, +- Name: "backend_error_rate", +- Help: "Request error rate per backend", +- }, []string{ +- "backend_name", +- }) +- +- healthyPrimaryCandidates = promauto.NewGaugeVec(prometheus.GaugeOpts{ +- Namespace: MetricsNamespace, +- Name: "healthy_candidates", +- Help: "Record the number of healthy primary candidates", +- }, []string{ +- "backend_group_name", +- }) +- +- backendGroupFallbackBackend = promauto.NewGaugeVec(prometheus.GaugeOpts{ +- Namespace: MetricsNamespace, +- Name: "backend_group_fallback_backenend", +- Help: "Bool gauge for if a backend is a fallback for a backend group", +- }, []string{ +- "backend_group", +- "backend_name", +- "fallback", +- }) +-) +- +-func RecordRedisError(source string) { +- redisErrorsTotal.WithLabelValues(source).Inc() +-} +- +-func RecordRPCError(ctx context.Context, backendName, method string, err error) { +- rpcErr, ok := err.(*RPCErr) +- var code int +- if ok { +- MaybeRecordSpecialRPCError(ctx, backendName, method, rpcErr) +- code = rpcErr.Code +- } else { +- code = -1 +- } +- +- rpcErrorsTotal.WithLabelValues(GetAuthCtx(ctx), backendName, method, strconv.Itoa(code)).Inc() +-} +- +-func RecordWSMessage(ctx context.Context, backendName, source string) { +- wsMessagesTotal.WithLabelValues(GetAuthCtx(ctx), backendName, source).Inc() +-} +- +-func RecordUnserviceableRequest(ctx context.Context, source string) { +- unserviceableRequestsTotal.WithLabelValues(GetAuthCtx(ctx), source).Inc() +-} +- +-func RecordRPCForward(ctx context.Context, backendName, method, source string) { +- rpcForwardsTotal.WithLabelValues(GetAuthCtx(ctx), backendName, method, source).Inc() +-} +- +-func MaybeRecordSpecialRPCError(ctx context.Context, backendName, method string, rpcErr *RPCErr) { +- errMsg := strings.ToLower(rpcErr.Message) +- for _, errStr := range rpcSpecialErrors { +- if strings.Contains(errMsg, errStr) { +- rpcSpecialErrorsTotal.WithLabelValues(GetAuthCtx(ctx), backendName, method, errStr).Inc() +- return +- } +- } +-} +- +-func RecordRequestPayloadSize(ctx context.Context, payloadSize int) { +- requestPayloadSizesGauge.WithLabelValues(GetAuthCtx(ctx)).Observe(float64(payloadSize)) +-} +- +-func RecordResponsePayloadSize(ctx context.Context, payloadSize int) { +- responsePayloadSizesGauge.WithLabelValues(GetAuthCtx(ctx)).Observe(float64(payloadSize)) +-} +- +-func RecordCacheHit(method string) { +- 
cacheHitsTotal.WithLabelValues(method).Inc() +-} +- +-func RecordCacheMiss(method string) { +- cacheMissesTotal.WithLabelValues(method).Inc() +-} +- +-func RecordCacheError(method string) { +- cacheErrorsTotal.WithLabelValues(method).Inc() +-} +- +-func RecordBatchSize(size int) { +- batchSizeHistogram.Observe(float64(size)) +-} +- +-var nonAlphanumericRegex = regexp.MustCompile(`[^a-zA-Z ]+`) +- +-func RecordGroupConsensusError(group *BackendGroup, label string, err error) { +- errClean := nonAlphanumericRegex.ReplaceAllString(err.Error(), "") +- errClean = strings.ReplaceAll(errClean, " ", "_") +- errClean = strings.ReplaceAll(errClean, "__", "_") +- label = fmt.Sprintf("%s.%s", label, errClean) +- consensusHAError.WithLabelValues(label).Inc() +-} +- +-func RecordGroupConsensusHALatestBlock(group *BackendGroup, leader string, blockNumber hexutil.Uint64) { +- consensusHALatestBlock.WithLabelValues(group.Name, leader).Set(float64(blockNumber)) +-} +- +-func RecordGroupConsensusHASafeBlock(group *BackendGroup, leader string, blockNumber hexutil.Uint64) { +- consensusHASafeBlock.WithLabelValues(group.Name, leader).Set(float64(blockNumber)) +-} +- +-func RecordGroupConsensusHAFinalizedBlock(group *BackendGroup, leader string, blockNumber hexutil.Uint64) { +- consensusHAFinalizedBlock.WithLabelValues(group.Name, leader).Set(float64(blockNumber)) +-} +- +-func RecordGroupConsensusLatestBlock(group *BackendGroup, blockNumber hexutil.Uint64) { +- consensusLatestBlock.WithLabelValues(group.Name).Set(float64(blockNumber)) +-} +- +-func RecordGroupConsensusSafeBlock(group *BackendGroup, blockNumber hexutil.Uint64) { +- consensusSafeBlock.WithLabelValues(group.Name).Set(float64(blockNumber)) +-} +- +-func RecordGroupConsensusFinalizedBlock(group *BackendGroup, blockNumber hexutil.Uint64) { +- consensusFinalizedBlock.WithLabelValues(group.Name).Set(float64(blockNumber)) +-} +- +-func RecordGroupConsensusCount(group *BackendGroup, count int) { +- consensusGroupCount.WithLabelValues(group.Name).Set(float64(count)) +-} +- +-func RecordGroupConsensusFilteredCount(group *BackendGroup, count int) { +- consensusGroupFilteredCount.WithLabelValues(group.Name).Set(float64(count)) +-} +- +-func RecordGroupTotalCount(group *BackendGroup, count int) { +- consensusGroupTotalCount.WithLabelValues(group.Name).Set(float64(count)) +-} +- +-func RecordBackendLatestBlock(b *Backend, blockNumber hexutil.Uint64) { +- backendLatestBlockBackend.WithLabelValues(b.Name).Set(float64(blockNumber)) +-} +- +-func RecordBackendSafeBlock(b *Backend, blockNumber hexutil.Uint64) { +- backendSafeBlockBackend.WithLabelValues(b.Name).Set(float64(blockNumber)) +-} +- +-func RecordBackendFinalizedBlock(b *Backend, blockNumber hexutil.Uint64) { +- backendFinalizedBlockBackend.WithLabelValues(b.Name).Set(float64(blockNumber)) +-} +- +-func RecordBackendUnexpectedBlockTags(b *Backend, unexpected bool) { +- backendUnexpectedBlockTagsBackend.WithLabelValues(b.Name).Set(boolToFloat64(unexpected)) +-} +- +-func RecordConsensusBackendBanned(b *Backend, banned bool) { +- consensusBannedBackends.WithLabelValues(b.Name).Set(boolToFloat64(banned)) +-} +- +-func RecordHealthyCandidates(b *BackendGroup, candidates int) { +- healthyPrimaryCandidates.WithLabelValues(b.Name).Set(float64(candidates)) +-} +- +-func RecordConsensusBackendPeerCount(b *Backend, peerCount uint64) { +- consensusPeerCountBackend.WithLabelValues(b.Name).Set(float64(peerCount)) +-} +- +-func RecordConsensusBackendInSync(b *Backend, inSync bool) { +- 
consensusInSyncBackend.WithLabelValues(b.Name).Set(boolToFloat64(inSync)) +-} +- +-func RecordConsensusBackendUpdateDelay(b *Backend, lastUpdate time.Time) { +- // avoid recording the delay for the first update +- if lastUpdate.IsZero() { +- return +- } +- delay := time.Since(lastUpdate) +- consensusUpdateDelayBackend.WithLabelValues(b.Name).Set(float64(delay.Milliseconds())) +-} +- +-func RecordBackendNetworkLatencyAverageSlidingWindow(b *Backend, avgLatency time.Duration) { +- avgLatencyBackend.WithLabelValues(b.Name).Set(float64(avgLatency.Milliseconds())) +- degradedBackends.WithLabelValues(b.Name).Set(boolToFloat64(b.IsDegraded())) +-} +- +-func RecordBackendNetworkErrorRateSlidingWindow(b *Backend, rate float64) { +- networkErrorRateBackend.WithLabelValues(b.Name).Set(rate) +-} +- +-func RecordBackendGroupFallbacks(bg *BackendGroup, name string, fallback bool) { +- backendGroupFallbackBackend.WithLabelValues(bg.Name, name, strconv.FormatBool(fallback)).Set(boolToFloat64(fallback)) +-} +- +-func boolToFloat64(b bool) float64 { +- if b { +- return 1 +- } +- return 0 +-}
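
The metrics surface removed above follows one pattern throughout: a `promauto`-registered `GaugeVec` keyed by `backend_name` (or `backend_group`), set from small `Record*` helpers. A minimal, self-contained sketch of that pattern — the metric name, label value, and port below are illustrative, not taken from the diff:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// One time series per backend, in the style of the removed gauges.
var backendUp = promauto.NewGaugeVec(prometheus.GaugeOpts{
	Namespace: "proxyd",
	Name:      "backend_up", // illustrative name, not from the diff
	Help:      "Bool gauge for backend liveness",
}, []string{"backend_name"})

func boolToFloat64(b bool) float64 {
	if b {
		return 1
	}
	return 0
}

func main() {
	// Each Record* helper in the deleted file reduces to a call like this.
	backendUp.WithLabelValues("backend-a").Set(boolToFloat64(true))

	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":9761", nil)) // port is illustrative
}
```
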
@@ -7383,13 +34375,13 @@
- (new)
+ OP
- CELO
+ (deleted)
@@ -7399,101 +34391,500 @@
-
+71
-
-0
+
+0
+
-472
-
diff --git OP/packages/contracts-bedrock/snapshots/storageLayout/FeeHandler.json CELO/packages/contracts-bedrock/snapshots/storageLayout/FeeHandler.json -new file mode 100644 -index 0000000000000000000000000000000000000000..468bb7dc389218cc2a62ad57d94c340b31fa5a30 ---- /dev/null -+++ CELO/packages/contracts-bedrock/snapshots/storageLayout/FeeHandler.json -@@ -0,0 +1,72 @@ -+[ -+ { -+ "bytes": "20", -+ "label": "_owner", -+ "offset": 0, -+ "slot": "0", -+ "type": "address" -+ }, -+ { -+ "bytes": "1", -+ "label": "initialized", -+ "offset": 20, -+ "slot": "0", -+ "type": "bool" -+ }, -+ { -+ "bytes": "20", -+ "label": "registry", -+ "offset": 0, -+ "slot": "1", -+ "type": "contract ICeloRegistry" -+ }, -+ { -+ "bytes": "32", -+ "label": "_status", -+ "offset": 0, -+ "slot": "2", -+ "type": "uint256" -+ }, -+ { -+ "bytes": "32", -+ "label": "lastLimitDay", -+ "offset": 0, -+ "slot": "3", -+ "type": "uint256" -+ }, -+ { -+ "bytes": "32", -+ "label": "burnFraction", -+ "offset": 0, -+ "slot": "4", -+ "type": "struct FixidityLib.Fraction" -+ }, -+ { -+ "bytes": "20", -+ "label": "feeBeneficiary", -+ "offset": 0, -+ "slot": "5", -+ "type": "address" -+ }, -+ { -+ "bytes": "32", -+ "label": "celoToBeBurned", -+ "offset": 0, -+ "slot": "6", -+ "type": "uint256" -+ }, -+ { -+ "bytes": "32", -+ "label": "tokenStates", -+ "offset": 0, -+ "slot": "7", -+ "type": "mapping(address => struct FeeHandler.TokenState)" -+ }, -+ { -+ "bytes": "64", -+ "label": "activeTokens", -+ "offset": 0, -+ "slot": "8", -+ "type": "struct EnumerableSet.AddressSet" -+ } -+] -\ No newline at end of file
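
The storage-layout snapshots added on the CELO side record how Solidity packs fields into 32-byte slots: in `FeeHandler.json` above, `_owner` (20 bytes, offset 0) and `initialized` (1 byte, offset 20) share slot 0. A hypothetical sketch of reading such a packed word back out, with offsets counted from the low-order byte; the raw slot value and address are made up for the example:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Hypothetical raw value of FeeHandler slot 0: `_owner` occupies the
	// low 20 bytes (offset 0), `initialized` the single byte above it
	// (offset 20).
	slot, _ := new(big.Int).SetString(
		"01d533ca259b330c7a88f74e000a3faea2d63b7972", 16)

	// Mask off the low 160 bits to recover the address.
	addrMask := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 160), big.NewInt(1))
	owner := new(big.Int).And(slot, addrMask)

	// The bool sits in the byte starting at bit 160.
	initialized := new(big.Int).Rsh(slot, 160).Uint64() == 1

	fmt.Printf("owner=0x%040x initialized=%v\n", owner, initialized)
}
```
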
+
diff --git OP/proxyd/proxyd.go CELO/proxyd/proxyd.go +deleted file mode 100644 +index 402909b5f40430e253f645d0a223f2ad496b2df4..0000000000000000000000000000000000000000 +--- OP/proxyd/proxyd.go ++++ /dev/null +@@ -1,472 +0,0 @@ +-package proxyd +- +-import ( +- "context" +- "crypto/tls" +- "errors" +- "fmt" +- "net/http" +- "os" +- "time" +- +- "github.com/ethereum/go-ethereum/common/math" +- "github.com/ethereum/go-ethereum/log" +- "github.com/prometheus/client_golang/prometheus/promhttp" +- "github.com/redis/go-redis/v9" +- "golang.org/x/exp/slog" +- "golang.org/x/sync/semaphore" +-) +- +-func SetLogLevel(logLevel slog.Leveler) { +- log.SetDefault(log.NewLogger(slog.NewJSONHandler( +- os.Stdout, &slog.HandlerOptions{Level: logLevel}))) +-} +- +-func Start(config *Config) (*Server, func(), error) { +- if len(config.Backends) == 0 { +- return nil, nil, errors.New("must define at least one backend") +- } +- if len(config.BackendGroups) == 0 { +- return nil, nil, errors.New("must define at least one backend group") +- } +- if len(config.RPCMethodMappings) == 0 { +- return nil, nil, errors.New("must define at least one RPC method mapping") +- } +- +- for authKey := range config.Authentication { +- if authKey == "none" { +- return nil, nil, errors.New("cannot use none as an auth key") +- } +- } +- +- var redisClient *redis.Client +- if config.Redis.URL != "" { +- rURL, err := ReadFromEnvOrConfig(config.Redis.URL) +- if err != nil { +- return nil, nil, err +- } +- redisClient, err = NewRedisClient(rURL) +- if err != nil { +- return nil, nil, err +- } +- } +- +- if redisClient == nil && config.RateLimit.UseRedis { +- return nil, nil, errors.New("must specify a Redis URL if UseRedis is true in rate limit config") +- } +- +- // While modifying shared globals is a bad practice, the alternative +- // is to clone these errors on every invocation. This is inefficient. +- // We'd also have to make sure that errors.Is and errors.As continue +- // to function properly on the cloned errors. 
+- if config.RateLimit.ErrorMessage != "" { +- ErrOverRateLimit.Message = config.RateLimit.ErrorMessage +- } +- if config.WhitelistErrorMessage != "" { +- ErrMethodNotWhitelisted.Message = config.WhitelistErrorMessage +- } +- if config.BatchConfig.ErrorMessage != "" { +- ErrTooManyBatchRequests.Message = config.BatchConfig.ErrorMessage +- } +- +- if config.SenderRateLimit.Enabled { +- if config.SenderRateLimit.Limit <= 0 { +- return nil, nil, errors.New("limit in sender_rate_limit must be > 0") +- } +- if time.Duration(config.SenderRateLimit.Interval) < time.Second { +- return nil, nil, errors.New("interval in sender_rate_limit must be >= 1s") +- } +- } +- +- maxConcurrentRPCs := config.Server.MaxConcurrentRPCs +- if maxConcurrentRPCs == 0 { +- maxConcurrentRPCs = math.MaxInt64 +- } +- rpcRequestSemaphore := semaphore.NewWeighted(maxConcurrentRPCs) +- +- backendNames := make([]string, 0) +- backendsByName := make(map[string]*Backend) +- for name, cfg := range config.Backends { +- opts := make([]BackendOpt, 0) +- +- rpcURL, err := ReadFromEnvOrConfig(cfg.RPCURL) +- if err != nil { +- return nil, nil, err +- } +- wsURL, err := ReadFromEnvOrConfig(cfg.WSURL) +- if err != nil { +- return nil, nil, err +- } +- if rpcURL == "" { +- return nil, nil, fmt.Errorf("must define an RPC URL for backend %s", name) +- } +- +- if config.BackendOptions.ResponseTimeoutSeconds != 0 { +- timeout := secondsToDuration(config.BackendOptions.ResponseTimeoutSeconds) +- opts = append(opts, WithTimeout(timeout)) +- } +- if config.BackendOptions.MaxRetries != 0 { +- opts = append(opts, WithMaxRetries(config.BackendOptions.MaxRetries)) +- } +- if config.BackendOptions.MaxResponseSizeBytes != 0 { +- opts = append(opts, WithMaxResponseSize(config.BackendOptions.MaxResponseSizeBytes)) +- } +- if config.BackendOptions.OutOfServiceSeconds != 0 { +- opts = append(opts, WithOutOfServiceDuration(secondsToDuration(config.BackendOptions.OutOfServiceSeconds))) +- } +- if config.BackendOptions.MaxDegradedLatencyThreshold > 0 { +- opts = append(opts, WithMaxDegradedLatencyThreshold(time.Duration(config.BackendOptions.MaxDegradedLatencyThreshold))) +- } +- if config.BackendOptions.MaxLatencyThreshold > 0 { +- opts = append(opts, WithMaxLatencyThreshold(time.Duration(config.BackendOptions.MaxLatencyThreshold))) +- } +- if config.BackendOptions.MaxErrorRateThreshold > 0 { +- opts = append(opts, WithMaxErrorRateThreshold(config.BackendOptions.MaxErrorRateThreshold)) +- } +- if cfg.MaxRPS != 0 { +- opts = append(opts, WithMaxRPS(cfg.MaxRPS)) +- } +- if cfg.MaxWSConns != 0 { +- opts = append(opts, WithMaxWSConns(cfg.MaxWSConns)) +- } +- if cfg.Password != "" { +- passwordVal, err := ReadFromEnvOrConfig(cfg.Password) +- if err != nil { +- return nil, nil, err +- } +- opts = append(opts, WithBasicAuth(cfg.Username, passwordVal)) +- } +- +- headers := map[string]string{} +- for headerName, headerValue := range cfg.Headers { +- headerValue, err := ReadFromEnvOrConfig(headerValue) +- if err != nil { +- return nil, nil, err +- } +- +- headers[headerName] = headerValue +- } +- opts = append(opts, WithHeaders(headers)) +- +- tlsConfig, err := configureBackendTLS(cfg) +- if err != nil { +- return nil, nil, err +- } +- if tlsConfig != nil { +- log.Info("using custom TLS config for backend", "name", name) +- opts = append(opts, WithTLSConfig(tlsConfig)) +- } +- if cfg.StripTrailingXFF { +- opts = append(opts, WithStrippedTrailingXFF()) +- } +- opts = append(opts, WithProxydIP(os.Getenv("PROXYD_IP"))) +- opts = append(opts, 
WithConsensusSkipPeerCountCheck(cfg.ConsensusSkipPeerCountCheck)) +- opts = append(opts, WithConsensusForcedCandidate(cfg.ConsensusForcedCandidate)) +- opts = append(opts, WithWeight(cfg.Weight)) +- +- receiptsTarget, err := ReadFromEnvOrConfig(cfg.ConsensusReceiptsTarget) +- if err != nil { +- return nil, nil, err +- } +- receiptsTarget, err = validateReceiptsTarget(receiptsTarget) +- if err != nil { +- return nil, nil, err +- } +- opts = append(opts, WithConsensusReceiptTarget(receiptsTarget)) +- +- back := NewBackend(name, rpcURL, wsURL, rpcRequestSemaphore, opts...) +- backendNames = append(backendNames, name) +- backendsByName[name] = back +- log.Info("configured backend", +- "name", name, +- "backend_names", backendNames, +- "rpc_url", rpcURL, +- "ws_url", wsURL) +- } +- +- backendGroups := make(map[string]*BackendGroup) +- for bgName, bg := range config.BackendGroups { +- backends := make([]*Backend, 0) +- fallbackBackends := make(map[string]bool) +- fallbackCount := 0 +- for _, bName := range bg.Backends { +- if backendsByName[bName] == nil { +- return nil, nil, fmt.Errorf("backend %s is not defined", bName) +- } +- backends = append(backends, backendsByName[bName]) +- +- for _, fb := range bg.Fallbacks { +- if bName == fb { +- fallbackBackends[bName] = true +- log.Info("configured backend as fallback", +- "backend_name", bName, +- "backend_group", bgName, +- ) +- fallbackCount++ +- } +- } +- +- if _, ok := fallbackBackends[bName]; !ok { +- fallbackBackends[bName] = false +- log.Info("configured backend as primary", +- "backend_name", bName, +- "backend_group", bgName, +- ) +- } +- } +- +- if fallbackCount != len(bg.Fallbacks) { +- return nil, nil, +- fmt.Errorf( +- "error: number of fallbacks instantiated (%d) did not match configured (%d) for backend group %s", +- fallbackCount, len(bg.Fallbacks), bgName, +- ) +- } +- +- backendGroups[bgName] = &BackendGroup{ +- Name: bgName, +- Backends: backends, +- WeightedRouting: bg.WeightedRouting, +- FallbackBackends: fallbackBackends, +- } +- } +- +- var wsBackendGroup *BackendGroup +- if config.WSBackendGroup != "" { +- wsBackendGroup = backendGroups[config.WSBackendGroup] +- if wsBackendGroup == nil { +- return nil, nil, fmt.Errorf("ws backend group %s does not exist", config.WSBackendGroup) +- } +- } +- +- if wsBackendGroup == nil && config.Server.WSPort != 0 { +- return nil, nil, fmt.Errorf("a ws port was defined, but no ws group was defined") +- } +- +- for _, bg := range config.RPCMethodMappings { +- if backendGroups[bg] == nil { +- return nil, nil, fmt.Errorf("undefined backend group %s", bg) +- } +- } +- +- var resolvedAuth map[string]string +- +- if config.Authentication != nil { +- resolvedAuth = make(map[string]string) +- for secret, alias := range config.Authentication { +- resolvedSecret, err := ReadFromEnvOrConfig(secret) +- if err != nil { +- return nil, nil, err +- } +- resolvedAuth[resolvedSecret] = alias +- } +- } +- +- var ( +- cache Cache +- rpcCache RPCCache +- ) +- if config.Cache.Enabled { +- if redisClient == nil { +- log.Warn("redis is not configured, using in-memory cache") +- cache = newMemoryCache() +- } else { +- ttl := defaultCacheTtl +- if config.Cache.TTL != 0 { +- ttl = time.Duration(config.Cache.TTL) +- } +- cache = newRedisCache(redisClient, config.Redis.Namespace, ttl) +- } +- rpcCache = newRPCCache(newCacheWithCompression(cache)) +- } +- +- srv, err := NewServer( +- backendGroups, +- wsBackendGroup, +- NewStringSetFromStrings(config.WSMethodWhitelist), +- config.RPCMethodMappings, +- 
config.Server.MaxBodySizeBytes, +- resolvedAuth, +- secondsToDuration(config.Server.TimeoutSeconds), +- config.Server.MaxUpstreamBatchSize, +- config.Server.EnableXServedByHeader, +- rpcCache, +- config.RateLimit, +- config.SenderRateLimit, +- config.Server.EnableRequestLog, +- config.Server.MaxRequestBodyLogLen, +- config.BatchConfig.MaxSize, +- redisClient, +- ) +- if err != nil { +- return nil, nil, fmt.Errorf("error creating server: %w", err) +- } +- +- // Enable to support browser websocket connections. +- // See https://pkg.go.dev/github.com/gorilla/websocket#hdr-Origin_Considerations +- if config.Server.AllowAllOrigins { +- srv.upgrader.CheckOrigin = func(r *http.Request) bool { +- return true +- } +- } +- +- if config.Metrics.Enabled { +- addr := fmt.Sprintf("%s:%d", config.Metrics.Host, config.Metrics.Port) +- log.Info("starting metrics server", "addr", addr) +- go func() { +- if err := http.ListenAndServe(addr, promhttp.Handler()); err != nil { +- log.Error("error starting metrics server", "err", err) +- } +- }() +- } +- +- // To allow integration tests to cleanly come up, wait +- // 10ms to give the below goroutines enough time to +- // encounter an error creating their servers +- errTimer := time.NewTimer(10 * time.Millisecond) +- +- if config.Server.RPCPort != 0 { +- go func() { +- if err := srv.RPCListenAndServe(config.Server.RPCHost, config.Server.RPCPort); err != nil { +- if errors.Is(err, http.ErrServerClosed) { +- log.Info("RPC server shut down") +- return +- } +- log.Crit("error starting RPC server", "err", err) +- } +- }() +- } +- +- if config.Server.WSPort != 0 { +- go func() { +- if err := srv.WSListenAndServe(config.Server.WSHost, config.Server.WSPort); err != nil { +- if errors.Is(err, http.ErrServerClosed) { +- log.Info("WS server shut down") +- return +- } +- log.Crit("error starting WS server", "err", err) +- } +- }() +- } else { +- log.Info("WS server not enabled (ws_port is set to 0)") +- } +- +- for bgName, bg := range backendGroups { +- bgcfg := config.BackendGroups[bgName] +- if bgcfg.ConsensusAware { +- log.Info("creating poller for consensus aware backend_group", "name", bgName) +- +- copts := make([]ConsensusOpt, 0) +- +- if bgcfg.ConsensusAsyncHandler == "noop" { +- copts = append(copts, WithAsyncHandler(NewNoopAsyncHandler())) +- } +- if bgcfg.ConsensusBanPeriod > 0 { +- copts = append(copts, WithBanPeriod(time.Duration(bgcfg.ConsensusBanPeriod))) +- } +- if bgcfg.ConsensusMaxUpdateThreshold > 0 { +- copts = append(copts, WithMaxUpdateThreshold(time.Duration(bgcfg.ConsensusMaxUpdateThreshold))) +- } +- if bgcfg.ConsensusMaxBlockLag > 0 { +- copts = append(copts, WithMaxBlockLag(bgcfg.ConsensusMaxBlockLag)) +- } +- if bgcfg.ConsensusMinPeerCount > 0 { +- copts = append(copts, WithMinPeerCount(uint64(bgcfg.ConsensusMinPeerCount))) +- } +- if bgcfg.ConsensusMaxBlockRange > 0 { +- copts = append(copts, WithMaxBlockRange(bgcfg.ConsensusMaxBlockRange)) +- } +- if bgcfg.ConsensusPollerInterval > 0 { +- copts = append(copts, WithPollerInterval(time.Duration(bgcfg.ConsensusPollerInterval))) +- } +- +- for _, be := range bgcfg.Backends { +- if fallback, ok := bg.FallbackBackends[be]; !ok { +- log.Crit("error backend not found in backend fallback configurations", "backend_name", be) +- } else { +- log.Debug("configuring new backend for group", "backend_group", bgName, "backend_name", be, "fallback", fallback) +- RecordBackendGroupFallbacks(bg, be, fallback) +- } +- } +- +- var tracker ConsensusTracker +- if bgcfg.ConsensusHA { +- if bgcfg.ConsensusHARedis.URL == 
"" { +- log.Crit("must specify a consensus_ha_redis config when consensus_ha is true") +- } +- topts := make([]RedisConsensusTrackerOpt, 0) +- if bgcfg.ConsensusHALockPeriod > 0 { +- topts = append(topts, WithLockPeriod(time.Duration(bgcfg.ConsensusHALockPeriod))) +- } +- if bgcfg.ConsensusHAHeartbeatInterval > 0 { +- topts = append(topts, WithHeartbeatInterval(time.Duration(bgcfg.ConsensusHAHeartbeatInterval))) +- } +- consensusHARedisClient, err := NewRedisClient(bgcfg.ConsensusHARedis.URL) +- if err != nil { +- return nil, nil, err +- } +- ns := fmt.Sprintf("%s:%s", bgcfg.ConsensusHARedis.Namespace, bg.Name) +- tracker = NewRedisConsensusTracker(context.Background(), consensusHARedisClient, bg, ns, topts...) +- copts = append(copts, WithTracker(tracker)) +- } +- +- cp := NewConsensusPoller(bg, copts...) +- bg.Consensus = cp +- +- if bgcfg.ConsensusHA { +- tracker.(*RedisConsensusTracker).Init() +- } +- } +- } +- +- <-errTimer.C +- log.Info("started proxyd") +- +- shutdownFunc := func() { +- log.Info("shutting down proxyd") +- srv.Shutdown() +- log.Info("goodbye") +- } +- +- return srv, shutdownFunc, nil +-} +- +-func validateReceiptsTarget(val string) (string, error) { +- if val == "" { +- val = ReceiptsTargetDebugGetRawReceipts +- } +- switch val { +- case ReceiptsTargetDebugGetRawReceipts, +- ReceiptsTargetAlchemyGetTransactionReceipts, +- ReceiptsTargetEthGetTransactionReceipts, +- ReceiptsTargetParityGetTransactionReceipts: +- return val, nil +- default: +- return "", fmt.Errorf("invalid receipts target: %s", val) +- } +-} +- +-func secondsToDuration(seconds int) time.Duration { +- return time.Duration(seconds) * time.Second +-} +- +-func configureBackendTLS(cfg *BackendConfig) (*tls.Config, error) { +- if cfg.CAFile == "" { +- return nil, nil +- } +- +- tlsConfig, err := CreateTLSClient(cfg.CAFile) +- if err != nil { +- return nil, err +- } +- +- if cfg.ClientCertFile != "" && cfg.ClientKeyFile != "" { +- cert, err := ParseKeyPair(cfg.ClientCertFile, cfg.ClientKeyFile) +- if err != nil { +- return nil, err +- } +- tlsConfig.Certificates = []tls.Certificate{cert} +- } +- +- return tlsConfig, nil +-}
@@ -7502,13 +34893,13 @@
- (new)
+ OP
- CELO
+ (deleted)
@@ -7518,45 +34909,60 @@
-
+15
-
-0
+
+0
+
-32
-
diff --git OP/packages/contracts-bedrock/snapshots/storageLayout/Freezable.json CELO/packages/contracts-bedrock/snapshots/storageLayout/Freezable.json -new file mode 100644 -index 0000000000000000000000000000000000000000..fb89bbc7e1ab3904137e39358de306a828c60dac ---- /dev/null -+++ CELO/packages/contracts-bedrock/snapshots/storageLayout/Freezable.json -@@ -0,0 +1,16 @@ -+[ -+ { -+ "bytes": "20", -+ "label": "_owner", -+ "offset": 0, -+ "slot": "0", -+ "type": "address" -+ }, -+ { -+ "bytes": "20", -+ "label": "registry", -+ "offset": 0, -+ "slot": "1", -+ "type": "contract ICeloRegistry" -+ } -+] -\ No newline at end of file
+
diff --git OP/proxyd/reader.go CELO/proxyd/reader.go +deleted file mode 100644 +index b16301f1f0870494e824e874dfa91fe6a9d320d3..0000000000000000000000000000000000000000 +--- OP/proxyd/reader.go ++++ /dev/null +@@ -1,32 +0,0 @@ +-package proxyd +- +-import ( +- "errors" +- "io" +-) +- +-var ErrLimitReaderOverLimit = errors.New("over read limit") +- +-func LimitReader(r io.Reader, n int64) io.Reader { return &LimitedReader{r, n} } +- +-// A LimitedReader reads from R but limits the amount of +-// data returned to just N bytes. Each call to Read +-// updates N to reflect the new amount remaining. +-// Unlike the standard library version, Read returns +-// ErrLimitReaderOverLimit when N <= 0. +-type LimitedReader struct { +- R io.Reader // underlying reader +- N int64 // max bytes remaining +-} +- +-func (l *LimitedReader) Read(p []byte) (int, error) { +- if l.N <= 0 { +- return 0, ErrLimitReaderOverLimit +- } +- if int64(len(p)) > l.N { +- p = p[0:l.N] +- } +- n, err := l.R.Read(p) +- l.N -= int64(n) +- return n, err +-}
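
The point of this `LimitedReader` variant is that exceeding the limit is an error (`ErrLimitReaderOverLimit`) rather than the silent truncation you get from the standard library's `io.LimitReader`, so an oversized request body can be rejected instead of partially processed. A usage sketch; the reader is copied from the diff so the example runs standalone:

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

// Copied from the diff above so the example runs standalone.
var ErrLimitReaderOverLimit = errors.New("over read limit")

type LimitedReader struct {
	R io.Reader // underlying reader
	N int64     // max bytes remaining
}

func LimitReader(r io.Reader, n int64) io.Reader { return &LimitedReader{r, n} }

func (l *LimitedReader) Read(p []byte) (int, error) {
	if l.N <= 0 {
		return 0, ErrLimitReaderOverLimit
	}
	if int64(len(p)) > l.N {
		p = p[0:l.N]
	}
	n, err := l.R.Read(p)
	l.N -= int64(n)
	return n, err
}

func main() {
	// io.ReadAll surfaces the over-limit error along with the bytes
	// read so far, instead of silently stopping at the limit.
	r := LimitReader(strings.NewReader("0123456789"), 8)
	out, err := io.ReadAll(r)
	fmt.Printf("read %q, err = %v\n", out, err) // read "01234567", err = over read limit
}
```
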
@@ -7565,13 +34971,13 @@
- (new)
+ OP
- CELO
+ (deleted)
@@ -7581,66 +34987,71 @@
-
+36
-
-0
+
+0
+
-43
-
diff --git OP/packages/contracts-bedrock/snapshots/storageLayout/GoldToken.json CELO/packages/contracts-bedrock/snapshots/storageLayout/GoldToken.json -new file mode 100644 -index 0000000000000000000000000000000000000000..67b349856d86cdaab5dd67f9e9e413210d44ce63 ---- /dev/null -+++ CELO/packages/contracts-bedrock/snapshots/storageLayout/GoldToken.json -@@ -0,0 +1,37 @@ -+[ -+ { -+ "bytes": "1", -+ "label": "initialized", -+ "offset": 0, -+ "slot": "0", -+ "type": "bool" -+ }, -+ { -+ "bytes": "20", -+ "label": "_owner", -+ "offset": 1, -+ "slot": "0", -+ "type": "address" -+ }, -+ { -+ "bytes": "20", -+ "label": "registry", -+ "offset": 0, -+ "slot": "1", -+ "type": "contract ICeloRegistry" -+ }, -+ { -+ "bytes": "32", -+ "label": "totalSupply_", -+ "offset": 0, -+ "slot": "2", -+ "type": "uint256" -+ }, -+ { -+ "bytes": "32", -+ "label": "allowed", -+ "offset": 0, -+ "slot": "3", -+ "type": "mapping(address => mapping(address => uint256))" -+ } -+] -\ No newline at end of file
+
diff --git OP/proxyd/reader_test.go CELO/proxyd/reader_test.go +deleted file mode 100644 +index 2ee23456edfc1d1ced04ea7dde0018063d679f48..0000000000000000000000000000000000000000 +--- OP/proxyd/reader_test.go ++++ /dev/null +@@ -1,43 +0,0 @@ +-package proxyd +- +-import ( +- "github.com/stretchr/testify/require" +- "io" +- "strings" +- "testing" +-) +- +-func TestLimitReader(t *testing.T) { +- data := "hellohellohellohello" +- r := LimitReader(strings.NewReader(data), 3) +- buf := make([]byte, 3) +- +- // Buffer reads OK +- n, err := r.Read(buf) +- require.NoError(t, err) +- require.Equal(t, 3, n) +- +- // Buffer is over limit +- n, err = r.Read(buf) +- require.Equal(t, ErrLimitReaderOverLimit, err) +- require.Equal(t, 0, n) +- +- // Buffer on initial read is over size +- buf = make([]byte, 16) +- r = LimitReader(strings.NewReader(data), 3) +- n, err = r.Read(buf) +- require.NoError(t, err) +- require.Equal(t, 3, n) +- +- // test with read all where the limit is less than the data +- r = LimitReader(strings.NewReader(data), 3) +- out, err := io.ReadAll(r) +- require.Equal(t, ErrLimitReaderOverLimit, err) +- require.Equal(t, "hel", string(out)) +- +- // test with read all where the limit is more than the data +- r = LimitReader(strings.NewReader(data), 21) +- out, err = io.ReadAll(r) +- require.NoError(t, err) +- require.Equal(t, data, string(out)) +-}
@@ -7649,13 +35060,13 @@
- (new)
+ OP
- CELO
+ (deleted)
@@ -7665,38 +35076,50 @@
-
+8
-
-0
+
+0
+
-22
-
diff --git OP/packages/contracts-bedrock/snapshots/storageLayout/Initializable.json CELO/packages/contracts-bedrock/snapshots/storageLayout/Initializable.json -new file mode 100644 -index 0000000000000000000000000000000000000000..b29972a4de8eb134c79b8e19e36619de89bfeb4b ---- /dev/null -+++ CELO/packages/contracts-bedrock/snapshots/storageLayout/Initializable.json -@@ -0,0 +1,9 @@ -+[ -+ { -+ "bytes": "1", -+ "label": "initialized", -+ "offset": 0, -+ "slot": "0", -+ "type": "bool" -+ } -+] -\ No newline at end of file
+
diff --git OP/proxyd/redis.go CELO/proxyd/redis.go +deleted file mode 100644 +index bd15f527f9b7d87ee5a6a433ba9f6dfac257a4a0..0000000000000000000000000000000000000000 +--- OP/proxyd/redis.go ++++ /dev/null +@@ -1,22 +0,0 @@ +-package proxyd +- +-import ( +- "context" +- "time" +- +- "github.com/redis/go-redis/v9" +-) +- +-func NewRedisClient(url string) (*redis.Client, error) { +- opts, err := redis.ParseURL(url) +- if err != nil { +- return nil, err +- } +- client := redis.NewClient(opts) +- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) +- defer cancel() +- if err := client.Ping(ctx).Err(); err != nil { +- return nil, wrapErr(err, "error connecting to redis") +- } +- return client, nil +-}
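
The deleted `redis.go` is small but establishes a useful convention: parse the connection string with `redis.ParseURL` and ping with a bounded context before handing the client out, so a malformed URL or unreachable server fails at startup rather than on first use. A sketch under an assumed local server; the URL is illustrative:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/redis/go-redis/v9"
)

func main() {
	// URL is illustrative; go-redis accepts redis:// and rediss:// URLs
	// with optional credentials and database index.
	opts, err := redis.ParseURL("redis://localhost:6379/0")
	if err != nil {
		panic(err)
	}
	client := redis.NewClient(opts)

	// Bounded ping so an unreachable server fails fast at startup.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := client.Ping(ctx).Err(); err != nil {
		fmt.Println("error connecting to redis:", err)
		return
	}
	fmt.Println("connected")
}
```
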
@@ -7705,13 +35128,13 @@
- (new)
+ OP
- CELO
+ (deleted)
@@ -7721,59 +35144,338 @@
-
+29
-
-0
+
+0
+
-310
-
diff --git OP/packages/contracts-bedrock/snapshots/storageLayout/MentoFeeHandlerSeller.json CELO/packages/contracts-bedrock/snapshots/storageLayout/MentoFeeHandlerSeller.json -new file mode 100644 -index 0000000000000000000000000000000000000000..a66c44056e6d0350f83d4ee520bafeda4d5c2a58 ---- /dev/null -+++ CELO/packages/contracts-bedrock/snapshots/storageLayout/MentoFeeHandlerSeller.json -@@ -0,0 +1,30 @@ -+[ -+ { -+ "bytes": "20", -+ "label": "_owner", -+ "offset": 0, -+ "slot": "0", -+ "type": "address" -+ }, -+ { -+ "bytes": "1", -+ "label": "initialized", -+ "offset": 20, -+ "slot": "0", -+ "type": "bool" -+ }, -+ { -+ "bytes": "20", -+ "label": "registry", -+ "offset": 0, -+ "slot": "1", -+ "type": "contract ICeloRegistry" -+ }, -+ { -+ "bytes": "32", -+ "label": "minimumReports", -+ "offset": 0, -+ "slot": "2", -+ "type": "mapping(address => uint256)" -+ } -+] -\ No newline at end of file
+
diff --git OP/proxyd/rewriter.go CELO/proxyd/rewriter.go +deleted file mode 100644 +index 605787eff3128121269f3d072e601503cd728eba..0000000000000000000000000000000000000000 +--- OP/proxyd/rewriter.go ++++ /dev/null +@@ -1,310 +0,0 @@ +-package proxyd +- +-import ( +- "encoding/json" +- "errors" +- +- "github.com/ethereum/go-ethereum/common/hexutil" +- "github.com/ethereum/go-ethereum/rpc" +-) +- +-type RewriteContext struct { +- latest hexutil.Uint64 +- safe hexutil.Uint64 +- finalized hexutil.Uint64 +- maxBlockRange uint64 +-} +- +-type RewriteResult uint8 +- +-const ( +- // RewriteNone means request should be forwarded as-is +- RewriteNone RewriteResult = iota +- +- // RewriteOverrideError means there was an error attempting to rewrite +- RewriteOverrideError +- +- // RewriteOverrideRequest means the modified request should be forwarded to the backend +- RewriteOverrideRequest +- +- // RewriteOverrideResponse means to skip calling the backend and serve the overridden response +- RewriteOverrideResponse +-) +- +-var ( +- ErrRewriteBlockOutOfRange = errors.New("block is out of range") +- ErrRewriteRangeTooLarge = errors.New("block range is too large") +-) +- +-// RewriteTags modifies the request and the response based on block tags +-func RewriteTags(rctx RewriteContext, req *RPCReq, res *RPCRes) (RewriteResult, error) { +- rw, err := RewriteResponse(rctx, req, res) +- if rw == RewriteOverrideResponse { +- return rw, err +- } +- return RewriteRequest(rctx, req, res) +-} +- +-// RewriteResponse modifies the response object to comply with the rewrite context +-// after the method has been called at the backend +-// RewriteResult informs the decision of the rewrite +-func RewriteResponse(rctx RewriteContext, req *RPCReq, res *RPCRes) (RewriteResult, error) { +- switch req.Method { +- case "eth_blockNumber": +- res.Result = rctx.latest +- return RewriteOverrideResponse, nil +- } +- return RewriteNone, nil +-} +- +-// RewriteRequest modifies the request object to comply with the rewrite context +-// before the method has been called at the backend +-// it returns false if nothing was changed +-func RewriteRequest(rctx RewriteContext, req *RPCReq, res *RPCRes) (RewriteResult, error) { +- switch req.Method { +- case "eth_getLogs", +- "eth_newFilter": +- return rewriteRange(rctx, req, res, 0) +- case "debug_getRawReceipts", "consensus_getReceipts": +- return rewriteParam(rctx, req, res, 0, true, false) +- case "eth_getBalance", +- "eth_getCode", +- "eth_getTransactionCount", +- "eth_call": +- return rewriteParam(rctx, req, res, 1, false, true) +- case "eth_getStorageAt", +- "eth_getProof": +- return rewriteParam(rctx, req, res, 2, false, true) +- case "eth_getBlockTransactionCountByNumber", +- "eth_getUncleCountByBlockNumber", +- "eth_getBlockByNumber", +- "eth_getTransactionByBlockNumberAndIndex", +- "eth_getUncleByBlockNumberAndIndex": +- return rewriteParam(rctx, req, res, 0, false, false) +- } +- return RewriteNone, nil +-} +- +-func rewriteParam(rctx RewriteContext, req *RPCReq, res *RPCRes, pos int, required bool, blockNrOrHash bool) (RewriteResult, error) { +- var p []interface{} +- err := json.Unmarshal(req.Params, &p) +- if err != nil { +- return RewriteOverrideError, err +- } +- +- // we assume latest if the param is missing, +- // and we don't rewrite if there is not enough params +- if len(p) == pos && !required { +- p = append(p, "latest") +- } else if len(p) <= pos { +- return RewriteNone, nil +- } +- +- // support for https://eips.ethereum.org/EIPS/eip-1898 +- var val interface{} +- 
var rw bool +- if blockNrOrHash { +- bnh, err := remarshalBlockNumberOrHash(p[pos]) +- if err != nil { +- // fallback to string +- s, ok := p[pos].(string) +- if ok { +- val, rw, err = rewriteTag(rctx, s) +- if err != nil { +- return RewriteOverrideError, err +- } +- } else { +- return RewriteOverrideError, errors.New("expected BlockNumberOrHash or string") +- } +- } else { +- val, rw, err = rewriteTagBlockNumberOrHash(rctx, bnh) +- if err != nil { +- return RewriteOverrideError, err +- } +- } +- } else { +- s, ok := p[pos].(string) +- if !ok { +- return RewriteOverrideError, errors.New("expected string") +- } +- +- val, rw, err = rewriteTag(rctx, s) +- if err != nil { +- return RewriteOverrideError, err +- } +- } +- +- if rw { +- p[pos] = val +- paramRaw, err := json.Marshal(p) +- if err != nil { +- return RewriteOverrideError, err +- } +- req.Params = paramRaw +- return RewriteOverrideRequest, nil +- } +- return RewriteNone, nil +-} +- +-func rewriteRange(rctx RewriteContext, req *RPCReq, res *RPCRes, pos int) (RewriteResult, error) { +- var p []map[string]interface{} +- err := json.Unmarshal(req.Params, &p) +- if err != nil { +- return RewriteOverrideError, err +- } +- +- // if either fromBlock or toBlock is defined, default the other to "latest" if unset +- _, hasFrom := p[pos]["fromBlock"] +- _, hasTo := p[pos]["toBlock"] +- if hasFrom && !hasTo { +- p[pos]["toBlock"] = "latest" +- } else if hasTo && !hasFrom { +- p[pos]["fromBlock"] = "latest" +- } +- +- modifiedFrom, err := rewriteTagMap(rctx, p[pos], "fromBlock") +- if err != nil { +- return RewriteOverrideError, err +- } +- +- modifiedTo, err := rewriteTagMap(rctx, p[pos], "toBlock") +- if err != nil { +- return RewriteOverrideError, err +- } +- +- if rctx.maxBlockRange > 0 && (hasFrom || hasTo) { +- from, err := blockNumber(p[pos], "fromBlock", uint64(rctx.latest)) +- if err != nil { +- return RewriteOverrideError, err +- } +- to, err := blockNumber(p[pos], "toBlock", uint64(rctx.latest)) +- if err != nil { +- return RewriteOverrideError, err +- } +- if to-from > rctx.maxBlockRange { +- return RewriteOverrideError, ErrRewriteRangeTooLarge +- } +- } +- +- // if any of the fields the request have been changed, re-marshal the params +- if modifiedFrom || modifiedTo { +- paramsRaw, err := json.Marshal(p) +- req.Params = paramsRaw +- if err != nil { +- return RewriteOverrideError, err +- } +- return RewriteOverrideRequest, nil +- } +- +- return RewriteNone, nil +-} +- +-func blockNumber(m map[string]interface{}, key string, latest uint64) (uint64, error) { +- current, ok := m[key].(string) +- if !ok { +- return 0, errors.New("expected string") +- } +- // the latest/safe/finalized tags are already replaced by rewriteTag +- if current == "earliest" { +- return 0, nil +- } +- if current == "pending" { +- return latest + 1, nil +- } +- return hexutil.DecodeUint64(current) +-} +- +-func rewriteTagMap(rctx RewriteContext, m map[string]interface{}, key string) (bool, error) { +- if m[key] == nil || m[key] == "" { +- return false, nil +- } +- +- current, ok := m[key].(string) +- if !ok { +- return false, errors.New("expected string") +- } +- +- val, rw, err := rewriteTag(rctx, current) +- if err != nil { +- return false, err +- } +- if rw { +- m[key] = val +- return true, nil +- } +- +- return false, nil +-} +- +-func remarshalBlockNumberOrHash(current interface{}) (*rpc.BlockNumberOrHash, error) { +- jv, err := json.Marshal(current) +- if err != nil { +- return nil, err +- } +- +- var bnh rpc.BlockNumberOrHash +- err = bnh.UnmarshalJSON(jv) 
+- if err != nil { +- return nil, err +- } +- +- return &bnh, nil +-} +- +-func rewriteTag(rctx RewriteContext, current string) (string, bool, error) { +- bnh, err := remarshalBlockNumberOrHash(current) +- if err != nil { +- return "", false, err +- } +- +- // this is a hash, not a block +- if bnh.BlockNumber == nil { +- return current, false, nil +- } +- +- switch *bnh.BlockNumber { +- case rpc.PendingBlockNumber, +- rpc.EarliestBlockNumber: +- return current, false, nil +- case rpc.FinalizedBlockNumber: +- return rctx.finalized.String(), true, nil +- case rpc.SafeBlockNumber: +- return rctx.safe.String(), true, nil +- case rpc.LatestBlockNumber: +- return rctx.latest.String(), true, nil +- default: +- if bnh.BlockNumber.Int64() > int64(rctx.latest) { +- return "", false, ErrRewriteBlockOutOfRange +- } +- } +- +- return current, false, nil +-} +- +-func rewriteTagBlockNumberOrHash(rctx RewriteContext, current *rpc.BlockNumberOrHash) (*rpc.BlockNumberOrHash, bool, error) { +- // this is a hash, not a block number +- if current.BlockNumber == nil { +- return current, false, nil +- } +- +- switch *current.BlockNumber { +- case rpc.PendingBlockNumber, +- rpc.EarliestBlockNumber: +- return current, false, nil +- case rpc.FinalizedBlockNumber: +- bn := rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(rctx.finalized)) +- return &bn, true, nil +- case rpc.SafeBlockNumber: +- bn := rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(rctx.safe)) +- return &bn, true, nil +- case rpc.LatestBlockNumber: +- bn := rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(rctx.latest)) +- return &bn, true, nil +- default: +- if current.BlockNumber.Int64() > int64(rctx.latest) { +- return nil, false, ErrRewriteBlockOutOfRange +- } +- } +- +- return current, false, nil +-}
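
The heart of the deleted rewriter is `rewriteTag`: named tags (`latest`, `safe`, `finalized`) collapse to the block numbers the consensus poller agreed on, `earliest` and `pending` pass through unchanged, and explicit numbers above the consensus `latest` are rejected with `ErrRewriteBlockOutOfRange`. A simplified standalone restatement of that rule for plain string tags; the real code routes through `rpc.BlockNumberOrHash` and so also honors EIP-1898 objects and block hashes:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

var errBlockOutOfRange = errors.New("block is out of range")

// Simplified restatement of the deleted rewriteTag for plain string tags.
func rewriteTag(latest, safe, finalized hexutil.Uint64, tag string) (string, error) {
	switch tag {
	case "latest":
		return latest.String(), nil
	case "safe":
		return safe.String(), nil
	case "finalized":
		return finalized.String(), nil
	case "earliest", "pending":
		return tag, nil // forwarded unchanged, as in the diff above
	}
	n, err := hexutil.DecodeUint64(tag)
	if err != nil {
		return "", err
	}
	if n > uint64(latest) {
		return "", errBlockOutOfRange
	}
	return tag, nil
}

func main() {
	out, _ := rewriteTag(100, 50, 40, "finalized")
	fmt.Println(out) // 0x28
	_, err := rewriteTag(100, 50, 40, hexutil.Uint64(111).String())
	fmt.Println(err) // block is out of range
}
```
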
@@ -7782,13 +35484,13 @@
- (new)
+ OP
- CELO
+ (deleted)
@@ -7798,59 +35500,745 @@
-
+29
-
-0
+
+0
+
-717
-
diff --git OP/packages/contracts-bedrock/snapshots/storageLayout/MockSortedOracles.json CELO/packages/contracts-bedrock/snapshots/storageLayout/MockSortedOracles.json -new file mode 100644 -index 0000000000000000000000000000000000000000..c44ef116af9505417a194688daf746a4c58cdcff ---- /dev/null -+++ CELO/packages/contracts-bedrock/snapshots/storageLayout/MockSortedOracles.json -@@ -0,0 +1,30 @@ -+[ -+ { -+ "bytes": "32", -+ "label": "numerators", -+ "offset": 0, -+ "slot": "0", -+ "type": "mapping(address => uint256)" -+ }, -+ { -+ "bytes": "32", -+ "label": "medianTimestamp", -+ "offset": 0, -+ "slot": "1", -+ "type": "mapping(address => uint256)" -+ }, -+ { -+ "bytes": "32", -+ "label": "numRates", -+ "offset": 0, -+ "slot": "2", -+ "type": "mapping(address => uint256)" -+ }, -+ { -+ "bytes": "32", -+ "label": "expired", -+ "offset": 0, -+ "slot": "3", -+ "type": "mapping(address => bool)" -+ } -+] -\ No newline at end of file
+
diff --git OP/proxyd/rewriter_test.go CELO/proxyd/rewriter_test.go +deleted file mode 100644 +index 1f0d80ba25c99f7d39988dd216a20e1aecd2c46b..0000000000000000000000000000000000000000 +--- OP/proxyd/rewriter_test.go ++++ /dev/null +@@ -1,717 +0,0 @@ +-package proxyd +- +-import ( +- "encoding/json" +- "strings" +- "testing" +- +- "github.com/ethereum/go-ethereum/common" +- "github.com/ethereum/go-ethereum/common/hexutil" +- "github.com/ethereum/go-ethereum/rpc" +- "github.com/stretchr/testify/require" +-) +- +-type args struct { +- rctx RewriteContext +- req *RPCReq +- res *RPCRes +-} +- +-type rewriteTest struct { +- name string +- args args +- expected RewriteResult +- expectedErr error +- check func(*testing.T, args) +-} +- +-func TestRewriteRequest(t *testing.T) { +- tests := []rewriteTest{ +- /* range scoped */ +- { +- name: "eth_getLogs fromBlock latest", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"fromBlock": "latest"}})}, +- res: nil, +- }, +- expected: RewriteOverrideRequest, +- check: func(t *testing.T, args args) { +- var p []map[string]interface{} +- err := json.Unmarshal(args.req.Params, &p) +- require.Nil(t, err) +- require.Equal(t, hexutil.Uint64(100).String(), p[0]["fromBlock"]) +- }, +- }, +- { +- name: "eth_getLogs fromBlock within range", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"fromBlock": hexutil.Uint64(55).String()}})}, +- res: nil, +- }, +- expected: RewriteOverrideRequest, +- check: func(t *testing.T, args args) { +- var p []map[string]interface{} +- err := json.Unmarshal(args.req.Params, &p) +- require.Nil(t, err) +- require.Equal(t, hexutil.Uint64(55).String(), p[0]["fromBlock"]) +- }, +- }, +- { +- name: "eth_getLogs fromBlock out of range", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"fromBlock": hexutil.Uint64(111).String()}})}, +- res: nil, +- }, +- expected: RewriteOverrideError, +- expectedErr: ErrRewriteBlockOutOfRange, +- }, +- { +- name: "eth_getLogs toBlock latest", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"toBlock": "latest"}})}, +- res: nil, +- }, +- expected: RewriteOverrideRequest, +- check: func(t *testing.T, args args) { +- var p []map[string]interface{} +- err := json.Unmarshal(args.req.Params, &p) +- require.Nil(t, err) +- require.Equal(t, hexutil.Uint64(100).String(), p[0]["toBlock"]) +- }, +- }, +- { +- name: "eth_getLogs toBlock within range", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"toBlock": hexutil.Uint64(55).String()}})}, +- res: nil, +- }, +- expected: RewriteOverrideRequest, +- check: func(t *testing.T, args args) { +- var p []map[string]interface{} +- err := json.Unmarshal(args.req.Params, &p) +- require.Nil(t, err) +- require.Equal(t, hexutil.Uint64(55).String(), p[0]["toBlock"]) +- }, +- }, +- { +- name: "eth_getLogs toBlock out of range", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"toBlock": hexutil.Uint64(111).String()}})}, +- res: nil, +- }, +- 
expected: RewriteOverrideError, +- expectedErr: ErrRewriteBlockOutOfRange, +- }, +- { +- name: "eth_getLogs fromBlock, toBlock latest", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"fromBlock": "latest", "toBlock": "latest"}})}, +- res: nil, +- }, +- expected: RewriteOverrideRequest, +- check: func(t *testing.T, args args) { +- var p []map[string]interface{} +- err := json.Unmarshal(args.req.Params, &p) +- require.Nil(t, err) +- require.Equal(t, hexutil.Uint64(100).String(), p[0]["fromBlock"]) +- require.Equal(t, hexutil.Uint64(100).String(), p[0]["toBlock"]) +- }, +- }, +- { +- name: "eth_getLogs fromBlock, toBlock within range", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"fromBlock": hexutil.Uint64(55).String(), "toBlock": hexutil.Uint64(77).String()}})}, +- res: nil, +- }, +- expected: RewriteNone, +- check: func(t *testing.T, args args) { +- var p []map[string]interface{} +- err := json.Unmarshal(args.req.Params, &p) +- require.Nil(t, err) +- require.Equal(t, hexutil.Uint64(55).String(), p[0]["fromBlock"]) +- require.Equal(t, hexutil.Uint64(77).String(), p[0]["toBlock"]) +- }, +- }, +- { +- name: "eth_getLogs fromBlock, toBlock out of range", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"fromBlock": hexutil.Uint64(111).String(), "toBlock": hexutil.Uint64(222).String()}})}, +- res: nil, +- }, +- expected: RewriteOverrideError, +- expectedErr: ErrRewriteBlockOutOfRange, +- }, +- { +- name: "eth_getLogs fromBlock -> toBlock above max range", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100), maxBlockRange: 30}, +- req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"fromBlock": hexutil.Uint64(20).String(), "toBlock": hexutil.Uint64(80).String()}})}, +- res: nil, +- }, +- expected: RewriteOverrideError, +- expectedErr: ErrRewriteRangeTooLarge, +- }, +- { +- name: "eth_getLogs earliest -> latest above max range", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100), maxBlockRange: 30}, +- req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"fromBlock": "earliest", "toBlock": "latest"}})}, +- res: nil, +- }, +- expected: RewriteOverrideError, +- expectedErr: ErrRewriteRangeTooLarge, +- }, +- { +- name: "eth_getLogs earliest -> pending above max range", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100), maxBlockRange: 30}, +- req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"fromBlock": "earliest", "toBlock": "pending"}})}, +- res: nil, +- }, +- expected: RewriteOverrideError, +- expectedErr: ErrRewriteRangeTooLarge, +- }, +- { +- name: "eth_getLogs earliest -> default above max range", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100), maxBlockRange: 30}, +- req: &RPCReq{Method: "eth_getLogs", Params: mustMarshalJSON([]map[string]interface{}{{"fromBlock": "earliest"}})}, +- res: nil, +- }, +- expected: RewriteOverrideError, +- expectedErr: ErrRewriteRangeTooLarge, +- }, +- { +- name: "eth_getLogs default -> latest within range", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100), maxBlockRange: 30}, +- req: &RPCReq{Method: "eth_getLogs", Params: 
mustMarshalJSON([]map[string]interface{}{{"toBlock": "latest"}})}, +- res: nil, +- }, +- expected: RewriteOverrideRequest, +- check: func(t *testing.T, args args) { +- var p []map[string]interface{} +- err := json.Unmarshal(args.req.Params, &p) +- require.Nil(t, err) +- require.Equal(t, hexutil.Uint64(100).String(), p[0]["fromBlock"]) +- require.Equal(t, hexutil.Uint64(100).String(), p[0]["toBlock"]) +- }, +- }, +- /* required parameter at pos 0 */ +- { +- name: "debug_getRawReceipts latest", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "debug_getRawReceipts", Params: mustMarshalJSON([]string{"latest"})}, +- res: nil, +- }, +- expected: RewriteOverrideRequest, +- check: func(t *testing.T, args args) { +- var p []string +- err := json.Unmarshal(args.req.Params, &p) +- require.Nil(t, err) +- require.Equal(t, 1, len(p)) +- require.Equal(t, hexutil.Uint64(100).String(), p[0]) +- }, +- }, +- { +- name: "debug_getRawReceipts within range", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "debug_getRawReceipts", Params: mustMarshalJSON([]string{hexutil.Uint64(55).String()})}, +- res: nil, +- }, +- expected: RewriteNone, +- check: func(t *testing.T, args args) { +- var p []string +- err := json.Unmarshal(args.req.Params, &p) +- require.Nil(t, err) +- require.Equal(t, 1, len(p)) +- require.Equal(t, hexutil.Uint64(55).String(), p[0]) +- }, +- }, +- { +- name: "debug_getRawReceipts out of range", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "debug_getRawReceipts", Params: mustMarshalJSON([]string{hexutil.Uint64(111).String()})}, +- res: nil, +- }, +- expected: RewriteOverrideError, +- expectedErr: ErrRewriteBlockOutOfRange, +- }, +- { +- name: "debug_getRawReceipts missing parameter", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "debug_getRawReceipts", Params: mustMarshalJSON([]string{})}, +- res: nil, +- }, +- expected: RewriteNone, +- }, +- { +- name: "debug_getRawReceipts with block hash", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "debug_getRawReceipts", Params: mustMarshalJSON([]string{"0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b"})}, +- res: nil, +- }, +- expected: RewriteNone, +- check: func(t *testing.T, args args) { +- var p []string +- err := json.Unmarshal(args.req.Params, &p) +- require.Nil(t, err) +- require.Equal(t, 1, len(p)) +- require.Equal(t, "0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b", p[0]) +- }, +- }, +- /* default block parameter */ +- { +- name: "eth_getCode omit block, should add", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "eth_getCode", Params: mustMarshalJSON([]string{"0x123"})}, +- res: nil, +- }, +- expected: RewriteOverrideRequest, +- check: func(t *testing.T, args args) { +- var p []interface{} +- err := json.Unmarshal(args.req.Params, &p) +- require.Nil(t, err) +- require.Equal(t, 2, len(p)) +- require.Equal(t, "0x123", p[0]) +- bnh, err := remarshalBlockNumberOrHash(p[1]) +- require.Nil(t, err) +- require.Equal(t, rpc.BlockNumberOrHashWithNumber(100), *bnh) +- }, +- }, +- { +- name: "eth_getCode not enough params, should do nothing", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "eth_getCode", Params: mustMarshalJSON([]string{})}, +- res: nil, +- }, +- expected: RewriteNone, +- check: func(t 
*testing.T, args args) { +- var p []string +- err := json.Unmarshal(args.req.Params, &p) +- require.Nil(t, err) +- require.Equal(t, 0, len(p)) +- }, +- }, +- { +- name: "eth_getCode latest", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "eth_getCode", Params: mustMarshalJSON([]string{"0x123", "latest"})}, +- res: nil, +- }, +- expected: RewriteOverrideRequest, +- check: func(t *testing.T, args args) { +- var p []interface{} +- err := json.Unmarshal(args.req.Params, &p) +- require.Nil(t, err) +- require.Equal(t, 2, len(p)) +- require.Equal(t, "0x123", p[0]) +- bnh, err := remarshalBlockNumberOrHash(p[1]) +- require.Nil(t, err) +- require.Equal(t, rpc.BlockNumberOrHashWithNumber(100), *bnh) +- }, +- }, +- { +- name: "eth_getCode within range", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "eth_getCode", Params: mustMarshalJSON([]string{"0x123", hexutil.Uint64(55).String()})}, +- res: nil, +- }, +- expected: RewriteNone, +- check: func(t *testing.T, args args) { +- var p []string +- err := json.Unmarshal(args.req.Params, &p) +- require.Nil(t, err) +- require.Equal(t, 2, len(p)) +- require.Equal(t, "0x123", p[0]) +- require.Equal(t, hexutil.Uint64(55).String(), p[1]) +- }, +- }, +- { +- name: "eth_getCode out of range", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "eth_getCode", Params: mustMarshalJSON([]string{"0x123", hexutil.Uint64(111).String()})}, +- res: nil, +- }, +- expected: RewriteOverrideError, +- expectedErr: ErrRewriteBlockOutOfRange, +- }, +- /* default block parameter, at position 2 */ +- { +- name: "eth_getStorageAt omit block, should add", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "eth_getStorageAt", Params: mustMarshalJSON([]string{"0x123", "5"})}, +- res: nil, +- }, +- expected: RewriteOverrideRequest, +- check: func(t *testing.T, args args) { +- var p []interface{} +- err := json.Unmarshal(args.req.Params, &p) +- require.Nil(t, err) +- require.Equal(t, 3, len(p)) +- require.Equal(t, "0x123", p[0]) +- require.Equal(t, "5", p[1]) +- bnh, err := remarshalBlockNumberOrHash(p[2]) +- require.Nil(t, err) +- require.Equal(t, rpc.BlockNumberOrHashWithNumber(100), *bnh) +- }, +- }, +- { +- name: "eth_getStorageAt latest", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "eth_getStorageAt", Params: mustMarshalJSON([]string{"0x123", "5", "latest"})}, +- res: nil, +- }, +- expected: RewriteOverrideRequest, +- check: func(t *testing.T, args args) { +- var p []interface{} +- err := json.Unmarshal(args.req.Params, &p) +- require.Nil(t, err) +- require.Equal(t, 3, len(p)) +- require.Equal(t, "0x123", p[0]) +- require.Equal(t, "5", p[1]) +- bnh, err := remarshalBlockNumberOrHash(p[2]) +- require.Nil(t, err) +- require.Equal(t, rpc.BlockNumberOrHashWithNumber(100), *bnh) +- }, +- }, +- { +- name: "eth_getStorageAt within range", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "eth_getStorageAt", Params: mustMarshalJSON([]string{"0x123", "5", hexutil.Uint64(55).String()})}, +- res: nil, +- }, +- expected: RewriteNone, +- check: func(t *testing.T, args args) { +- var p []string +- err := json.Unmarshal(args.req.Params, &p) +- require.Nil(t, err) +- require.Equal(t, 3, len(p)) +- require.Equal(t, "0x123", p[0]) +- require.Equal(t, "5", p[1]) +- require.Equal(t, hexutil.Uint64(55).String(), p[2]) +- }, +- }, +- { +- 
name: "eth_getStorageAt out of range", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "eth_getStorageAt", Params: mustMarshalJSON([]string{"0x123", "5", hexutil.Uint64(111).String()})}, +- res: nil, +- }, +- expected: RewriteOverrideError, +- expectedErr: ErrRewriteBlockOutOfRange, +- }, +- /* default block parameter, at position 0 */ +- { +- name: "eth_getBlockByNumber omit block, should add", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "eth_getBlockByNumber", Params: mustMarshalJSON([]string{})}, +- res: nil, +- }, +- expected: RewriteOverrideRequest, +- check: func(t *testing.T, args args) { +- var p []string +- err := json.Unmarshal(args.req.Params, &p) +- require.Nil(t, err) +- require.Equal(t, 1, len(p)) +- require.Equal(t, hexutil.Uint64(100).String(), p[0]) +- }, +- }, +- { +- name: "eth_getBlockByNumber latest", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "eth_getBlockByNumber", Params: mustMarshalJSON([]string{"latest"})}, +- res: nil, +- }, +- expected: RewriteOverrideRequest, +- check: func(t *testing.T, args args) { +- var p []string +- err := json.Unmarshal(args.req.Params, &p) +- require.Nil(t, err) +- require.Equal(t, 1, len(p)) +- require.Equal(t, hexutil.Uint64(100).String(), p[0]) +- }, +- }, +- { +- name: "eth_getBlockByNumber finalized", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100), finalized: hexutil.Uint64(55)}, +- req: &RPCReq{Method: "eth_getBlockByNumber", Params: mustMarshalJSON([]string{"finalized"})}, +- res: nil, +- }, +- expected: RewriteOverrideRequest, +- check: func(t *testing.T, args args) { +- var p []string +- err := json.Unmarshal(args.req.Params, &p) +- require.Nil(t, err) +- require.Equal(t, 1, len(p)) +- require.Equal(t, hexutil.Uint64(55).String(), p[0]) +- }, +- }, +- { +- name: "eth_getBlockByNumber safe", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100), safe: hexutil.Uint64(50)}, +- req: &RPCReq{Method: "eth_getBlockByNumber", Params: mustMarshalJSON([]string{"safe"})}, +- res: nil, +- }, +- expected: RewriteOverrideRequest, +- check: func(t *testing.T, args args) { +- var p []string +- err := json.Unmarshal(args.req.Params, &p) +- require.Nil(t, err) +- require.Equal(t, 1, len(p)) +- require.Equal(t, hexutil.Uint64(50).String(), p[0]) +- }, +- }, +- { +- name: "eth_getBlockByNumber within range", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "eth_getBlockByNumber", Params: mustMarshalJSON([]string{hexutil.Uint64(55).String()})}, +- res: nil, +- }, +- expected: RewriteNone, +- check: func(t *testing.T, args args) { +- var p []string +- err := json.Unmarshal(args.req.Params, &p) +- require.Nil(t, err) +- require.Equal(t, 1, len(p)) +- require.Equal(t, hexutil.Uint64(55).String(), p[0]) +- }, +- }, +- { +- name: "eth_getBlockByNumber out of range", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "eth_getBlockByNumber", Params: mustMarshalJSON([]string{hexutil.Uint64(111).String()})}, +- res: nil, +- }, +- expected: RewriteOverrideError, +- expectedErr: ErrRewriteBlockOutOfRange, +- }, +- { +- name: "eth_getStorageAt using rpc.BlockNumberOrHash", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "eth_getStorageAt", Params: mustMarshalJSON([]string{ +- "0xae851f927ee40de99aabb7461c00f9622ab91d60", +- 
"0x65a7ed542fb37fe237fdfbdd70b31598523fe5b32879e307bae27a0bd9581c08", +- "0x1c4840bcb3de3ac403c0075b46c2c47d4396c5b624b6e1b2874ec04e8879b483"})}, +- res: nil, +- }, +- expected: RewriteNone, +- }, +- // eip1898 +- { +- name: "eth_getStorageAt using rpc.BlockNumberOrHash at genesis (blockNumber)", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "eth_getStorageAt", Params: mustMarshalJSON([]interface{}{ +- "0xae851f927ee40de99aabb7461c00f9622ab91d60", +- "10", +- map[string]interface{}{ +- "blockNumber": "0x0", +- }})}, +- res: nil, +- }, +- expected: RewriteNone, +- }, +- { +- name: "eth_getStorageAt using rpc.BlockNumberOrHash at genesis (hash)", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "eth_getStorageAt", Params: mustMarshalJSON([]interface{}{ +- "0xae851f927ee40de99aabb7461c00f9622ab91d60", +- "10", +- map[string]interface{}{ +- "blockHash": "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3", +- "requireCanonical": true, +- }})}, +- res: nil, +- }, +- expected: RewriteNone, +- check: func(t *testing.T, args args) { +- var p []interface{} +- err := json.Unmarshal(args.req.Params, &p) +- require.Nil(t, err) +- require.Equal(t, 3, len(p)) +- require.Equal(t, "0xae851f927ee40de99aabb7461c00f9622ab91d60", p[0]) +- require.Equal(t, "10", p[1]) +- bnh, err := remarshalBlockNumberOrHash(p[2]) +- require.Nil(t, err) +- require.Equal(t, rpc.BlockNumberOrHashWithHash(common.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"), true), *bnh) +- require.True(t, bnh.RequireCanonical) +- }, +- }, +- { +- name: "eth_getStorageAt using rpc.BlockNumberOrHash at latest (blockNumber)", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "eth_getStorageAt", Params: mustMarshalJSON([]interface{}{ +- "0xae851f927ee40de99aabb7461c00f9622ab91d60", +- "10", +- map[string]interface{}{ +- "blockNumber": "latest", +- }})}, +- res: nil, +- }, +- expected: RewriteOverrideRequest, +- check: func(t *testing.T, args args) { +- var p []interface{} +- err := json.Unmarshal(args.req.Params, &p) +- require.Nil(t, err) +- require.Equal(t, 3, len(p)) +- require.Equal(t, "0xae851f927ee40de99aabb7461c00f9622ab91d60", p[0]) +- require.Equal(t, "10", p[1]) +- bnh, err := remarshalBlockNumberOrHash(p[2]) +- require.Nil(t, err) +- require.Equal(t, rpc.BlockNumberOrHashWithNumber(100), *bnh) +- }, +- }, +- { +- name: "eth_getStorageAt using rpc.BlockNumberOrHash out of range", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "eth_getStorageAt", Params: mustMarshalJSON([]interface{}{ +- "0xae851f927ee40de99aabb7461c00f9622ab91d60", +- "10", +- map[string]interface{}{ +- "blockNumber": "0x111", +- }})}, +- res: nil, +- }, +- expected: RewriteOverrideError, +- expectedErr: ErrRewriteBlockOutOfRange, +- }, +- } +- +- // generalize tests for other methods with same interface and behavior +- tests = generalize(tests, "eth_getLogs", "eth_newFilter") +- tests = generalize(tests, "eth_getCode", "eth_getBalance") +- tests = generalize(tests, "eth_getCode", "eth_getTransactionCount") +- tests = generalize(tests, "eth_getCode", "eth_call") +- tests = generalize(tests, "eth_getBlockByNumber", "eth_getBlockTransactionCountByNumber") +- tests = generalize(tests, "eth_getBlockByNumber", "eth_getUncleCountByBlockNumber") +- tests = generalize(tests, "eth_getBlockByNumber", "eth_getTransactionByBlockNumberAndIndex") +- tests = 
generalize(tests, "eth_getBlockByNumber", "eth_getUncleByBlockNumberAndIndex") +- tests = generalize(tests, "eth_getStorageSlotAt", "eth_getProof") +- +- for _, tt := range tests { +- t.Run(tt.name, func(t *testing.T) { +- result, err := RewriteRequest(tt.args.rctx, tt.args.req, tt.args.res) +- if result != RewriteOverrideError { +- require.Nil(t, err) +- require.Equal(t, tt.expected, result) +- } else { +- require.Equal(t, tt.expectedErr, err) +- } +- if tt.check != nil { +- tt.check(t, tt.args) +- } +- }) +- } +-} +- +-func generalize(tests []rewriteTest, baseMethod string, generalizedMethod string) []rewriteTest { +- newCases := make([]rewriteTest, 0) +- for _, t := range tests { +- if t.args.req.Method == baseMethod { +- newName := strings.Replace(t.name, baseMethod, generalizedMethod, -1) +- var req *RPCReq +- var res *RPCRes +- +- if t.args.req != nil { +- req = &RPCReq{ +- JSONRPC: t.args.req.JSONRPC, +- Method: generalizedMethod, +- Params: t.args.req.Params, +- ID: t.args.req.ID, +- } +- } +- +- if t.args.res != nil { +- res = &RPCRes{ +- JSONRPC: t.args.res.JSONRPC, +- Result: t.args.res.Result, +- Error: t.args.res.Error, +- ID: t.args.res.ID, +- } +- } +- newCases = append(newCases, rewriteTest{ +- name: newName, +- args: args{ +- rctx: t.args.rctx, +- req: req, +- res: res, +- }, +- expected: t.expected, +- expectedErr: t.expectedErr, +- check: t.check, +- }) +- } +- } +- return append(tests, newCases...) +-} +- +-func TestRewriteResponse(t *testing.T) { +- type args struct { +- rctx RewriteContext +- req *RPCReq +- res *RPCRes +- } +- tests := []struct { +- name string +- args args +- expected RewriteResult +- check func(*testing.T, args) +- }{ +- { +- name: "eth_blockNumber latest", +- args: args{ +- rctx: RewriteContext{latest: hexutil.Uint64(100)}, +- req: &RPCReq{Method: "eth_blockNumber"}, +- res: &RPCRes{Result: hexutil.Uint64(200)}, +- }, +- expected: RewriteOverrideResponse, +- check: func(t *testing.T, args args) { +- require.Equal(t, args.res.Result, hexutil.Uint64(100)) +- }, +- }, +- } +- for _, tt := range tests { +- t.Run(tt.name, func(t *testing.T) { +- result, err := RewriteResponse(tt.args.rctx, tt.args.req, tt.args.res) +- require.Nil(t, err) +- require.Equal(t, tt.expected, result) +- if tt.check != nil { +- tt.check(t, tt.args) +- } +- }) +- } +-}
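
One testing technique in the deleted suite is worth noting: cases are written once against a base method, and `generalize` clones them for every other method with the same parameter shape, so `eth_getBalance`, `eth_call`, and the rest inherit the full `eth_getCode` matrix. (Note, incidentally, that the final call generalizes from "eth_getStorageSlotAt", a method name no case uses — the cases say "eth_getStorageAt" — so `eth_getProof` appears to inherit no cases.) A stripped-down sketch of the cloning step:

```go
package main

import (
	"fmt"
	"strings"
)

type testCase struct {
	name   string
	method string
}

// Clone every case written for base so it also runs against target,
// mirroring the generalize helper in the deleted test file.
func generalize(cases []testCase, base, target string) []testCase {
	out := cases
	for _, c := range cases {
		if c.method == base {
			out = append(out, testCase{
				name:   strings.ReplaceAll(c.name, base, target),
				method: target,
			})
		}
	}
	return out
}

func main() {
	cases := []testCase{{name: "eth_getCode latest", method: "eth_getCode"}}
	cases = generalize(cases, "eth_getCode", "eth_getBalance")
	for _, c := range cases {
		fmt.Println(c.method)
	}
}
```
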
@@ -7859,13 +36247,13 @@
- (new) + OP
- CELO + (deleted)
@@ -7875,101 +36263,198 @@
-
+71
-
-0
+
+0
+
-170
-
diff --git OP/packages/contracts-bedrock/snapshots/storageLayout/SortedOracles.json CELO/packages/contracts-bedrock/snapshots/storageLayout/SortedOracles.json -new file mode 100644 -index 0000000000000000000000000000000000000000..e1e5e1736aff6530fc2e9dbeeecc4a4c9a316365 ---- /dev/null -+++ CELO/packages/contracts-bedrock/snapshots/storageLayout/SortedOracles.json -@@ -0,0 +1,72 @@ -+[ -+ { -+ "bytes": "20", -+ "label": "_owner", -+ "offset": 0, -+ "slot": "0", -+ "type": "address" -+ }, -+ { -+ "bytes": "1", -+ "label": "initialized", -+ "offset": 20, -+ "slot": "0", -+ "type": "bool" -+ }, -+ { -+ "bytes": "32", -+ "label": "rates", -+ "offset": 0, -+ "slot": "1", -+ "type": "mapping(address => struct SortedLinkedListWithMedian.List)" -+ }, -+ { -+ "bytes": "32", -+ "label": "timestamps", -+ "offset": 0, -+ "slot": "2", -+ "type": "mapping(address => struct SortedLinkedListWithMedian.List)" -+ }, -+ { -+ "bytes": "32", -+ "label": "isOracle", -+ "offset": 0, -+ "slot": "3", -+ "type": "mapping(address => mapping(address => bool))" -+ }, -+ { -+ "bytes": "32", -+ "label": "oracles", -+ "offset": 0, -+ "slot": "4", -+ "type": "mapping(address => address[])" -+ }, -+ { -+ "bytes": "32", -+ "label": "reportExpirySeconds", -+ "offset": 0, -+ "slot": "5", -+ "type": "uint256" -+ }, -+ { -+ "bytes": "32", -+ "label": "tokenReportExpirySeconds", -+ "offset": 0, -+ "slot": "6", -+ "type": "mapping(address => uint256)" -+ }, -+ { -+ "bytes": "20", -+ "label": "breakerBox", -+ "offset": 0, -+ "slot": "7", -+ "type": "contract IBreakerBox" -+ }, -+ { -+ "bytes": "32", -+ "label": "equivalentTokens", -+ "offset": 0, -+ "slot": "8", -+ "type": "mapping(address => struct SortedOracles.EquivalentToken)" -+ } -+] -\ No newline at end of file
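Layout snapshots like this are exactly what off-chain tooling needs to read individual entries through `eth_getStorageAt`. As a hedged sketch of the standard Solidity rule (the token address is illustrative only), the slot holding `rates[token]`, a mapping rooted at slot 1 above, is `keccak256(pad32(key) ++ pad32(1))`:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// mappingSlot returns the storage slot of mapping[key] for an address-keyed
// mapping rooted at the given slot: keccak256(pad32(key) ++ pad32(slot)).
func mappingSlot(key common.Address, slot uint8) common.Hash {
	var buf [64]byte
	copy(buf[12:32], key.Bytes()) // address left-padded to 32 bytes
	buf[63] = slot                // root slot left-padded to 32 bytes (fits one byte here)
	return crypto.Keccak256Hash(buf[:])
}

func main() {
	// Illustrative key only; any oracle-tracked token address works the same way.
	token := common.HexToAddress("0x765DE816845861e75A25fCA122bb6898B8B1282a")
	fmt.Println(mappingSlot(token, 1).Hex()) // where rates[token] lives
}
```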
+
diff --git OP/proxyd/rpc.go CELO/proxyd/rpc.go +deleted file mode 100644 +index 902e26699b4977f1d75b1d59818ee4d34e7d02ce..0000000000000000000000000000000000000000 +--- OP/proxyd/rpc.go ++++ /dev/null +@@ -1,170 +0,0 @@ +-package proxyd +- +-import ( +- "encoding/json" +- "io" +- "strings" +-) +- +-type RPCReq struct { +- JSONRPC string `json:"jsonrpc"` +- Method string `json:"method"` +- Params json.RawMessage `json:"params"` +- ID json.RawMessage `json:"id"` +-} +- +-type RPCRes struct { +- JSONRPC string +- Result interface{} +- Error *RPCErr +- ID json.RawMessage +-} +- +-type rpcResJSON struct { +- JSONRPC string `json:"jsonrpc"` +- Result interface{} `json:"result,omitempty"` +- Error *RPCErr `json:"error,omitempty"` +- ID json.RawMessage `json:"id"` +-} +- +-type nullResultRPCRes struct { +- JSONRPC string `json:"jsonrpc"` +- Result interface{} `json:"result"` +- ID json.RawMessage `json:"id"` +-} +- +-func (r *RPCRes) IsError() bool { +- return r.Error != nil +-} +- +-func (r *RPCRes) MarshalJSON() ([]byte, error) { +- if r.Result == nil && r.Error == nil { +- return json.Marshal(&nullResultRPCRes{ +- JSONRPC: r.JSONRPC, +- Result: nil, +- ID: r.ID, +- }) +- } +- +- return json.Marshal(&rpcResJSON{ +- JSONRPC: r.JSONRPC, +- Result: r.Result, +- Error: r.Error, +- ID: r.ID, +- }) +-} +- +-type RPCErr struct { +- Code int `json:"code"` +- Message string `json:"message"` +- Data string `json:"data,omitempty"` +- HTTPErrorCode int `json:"-"` +-} +- +-func (r *RPCErr) Error() string { +- return r.Message +-} +- +-func (r *RPCErr) Clone() *RPCErr { +- return &RPCErr{ +- Code: r.Code, +- Message: r.Message, +- HTTPErrorCode: r.HTTPErrorCode, +- } +-} +- +-func IsValidID(id json.RawMessage) bool { +- // handle the case where the ID is a string +- if strings.HasPrefix(string(id), "\"") && strings.HasSuffix(string(id), "\"") { +- return len(id) > 2 +- } +- +- // technically allows a boolean/null ID, but so does Geth +- // https://github.com/ethereum/go-ethereum/blob/master/rpc/json.go#L72 +- return len(id) > 0 && id[0] != '{' && id[0] != '[' +-} +- +-func ParseRPCReq(body []byte) (*RPCReq, error) { +- req := new(RPCReq) +- if err := json.Unmarshal(body, req); err != nil { +- return nil, ErrParseErr +- } +- +- return req, nil +-} +- +-func ParseBatchRPCReq(body []byte) ([]json.RawMessage, error) { +- batch := make([]json.RawMessage, 0) +- if err := json.Unmarshal(body, &batch); err != nil { +- return nil, err +- } +- +- return batch, nil +-} +- +-func ParseRPCRes(r io.Reader) (*RPCRes, error) { +- body, err := io.ReadAll(r) +- if err != nil { +- return nil, wrapErr(err, "error reading RPC response") +- } +- +- res := new(RPCRes) +- if err := json.Unmarshal(body, res); err != nil { +- return nil, wrapErr(err, "error unmarshalling RPC response") +- } +- +- return res, nil +-} +- +-func ValidateRPCReq(req *RPCReq) error { +- if req.JSONRPC != JSONRPCVersion { +- return ErrInvalidRequest("invalid JSON-RPC version") +- } +- +- if req.Method == "" { +- return ErrInvalidRequest("no method specified") +- } +- +- if !IsValidID(req.ID) { +- return ErrInvalidRequest("invalid ID") +- } +- +- return nil +-} +- +-func NewRPCErrorRes(id json.RawMessage, err error) *RPCRes { +- var rpcErr *RPCErr +- if rr, ok := err.(*RPCErr); ok { +- rpcErr = rr +- } else { +- rpcErr = &RPCErr{ +- Code: JSONRPCErrorInternal, +- Message: err.Error(), +- } +- } +- +- return &RPCRes{ +- JSONRPC: JSONRPCVersion, +- Error: rpcErr, +- ID: id, +- } +-} +- +-func NewRPCRes(id json.RawMessage, result interface{}) *RPCRes { +- return 
&RPCRes{ +- JSONRPC: JSONRPCVersion, +- Result: result, +- ID: id, +- } +-} +- +-func IsBatch(raw []byte) bool { +- for _, c := range raw { +- // skip insignificant whitespace (http://www.ietf.org/rfc/rfc4627.txt) +- if c == 0x20 || c == 0x09 || c == 0x0a || c == 0x0d { +- continue +- } +- return c == '[' +- } +- return false +-}
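A side note on the `MarshalJSON` override above: JSON-RPC expects a successful-but-empty response to carry an explicit `"result":null`, while `omitempty` would drop the field altogether, hence the dedicated `nullResultRPCRes` shape. A stdlib-only sketch of the difference:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type withOmitempty struct {
	Result interface{} `json:"result,omitempty"`
}

type alwaysPresent struct {
	Result interface{} `json:"result"`
}

func main() {
	a, _ := json.Marshal(withOmitempty{}) // nil result: field vanishes
	b, _ := json.Marshal(alwaysPresent{}) // nil result: field forced to null
	fmt.Println(string(a)) // {}
	fmt.Println(string(b)) // {"result":null}
}
```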
@@ -7978,13 +36463,13 @@
- (new) + OP
- CELO + (deleted)
@@ -7994,171 +36479,1040 @@
-
+141
-
-0
+
+0
+
-89
-
diff --git OP/packages/contracts-bedrock/snapshots/storageLayout/StableTokenV2.json CELO/packages/contracts-bedrock/snapshots/storageLayout/StableTokenV2.json -new file mode 100644 -index 0000000000000000000000000000000000000000..eea3cafe6e9025cb532486b1e9ff84f4246310ec ---- /dev/null -+++ CELO/packages/contracts-bedrock/snapshots/storageLayout/StableTokenV2.json -@@ -0,0 +1,142 @@ -+[ -+ { -+ "bytes": "1", -+ "label": "_initialized", -+ "offset": 0, -+ "slot": "0", -+ "type": "uint8" -+ }, -+ { -+ "bytes": "1", -+ "label": "_initializing", -+ "offset": 1, -+ "slot": "0", -+ "type": "bool" -+ }, -+ { -+ "bytes": "1600", -+ "label": "__gap", -+ "offset": 0, -+ "slot": "1", -+ "type": "uint256[50]" -+ }, -+ { -+ "bytes": "32", -+ "label": "_balances", -+ "offset": 0, -+ "slot": "51", -+ "type": "mapping(address => uint256)" -+ }, -+ { -+ "bytes": "32", -+ "label": "_allowances", -+ "offset": 0, -+ "slot": "52", -+ "type": "mapping(address => mapping(address => uint256))" -+ }, -+ { -+ "bytes": "32", -+ "label": "_totalSupply", -+ "offset": 0, -+ "slot": "53", -+ "type": "uint256" -+ }, -+ { -+ "bytes": "32", -+ "label": "_name", -+ "offset": 0, -+ "slot": "54", -+ "type": "string" -+ }, -+ { -+ "bytes": "32", -+ "label": "_symbol", -+ "offset": 0, -+ "slot": "55", -+ "type": "string" -+ }, -+ { -+ "bytes": "1440", -+ "label": "__gap", -+ "offset": 0, -+ "slot": "56", -+ "type": "uint256[45]" -+ }, -+ { -+ "bytes": "32", -+ "label": "_HASHED_NAME", -+ "offset": 0, -+ "slot": "101", -+ "type": "bytes32" -+ }, -+ { -+ "bytes": "32", -+ "label": "_HASHED_VERSION", -+ "offset": 0, -+ "slot": "102", -+ "type": "bytes32" -+ }, -+ { -+ "bytes": "1600", -+ "label": "__gap", -+ "offset": 0, -+ "slot": "103", -+ "type": "uint256[50]" -+ }, -+ { -+ "bytes": "32", -+ "label": "_nonces", -+ "offset": 0, -+ "slot": "153", -+ "type": "mapping(address => struct CountersUpgradeable.Counter)" -+ }, -+ { -+ "bytes": "32", -+ "label": "_PERMIT_TYPEHASH_DEPRECATED_SLOT", -+ "offset": 0, -+ "slot": "154", -+ "type": "bytes32" -+ }, -+ { -+ "bytes": "1568", -+ "label": "__gap", -+ "offset": 0, -+ "slot": "155", -+ "type": "uint256[49]" -+ }, -+ { -+ "bytes": "20", -+ "label": "_owner", -+ "offset": 0, -+ "slot": "204", -+ "type": "address" -+ }, -+ { -+ "bytes": "1568", -+ "label": "__gap", -+ "offset": 0, -+ "slot": "205", -+ "type": "uint256[49]" -+ }, -+ { -+ "bytes": "20", -+ "label": "validators", -+ "offset": 0, -+ "slot": "254", -+ "type": "address" -+ }, -+ { -+ "bytes": "20", -+ "label": "broker", -+ "offset": 0, -+ "slot": "255", -+ "type": "address" -+ }, -+ { -+ "bytes": "20", -+ "label": "exchange", -+ "offset": 0, -+ "slot": "256", -+ "type": "address" -+ } -+] -\ No newline at end of file
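The `__gap` entries in this layout are the OpenZeppelin upgradeable-contract reservation pattern: each inherited module pads itself out so later upgrades can add fields without shifting the slots of child contracts. A tiny arithmetic check, using only the numbers in the snapshot above, that the published slots are self-consistent:

```go
package main

import "fmt"

func main() {
	// _initialized/_initializing share slot 0, then __gap[50] occupies 1..50.
	slot := 1 + 50
	fmt.Println(slot) // 51: _balances, matching the snapshot

	// _balances.._symbol take 5 slots (51..55), then __gap[45] (56..100).
	slot += 5 + 45
	fmt.Println(slot) // 101: _HASHED_NAME

	// _HASHED_NAME/_HASHED_VERSION (101..102), then __gap[50] (103..152).
	slot += 2 + 50
	fmt.Println(slot) // 153: _nonces
}
```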
+
diff --git OP/proxyd/rpc_test.go CELO/proxyd/rpc_test.go +deleted file mode 100644 +index e30fe9361a6b2f8943450917dd15024438a1904c..0000000000000000000000000000000000000000 +--- OP/proxyd/rpc_test.go ++++ /dev/null +@@ -1,89 +0,0 @@ +-package proxyd +- +-import ( +- "encoding/json" +- "testing" +- +- "github.com/stretchr/testify/require" +-) +- +-func TestRPCResJSON(t *testing.T) { +- tests := []struct { +- name string +- in *RPCRes +- out string +- }{ +- { +- "string result", +- &RPCRes{ +- JSONRPC: JSONRPCVersion, +- Result: "foobar", +- ID: []byte("123"), +- }, +- `{"jsonrpc":"2.0","result":"foobar","id":123}`, +- }, +- { +- "object result", +- &RPCRes{ +- JSONRPC: JSONRPCVersion, +- Result: struct { +- Str string `json:"str"` +- }{ +- "test", +- }, +- ID: []byte("123"), +- }, +- `{"jsonrpc":"2.0","result":{"str":"test"},"id":123}`, +- }, +- { +- "nil result", +- &RPCRes{ +- JSONRPC: JSONRPCVersion, +- Result: nil, +- ID: []byte("123"), +- }, +- `{"jsonrpc":"2.0","result":null,"id":123}`, +- }, +- { +- "error result without data", +- &RPCRes{ +- JSONRPC: JSONRPCVersion, +- Error: &RPCErr{ +- Code: 1234, +- Message: "test err", +- }, +- ID: []byte("123"), +- }, +- `{"jsonrpc":"2.0","error":{"code":1234,"message":"test err"},"id":123}`, +- }, +- { +- "error result with data", +- &RPCRes{ +- JSONRPC: JSONRPCVersion, +- Error: &RPCErr{ +- Code: 1234, +- Message: "test err", +- Data: "revert", +- }, +- ID: []byte("123"), +- }, +- `{"jsonrpc":"2.0","error":{"code":1234,"message":"test err","data":"revert"},"id":123}`, +- }, +- { +- "string ID", +- &RPCRes{ +- JSONRPC: JSONRPCVersion, +- Result: "foobar", +- ID: []byte("\"123\""), +- }, +- `{"jsonrpc":"2.0","result":"foobar","id":"123"}`, +- }, +- } +- for _, tt := range tests { +- t.Run(tt.name, func(t *testing.T) { +- out, err := json.Marshal(tt.in) +- require.NoError(t, err) +- require.Equal(t, tt.out, string(out)) +- }) +- } +-}
+ OP
+ (deleted)
+0
-877
diff --git OP/proxyd/server.go CELO/proxyd/server.go +deleted file mode 100644 +index 527c2e6c1ff8f42afeda8309dfbaf0dc09dba80e..0000000000000000000000000000000000000000 +--- OP/proxyd/server.go ++++ /dev/null +@@ -1,877 +0,0 @@ +-package proxyd +- +-import ( +- "context" +- "crypto/rand" +- "encoding/hex" +- "encoding/json" +- "errors" +- "fmt" +- "io" +- "math" +- "math/big" +- "net/http" +- "regexp" +- "strconv" +- "strings" +- "sync" +- "time" +- +- "github.com/ethereum/go-ethereum/common/hexutil" +- "github.com/ethereum/go-ethereum/core" +- "github.com/ethereum/go-ethereum/core/txpool" +- "github.com/ethereum/go-ethereum/core/types" +- "github.com/ethereum/go-ethereum/log" +- "github.com/gorilla/mux" +- "github.com/gorilla/websocket" +- "github.com/prometheus/client_golang/prometheus" +- "github.com/redis/go-redis/v9" +- "github.com/rs/cors" +- "github.com/syndtr/goleveldb/leveldb/opt" +-) +- +-const ( +- ContextKeyAuth = "authorization" +- ContextKeyReqID = "req_id" +- ContextKeyXForwardedFor = "x_forwarded_for" +- DefaultMaxBatchRPCCallsLimit = 100 +- MaxBatchRPCCallsHardLimit = 1000 +- cacheStatusHdr = "X-Proxyd-Cache-Status" +- defaultRPCTimeout = 10 * time.Second +- defaultBodySizeLimit = 256 * opt.KiB +- defaultWSHandshakeTimeout = 10 * time.Second +- defaultWSReadTimeout = 2 * time.Minute +- defaultWSWriteTimeout = 10 * time.Second +- defaultCacheTtl = 1 * time.Hour +- maxRequestBodyLogLen = 2000 +- defaultMaxUpstreamBatchSize = 10 +- defaultRateLimitHeader = "X-Forwarded-For" +-) +- +-var emptyArrayResponse = json.RawMessage("[]") +- +-type Server struct { +- BackendGroups map[string]*BackendGroup +- wsBackendGroup *BackendGroup +- wsMethodWhitelist *StringSet +- rpcMethodMappings map[string]string +- maxBodySize int64 +- enableRequestLog bool +- maxRequestBodyLogLen int +- authenticatedPaths map[string]string +- timeout time.Duration +- maxUpstreamBatchSize int +- maxBatchSize int +- enableServedByHeader bool +- upgrader *websocket.Upgrader +- mainLim FrontendRateLimiter +- overrideLims map[string]FrontendRateLimiter +- senderLim FrontendRateLimiter +- allowedChainIds []*big.Int +- limExemptOrigins []*regexp.Regexp +- limExemptUserAgents []*regexp.Regexp +- globallyLimitedMethods map[string]bool +- rpcServer *http.Server +- wsServer *http.Server +- cache RPCCache +- srvMu sync.Mutex +- rateLimitHeader string +-} +- +-type limiterFunc func(method string) bool +- +-func NewServer( +- backendGroups map[string]*BackendGroup, +- wsBackendGroup *BackendGroup, +- wsMethodWhitelist *StringSet, +- rpcMethodMappings map[string]string, +- maxBodySize int64, +- authenticatedPaths map[string]string, +- timeout time.Duration, +- maxUpstreamBatchSize int, +- enableServedByHeader bool, +- cache RPCCache, +- rateLimitConfig RateLimitConfig, +- senderRateLimitConfig SenderRateLimitConfig, +- enableRequestLog bool, +- maxRequestBodyLogLen int, +- maxBatchSize int, +- redisClient *redis.Client, +-) (*Server, error) { +- if cache == nil { +- cache = &NoopRPCCache{} +- } +- +- if maxBodySize == 0 { +- maxBodySize = defaultBodySizeLimit +- } +- +- if timeout == 0 { +- timeout = defaultRPCTimeout +- } +- +- if maxUpstreamBatchSize == 0 { +- maxUpstreamBatchSize = defaultMaxUpstreamBatchSize +- } +- +- if maxBatchSize == 0 { +- maxBatchSize = DefaultMaxBatchRPCCallsLimit +- } +- +- if maxBatchSize > MaxBatchRPCCallsHardLimit { +- maxBatchSize = MaxBatchRPCCallsHardLimit +- } +- +- limiterFactory := func(dur time.Duration, max int, prefix string) FrontendRateLimiter { +- if rateLimitConfig.UseRedis { 
+- return NewRedisFrontendRateLimiter(redisClient, dur, max, prefix) +- } +- +- return NewMemoryFrontendRateLimit(dur, max) +- } +- +- var mainLim FrontendRateLimiter +- limExemptOrigins := make([]*regexp.Regexp, 0) +- limExemptUserAgents := make([]*regexp.Regexp, 0) +- if rateLimitConfig.BaseRate > 0 { +- mainLim = limiterFactory(time.Duration(rateLimitConfig.BaseInterval), rateLimitConfig.BaseRate, "main") +- for _, origin := range rateLimitConfig.ExemptOrigins { +- pattern, err := regexp.Compile(origin) +- if err != nil { +- return nil, err +- } +- limExemptOrigins = append(limExemptOrigins, pattern) +- } +- for _, agent := range rateLimitConfig.ExemptUserAgents { +- pattern, err := regexp.Compile(agent) +- if err != nil { +- return nil, err +- } +- limExemptUserAgents = append(limExemptUserAgents, pattern) +- } +- } else { +- mainLim = NoopFrontendRateLimiter +- } +- +- overrideLims := make(map[string]FrontendRateLimiter) +- globalMethodLims := make(map[string]bool) +- for method, override := range rateLimitConfig.MethodOverrides { +- overrideLims[method] = limiterFactory(time.Duration(override.Interval), override.Limit, method) +- +- if override.Global { +- globalMethodLims[method] = true +- } +- } +- var senderLim FrontendRateLimiter +- if senderRateLimitConfig.Enabled { +- senderLim = limiterFactory(time.Duration(senderRateLimitConfig.Interval), senderRateLimitConfig.Limit, "senders") +- } +- +- rateLimitHeader := defaultRateLimitHeader +- if rateLimitConfig.IPHeaderOverride != "" { +- rateLimitHeader = rateLimitConfig.IPHeaderOverride +- } +- +- return &Server{ +- BackendGroups: backendGroups, +- wsBackendGroup: wsBackendGroup, +- wsMethodWhitelist: wsMethodWhitelist, +- rpcMethodMappings: rpcMethodMappings, +- maxBodySize: maxBodySize, +- authenticatedPaths: authenticatedPaths, +- timeout: timeout, +- maxUpstreamBatchSize: maxUpstreamBatchSize, +- enableServedByHeader: enableServedByHeader, +- cache: cache, +- enableRequestLog: enableRequestLog, +- maxRequestBodyLogLen: maxRequestBodyLogLen, +- maxBatchSize: maxBatchSize, +- upgrader: &websocket.Upgrader{ +- HandshakeTimeout: defaultWSHandshakeTimeout, +- }, +- mainLim: mainLim, +- overrideLims: overrideLims, +- globallyLimitedMethods: globalMethodLims, +- senderLim: senderLim, +- allowedChainIds: senderRateLimitConfig.AllowedChainIds, +- limExemptOrigins: limExemptOrigins, +- limExemptUserAgents: limExemptUserAgents, +- rateLimitHeader: rateLimitHeader, +- }, nil +-} +- +-func (s *Server) RPCListenAndServe(host string, port int) error { +- s.srvMu.Lock() +- hdlr := mux.NewRouter() +- hdlr.HandleFunc("/healthz", s.HandleHealthz).Methods("GET") +- hdlr.HandleFunc("/", s.HandleRPC).Methods("POST") +- hdlr.HandleFunc("/{authorization}", s.HandleRPC).Methods("POST") +- c := cors.New(cors.Options{ +- AllowedOrigins: []string{"*"}, +- }) +- addr := fmt.Sprintf("%s:%d", host, port) +- s.rpcServer = &http.Server{ +- Handler: instrumentedHdlr(c.Handler(hdlr)), +- Addr: addr, +- } +- log.Info("starting HTTP server", "addr", addr) +- s.srvMu.Unlock() +- return s.rpcServer.ListenAndServe() +-} +- +-func (s *Server) WSListenAndServe(host string, port int) error { +- s.srvMu.Lock() +- hdlr := mux.NewRouter() +- hdlr.HandleFunc("/", s.HandleWS) +- hdlr.HandleFunc("/{authorization}", s.HandleWS) +- c := cors.New(cors.Options{ +- AllowedOrigins: []string{"*"}, +- }) +- addr := fmt.Sprintf("%s:%d", host, port) +- s.wsServer = &http.Server{ +- Handler: instrumentedHdlr(c.Handler(hdlr)), +- Addr: addr, +- } +- log.Info("starting WS server", "addr", 
addr) +- s.srvMu.Unlock() +- return s.wsServer.ListenAndServe() +-} +- +-func (s *Server) Shutdown() { +- s.srvMu.Lock() +- defer s.srvMu.Unlock() +- if s.rpcServer != nil { +- _ = s.rpcServer.Shutdown(context.Background()) +- } +- if s.wsServer != nil { +- _ = s.wsServer.Shutdown(context.Background()) +- } +- for _, bg := range s.BackendGroups { +- bg.Shutdown() +- } +-} +- +-func (s *Server) HandleHealthz(w http.ResponseWriter, r *http.Request) { +- _, _ = w.Write([]byte("OK")) +-} +- +-func (s *Server) HandleRPC(w http.ResponseWriter, r *http.Request) { +- ctx := s.populateContext(w, r) +- if ctx == nil { +- return +- } +- var cancel context.CancelFunc +- ctx, cancel = context.WithTimeout(ctx, s.timeout) +- defer cancel() +- +- origin := r.Header.Get("Origin") +- userAgent := r.Header.Get("User-Agent") +- // Use XFF in context since it will automatically be replaced by the remote IP +- xff := stripXFF(GetXForwardedFor(ctx)) +- isUnlimitedOrigin := s.isUnlimitedOrigin(origin) +- isUnlimitedUserAgent := s.isUnlimitedUserAgent(userAgent) +- +- if xff == "" { +- writeRPCError(ctx, w, nil, ErrInvalidRequest("request does not include a remote IP")) +- return +- } +- +- isLimited := func(method string) bool { +- isGloballyLimitedMethod := s.isGlobalLimit(method) +- if !isGloballyLimitedMethod && (isUnlimitedOrigin || isUnlimitedUserAgent) { +- return false +- } +- +- var lim FrontendRateLimiter +- if method == "" { +- lim = s.mainLim +- } else { +- lim = s.overrideLims[method] +- } +- +- if lim == nil { +- return false +- } +- +- ok, err := lim.Take(ctx, xff) +- if err != nil { +- log.Warn("error taking rate limit", "err", err) +- return true +- } +- return !ok +- } +- +- if isLimited("") { +- RecordRPCError(ctx, BackendProxyd, "unknown", ErrOverRateLimit) +- log.Warn( +- "rate limited request", +- "req_id", GetReqID(ctx), +- "auth", GetAuthCtx(ctx), +- "user_agent", userAgent, +- "origin", origin, +- "remote_ip", xff, +- ) +- writeRPCError(ctx, w, nil, ErrOverRateLimit) +- return +- } +- +- log.Info( +- "received RPC request", +- "req_id", GetReqID(ctx), +- "auth", GetAuthCtx(ctx), +- "user_agent", userAgent, +- "origin", origin, +- "remote_ip", xff, +- ) +- +- body, err := io.ReadAll(LimitReader(r.Body, s.maxBodySize)) +- if errors.Is(err, ErrLimitReaderOverLimit) { +- log.Error("request body too large", "req_id", GetReqID(ctx)) +- RecordRPCError(ctx, BackendProxyd, MethodUnknown, ErrRequestBodyTooLarge) +- writeRPCError(ctx, w, nil, ErrRequestBodyTooLarge) +- return +- } +- if err != nil { +- log.Error("error reading request body", "err", err) +- writeRPCError(ctx, w, nil, ErrInternal) +- return +- } +- RecordRequestPayloadSize(ctx, len(body)) +- +- if s.enableRequestLog { +- log.Info("Raw RPC request", +- "body", truncate(string(body), s.maxRequestBodyLogLen), +- "req_id", GetReqID(ctx), +- "auth", GetAuthCtx(ctx), +- ) +- } +- +- if IsBatch(body) { +- reqs, err := ParseBatchRPCReq(body) +- if err != nil { +- log.Error("error parsing batch RPC request", "err", err) +- RecordRPCError(ctx, BackendProxyd, MethodUnknown, err) +- writeRPCError(ctx, w, nil, ErrParseErr) +- return +- } +- +- RecordBatchSize(len(reqs)) +- +- if len(reqs) > s.maxBatchSize { +- RecordRPCError(ctx, BackendProxyd, MethodUnknown, ErrTooManyBatchRequests) +- writeRPCError(ctx, w, nil, ErrTooManyBatchRequests) +- return +- } +- +- if len(reqs) == 0 { +- writeRPCError(ctx, w, nil, ErrInvalidRequest("must specify at least one batch call")) +- return +- } +- +- batchRes, batchContainsCached, servedBy, err := 
s.handleBatchRPC(ctx, reqs, isLimited, true) +- if err == context.DeadlineExceeded { +- writeRPCError(ctx, w, nil, ErrGatewayTimeout) +- return +- } +- if errors.Is(err, ErrConsensusGetReceiptsCantBeBatched) || +- errors.Is(err, ErrConsensusGetReceiptsInvalidTarget) { +- writeRPCError(ctx, w, nil, ErrInvalidRequest(err.Error())) +- return +- } +- if err != nil { +- writeRPCError(ctx, w, nil, ErrInternal) +- return +- } +- if s.enableServedByHeader { +- w.Header().Set("x-served-by", servedBy) +- } +- setCacheHeader(w, batchContainsCached) +- writeBatchRPCRes(ctx, w, batchRes) +- return +- } +- +- rawBody := json.RawMessage(body) +- backendRes, cached, servedBy, err := s.handleBatchRPC(ctx, []json.RawMessage{rawBody}, isLimited, false) +- if err != nil { +- if errors.Is(err, ErrConsensusGetReceiptsCantBeBatched) || +- errors.Is(err, ErrConsensusGetReceiptsInvalidTarget) { +- writeRPCError(ctx, w, nil, ErrInvalidRequest(err.Error())) +- return +- } +- writeRPCError(ctx, w, nil, ErrInternal) +- return +- } +- if s.enableServedByHeader { +- w.Header().Set("x-served-by", servedBy) +- } +- setCacheHeader(w, cached) +- writeRPCRes(ctx, w, backendRes[0]) +-} +- +-func (s *Server) handleBatchRPC(ctx context.Context, reqs []json.RawMessage, isLimited limiterFunc, isBatch bool) ([]*RPCRes, bool, string, error) { +- // A request set is transformed into groups of batches. +- // Each batch group maps to a forwarded JSON-RPC batch request (subject to maxUpstreamBatchSize constraints) +- // A groupID is used to decouple Requests that have duplicate ID so they're not part of the same batch that's +- // forwarded to the backend. This is done to ensure that the order of JSON-RPC Responses match the Request order +- // as the backend MAY return Responses out of order. +- // NOTE: Duplicate request ids induces 1-sized JSON-RPC batches +- type batchGroup struct { +- groupID int +- backendGroup string +- } +- +- responses := make([]*RPCRes, len(reqs)) +- batches := make(map[batchGroup][]batchElem) +- ids := make(map[string]int, len(reqs)) +- +- for i := range reqs { +- parsedReq, err := ParseRPCReq(reqs[i]) +- if err != nil { +- log.Info("error parsing RPC call", "source", "rpc", "err", err) +- responses[i] = NewRPCErrorRes(nil, err) +- continue +- } +- +- // Simple health check +- if len(reqs) == 1 && parsedReq.Method == proxydHealthzMethod { +- res := &RPCRes{ +- ID: parsedReq.ID, +- JSONRPC: JSONRPCVersion, +- Result: "OK", +- } +- return []*RPCRes{res}, false, "", nil +- } +- +- if err := ValidateRPCReq(parsedReq); err != nil { +- RecordRPCError(ctx, BackendProxyd, MethodUnknown, err) +- responses[i] = NewRPCErrorRes(nil, err) +- continue +- } +- +- if parsedReq.Method == "eth_accounts" { +- RecordRPCForward(ctx, BackendProxyd, "eth_accounts", RPCRequestSourceHTTP) +- responses[i] = NewRPCRes(parsedReq.ID, emptyArrayResponse) +- continue +- } +- +- group := s.rpcMethodMappings[parsedReq.Method] +- if group == "" { +- // use unknown below to prevent DOS vector that fills up memory +- // with arbitrary method names. +- log.Info( +- "blocked request for non-whitelisted method", +- "source", "rpc", +- "req_id", GetReqID(ctx), +- "method", parsedReq.Method, +- ) +- RecordRPCError(ctx, BackendProxyd, MethodUnknown, ErrMethodNotWhitelisted) +- responses[i] = NewRPCErrorRes(parsedReq.ID, ErrMethodNotWhitelisted) +- continue +- } +- +- // Take rate limit for specific methods. +- // NOTE: eventually, this should apply to all batch requests. 
However, +- // since we don't have data right now on the size of each batch, we +- // only apply this to the methods that have an additional rate limit. +- if _, ok := s.overrideLims[parsedReq.Method]; ok && isLimited(parsedReq.Method) { +- log.Info( +- "rate limited specific RPC", +- "source", "rpc", +- "req_id", GetReqID(ctx), +- "method", parsedReq.Method, +- ) +- RecordRPCError(ctx, BackendProxyd, parsedReq.Method, ErrOverRateLimit) +- responses[i] = NewRPCErrorRes(parsedReq.ID, ErrOverRateLimit) +- continue +- } +- +- // Apply a sender-based rate limit if it is enabled. Note that sender-based rate +- // limits apply regardless of origin or user-agent. As such, they don't use the +- // isLimited method. +- if parsedReq.Method == "eth_sendRawTransaction" && s.senderLim != nil { +- if err := s.rateLimitSender(ctx, parsedReq); err != nil { +- RecordRPCError(ctx, BackendProxyd, parsedReq.Method, err) +- responses[i] = NewRPCErrorRes(parsedReq.ID, err) +- continue +- } +- } +- +- id := string(parsedReq.ID) +- // If this is a duplicate Request ID, move the Request to a new batchGroup +- ids[id]++ +- batchGroupID := ids[id] +- batchGroup := batchGroup{groupID: batchGroupID, backendGroup: group} +- batches[batchGroup] = append(batches[batchGroup], batchElem{parsedReq, i}) +- } +- +- servedBy := make(map[string]bool, 0) +- var cached bool +- for group, batch := range batches { +- var cacheMisses []batchElem +- +- for _, req := range batch { +- backendRes, _ := s.cache.GetRPC(ctx, req.Req) +- if backendRes != nil { +- responses[req.Index] = backendRes +- cached = true +- } else { +- cacheMisses = append(cacheMisses, req) +- } +- } +- +- // Create minibatches - each minibatch must be no larger than the maxUpstreamBatchSize +- numBatches := int(math.Ceil(float64(len(cacheMisses)) / float64(s.maxUpstreamBatchSize))) +- for i := 0; i < numBatches; i++ { +- if ctx.Err() == context.DeadlineExceeded { +- log.Info("short-circuiting batch RPC", +- "req_id", GetReqID(ctx), +- "auth", GetAuthCtx(ctx), +- "batch_index", i, +- ) +- batchRPCShortCircuitsTotal.Inc() +- return nil, false, "", context.DeadlineExceeded +- } +- +- start := i * s.maxUpstreamBatchSize +- end := int(math.Min(float64(start+s.maxUpstreamBatchSize), float64(len(cacheMisses)))) +- elems := cacheMisses[start:end] +- res, sb, err := s.BackendGroups[group.backendGroup].Forward(ctx, createBatchRequest(elems), isBatch) +- servedBy[sb] = true +- if err != nil { +- if errors.Is(err, ErrConsensusGetReceiptsCantBeBatched) || +- errors.Is(err, ErrConsensusGetReceiptsInvalidTarget) { +- return nil, false, "", err +- } +- log.Error( +- "error forwarding RPC batch", +- "batch_size", len(elems), +- "backend_group", group, +- "req_id", GetReqID(ctx), +- "err", err, +- ) +- res = nil +- for _, elem := range elems { +- res = append(res, NewRPCErrorRes(elem.Req.ID, err)) +- } +- } +- +- for i := range elems { +- responses[elems[i].Index] = res[i] +- +- // TODO(inphi): batch put these +- if res[i].Error == nil && res[i].Result != nil { +- if err := s.cache.PutRPC(ctx, elems[i].Req, res[i]); err != nil { +- log.Warn( +- "cache put error", +- "req_id", GetReqID(ctx), +- "err", err, +- ) +- } +- } +- } +- } +- } +- +- servedByString := "" +- for sb, _ := range servedBy { +- if servedByString != "" { +- servedByString += ", " +- } +- servedByString += sb +- } +- +- return responses, cached, servedByString, nil +-} +- +-func (s *Server) HandleWS(w http.ResponseWriter, r *http.Request) { +- ctx := s.populateContext(w, r) +- if ctx == nil { +- return +- } +- +- 
log.Info("received WS connection", "req_id", GetReqID(ctx)) +- +- clientConn, err := s.upgrader.Upgrade(w, r, nil) +- if err != nil { +- log.Error("error upgrading client conn", "auth", GetAuthCtx(ctx), "req_id", GetReqID(ctx), "err", err) +- return +- } +- clientConn.SetReadLimit(s.maxBodySize) +- +- proxier, err := s.wsBackendGroup.ProxyWS(ctx, clientConn, s.wsMethodWhitelist) +- if err != nil { +- if errors.Is(err, ErrNoBackends) { +- RecordUnserviceableRequest(ctx, RPCRequestSourceWS) +- } +- log.Error("error dialing ws backend", "auth", GetAuthCtx(ctx), "req_id", GetReqID(ctx), "err", err) +- clientConn.Close() +- return +- } +- +- activeClientWsConnsGauge.WithLabelValues(GetAuthCtx(ctx)).Inc() +- go func() { +- // Below call blocks so run it in a goroutine. +- if err := proxier.Proxy(ctx); err != nil { +- log.Error("error proxying websocket", "auth", GetAuthCtx(ctx), "req_id", GetReqID(ctx), "err", err) +- } +- activeClientWsConnsGauge.WithLabelValues(GetAuthCtx(ctx)).Dec() +- }() +- +- log.Info("accepted WS connection", "auth", GetAuthCtx(ctx), "req_id", GetReqID(ctx)) +-} +- +-func (s *Server) populateContext(w http.ResponseWriter, r *http.Request) context.Context { +- vars := mux.Vars(r) +- authorization := vars["authorization"] +- xff := r.Header.Get(s.rateLimitHeader) +- if xff == "" { +- ipPort := strings.Split(r.RemoteAddr, ":") +- if len(ipPort) == 2 { +- xff = ipPort[0] +- } +- } +- ctx := context.WithValue(r.Context(), ContextKeyXForwardedFor, xff) // nolint:staticcheck +- +- if len(s.authenticatedPaths) > 0 { +- if authorization == "" || s.authenticatedPaths[authorization] == "" { +- log.Info("blocked unauthorized request", "authorization", authorization) +- httpResponseCodesTotal.WithLabelValues("401").Inc() +- w.WriteHeader(401) +- return nil +- } +- +- ctx = context.WithValue(ctx, ContextKeyAuth, s.authenticatedPaths[authorization]) // nolint:staticcheck +- } +- +- return context.WithValue( +- ctx, +- ContextKeyReqID, // nolint:staticcheck +- randStr(10), +- ) +-} +- +-func randStr(l int) string { +- b := make([]byte, l) +- if _, err := rand.Read(b); err != nil { +- panic(err) +- } +- return hex.EncodeToString(b) +-} +- +-func (s *Server) isUnlimitedOrigin(origin string) bool { +- for _, pat := range s.limExemptOrigins { +- if pat.MatchString(origin) { +- return true +- } +- } +- +- return false +-} +- +-func (s *Server) isUnlimitedUserAgent(origin string) bool { +- for _, pat := range s.limExemptUserAgents { +- if pat.MatchString(origin) { +- return true +- } +- } +- return false +-} +- +-func (s *Server) isGlobalLimit(method string) bool { +- return s.globallyLimitedMethods[method] +-} +- +-func (s *Server) rateLimitSender(ctx context.Context, req *RPCReq) error { +- var params []string +- if err := json.Unmarshal(req.Params, &params); err != nil { +- log.Debug("error unmarshalling raw transaction params", "err", err, "req_Id", GetReqID(ctx)) +- return ErrParseErr +- } +- +- if len(params) != 1 { +- log.Debug("raw transaction request has invalid number of params", "req_id", GetReqID(ctx)) +- // The error below is identical to the one Geth responds with. +- return ErrInvalidParams("missing value for required argument 0") +- } +- +- var data hexutil.Bytes +- if err := data.UnmarshalText([]byte(params[0])); err != nil { +- log.Debug("error decoding raw tx data", "err", err, "req_id", GetReqID(ctx)) +- // Geth returns the raw error from UnmarshalText. 
+- return ErrInvalidParams(err.Error()) +- } +- +- // Inflates a types.Transaction object from the transaction's raw bytes. +- tx := new(types.Transaction) +- if err := tx.UnmarshalBinary(data); err != nil { +- log.Debug("could not unmarshal transaction", "err", err, "req_id", GetReqID(ctx)) +- return ErrInvalidParams(err.Error()) +- } +- +- // Check if the transaction is for the expected chain, +- // otherwise reject before rate limiting to avoid replay attacks. +- if !s.isAllowedChainId(tx.ChainId()) { +- log.Debug("chain id is not allowed", "req_id", GetReqID(ctx)) +- return txpool.ErrInvalidSender +- } +- +- // Convert the transaction into a Message object so that we can get the +- // sender. This method performs an ecrecover, which can be expensive. +- msg, err := core.TransactionToMessage(tx, types.LatestSignerForChainID(tx.ChainId()), nil) +- if err != nil { +- log.Debug("could not get message from transaction", "err", err, "req_id", GetReqID(ctx)) +- return ErrInvalidParams(err.Error()) +- } +- ok, err := s.senderLim.Take(ctx, fmt.Sprintf("%s:%d", msg.From.Hex(), tx.Nonce())) +- if err != nil { +- log.Error("error taking from sender limiter", "err", err, "req_id", GetReqID(ctx)) +- return ErrInternal +- } +- if !ok { +- log.Debug("sender rate limit exceeded", "sender", msg.From.Hex(), "req_id", GetReqID(ctx)) +- return ErrOverSenderRateLimit +- } +- +- return nil +-} +- +-func (s *Server) isAllowedChainId(chainId *big.Int) bool { +- if s.allowedChainIds == nil || len(s.allowedChainIds) == 0 { +- return true +- } +- for _, id := range s.allowedChainIds { +- if chainId.Cmp(id) == 0 { +- return true +- } +- } +- return false +-} +- +-func setCacheHeader(w http.ResponseWriter, cached bool) { +- if cached { +- w.Header().Set(cacheStatusHdr, "HIT") +- } else { +- w.Header().Set(cacheStatusHdr, "MISS") +- } +-} +- +-func writeRPCError(ctx context.Context, w http.ResponseWriter, id json.RawMessage, err error) { +- var res *RPCRes +- if r, ok := err.(*RPCErr); ok { +- res = NewRPCErrorRes(id, r) +- } else { +- res = NewRPCErrorRes(id, ErrInternal) +- } +- writeRPCRes(ctx, w, res) +-} +- +-func writeRPCRes(ctx context.Context, w http.ResponseWriter, res *RPCRes) { +- statusCode := 200 +- if res.IsError() && res.Error.HTTPErrorCode != 0 { +- statusCode = res.Error.HTTPErrorCode +- } +- +- w.Header().Set("content-type", "application/json") +- w.WriteHeader(statusCode) +- ww := &recordLenWriter{Writer: w} +- enc := json.NewEncoder(ww) +- if err := enc.Encode(res); err != nil { +- log.Error("error writing rpc response", "err", err) +- RecordRPCError(ctx, BackendProxyd, MethodUnknown, err) +- return +- } +- httpResponseCodesTotal.WithLabelValues(strconv.Itoa(statusCode)).Inc() +- RecordResponsePayloadSize(ctx, ww.Len) +-} +- +-func writeBatchRPCRes(ctx context.Context, w http.ResponseWriter, res []*RPCRes) { +- w.Header().Set("content-type", "application/json") +- w.WriteHeader(200) +- ww := &recordLenWriter{Writer: w} +- enc := json.NewEncoder(ww) +- if err := enc.Encode(res); err != nil { +- log.Error("error writing batch rpc response", "err", err) +- RecordRPCError(ctx, BackendProxyd, MethodUnknown, err) +- return +- } +- RecordResponsePayloadSize(ctx, ww.Len) +-} +- +-func instrumentedHdlr(h http.Handler) http.HandlerFunc { +- return func(w http.ResponseWriter, r *http.Request) { +- respTimer := prometheus.NewTimer(httpRequestDurationSumm) +- h.ServeHTTP(w, r) +- respTimer.ObserveDuration() +- } +-} +- +-func GetAuthCtx(ctx context.Context) string { +- authUser, ok := 
ctx.Value(ContextKeyAuth).(string) +- if !ok { +- return "none" +- } +- +- return authUser +-} +- +-func GetReqID(ctx context.Context) string { +- reqId, ok := ctx.Value(ContextKeyReqID).(string) +- if !ok { +- return "" +- } +- return reqId +-} +- +-func GetXForwardedFor(ctx context.Context) string { +- xff, ok := ctx.Value(ContextKeyXForwardedFor).(string) +- if !ok { +- return "" +- } +- return xff +-} +- +-type recordLenWriter struct { +- io.Writer +- Len int +-} +- +-func (w *recordLenWriter) Write(p []byte) (n int, err error) { +- n, err = w.Writer.Write(p) +- w.Len += n +- return +-} +- +-type NoopRPCCache struct{} +- +-func (n *NoopRPCCache) GetRPC(context.Context, *RPCReq) (*RPCRes, error) { +- return nil, nil +-} +- +-func (n *NoopRPCCache) PutRPC(context.Context, *RPCReq, *RPCRes) error { +- return nil +-} +- +-func truncate(str string, maxLen int) string { +- if maxLen == 0 { +- maxLen = maxRequestBodyLogLen +- } +- +- if len(str) > maxLen { +- return str[:maxLen] + "..." +- } else { +- return str +- } +-} +- +-type batchElem struct { +- Req *RPCReq +- Index int +-} +- +-func createBatchRequest(elems []batchElem) []*RPCReq { +- batch := make([]*RPCReq, len(elems)) +- for i := range elems { +- batch[i] = elems[i].Req +- } +- return batch +-}
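The minibatching inside `handleBatchRPC` above is easy to miss: cache misses are re-chunked into upstream batches of at most `maxUpstreamBatchSize` before forwarding. The index arithmetic, isolated into a runnable sketch:

```go
package main

import (
	"fmt"
	"math"
)

// chunk mirrors the minibatching loop above: n cache misses are split into
// ceil(n/max) contiguous [start,end) ranges of at most max elements each.
func chunk(n, max int) [][2]int {
	numBatches := int(math.Ceil(float64(n) / float64(max)))
	out := make([][2]int, 0, numBatches)
	for i := 0; i < numBatches; i++ {
		start := i * max
		end := int(math.Min(float64(start+max), float64(n)))
		out = append(out, [2]int{start, end})
	}
	return out
}

func main() {
	fmt.Println(chunk(23, 10)) // [[0 10] [10 20] [20 23]]
}
```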
@@ -8167,13 +37521,13 @@
- (new) + OP
- CELO + (deleted)
@@ -8183,66 +37537,84 @@
-
+36
-
-0
+
+0
+
-56
-
diff --git OP/packages/contracts-bedrock/snapshots/storageLayout/UniswapFeeHandlerSeller.json CELO/packages/contracts-bedrock/snapshots/storageLayout/UniswapFeeHandlerSeller.json -new file mode 100644 -index 0000000000000000000000000000000000000000..3688a3204dec12dbace7b35435f8d85cb1c9acb3 ---- /dev/null -+++ CELO/packages/contracts-bedrock/snapshots/storageLayout/UniswapFeeHandlerSeller.json -@@ -0,0 +1,37 @@ -+[ -+ { -+ "bytes": "20", -+ "label": "_owner", -+ "offset": 0, -+ "slot": "0", -+ "type": "address" -+ }, -+ { -+ "bytes": "1", -+ "label": "initialized", -+ "offset": 20, -+ "slot": "0", -+ "type": "bool" -+ }, -+ { -+ "bytes": "20", -+ "label": "registry", -+ "offset": 0, -+ "slot": "1", -+ "type": "contract ICeloRegistry" -+ }, -+ { -+ "bytes": "32", -+ "label": "minimumReports", -+ "offset": 0, -+ "slot": "2", -+ "type": "mapping(address => uint256)" -+ }, -+ { -+ "bytes": "32", -+ "label": "routerAddresses", -+ "offset": 0, -+ "slot": "3", -+ "type": "mapping(address => struct EnumerableSet.AddressSet)" -+ } -+] -\ No newline at end of file
+
diff --git OP/proxyd/string_set.go CELO/proxyd/string_set.go +deleted file mode 100644 +index 45823491961161eff6212039e746ef33f9b3c7b3..0000000000000000000000000000000000000000 +--- OP/proxyd/string_set.go ++++ /dev/null +@@ -1,56 +0,0 @@ +-package proxyd +- +-import "sync" +- +-type StringSet struct { +- underlying map[string]bool +- mtx sync.RWMutex +-} +- +-func NewStringSet() *StringSet { +- return &StringSet{ +- underlying: make(map[string]bool), +- } +-} +- +-func NewStringSetFromStrings(in []string) *StringSet { +- underlying := make(map[string]bool) +- for _, str := range in { +- underlying[str] = true +- } +- return &StringSet{ +- underlying: underlying, +- } +-} +- +-func (s *StringSet) Has(test string) bool { +- s.mtx.RLock() +- defer s.mtx.RUnlock() +- return s.underlying[test] +-} +- +-func (s *StringSet) Add(str string) { +- s.mtx.Lock() +- defer s.mtx.Unlock() +- s.underlying[str] = true +-} +- +-func (s *StringSet) Entries() []string { +- s.mtx.RLock() +- defer s.mtx.RUnlock() +- out := make([]string, len(s.underlying)) +- var i int +- for entry := range s.underlying { +- out[i] = entry +- i++ +- } +- return out +-} +- +-func (s *StringSet) Extend(in []string) *StringSet { +- out := NewStringSetFromStrings(in) +- for k := range s.underlying { +- out.Add(k) +- } +- return out +-}
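A brief usage sketch for the set above, assuming the deleted `proxyd` package is in scope (it backed, for example, the WS method whitelist):

```go
package proxyd

import "fmt"

func ExampleStringSet() {
	ws := NewStringSetFromStrings([]string{"eth_subscribe", "eth_unsubscribe"})
	ws.Add("eth_chainId")
	fmt.Println(ws.Has("eth_subscribe")) // true
	fmt.Println(ws.Has("eth_call"))      // false

	// Extend returns a fresh set; the receiver is left unchanged.
	all := ws.Extend([]string{"eth_blockNumber"})
	fmt.Println(all.Has("eth_blockNumber"), ws.Has("eth_blockNumber")) // true false
}
```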
@@ -8251,13 +37623,13 @@
- (new) + OP
- CELO + (deleted)
@@ -8267,45 +37639,160 @@
-
+15
-
-0
+
+0
+
-33
-
diff --git OP/packages/contracts-bedrock/snapshots/storageLayout/UsingRegistry.json CELO/packages/contracts-bedrock/snapshots/storageLayout/UsingRegistry.json -new file mode 100644 -index 0000000000000000000000000000000000000000..fb89bbc7e1ab3904137e39358de306a828c60dac ---- /dev/null -+++ CELO/packages/contracts-bedrock/snapshots/storageLayout/UsingRegistry.json -@@ -0,0 +1,16 @@ -+[ -+ { -+ "bytes": "20", -+ "label": "_owner", -+ "offset": 0, -+ "slot": "0", -+ "type": "address" -+ }, -+ { -+ "bytes": "20", -+ "label": "registry", -+ "offset": 0, -+ "slot": "1", -+ "type": "contract ICeloRegistry" -+ } -+] -\ No newline at end of file
+
diff --git OP/proxyd/tls.go CELO/proxyd/tls.go +deleted file mode 100644 +index ed2bdaff44b4b9a06d4a5d358fd4c410efe67b00..0000000000000000000000000000000000000000 +--- OP/proxyd/tls.go ++++ /dev/null +@@ -1,33 +0,0 @@ +-package proxyd +- +-import ( +- "crypto/tls" +- "crypto/x509" +- "errors" +- "os" +-) +- +-func CreateTLSClient(ca string) (*tls.Config, error) { +- pem, err := os.ReadFile(ca) +- if err != nil { +- return nil, wrapErr(err, "error reading CA") +- } +- +- roots := x509.NewCertPool() +- ok := roots.AppendCertsFromPEM(pem) +- if !ok { +- return nil, errors.New("error parsing TLS client cert") +- } +- +- return &tls.Config{ +- RootCAs: roots, +- }, nil +-} +- +-func ParseKeyPair(crt, key string) (tls.Certificate, error) { +- cert, err := tls.LoadX509KeyPair(crt, key) +- if err != nil { +- return tls.Certificate{}, wrapErr(err, "error loading x509 key pair") +- } +- return cert, nil +-}
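For context, a sketch of how `CreateTLSClient` above would typically be wired into an HTTP client for a backend fronted by a private CA; `ca.pem` is a placeholder path, not a file from this repository:

```go
package proxyd

import "net/http"

func newTLSHTTPClient() (*http.Client, error) {
	tlsCfg, err := CreateTLSClient("ca.pem") // placeholder CA bundle path
	if err != nil {
		return nil, err
	}
	return &http.Client{
		Transport: &http.Transport{TLSClientConfig: tlsCfg},
	}, nil
}
```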
@@ -8314,13 +37801,13 @@
- (new) + OP
@@ -8330,37 +37817,37 @@
-
+9
+
+7
-0
-
diff --git OP/packages/contracts-bedrock/src/celo/CalledByVm.sol CELO/packages/contracts-bedrock/src/celo/CalledByVm.sol -new file mode 100644 -index 0000000000000000000000000000000000000000..c3f6efe12072ef8c87e213f9c29b0789c26cff0f ---- /dev/null -+++ CELO/packages/contracts-bedrock/src/celo/CalledByVm.sol -@@ -0,0 +1,9 @@ -+// SPDX-License-Identifier: LGPL-3.0-only -+pragma solidity ^0.8.15; +
diff --git OP/.envrc.example CELO/.envrc.example +index 43ccf74842b63f71531e4c96b429a5ebbb0e6ea6..d917ad07ab18cad5c7e041ee328c24582164e7cd 100644 +--- OP/.envrc.example ++++ CELO/.envrc.example +@@ -66,3 +66,10 @@ + # Private key to use for contract deployments, you don't need to worry about + # this for the Getting Started guide. + export PRIVATE_KEY= + -+contract CalledByVm { -+ modifier onlyVm() { -+ require(msg.sender == address(0), "Only VM can call"); -+ _; -+ } -+}
++# CELO additional configuration ++export ENABLE_GOVERNANCE=false ++export FUNDS_DEV_ACCOUNTS=false ++export USE_PLASMA=false ++# Set to false if migrating state from a Celo L1. True for new testnets ++export DEPLOY_CELO_CONTRACTS=false
@@ -8369,13 +37856,13 @@
- (new) + OP
@@ -8385,69 +37872,33 @@
-
+41
+
+3
-0
-
diff --git OP/packages/contracts-bedrock/src/celo/CeloPredeploys.sol CELO/packages/contracts-bedrock/src/celo/CeloPredeploys.sol -new file mode 100644 -index 0000000000000000000000000000000000000000..fc36601693c6d535c429e8fceef1a82349dc8eb4 ---- /dev/null -+++ CELO/packages/contracts-bedrock/src/celo/CeloPredeploys.sol -@@ -0,0 +1,41 @@ -+// SPDX-License-Identifier: MIT -+pragma solidity ^0.8.0; -+ -+import { console2 as console } from "forge-std/console2.sol"; -+ -+/// @title CeloPredeploys -+/// @notice Contains constant addresses for protocol contracts that are pre-deployed to the L2 system. -+library CeloPredeploys { -+ address internal constant CELO_REGISTRY = 0x000000000000000000000000000000000000ce10; -+ address internal constant GOLD_TOKEN = 0x471EcE3750Da237f93B8E339c536989b8978a438; -+ address internal constant FEE_HANDLER = 0xcD437749E43A154C07F3553504c68fBfD56B8778; -+ address internal constant FEE_CURRENCY_WHITELIST = 0xBB024E9cdCB2f9E34d893630D19611B8A5381b3c; -+ address internal constant MENTO_FEE_HANDLER_SELLER = 0x4eFa274B7e33476C961065000D58ee09F7921A74; -+ address internal constant UNISWAP_FEE_HANDLER_SELLER = 0xD3aeE28548Dbb65DF03981f0dC0713BfCBd10a97; -+ address internal constant SORTED_ORACLES = 0xefB84935239dAcdecF7c5bA76d8dE40b077B7b33; -+ address internal constant ADDRESS_SORTED_LINKED_LIST_WITH_MEDIAN = 0xED477A99035d0c1e11369F1D7A4e587893cc002B; -+ address internal constant FEE_CURRENCY = 0x4200000000000000000000000000000000001022; -+ address internal constant BRIDGED_ETH = 0x4200000000000000000000000000000000001023; -+ address internal constant FEE_CURRENCY_DIRECTORY = 0x71FFbD48E34bdD5a87c3c683E866dc63b8B2a685; -+ address internal constant cUSD = 0x765DE816845861e75A25fCA122bb6898B8B1282a; -+ -+ /// @notice Returns the name of the predeploy at the given address. -+ function getName(address _addr) internal pure returns (string memory out_) { -+ // require(isPredeployNamespace(_addr), "Predeploys: address must be a predeploy"); -+ -+ if (_addr == CELO_REGISTRY) return "CeloRegistry"; -+ if (_addr == GOLD_TOKEN) return "GoldToken"; -+ if (_addr == FEE_HANDLER) return "FeeHandler"; -+ if (_addr == FEE_CURRENCY_WHITELIST) return "FeeCurrencyWhitelist"; -+ if (_addr == MENTO_FEE_HANDLER_SELLER) return "MentoFeeHandlerSeller"; -+ if (_addr == UNISWAP_FEE_HANDLER_SELLER) return "UniswapFeeHandlerSeller"; -+ if (_addr == SORTED_ORACLES) return "SortedOracles"; -+ if (_addr == ADDRESS_SORTED_LINKED_LIST_WITH_MEDIAN) return "AddressSortedLinkedListWithMedian"; -+ if (_addr == FEE_CURRENCY) return "FeeCurrency"; -+ if (_addr == BRIDGED_ETH) return "BridgedEth"; -+ if (_addr == FEE_CURRENCY_DIRECTORY) return "FeeCurrencyDirectory"; -+ if (_addr == cUSD) return "cUSD"; +
diff --git OP/.gitignore CELO/.gitignore +index 54c16a1f67c3f2dbeff42e8717eb6b828b106336..16640801b343664ded9a93a8f581128a5a6b8343 100644 +--- OP/.gitignore ++++ CELO/.gitignore +@@ -47,3 +47,6 @@ __pycache__ +  + # Ignore echidna artifacts + crytic-export + -+ revert("Predeploys: unnamed predeploy"); -+ } -+}
++# vscode ++.vscode/
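An off-chain mirror of `CeloPredeploys.getName` can be useful when labelling traces or logs. A partial, illustrative sketch with addresses copied from the library above (extend with the remaining constants as needed):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// Partial mirror of CeloPredeploys.getName; not an exhaustive list.
var celoPredeployNames = map[common.Address]string{
	common.HexToAddress("0x000000000000000000000000000000000000ce10"): "CeloRegistry",
	common.HexToAddress("0x471EcE3750Da237f93B8E339c536989b8978a438"): "GoldToken",
	common.HexToAddress("0x4200000000000000000000000000000000001022"): "FeeCurrency",
	common.HexToAddress("0x765DE816845861e75A25fCA122bb6898B8B1282a"): "cUSD",
}

func main() {
	addr := common.HexToAddress("0x000000000000000000000000000000000000ce10")
	fmt.Println(celoPredeployNames[addr]) // CeloRegistry
}
```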
@@ -8456,13 +37907,13 @@
- (new) + OP
@@ -8472,123 +37923,43 @@
-
+95
-
-0
+
+1
+
-2
-
diff --git OP/packages/contracts-bedrock/src/celo/CeloRegistry.sol CELO/packages/contracts-bedrock/src/celo/CeloRegistry.sol -new file mode 100644 -index 0000000000000000000000000000000000000000..7da4cfb35ddfef5c49183c7c3523f658e071aa33 ---- /dev/null -+++ CELO/packages/contracts-bedrock/src/celo/CeloRegistry.sol -@@ -0,0 +1,95 @@ -+// SPDX-License-Identifier: MIT -+pragma solidity ^0.8.15; -+ -+import "../../lib/openzeppelin-contracts/contracts/access/Ownable.sol"; -+ -+import "./interfaces/ICeloRegistry.sol"; -+import "./Initializable.sol"; -+ -+/** -+ * @title Routes identifiers to addresses. -+ */ -+contract CeloRegistry is ICeloRegistry, Ownable, Initializable { -+ mapping(bytes32 => address) public registry; -+ -+ event RegistryUpdated(string identifier, bytes32 indexed identifierHash, address indexed addr); -+ -+ /** -+ * @notice Sets initialized == true on implementation contracts -+ * @param test Set to true to skip implementation initialization -+ */ -+ constructor(bool test) Initializable(test) { } -+ -+ /** -+ * @notice Used in place of the constructor to allow the contract to be upgradable via proxy. -+ */ -+ function initialize() external initializer { -+ _transferOwnership(msg.sender); -+ } -+ -+ /** -+ * @notice Associates the given address with the given identifier. -+ * @param identifier Identifier of contract whose address we want to set. -+ * @param addr Address of contract. -+ */ -+ function setAddressFor(string calldata identifier, address addr) external onlyOwner { -+ bytes32 identifierHash = keccak256(abi.encodePacked(identifier)); -+ registry[identifierHash] = addr; -+ emit RegistryUpdated(identifier, identifierHash, addr); -+ } -+ -+ /** -+ * @notice Gets address associated with the given identifierHash. -+ * @param identifierHash Identifier hash of contract whose address we want to look up. -+ * @dev Throws if address not set. -+ */ -+ function getAddressForOrDie(bytes32 identifierHash) external view returns (address) { -+ require(registry[identifierHash] != address(0), "identifier has no registry entry"); -+ return registry[identifierHash]; -+ } -+ -+ /** -+ * @notice Gets address associated with the given identifierHash. -+ * @param identifierHash Identifier hash of contract whose address we want to look up. -+ */ -+ function getAddressFor(bytes32 identifierHash) external view returns (address) { -+ return registry[identifierHash]; -+ } -+ -+ /** -+ * @notice Gets address associated with the given identifier. -+ * @param identifier Identifier of contract whose address we want to look up. -+ * @dev Throws if address not set. -+ */ -+ function getAddressForStringOrDie(string calldata identifier) external view returns (address) { -+ bytes32 identifierHash = keccak256(abi.encodePacked(identifier)); -+ require(registry[identifierHash] != address(0), "identifier has no registry entry"); -+ return registry[identifierHash]; -+ } -+ -+ /** -+ * @notice Gets address associated with the given identifier. -+ * @param identifier Identifier of contract whose address we want to look up. -+ */ -+ function getAddressForString(string calldata identifier) external view returns (address) { -+ bytes32 identifierHash = keccak256(abi.encodePacked(identifier)); -+ return registry[identifierHash]; -+ } -+ -+ /** -+ * @notice Iterates over provided array of identifiers, getting the address for each. -+ * Returns true if `sender` matches the address of one of the provided identifiers. -+ * @param identifierHashes Array of hashes of approved identifiers. 
-+ * @param sender Address in question to verify membership. -+ * @return True if `sender` corresponds to the address of any of `identifiers` -+ * registry entries. -+ */ -+ function isOneOf(bytes32[] calldata identifierHashes, address sender) external view returns (bool) { -+ for (uint256 i = 0; i < identifierHashes.length; i++) { -+ if (registry[identifierHashes[i]] == sender) { -+ return true; -+ } -+ } -+ return false; -+ } -+}
+
diff --git OP/Makefile CELO/Makefile +index 02ea6b71f14fae83154fe471d873eeefe773682c..673066822abc9673c75fad1b44177255e6cec991 100644 +--- OP/Makefile ++++ CELO/Makefile +@@ -21,7 +21,6 @@ if [ -f "$$NVM_DIR/nvm.sh" ]; then \ + . $$NVM_DIR/nvm.sh && nvm use; \ + fi + pnpm install:ci +- pnpm prepare + pnpm build + .PHONY: build-ts +  +@@ -38,7 +37,7 @@ docker buildx bake \ + --progress plain \ + --load \ + -f docker-bake.hcl \ +- op-node op-batcher op-proposer op-challenger op-dispute-mon ++ op-node op-batcher op-proposer op-challenger op-dispute-mon op-supervisor + .PHONY: golang-docker +  + docker-builder-clean:
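Returning to the `CeloRegistry` hunk above: every entry is keyed by `keccak256(abi.encodePacked(identifier))`, and since `abi.encodePacked` of a single string is just its raw bytes, the `bytes32` key can be precomputed off-chain:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Matches the identifierHash computed inside setAddressFor and
	// getAddressForStringOrDie; "GoldToken" is an illustrative identifier.
	key := crypto.Keccak256([]byte("GoldToken"))
	fmt.Printf("0x%x\n", key)
}
```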
@@ -8597,13 +37968,13 @@
- (new) + OP
@@ -8613,73 +37984,188 @@
-
+45
-
-0
+
+16
+
-14
-
diff --git OP/packages/contracts-bedrock/src/celo/FeeCurrency.sol CELO/packages/contracts-bedrock/src/celo/FeeCurrency.sol -new file mode 100644 -index 0000000000000000000000000000000000000000..59516e3d9e485002357a392e322e85ea30c3b327 ---- /dev/null -+++ CELO/packages/contracts-bedrock/src/celo/FeeCurrency.sol -@@ -0,0 +1,45 @@ -+// SPDX-License-Identifier: MIT -+pragma solidity ^0.8.15; -+ -+import { ERC20 } from "@openzeppelin/contracts/token/ERC20/ERC20.sol"; -+ -+abstract contract FeeCurrency is ERC20 { -+ modifier onlyVm() { -+ require(msg.sender == address(0), "Only VM can call"); -+ _; -+ } -+ -+ function debitGasFees(address from, uint256 value) external onlyVm { -+ _burn(from, value); -+ } +
diff --git OP/README.md CELO/README.md +index 435fb5b46ca018beedb9fd77be1b319719f41730..b8f0ca0fcd9e1b2171656ceb21beaef4fb434e03 100644 +--- OP/README.md ++++ CELO/README.md +@@ -28,18 +28,18 @@ <!-- END doctoc generated TOC please keep comment here to allow auto update --> +  + ## What is Optimism? +  +-[Optimism](https://www.optimism.io/) is a project dedicated to scaling Ethereum's technology and expanding its ability to coordinate people from across the world to build effective decentralized economies and governance systems. The [Optimism Collective](https://app.optimism.io/announcement) builds open-source software for running L2 blockchains and aims to address key governance and economic challenges in the wider cryptocurrency ecosystem. Optimism operates on the principle of **impact=profit**, the idea that individuals who positively impact the Collective should be proportionally rewarded with profit. **Change the incentives and you change the world.** ++[Optimism](https://www.optimism.io/) is a project dedicated to scaling Ethereum's technology and expanding its ability to coordinate people from across the world to build effective decentralized economies and governance systems. The [Optimism Collective](https://www.optimism.io/vision) builds open-source software that powers scalable blockchains and aims to address key governance and economic challenges in the wider Ethereum ecosystem. Optimism operates on the principle of **impact=profit**, the idea that individuals who positively impact the Collective should be proportionally rewarded with profit. **Change the incentives and you change the world.** +  +-In this repository, you'll find numerous core components of the OP Stack, the decentralized software stack maintained by the Optimism Collective that powers Optimism and forms the backbone of blockchains like [OP Mainnet](https://explorer.optimism.io/) and [Base](https://base.org). Designed to be "aggressively open source," the OP Stack encourages you to explore, modify, extend, and test the code as needed. Although not all elements of the OP Stack are contained here, many of its essential components can be found within this repository. By collaborating on free, open software and shared standards, the Optimism Collective aims to prevent siloed software development and rapidly accelerate the development of the Ethereum ecosystem. Come contribute, build the future, and redefine power, together. ++In this repository you'll find numerous core components of the OP Stack, the decentralized software stack maintained by the Optimism Collective that powers Optimism and forms the backbone of blockchains like [OP Mainnet](https://explorer.optimism.io/) and [Base](https://base.org). The OP Stack is designed to be aggressively open-source — you are welcome to explore, modify, and extend this code. 
+  + ## Documentation +  + - If you want to build on top of OP Mainnet, refer to the [Optimism Documentation](https://docs.optimism.io) +-- If you want to build your own OP Stack based blockchain, refer to the [OP Stack Guide](https://docs.optimism.io/stack/getting-started), and make sure to understand this repository's [Development and Release Process](#development-and-release-process) ++- If you want to build your own OP Stack based blockchain, refer to the [OP Stack Guide](https://docs.optimism.io/stack/getting-started) and make sure to understand this repository's [Development and Release Process](#development-and-release-process) +  + ## Specification +  +-If you're interested in the technical details of how Optimism works, refer to the [Optimism Protocol Specification](https://github.com/ethereum-optimism/specs). ++Detailed specifications for the OP Stack can be found within the [OP Stack Specs](https://github.com/ethereum-optimism/specs) repository. +  + ## Community +  +@@ -48,15 +48,16 @@ Governance discussion can also be found on the [Optimism Governance Forum](https://gov.optimism.io/). +  + ## Contributing +  +-Read through [CONTRIBUTING.md](./CONTRIBUTING.md) for a general overview of the contributing process for this repository. +-Use the [Developer Quick Start](./CONTRIBUTING.md#development-quick-start) to get your development environment set up to start working on the Optimism Monorepo. +-Then check out the list of [Good First Issues](https://github.com/ethereum-optimism/optimism/issues?q=is:open+is:issue+label:D-good-first-issue) to find something fun to work on! +-Typo fixes are welcome; however, please create a single commit with all of the typo fixes & batch as many fixes together in a PR as possible. Spammy PRs will be closed. ++The OP Stack is a collaborative project. By collaborating on free, open software and shared standards, the Optimism Collective aims to prevent siloed software development and rapidly accelerate the development of the Ethereum ecosystem. Come contribute, build the future, and redefine power, together. + -+ // New function signature, will be used when all fee currencies have migrated -+ function creditGasFees(address[] calldata recipients, uint256[] calldata amounts) public onlyVm { -+ require(recipients.length == amounts.length, "Recipients and amounts must be the same length."); ++[CONTRIBUTING.md](./CONTRIBUTING.md) contains a detailed explanation of the contributing process for this repository. Make sure to use the [Developer Quick Start](./CONTRIBUTING.md#development-quick-start) to properly set up your development environment. + -+ for (uint256 i = 0; i < recipients.length; i++) { -+ _mint(recipients[i], amounts[i]); -+ } -+ } ++[Good First Issues](https://github.com/ethereum-optimism/optimism/issues?q=is:open+is:issue+label:D-good-first-issue) are a great place to look for tasks to tackle if you're not sure where to start. +  + ## Security Policy and Vulnerability Reporting +  + Please refer to the canonical [Security Policy](https://github.com/ethereum-optimism/.github/blob/master/SECURITY.md) document for detailed information about how to report vulnerabilities in this codebase. +-Bounty hunters are encouraged to check out [the Optimism Immunefi bug bounty program](https://immunefi.com/bounty/optimism/). ++Bounty hunters are encouraged to check out the [Optimism Immunefi bug bounty program](https://immunefi.com/bounty/optimism/). + The Optimism Immunefi program offers up to $2,000,042 for in-scope critical vulnerabilities. 
+ 
+ ## Directory Structure
+@@ -80,8 +81,8 @@ ├── <a href="./ops">ops</a>: Various operational packages
+ ├── <a href="./ops-bedrock">ops-bedrock</a>: Bedrock devnet work
+ ├── <a href="./packages">packages</a>
+ │ ├── <a href="./packages/chain-mon">chain-mon</a>: Chain monitoring services
+-│ ├── <a href="./packages/contracts-bedrock">contracts-bedrock</a>: Bedrock smart contracts
+-│ ├── <a href="./packages/sdk">sdk</a>: provides a set of tools for interacting with Optimism
++│ ├── <a href="./packages/contracts-bedrock">contracts-bedrock</a>: OP Stack smart contracts
++│ ├── <a href="./packages/devnet-tasks">devnet-tasks</a>: Legacy Hardhat tasks used within devnet CI tests
+ ├── <a href="./proxyd">proxyd</a>: Configurable RPC request router and proxy
+ ├── <a href="./specs">specs</a>: Specs of the rollup starting at the Bedrock upgrade
+ </pre>
+@@ -90,7 +91,7 @@ ## Development and Release Process
+ 
+ ### Overview
+ 
+-Please read this section if you're planning to fork this repository, or make frequent PRs into this repository.
++Please read this section carefully if you're planning to fork or make frequent PRs into this repository.
+ 
+ ### Production Releases
+ 
+@@ -99,11 +100,11 @@ For example, an `op-node` release might be versioned as `op-node/v1.1.2`, and smart contract releases might be versioned as `op-contracts/v1.0.0`.
+ Release candidates are versioned in the format `op-node/v1.1.2-rc.1`.
+ We always start with `rc.1` rather than `rc`.
+ 
+-For contract releases, refer to the GitHub release notes for a given release, which will list the specific contracts being released—not all contracts are considered production ready within a release, and many are under active development.
++For contract releases, refer to the GitHub release notes for a given release which will list the specific contracts being released. Not all contracts are considered production ready within a release and many are under active development.
+ 
+ Tags of the form `v<semver>`, such as `v1.1.4`, indicate releases of all Go code only, and **DO NOT** include smart contracts.
+ This naming scheme is required by Golang.
+-In the above list, this means these `v<semver>` releases contain all `op-*` components, and exclude all `contracts-*` components.
++In the above list, this means these `v<semver>` releases contain all `op-*` components and exclude all `contracts-*` components.
+ 
+ `op-geth` embeds upstream geth’s version inside its own version as follows: `vMAJOR.GETH_MAJOR GETH_MINOR GETH_PATCH.PATCH`.
+ Basically, geth’s version is our minor version.
+@@ -112,6 +113,7 @@ Note that we pad out to three characters for the geth minor version and two characters for the geth patch version.
+ Since we cannot left-pad with zeroes, the geth major version is not padded.
+ 
+ See the [Node Software Releases](https://docs.optimism.io/builders/node-operators/releases) page of the documentation for more information about releases for the latest node components.
+
-+ // Old function signature for backwards compatibility
-+ function creditGasFees(
-+ address from,
-+ address feeRecipient,
-+ address, // gatewayFeeRecipient, unused
-+ address communityFund,
-+ uint256 refund,
-+ uint256 tipTxFee,
-+ uint256, // gatewayFee, unused
-+ uint256 baseTxFee
-+ )
-+ public
-+ onlyVm
-+ {
-+ // Calling the new creditGasFees would make sense here, but that is not
-+ // possible due to its calldata arguments.
-+ _mint(from, refund);
-+ _mint(feeRecipient, tipTxFee);
-+ _mint(communityFund, baseTxFee);
-+ }
-+}
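Stepping back to the release-versioning rule quoted in this README hunk: the geth version-embedding scheme is easy to get wrong, so here is a minimal Go sketch of it. The function and parameter names are illustrative only; the repository does not ship such a helper.

```go
package main

import "fmt"

// embedGethVersion applies the rule from the README above: geth's version
// becomes op-geth's minor version, with the geth minor padded to three
// digits, the geth patch padded to two, and the geth major left unpadded.
func embedGethVersion(opMajor, gethMajor, gethMinor, gethPatch, opPatch int) string {
	return fmt.Sprintf("v%d.%d%03d%02d.%d", opMajor, gethMajor, gethMinor, gethPatch, opPatch)
}

func main() {
	// geth v1.13.4 embedded into an op-geth patch release 2:
	fmt.Println(embedGethVersion(1, 1, 13, 4, 2)) // prints v1.101304.2
}
```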
+ The full set of components that have releases are: +  + - `chain-mon`
+ + + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+2
+
-4
+ +
+ +
+
+
diff --git OP/bedrock-devnet/devnet/__init__.py CELO/bedrock-devnet/devnet/__init__.py +index 182172d3412d82b008589c61c873d05fa7cd10f3..2bea550a1d1c8317fd9a42524eafe82309d9220c 100644 +--- OP/bedrock-devnet/devnet/__init__.py ++++ CELO/bedrock-devnet/devnet/__init__.py +@@ -4,12 +4,10 @@ import os + import subprocess + import json + import socket +-import calendar + import datetime + import time + import shutil + import http.client +-import gzip + from multiprocessing import Process, Queue + import concurrent.futures + from collections import namedtuple +@@ -145,7 +143,7 @@ def devnet_l1_allocs(paths): + log.info('Generating L1 genesis allocs') + init_devnet_l1_deploy_config(paths) +  +- fqn = 'scripts/Deploy.s.sol:Deploy' ++ fqn = 'scripts/deploy/Deploy.s.sol:Deploy' + run_command([ + # We need to set the sender here to an account we know the private key of, + # because the sender ends up being the owner of the ProxyAdmin SAFE +@@ -297,7 +295,7 @@ if not DEVNET_L2OO: + log.info('Bringing up `op-challenger`.') + run_command(['docker', 'compose', 'up', '-d', 'op-challenger'], cwd=paths.ops_bedrock_dir, env=docker_env) +  +- # Optionally bring up Plasma Mode components. ++ # Optionally bring up Alt-DA Mode components. + if DEVNET_PLASMA: + log.info('Bringing up `da-server`, `sentinel`.') # TODO(10141): We don't have public sentinel images yet + run_command(['docker', 'compose', 'up', '-d', 'da-server'], cwd=paths.ops_bedrock_dir, env=docker_env)
@@ -8688,13 +38174,13 @@
- (new) + OP
@@ -8704,119 +38190,35 @@
-
+91
-
-0
+
+1
+
-1
-
diff --git OP/packages/contracts-bedrock/src/celo/FeeCurrencyDirectory.sol CELO/packages/contracts-bedrock/src/celo/FeeCurrencyDirectory.sol -new file mode 100644 -index 0000000000000000000000000000000000000000..21fc7ff3181a15e8d87b7f3ab89f713870197d48 ---- /dev/null -+++ CELO/packages/contracts-bedrock/src/celo/FeeCurrencyDirectory.sol -@@ -0,0 +1,91 @@ -+// SPDX-License-Identifier: MIT -+pragma solidity ^0.8.0; -+ -+import "./Initializable.sol"; -+import "./interfaces/IOracle.sol"; -+import "./interfaces/IFeeCurrencyDirectory.sol"; -+import { Ownable } from "@openzeppelin/contracts/access/Ownable.sol"; -+ -+contract FeeCurrencyDirectory is IFeeCurrencyDirectory, Initializable, Ownable { -+ mapping(address => CurrencyConfig) public currencies; -+ address[] private currencyList; -+ -+ constructor(bool test) Initializable(test) { } -+ -+ /** -+ * @notice Initializes the contract with the owner set. -+ */ -+ function initialize() public initializer { -+ _transferOwnership(msg.sender); -+ } -+ -+ /** -+ * @notice Sets the currency configuration for a token. -+ * @dev This action can only be performed by the contract owner. -+ * @param token The token address. -+ * @param oracle The oracle address for price fetching. -+ * @param intrinsicGas The intrinsic gas value for transactions. -+ */ -+ function setCurrencyConfig(address token, address oracle, uint256 intrinsicGas) external onlyOwner { -+ require(oracle != address(0), "Oracle address cannot be zero"); -+ require(intrinsicGas > 0, "Intrinsic gas cannot be zero"); -+ require(currencies[token].oracle == address(0), "Currency already in the directory"); -+ -+ currencies[token] = CurrencyConfig({ oracle: oracle, intrinsicGas: intrinsicGas }); -+ currencyList.push(token); -+ } -+ -+ /** -+ * @notice Removes a token from the directory. -+ * @dev This action can only be performed by the contract owner. -+ * @param token The token address to remove. -+ * @param index The index in the list of directory currencies. -+ */ -+ function removeCurrencies(address token, uint256 index) external onlyOwner { -+ require(index < currencyList.length, "Index out of bounds"); -+ require(currencyList[index] == token, "Index does not match token"); -+ -+ delete currencies[token]; -+ currencyList[index] = currencyList[currencyList.length - 1]; -+ currencyList.pop(); -+ } -+ -+ /** -+ * @notice Returns the list of all currency addresses. -+ * @return An array of addresses. -+ */ -+ function getCurrencies() public view returns (address[] memory) { -+ return currencyList; -+ } -+ -+ /** -+ * @notice Returns the configuration for a currency. -+ * @param token The address of the token. -+ * @return Currency configuration of the token. -+ */ -+ function getCurrencyConfig(address token) public view returns (CurrencyConfig memory) { -+ return currencies[token]; -+ } -+ -+ /** -+ * @notice Retrieves exchange rate between token and CELO. -+ * @param token The token address whose price is to be fetched. -+ * @return numerator The exchange rate numerator. -+ * @return denominator The exchange rate denominator. -+ */ -+ function getExchangeRate(address token) public view returns (uint256 numerator, uint256 denominator) { -+ require(currencies[token].oracle != address(0), "Currency not in the directory"); -+ (numerator, denominator) = IOracle(currencies[token].oracle).getExchangeRate(token); -+ } -+ -+ /** -+ * @notice Returns the storage, major, minor, and patch version of the contract. -+ * @return Storage version of the contract. -+ * @return Major version of the contract. 
-+ * @return Minor version of the contract. -+ * @return Patch version of the contract. -+ */ -+ function getVersionNumber() external pure returns (uint256, uint256, uint256, uint256) { -+ return (1, 1, 0, 0); -+ } -+}
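For orientation, `getExchangeRate` above returns the rate as a (numerator, denominator) pair rather than a single fixed-point number. A minimal Go sketch of how a caller might apply such a pair, assuming the pair expresses CELO per unit of the fee token; the helper name is hypothetical and the rounding choice is an assumption, not something the directory contract prescribes.

```go
package main

import (
	"fmt"
	"math/big"
)

// applyRate multiplies before dividing so no precision is lost until the
// final truncating division.
func applyRate(amount, numerator, denominator *big.Int) *big.Int {
	out := new(big.Int).Mul(amount, numerator)
	return out.Div(out, denominator)
}

func main() {
	// a rate of 3/2 applied to 100 units
	fmt.Println(applyRate(big.NewInt(100), big.NewInt(3), big.NewInt(2))) // 150
}
```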
+
diff --git OP/cannon/Makefile CELO/cannon/Makefile +index 3accd2e11bc3ef34130158c44abe29c7910d647f..3be2e1b304e0d033c1d166ea96a9a0ec9406fc15 100644 +--- OP/cannon/Makefile ++++ CELO/cannon/Makefile +@@ -30,7 +30,7 @@ go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzStateSyscallBrk ./mipsevm + go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzStateSyscallClone ./mipsevm + go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzStateSyscallMmap ./mipsevm + go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzStateSyscallExitGroup ./mipsevm +- go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzStateSyscallFnctl ./mipsevm ++ go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzStateSyscallFcntl ./mipsevm + go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzStateHintRead ./mipsevm + go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 20s -fuzz=FuzzStatePreimageRead ./mipsevm + go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzStateHintWrite ./mipsevm
@@ -8825,13 +38227,13 @@
- (new) + OP
@@ -8841,105 +38243,150 @@
-
+77
-
-0
+
+18
+
-6
-
diff --git OP/packages/contracts-bedrock/src/celo/FeeCurrencyWhitelist.sol CELO/packages/contracts-bedrock/src/celo/FeeCurrencyWhitelist.sol -new file mode 100644 -index 0000000000000000000000000000000000000000..d52d4a155ea6536ad6559646881fd417278deb88 ---- /dev/null -+++ CELO/packages/contracts-bedrock/src/celo/FeeCurrencyWhitelist.sol -@@ -0,0 +1,77 @@ -+// SPDX-License-Identifier: MIT -+pragma solidity ^0.8.15; -+ -+import "../../lib/openzeppelin-contracts/contracts/access/Ownable.sol"; -+ -+import "./interfaces/IFeeCurrencyWhitelist.sol"; -+ -+import "./common/Initializable.sol"; -+ -+import "./common/interfaces/ICeloVersionedContract.sol"; -+ -+/** -+ * @title Holds a whitelist of the ERC20+ tokens that can be used to pay for gas -+ * Not including the native Celo token -+ */ -+contract FeeCurrencyWhitelist is IFeeCurrencyWhitelist, Ownable, Initializable, ICeloVersionedContract { -+ // Array of all the tokens enabled -+ address[] public whitelist; -+ -+ event FeeCurrencyWhitelisted(address token); -+ -+ event FeeCurrencyWhitelistRemoved(address token); -+ -+ /** -+ * @notice Sets initialized == true on implementation contracts -+ * @param test Set to true to skip implementation initialization -+ */ -+ constructor(bool test) Initializable(test) { } -+ -+ /** -+ * @notice Used in place of the constructor to allow the contract to be upgradable via proxy. -+ */ -+ function initialize() external initializer { -+ _transferOwnership(msg.sender); -+ } -+ -+ /** -+ * @notice Returns the storage, major, minor, and patch version of the contract. -+ * @return Storage version of the contract. -+ * @return Major version of the contract. -+ * @return Minor version of the contract. -+ * @return Patch version of the contract. -+ */ -+ function getVersionNumber() external pure returns (uint256, uint256, uint256, uint256) { -+ return (1, 1, 1, 0); -+ } -+ -+ /** -+ * @notice Removes a Mento token as enabled fee token. Tokens added with addToken should be -+ * removed with this function. -+ * @param tokenAddress The address of the token to remove. -+ * @param index The index of the token in the whitelist array. -+ */ -+ function removeToken(address tokenAddress, uint256 index) public onlyOwner { -+ require(whitelist[index] == tokenAddress, "Index does not match"); -+ uint256 length = whitelist.length; -+ whitelist[index] = whitelist[length - 1]; -+ whitelist.pop(); -+ emit FeeCurrencyWhitelistRemoved(tokenAddress); -+ } -+ -+ /** -+ * @dev Add a token to the whitelist -+ * @param tokenAddress The address of the token to add. -+ */ -+ function addToken(address tokenAddress) external onlyOwner { -+ whitelist.push(tokenAddress); -+ emit FeeCurrencyWhitelisted(tokenAddress); -+ } -+ -+ /** -+ * @return a list of all tokens enabled as gas fee currency. -+ */ -+ function getWhitelist() external view returns (address[] memory) { -+ return whitelist; -+ } -+}
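`removeToken` above (like `removeCurrencies` in `FeeCurrencyDirectory.sol`) uses the classic swap-and-pop idiom: O(1) removal at the cost of element order, which is why the caller must supply a matching (address, index) pair looked up off-chain. The same idiom in Go, for illustration only:

```go
package main

import "fmt"

// swapAndPop removes list[index] by overwriting it with the last element
// and shrinking the slice by one. Element order is not preserved.
func swapAndPop(list []string, index int) []string {
	list[index] = list[len(list)-1]
	return list[:len(list)-1]
}

func main() {
	tokens := []string{"tokenA", "tokenB", "tokenC"}
	fmt.Println(swapAndPop(tokens, 0)) // [tokenC tokenB]
}
```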
+
diff --git OP/cannon/cmd/run.go CELO/cannon/cmd/run.go +index 4c0970e3a99c1d158ba7e3e44ffda700d477ee71..f4d57a114d5aeca7ea1e8c1995b896ecf56dd020 100644 +--- OP/cannon/cmd/run.go ++++ CELO/cannon/cmd/run.go +@@ -103,6 +103,12 @@ RunDebugFlag = &cli.BoolFlag{ + Name: "debug", + Usage: "enable debug mode, which includes stack traces and other debug info in the output. Requires --meta.", + } ++ RunDebugInfoFlag = &cli.PathFlag{ ++ Name: "debug-info", ++ Usage: "path to write debug info to", ++ TakesFile: true, ++ Required: false, ++ } +  + OutFilePerm = os.FileMode(0o755) + ) +@@ -380,16 +386,16 @@ if infoAt(state) { + delta := time.Since(start) + l.Info("processing", + "step", step, +- "pc", mipsevm.HexU32(state.PC), +- "insn", mipsevm.HexU32(state.Memory.GetMemory(state.PC)), ++ "pc", mipsevm.HexU32(state.Cpu.PC), ++ "insn", mipsevm.HexU32(state.Memory.GetMemory(state.Cpu.PC)), + "ips", float64(step-startStep)/(float64(delta)/float64(time.Second)), + "pages", state.Memory.PageCount(), + "mem", state.Memory.Usage(), +- "name", meta.LookupSymbol(state.PC), ++ "name", meta.LookupSymbol(state.Cpu.PC), + ) + } +  +- if sleepCheck(state.PC) { // don't loop forever when we get stuck because of an unexpected bad program ++ if sleepCheck(state.Cpu.PC) { // don't loop forever when we get stuck because of an unexpected bad program + return fmt.Errorf("got stuck in Go sleep at step %d", step) + } +  +@@ -411,7 +417,7 @@ return fmt.Errorf("failed to hash prestate witness: %w", err) + } + witness, err := stepFn(true) + if err != nil { +- return fmt.Errorf("failed at proof-gen step %d (PC: %08x): %w", step, state.PC, err) ++ return fmt.Errorf("failed at proof-gen step %d (PC: %08x): %w", step, state.Cpu.PC, err) + } + postStateHash, err := state.EncodeWitness().StateHash() + if err != nil { +@@ -435,7 +441,7 @@ } + } else { + _, err = stepFn(false) + if err != nil { +- return fmt.Errorf("failed at step %d (PC: %08x): %w", step, state.PC, err) ++ return fmt.Errorf("failed at step %d (PC: %08x): %w", step, state.Cpu.PC, err) + } + } +  +@@ -466,6 +472,11 @@ + if err := jsonutil.WriteJSON(ctx.Path(RunOutputFlag.Name), state, OutFilePerm); err != nil { + return fmt.Errorf("failed to write state output: %w", err) + } ++ if debugInfoFile := ctx.Path(RunDebugInfoFlag.Name); debugInfoFile != "" { ++ if err := jsonutil.WriteJSON(debugInfoFile, us.GetDebugInfo(), OutFilePerm); err != nil { ++ return fmt.Errorf("failed to write benchmark data: %w", err) ++ } ++ } + return nil + } +  +@@ -489,5 +500,6 @@ RunMetaFlag, + RunInfoAtFlag, + RunPProfCPU, + RunDebugFlag, ++ RunDebugInfoFlag, + }, + }
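The new `--debug-info` flag writes the VM's `DebugInfo` as JSON (the struct is added in `cannon/mipsevm/instrumented.go`, further down in this diff). A sketch of consuming that file, with the struct mirrored locally and the file name as a placeholder:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// debugInfo mirrors the DebugInfo struct and JSON tags added to
// cannon/mipsevm/instrumented.go in this diff.
type debugInfo struct {
	Pages               int `json:"pages"`
	NumPreimageRequests int `json:"num_preimage_requests"`
	TotalPreimageSize   int `json:"total_preimage_size"`
}

func main() {
	raw, err := os.ReadFile("debug-info.json") // whatever path was passed to --debug-info
	if err != nil {
		panic(err)
	}
	var info debugInfo
	if err := json.Unmarshal(raw, &info); err != nil {
		panic(err)
	}
	fmt.Printf("pages=%d preimage requests=%d preimage bytes=%d\n",
		info.Pages, info.NumPreimageRequests, info.TotalPreimageSize)
}
```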
+ + + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+1
+
-1
+ +
+ +
+
+
diff --git OP/cannon/docs/README.md CELO/cannon/docs/README.md +index 90fbcd34dbd64f3ae8e65a2b23c1fea2a79182d1..61c9595e8f3c9347ecf25f2bc0dfcf51d4e845d7 100644 +--- OP/cannon/docs/README.md ++++ CELO/cannon/docs/README.md +@@ -45,7 +45,7 @@ + ### Packed State +  + The Packed State is provided in every executed onchain instruction. +-See [Cannon VM Specs](https://github.com/ethereum-optimism/specs/blob/main/specs/experimental/fault-proof/cannon-fault-proof-vm.md#state) for ++See [Cannon VM Specs](https://github.com/ethereum-optimism/specs/blob/main/specs/fault-proof/cannon-fault-proof-vm.md#state) for + details on the state structure. +  + The packed state is small! The `State` data can be packed in such a small amount of EVM words,
@@ -8954,7 +38401,7 @@
@@ -8964,561 +38411,2095 @@
-
+543
+
+14
-0
-
diff --git OP/packages/contracts-bedrock/src/celo/FeeHandler.sol CELO/packages/contracts-bedrock/src/celo/FeeHandler.sol +
diff --git OP/cannon/example/alloc/go.mod CELO/cannon/example/alloc/go.mod new file mode 100644 -index 0000000000000000000000000000000000000000..00a1b0bde4fcb4af98c1cd85c71b2d45802d950c +index 0000000000000000000000000000000000000000..2f0739ca6c992a38757584d1e27ac96a4135f47e --- /dev/null -+++ CELO/packages/contracts-bedrock/src/celo/FeeHandler.sol -@@ -0,0 +1,543 @@ -+// SPDX-License-Identifier: MIT -+pragma solidity ^0.8.15; -+ -+import "../../lib/openzeppelin-contracts/contracts/utils/math/Math.sol"; -+import "../../lib/openzeppelin-contracts/contracts/access/Ownable.sol"; -+import "../../lib/openzeppelin-contracts/contracts/utils/structs/EnumerableSet.sol"; -+import "../../lib/openzeppelin-contracts/contracts/token/ERC20/IERC20.sol"; -+ -+import "./UsingRegistry.sol"; -+import "./common/Freezable.sol"; -+import "./common/FixidityLib.sol"; -+import "./common/Initializable.sol"; -+ -+import "./common/interfaces/IFeeHandler.sol"; -+import "./common/interfaces/IFeeHandlerSeller.sol"; -+ -+// TODO move to IStableToken when it adds method getExchangeRegistryId -+import "./interfaces/IStableTokenMento.sol"; -+import "./common/interfaces/ICeloVersionedContract.sol"; -+import "./common/interfaces/ICeloToken.sol"; -+import "./stability/interfaces/ISortedOracles.sol"; -+ -+// Using the minimal required signatures in the interfaces so more contracts could be compatible -+import { ReentrancyGuard } from "@openzeppelin/contracts/security/ReentrancyGuard.sol"; -+ -+// An implementation of FeeHandler as described in CIP-52 -+// See https://github.com/celo-org/celo-proposals/blob/master/CIPs/cip-0052.md -+contract FeeHandler is -+ Ownable, -+ Initializable, -+ UsingRegistry, -+ ICeloVersionedContract, -+ Freezable, -+ IFeeHandler, -+ ReentrancyGuard -+{ -+ using FixidityLib for FixidityLib.Fraction; -+ using EnumerableSet for EnumerableSet.AddressSet; -+ -+ uint256 public constant FIXED1_UINT = 1000000000000000000000000; // TODO move to FIX and add check -+ -+ // Min units that can be burned -+ uint256 public constant MIN_BURN = 200; -+ -+ // last day the daily limits were updated -+ uint256 public lastLimitDay; -+ -+ FixidityLib.Fraction public burnFraction; // 80% -+ -+ address public feeBeneficiary; -+ -+ uint256 public celoToBeBurned; -+ -+ // This mapping can not be public because it contains a FixidityLib.Fraction -+ // and that'd be only supported with experimental features in this -+ // compiler version -+ mapping(address => TokenState) private tokenStates; -+ -+ struct TokenState { -+ address handler; -+ FixidityLib.Fraction maxSlippage; -+ // Max amounts that can be burned in a day for a token -+ uint256 dailySellLimit; -+ // Max amounts that can be burned today for a token -+ uint256 currentDaySellLimit; -+ uint256 toDistribute; -+ // Historical amounts burned by this contract -+ uint256 pastBurn; -+ } -+ -+ EnumerableSet.AddressSet private activeTokens; -+ -+ event SoldAndBurnedToken(address token, uint256 value); -+ event DailyLimitSet(address tokenAddress, uint256 newLimit); -+ event DailyLimitHit(address token, uint256 burning); -+ event MaxSlippageSet(address token, uint256 maxSlippage); -+ event DailySellLimitUpdated(uint256 amount); -+ event FeeBeneficiarySet(address newBeneficiary); -+ event BurnFractionSet(uint256 fraction); -+ event TokenAdded(address tokenAddress, address handlerAddress); -+ event TokenRemoved(address tokenAddress); -+ -+ /** -+ * @notice Sets initialized == true on implementation contracts. -+ * @param test Set to true to skip implementation initialisation. 
-+ */ -+ constructor(bool test) Initializable(test) { } -+ -+ /** -+ * @notice Used in place of the constructor to allow the contract to be upgradable via proxy. -+ */ -+ function initialize( -+ address _registryAddress, -+ address newFeeBeneficiary, -+ uint256 newBurnFraction, -+ address[] calldata tokens, -+ address[] calldata handlers, -+ uint256[] calldata newLimits, -+ uint256[] calldata newMaxSlippages -+ ) -+ external -+ initializer -+ { -+ require(tokens.length == handlers.length, "handlers length should match tokens length"); -+ require(tokens.length == newLimits.length, "limits length should match tokens length"); -+ require(tokens.length == newMaxSlippages.length, "maxSlippage length should match tokens length"); -+ -+ _transferOwnership(msg.sender); -+ setRegistry(_registryAddress); -+ _setFeeBeneficiary(newFeeBeneficiary); -+ _setBurnFraction(newBurnFraction); -+ -+ for (uint256 i = 0; i < tokens.length; i++) { -+ _addToken(tokens[i], handlers[i]); -+ _setDailySellLimit(tokens[i], newLimits[i]); -+ _setMaxSplippage(tokens[i], newMaxSlippages[i]); -+ } -+ } -+ -+ // Without this the contract cant receive Celo as native transfer -+ receive() external payable { } -+ -+ /** -+ * @dev Returns the handler address for the specified token. -+ * @param tokenAddress The address of the token for which to return the handler. -+ * @return The address of the handler contract for the specified token. -+ */ -+ function getTokenHandler(address tokenAddress) external view returns (address) { -+ return tokenStates[tokenAddress].handler; -+ } -+ -+ /** -+ * @dev Returns a boolean indicating whether the specified token is active or not. -+ * @param tokenAddress The address of the token for which to retrieve the active status. -+ * @return A boolean representing the active status of the specified token. -+ */ -+ function getTokenActive(address tokenAddress) external view returns (bool) { -+ return activeTokens.contains(tokenAddress); -+ } -+ -+ /** -+ * @dev Returns the maximum slippage percentage for the specified token. -+ * @param tokenAddress The address of the token for which to retrieve the maximum -+ * slippage percentage. -+ * @return The maximum slippage percentage as a uint256 value. -+ */ -+ function getTokenMaxSlippage(address tokenAddress) external view returns (uint256) { -+ return FixidityLib.unwrap(tokenStates[tokenAddress].maxSlippage); -+ } -+ -+ /** -+ * @dev Returns the daily burn limit for the specified token. -+ * @param tokenAddress The address of the token for which to retrieve the daily burn limit. -+ * @return The daily burn limit as a uint256 value. -+ */ -+ function getTokenDailySellLimit(address tokenAddress) external view returns (uint256) { -+ return tokenStates[tokenAddress].dailySellLimit; -+ } -+ -+ /** -+ * @dev Returns the current daily sell limit for the specified token. -+ * @param tokenAddress The address of the token for which to retrieve the current daily limit. -+ * @return The current daily limit as a uint256 value. -+ */ -+ function getTokenCurrentDaySellLimit(address tokenAddress) external view returns (uint256) { -+ return tokenStates[tokenAddress].currentDaySellLimit; -+ } -+ -+ /** -+ * @dev Returns the amount of tokens available to distribute for the specified token. -+ * @param tokenAddress The address of the token for which to retrieve the amount of -+ * tokens available to distribute. -+ * @return The amount of tokens available to distribute as a uint256 value. 
-+ */ -+ function getTokenToDistribute(address tokenAddress) external view returns (uint256) { -+ return tokenStates[tokenAddress].toDistribute; -+ } -+ -+ function getActiveTokens() public view returns (address[] memory) { -+ return activeTokens.values(); -+ } -+ -+ /** -+ * @dev Sets the fee beneficiary address to the specified address. -+ * @param beneficiary The address to set as the fee beneficiary. -+ */ -+ function setFeeBeneficiary(address beneficiary) external onlyOwner { -+ return _setFeeBeneficiary(beneficiary); -+ } -+ -+ function _setFeeBeneficiary(address beneficiary) private { -+ feeBeneficiary = beneficiary; -+ emit FeeBeneficiarySet(beneficiary); -+ } ++++ CELO/cannon/example/alloc/go.mod +@@ -0,0 +1,14 @@ ++module alloc + -+ /** -+ * @dev Sets the burn fraction to the specified value. -+ * @param fraction The value to set as the burn fraction. -+ */ -+ function setBurnFraction(uint256 fraction) external onlyOwner { -+ return _setBurnFraction(fraction); -+ } ++go 1.21 + -+ function _setBurnFraction(uint256 newFraction) private { -+ FixidityLib.Fraction memory fraction = FixidityLib.wrap(newFraction); -+ require(FixidityLib.lte(fraction, FixidityLib.fixed1()), "Burn fraction must be less than or equal to 1"); -+ burnFraction = fraction; -+ emit BurnFractionSet(newFraction); -+ } ++toolchain go1.21.1 + -+ /** -+ * @dev Sets the burn fraction to the specified value. Token has to have a handler set. -+ * @param tokenAddress The address of the token to sell -+ */ -+ function sell(address tokenAddress) external { -+ return _sell(tokenAddress); -+ } ++require github.com/ethereum-optimism/optimism v0.0.0 + -+ /** -+ * @dev Adds a new token to the contract with the specified token and handler addresses. -+ * @param tokenAddress The address of the token to add. -+ * @param handlerAddress The address of the handler contract for the specified token. -+ */ -+ function addToken(address tokenAddress, address handlerAddress) external onlyOwner { -+ _addToken(tokenAddress, handlerAddress); -+ } ++require ( ++ golang.org/x/crypto v0.24.0 // indirect ++ golang.org/x/sys v0.21.0 // indirect ++) + -+ function _addToken(address tokenAddress, address handlerAddress) private { -+ require(handlerAddress != address(0), "Can't set handler to zero"); -+ TokenState storage tokenState = tokenStates[tokenAddress]; -+ tokenState.handler = handlerAddress; ++replace github.com/ethereum-optimism/optimism v0.0.0 => ../../..
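To make the fee accounting in `_sell` above concrete: each newly processed balance is split into a burned part (`burnFraction`, 80% by default) and a distributable remainder; the sale is skipped entirely below `MIN_BURN`, and the burn is capped by the remaining daily sell limit. A simplified Go sketch of just the split, with the fixed-point fraction replaced by a float for readability:

```go
package main

import "fmt"

// splitBalance mirrors the core arithmetic of _sell: burnFraction of the
// processed balance is sold and burned, the rest is retained to distribute.
// The contract additionally skips the sale when burn < MIN_BURN (rounding
// errors on small amounts) and caps burn at the remaining daily sell limit;
// both checks are omitted here.
func splitBalance(balanceToProcess uint64, burnFraction float64) (burn, distribute uint64) {
	burn = uint64(float64(balanceToProcess) * burnFraction)
	return burn, balanceToProcess - burn
}

func main() {
	burn, distribute := splitBalance(1_000, 0.8)
	fmt.Println(burn, distribute) // 800 200
}
```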
+
+ + +
+ + +
+
+
+ + (new) + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+12
+
-0
+ +
+ +
+
+
diff --git OP/cannon/example/alloc/go.sum CELO/cannon/example/alloc/go.sum +new file mode 100644 +index 0000000000000000000000000000000000000000..e9147757ceeb84a3b77b4e3804f8157f8940484f +--- /dev/null ++++ CELO/cannon/example/alloc/go.sum +@@ -0,0 +1,12 @@ ++github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= ++github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= ++github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= ++github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= ++github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= ++github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= ++golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= ++golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= ++golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= ++golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= ++gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= ++gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+
+ + +
+ + +
+
+
+ + (new) + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+31
+
-0
+ +
+ +
+
+
diff --git OP/cannon/example/alloc/main.go CELO/cannon/example/alloc/main.go +new file mode 100644 +index 0000000000000000000000000000000000000000..41bc67000d2bbcb30e085430ea082a981585d005 +--- /dev/null ++++ CELO/cannon/example/alloc/main.go +@@ -0,0 +1,31 @@ ++package main + -+ activeTokens.add(tokenAddress); -+ emit TokenAdded(tokenAddress, handlerAddress); -+ } ++import ( ++ "encoding/binary" ++ "fmt" ++ "runtime" + -+ /** -+ * @notice Allows the owner to activate a specified token. -+ * @param tokenAddress The address of the token to be activated. -+ */ -+ function activateToken(address tokenAddress) external onlyOwner { -+ _activateToken(tokenAddress); -+ } ++ preimage "github.com/ethereum-optimism/optimism/op-preimage" ++) + -+ function _activateToken(address tokenAddress) private { -+ TokenState storage tokenState = tokenStates[tokenAddress]; -+ require( -+ tokenState.handler != address(0) || tokenAddress == registry.getAddressForOrDie(GOLD_TOKEN_REGISTRY_ID), -+ "Handler has to be set to activate token" -+ ); -+ activeTokens.add(tokenAddress); -+ } ++func main() { ++ var mem []byte ++ po := preimage.NewOracleClient(preimage.ClientPreimageChannel()) ++ numAllocs := binary.LittleEndian.Uint64(po.Get(preimage.LocalIndexKey(0))) ++ ++ fmt.Printf("alloc program. numAllocs=%d\n", numAllocs) ++ var alloc int ++ for i := 0; i < int(numAllocs); i++ { ++ mem = make([]byte, 32*1024*1024) ++ alloc += len(mem) ++ // touch a couple pages to prevent the runtime from overcommitting memory ++ for j := 0; j < len(mem); j += 1024 { ++ mem[j] = 0xFF ++ } ++ fmt.Printf("allocated %d bytes\n", alloc) ++ } + -+ /** -+ * @dev Deactivates the specified token by marking it as inactive. -+ * @param tokenAddress The address of the token to deactivate. -+ */ -+ function deactivateToken(address tokenAddress) external onlyOwner { -+ _deactivateToken(tokenAddress); -+ } ++ var m runtime.MemStats ++ runtime.ReadMemStats(&m) ++ fmt.Printf("alloc program exit. memstats: heap_alloc=%d frees=%d mallocs=%d\n", m.HeapAlloc, m.Frees, m.Mallocs) ++}
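The alloc program above reads its input, `numAllocs`, from local preimage key 0 as a little-endian uint64. A sketch of producing those eight bytes on the host side; how they get registered with the local preimage oracle is host-specific and not shown here:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// encodeNumAllocs produces the byte layout the alloc program expects at
// preimage.LocalIndexKey(0).
func encodeNumAllocs(numAllocs uint64) []byte {
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, numAllocs)
	return buf
}

func main() {
	fmt.Printf("% x\n", encodeNumAllocs(3)) // 03 00 00 00 00 00 00 00
}
```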
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+1
+
-1
+ +
+ +
+
+
diff --git OP/cannon/example/claim/main.go CELO/cannon/example/claim/main.go +index ac22fa909040b604f83b4527def08e9d73a2e9d5..72b2064dec983919370ad046ed4ef6ed2bc6a2d2 100644 +--- OP/cannon/example/claim/main.go ++++ CELO/cannon/example/claim/main.go +@@ -5,7 +5,7 @@ "encoding/binary" + "fmt" + "os" +  +- "github.com/ethereum-optimism/optimism/op-preimage" ++ preimage "github.com/ethereum-optimism/optimism/op-preimage" + ) +  + type rawHint string
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+14
+
-14
+ +
+ +
+
+
diff --git OP/cannon/mipsevm/evm_test.go CELO/cannon/mipsevm/evm_test.go +index 703dc6e30a61263203b672c679955b737dcbc7fe..14c53220c432e9fb3cab88f15ad603b8a11fd314 100644 +--- OP/cannon/mipsevm/evm_test.go ++++ CELO/cannon/mipsevm/evm_test.go +@@ -172,7 +172,7 @@ + fn := path.Join("open_mips_tests/test/bin", f.Name()) + programMem, err := os.ReadFile(fn) + require.NoError(t, err) +- state := &State{PC: 0, NextPC: 4, Memory: NewMemory()} ++ state := &State{Cpu: CpuScalars{PC: 0, NextPC: 4}, Memory: NewMemory()} + err = state.Memory.SetMemoryRange(0, bytes.NewReader(programMem)) + require.NoError(t, err, "load program into state") +  +@@ -182,14 +182,14 @@ + goState := NewInstrumentedState(state, oracle, os.Stdout, os.Stderr) +  + for i := 0; i < 1000; i++ { +- if goState.state.PC == endAddr { ++ if goState.state.Cpu.PC == endAddr { + break + } + if exitGroup && goState.state.Exited { + break + } +- insn := state.Memory.GetMemory(state.PC) +- t.Logf("step: %4d pc: 0x%08x insn: 0x%08x", state.Step, state.PC, insn) ++ insn := state.Memory.GetMemory(state.Cpu.PC) ++ t.Logf("step: %4d pc: 0x%08x insn: 0x%08x", state.Step, state.Cpu.PC, insn) +  + stepWitness, err := goState.Step(true) + require.NoError(t, err) +@@ -201,11 +201,11 @@ require.Equalf(t, hexutil.Bytes(goPost).String(), hexutil.Bytes(evmPost).String(), + "mipsevm produced different state than EVM at step %d", state.Step) + } + if exitGroup { +- require.NotEqual(t, uint32(endAddr), goState.state.PC, "must not reach end") ++ require.NotEqual(t, uint32(endAddr), goState.state.Cpu.PC, "must not reach end") + require.True(t, goState.state.Exited, "must set exited state") + require.Equal(t, uint8(1), goState.state.ExitCode, "must exit with 1") + } else { +- require.Equal(t, uint32(endAddr), state.PC, "must reach end") ++ require.Equal(t, uint32(endAddr), state.Cpu.PC, "must reach end") + // inspect test result + done, result := state.Memory.GetMemory(baseAddrEnd+4), state.Memory.GetMemory(baseAddrEnd+8) + require.Equal(t, done, uint32(1), "must be done") +@@ -233,7 +233,7 @@ } +  + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { +- state := &State{PC: tt.pc, NextPC: tt.nextPC, Memory: NewMemory()} ++ state := &State{Cpu: CpuScalars{PC: tt.pc, NextPC: tt.nextPC}, Memory: NewMemory()} + state.Memory.SetMemory(tt.pc, tt.insn) +  + us := NewInstrumentedState(state, nil, os.Stdout, os.Stderr) +@@ -401,7 +401,7 @@ + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + oracle := hintTrackingOracle{} +- state := &State{PC: 0, NextPC: 4, Memory: NewMemory()} ++ state := &State{Cpu: CpuScalars{PC: 0, NextPC: 4}, Memory: NewMemory()} +  + state.LastHint = tt.lastHint + state.Registers[2] = sysWrite +@@ -448,8 +448,8 @@ } +  + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { +- state := &State{PC: 0, NextPC: tt.nextPC, Memory: NewMemory()} +- initialState := &State{PC: 0, NextPC: tt.nextPC, Memory: state.Memory} ++ state := &State{Cpu: CpuScalars{PC: 0, NextPC: tt.nextPC}, Memory: NewMemory()} ++ initialState := &State{Cpu: CpuScalars{PC: 0, NextPC: tt.nextPC}, Memory: state.Memory} + state.Memory.SetMemory(0, tt.insn) +  + // set the return address ($ra) to jump into when test completes +@@ -496,9 +496,9 @@ for i := 0; i < 400_000; i++ { + if goState.state.Exited { + break + } +- insn := state.Memory.GetMemory(state.PC) ++ insn := state.Memory.GetMemory(state.Cpu.PC) + if i%1000 == 0 { // avoid spamming test logs, we are executing many steps +- t.Logf("step: %4d pc: 0x%08x insn: 0x%08x", state.Step, 
state.PC, insn) ++ t.Logf("step: %4d pc: 0x%08x insn: 0x%08x", state.Step, state.Cpu.PC, insn) + } +  + evm := NewMIPSEVM(contracts, addrs) +@@ -548,9 +548,9 @@ if goState.state.Exited { + break + } +  +- insn := state.Memory.GetMemory(state.PC) ++ insn := state.Memory.GetMemory(state.Cpu.PC) + if i%1000 == 0 { // avoid spamming test logs, we are executing many steps +- t.Logf("step: %4d pc: 0x%08x insn: 0x%08x", state.Step, state.PC, insn) ++ t.Logf("step: %4d pc: 0x%08x insn: 0x%08x", state.Step, state.Cpu.PC, insn) + } +  + stepWitness, err := goState.Step(true)
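All of the `state.PC` to `state.Cpu.PC` churn in these tests comes from one structural change: the four scalar CPU registers were grouped into a `Cpu` field. The shape implied by the test constructors is roughly the following sketch; the real definition (including any serialization tags) lives in the `mipsevm` package itself.

```go
package mipsevm

// CpuScalars, as implied by the CpuScalars{PC: ..., NextPC: ..., LO: ..., HI: ...}
// literals in the tests above: the program counter, the next program counter
// (kept separately because of MIPS delay slots), and the LO/HI
// multiply-divide registers.
type CpuScalars struct {
	PC     uint32
	NextPC uint32
	LO     uint32
	HI     uint32
}
```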
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+91
+
-73
+ +
+ +
+
+
diff --git OP/cannon/mipsevm/fuzz_evm_test.go CELO/cannon/mipsevm/fuzz_evm_test.go +index 3ed3eefb7b38637fc16420b416047a04625a70b4..9b11f9363c931ae9a11798ba19db9680b1119784 100644 +--- OP/cannon/mipsevm/fuzz_evm_test.go ++++ CELO/cannon/mipsevm/fuzz_evm_test.go +@@ -21,10 +21,12 @@ f.Fuzz(func(t *testing.T, pc uint32, step uint64, preimageOffset uint32) { + pc = pc & 0xFF_FF_FF_FC // align PC + nextPC := pc + 4 + state := &State{ +- PC: pc, +- NextPC: nextPC, +- LO: 0, +- HI: 0, ++ Cpu: CpuScalars{ ++ PC: pc, ++ NextPC: nextPC, ++ LO: 0, ++ HI: 0, ++ }, + Heap: 0, + ExitCode: 0, + Exited: false, +@@ -44,10 +46,10 @@ stepWitness, err := goState.Step(true) + require.NoError(t, err) + require.False(t, stepWitness.HasPreimage()) +  +- require.Equal(t, pc+4, state.PC) +- require.Equal(t, nextPC+4, state.NextPC) +- require.Equal(t, uint32(0), state.LO) +- require.Equal(t, uint32(0), state.HI) ++ require.Equal(t, pc+4, state.Cpu.PC) ++ require.Equal(t, nextPC+4, state.Cpu.NextPC) ++ require.Equal(t, uint32(0), state.Cpu.LO) ++ require.Equal(t, uint32(0), state.Cpu.HI) + require.Equal(t, uint32(0), state.Heap) + require.Equal(t, uint8(0), state.ExitCode) + require.Equal(t, false, state.Exited) +@@ -71,10 +73,12 @@ f.Fuzz(func(t *testing.T, pc uint32, step uint64, preimageOffset uint32) { + pc = pc & 0xFF_FF_FF_FC // align PC + nextPC := pc + 4 + state := &State{ +- PC: pc, +- NextPC: nextPC, +- LO: 0, +- HI: 0, ++ Cpu: CpuScalars{ ++ PC: pc, ++ NextPC: nextPC, ++ LO: 0, ++ HI: 0, ++ }, + Heap: 0, + ExitCode: 0, + Exited: false, +@@ -93,10 +97,10 @@ stepWitness, err := goState.Step(true) + require.NoError(t, err) + require.False(t, stepWitness.HasPreimage()) +  +- require.Equal(t, pc+4, state.PC) +- require.Equal(t, nextPC+4, state.NextPC) +- require.Equal(t, uint32(0), state.LO) +- require.Equal(t, uint32(0), state.HI) ++ require.Equal(t, pc+4, state.Cpu.PC) ++ require.Equal(t, nextPC+4, state.Cpu.NextPC) ++ require.Equal(t, uint32(0), state.Cpu.LO) ++ require.Equal(t, uint32(0), state.Cpu.HI) + require.Equal(t, uint32(0), state.Heap) + require.Equal(t, uint8(0), state.ExitCode) + require.Equal(t, false, state.Exited) +@@ -118,10 +122,12 @@ func FuzzStateSyscallMmap(f *testing.F) { + contracts, addrs := testContractsSetup(f) + f.Fuzz(func(t *testing.T, addr uint32, siz uint32, heap uint32) { + state := &State{ +- PC: 0, +- NextPC: 4, +- LO: 0, +- HI: 0, ++ Cpu: CpuScalars{ ++ PC: 0, ++ NextPC: 4, ++ LO: 0, ++ HI: 0, ++ }, + Heap: heap, + ExitCode: 0, + Exited: false, +@@ -139,10 +145,10 @@ stepWitness, err := goState.Step(true) + require.NoError(t, err) + require.False(t, stepWitness.HasPreimage()) +  +- require.Equal(t, uint32(4), state.PC) +- require.Equal(t, uint32(8), state.NextPC) +- require.Equal(t, uint32(0), state.LO) +- require.Equal(t, uint32(0), state.HI) ++ require.Equal(t, uint32(4), state.Cpu.PC) ++ require.Equal(t, uint32(8), state.Cpu.NextPC) ++ require.Equal(t, uint32(0), state.Cpu.LO) ++ require.Equal(t, uint32(0), state.Cpu.HI) + require.Equal(t, uint8(0), state.ExitCode) + require.Equal(t, false, state.Exited) + require.Equal(t, preStateRoot, state.Memory.MerkleRoot()) +@@ -179,10 +185,12 @@ f.Fuzz(func(t *testing.T, exitCode uint8, pc uint32, step uint64) { + pc = pc & 0xFF_FF_FF_FC // align PC + nextPC := pc + 4 + state := &State{ +- PC: pc, +- NextPC: nextPC, +- LO: 0, +- HI: 0, ++ Cpu: CpuScalars{ ++ PC: pc, ++ NextPC: nextPC, ++ LO: 0, ++ HI: 0, ++ }, + Heap: 0, + ExitCode: 0, + Exited: false, +@@ -200,10 +208,10 @@ stepWitness, err := goState.Step(true) + 
require.NoError(t, err) + require.False(t, stepWitness.HasPreimage()) +  +- require.Equal(t, pc, state.PC) +- require.Equal(t, nextPC, state.NextPC) +- require.Equal(t, uint32(0), state.LO) +- require.Equal(t, uint32(0), state.HI) ++ require.Equal(t, pc, state.Cpu.PC) ++ require.Equal(t, nextPC, state.Cpu.NextPC) ++ require.Equal(t, uint32(0), state.Cpu.LO) ++ require.Equal(t, uint32(0), state.Cpu.HI) + require.Equal(t, uint32(0), state.Heap) + require.Equal(t, uint8(exitCode), state.ExitCode) + require.Equal(t, true, state.Exited) +@@ -221,14 +229,16 @@ "mipsevm produced different state than EVM") + }) + } +  +-func FuzzStateSyscallFnctl(f *testing.F) { ++func FuzzStateSyscallFcntl(f *testing.F) { + contracts, addrs := testContractsSetup(f) + f.Fuzz(func(t *testing.T, fd uint32, cmd uint32) { + state := &State{ +- PC: 0, +- NextPC: 4, +- LO: 0, +- HI: 0, ++ Cpu: CpuScalars{ ++ PC: 0, ++ NextPC: 4, ++ LO: 0, ++ HI: 0, ++ }, + Heap: 0, + ExitCode: 0, + Exited: false, +@@ -246,10 +256,10 @@ stepWitness, err := goState.Step(true) + require.NoError(t, err) + require.False(t, stepWitness.HasPreimage()) +  +- require.Equal(t, uint32(4), state.PC) +- require.Equal(t, uint32(8), state.NextPC) +- require.Equal(t, uint32(0), state.LO) +- require.Equal(t, uint32(0), state.HI) ++ require.Equal(t, uint32(4), state.Cpu.PC) ++ require.Equal(t, uint32(8), state.Cpu.NextPC) ++ require.Equal(t, uint32(0), state.Cpu.LO) ++ require.Equal(t, uint32(0), state.Cpu.HI) + require.Equal(t, uint32(0), state.Heap) + require.Equal(t, uint8(0), state.ExitCode) + require.Equal(t, false, state.Exited) +@@ -289,10 +299,12 @@ contracts, addrs := testContractsSetup(f) + f.Fuzz(func(t *testing.T, addr uint32, count uint32) { + preimageData := []byte("hello world") + state := &State{ +- PC: 0, +- NextPC: 4, +- LO: 0, +- HI: 0, ++ Cpu: CpuScalars{ ++ PC: 0, ++ NextPC: 4, ++ LO: 0, ++ HI: 0, ++ }, + Heap: 0, + ExitCode: 0, + Exited: false, +@@ -314,10 +326,10 @@ stepWitness, err := goState.Step(true) + require.NoError(t, err) + require.False(t, stepWitness.HasPreimage()) +  +- require.Equal(t, uint32(4), state.PC) +- require.Equal(t, uint32(8), state.NextPC) +- require.Equal(t, uint32(0), state.LO) +- require.Equal(t, uint32(0), state.HI) ++ require.Equal(t, uint32(4), state.Cpu.PC) ++ require.Equal(t, uint32(8), state.Cpu.NextPC) ++ require.Equal(t, uint32(0), state.Cpu.LO) ++ require.Equal(t, uint32(0), state.Cpu.HI) + require.Equal(t, uint32(0), state.Heap) + require.Equal(t, uint8(0), state.ExitCode) + require.Equal(t, false, state.Exited) +@@ -342,10 +354,12 @@ if preimageOffset >= uint32(len(preimageData)) { + t.SkipNow() + } + state := &State{ +- PC: 0, +- NextPC: 4, +- LO: 0, +- HI: 0, ++ Cpu: CpuScalars{ ++ PC: 0, ++ NextPC: 4, ++ LO: 0, ++ HI: 0, ++ }, + Heap: 0, + ExitCode: 0, + Exited: false, +@@ -372,10 +386,10 @@ stepWitness, err := goState.Step(true) + require.NoError(t, err) + require.True(t, stepWitness.HasPreimage()) +  +- require.Equal(t, uint32(4), state.PC) +- require.Equal(t, uint32(8), state.NextPC) +- require.Equal(t, uint32(0), state.LO) +- require.Equal(t, uint32(0), state.HI) ++ require.Equal(t, uint32(4), state.Cpu.PC) ++ require.Equal(t, uint32(8), state.Cpu.NextPC) ++ require.Equal(t, uint32(0), state.Cpu.LO) ++ require.Equal(t, uint32(0), state.Cpu.HI) + require.Equal(t, uint32(0), state.Heap) + require.Equal(t, uint8(0), state.ExitCode) + require.Equal(t, false, state.Exited) +@@ -403,10 +417,12 @@ contracts, addrs := testContractsSetup(f) + f.Fuzz(func(t *testing.T, addr uint32, count uint32, 
randSeed int64) { + preimageData := []byte("hello world") + state := &State{ +- PC: 0, +- NextPC: 4, +- LO: 0, +- HI: 0, ++ Cpu: CpuScalars{ ++ PC: 0, ++ NextPC: 4, ++ LO: 0, ++ HI: 0, ++ }, + Heap: 0, + ExitCode: 0, + Exited: false, +@@ -436,10 +452,10 @@ stepWitness, err := goState.Step(true) + require.NoError(t, err) + require.False(t, stepWitness.HasPreimage()) +  +- require.Equal(t, uint32(4), state.PC) +- require.Equal(t, uint32(8), state.NextPC) +- require.Equal(t, uint32(0), state.LO) +- require.Equal(t, uint32(0), state.HI) ++ require.Equal(t, uint32(4), state.Cpu.PC) ++ require.Equal(t, uint32(8), state.Cpu.NextPC) ++ require.Equal(t, uint32(0), state.Cpu.LO) ++ require.Equal(t, uint32(0), state.Cpu.HI) + require.Equal(t, uint32(0), state.Heap) + require.Equal(t, uint8(0), state.ExitCode) + require.Equal(t, false, state.Exited) +@@ -461,10 +477,12 @@ contracts, addrs := testContractsSetup(f) + f.Fuzz(func(t *testing.T, addr uint32, count uint32) { + preimageData := []byte("hello world") + state := &State{ +- PC: 0, +- NextPC: 4, +- LO: 0, +- HI: 0, ++ Cpu: CpuScalars{ ++ PC: 0, ++ NextPC: 4, ++ LO: 0, ++ HI: 0, ++ }, + Heap: 0, + ExitCode: 0, + Exited: false, +@@ -489,10 +507,10 @@ stepWitness, err := goState.Step(true) + require.NoError(t, err) + require.False(t, stepWitness.HasPreimage()) +  +- require.Equal(t, uint32(4), state.PC) +- require.Equal(t, uint32(8), state.NextPC) +- require.Equal(t, uint32(0), state.LO) +- require.Equal(t, uint32(0), state.HI) ++ require.Equal(t, uint32(4), state.Cpu.PC) ++ require.Equal(t, uint32(8), state.Cpu.NextPC) ++ require.Equal(t, uint32(0), state.Cpu.LO) ++ require.Equal(t, uint32(0), state.Cpu.HI) + require.Equal(t, uint32(0), state.Heap) + require.Equal(t, uint8(0), state.ExitCode) + require.Equal(t, false, state.Exited)
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+34
+
-18
+ +
+ +
+
+
diff --git OP/cannon/mipsevm/instrumented.go CELO/cannon/mipsevm/instrumented.go +index 6e1ac81f104ed97e7a3ec41d409b2f6783519166..011c7c4d06309f90b2a2686fd14b622a7464453a 100644 +--- OP/cannon/mipsevm/instrumented.go ++++ CELO/cannon/mipsevm/instrumented.go +@@ -26,7 +26,7 @@ lastMemAccess uint32 + memProofEnabled bool + memProof [28 * 32]byte +  +- preimageOracle PreimageOracle ++ preimageOracle *trackingOracle +  + // cached pre-image data, including 8 byte length prefix + lastPreimage []byte +@@ -39,27 +39,12 @@ debug Debug + debugEnabled bool + } +  +-const ( +- fdStdin = 0 +- fdStdout = 1 +- fdStderr = 2 +- fdHintRead = 3 +- fdHintWrite = 4 +- fdPreimageRead = 5 +- fdPreimageWrite = 6 +-) +- +-const ( +- MipsEBADF = 0x9 +- MipsEINVAL = 0x16 +-) +- + func NewInstrumentedState(state *State, po PreimageOracle, stdOut, stdErr io.Writer) *InstrumentedState { + return &InstrumentedState{ + state: state, + stdOut: stdOut, + stdErr: stdErr, +- preimageOracle: po, ++ preimageOracle: &trackingOracle{po: po}, + } + } +  +@@ -78,7 +63,7 @@ m.lastMemAccess = ^uint32(0) + m.lastPreimageOffset = ^uint32(0) +  + if proof { +- insnProof := m.state.Memory.MerkleProof(m.state.PC) ++ insnProof := m.state.Memory.MerkleProof(m.state.Cpu.PC) + wit = &StepWitness{ + State: m.state.EncodeWitness(), + MemProof: insnProof[:], +@@ -103,3 +88,34 @@ + func (m *InstrumentedState) LastPreimage() ([32]byte, []byte, uint32) { + return m.lastPreimageKey, m.lastPreimage, m.lastPreimageOffset + } + -+ function _deactivateToken(address tokenAddress) private { -+ activeTokens.remove(tokenAddress); -+ } ++func (d *InstrumentedState) GetDebugInfo() *DebugInfo { ++ return &DebugInfo{ ++ Pages: d.state.Memory.PageCount(), ++ NumPreimageRequests: d.preimageOracle.numPreimageRequests, ++ TotalPreimageSize: d.preimageOracle.totalPreimageSize, ++ } ++} + -+ /** -+ * @notice Allows the owner to set a handler contract for a specified token. -+ * @param tokenAddress The address of the token to set the handler for. -+ * @param handlerAddress The address of the handler contract to be set. -+ */ -+ function setHandler(address tokenAddress, address handlerAddress) external onlyOwner { -+ _setHandler(tokenAddress, handlerAddress); -+ } ++type DebugInfo struct { ++ Pages int `json:"pages"` ++ NumPreimageRequests int `json:"num_preimage_requests"` ++ TotalPreimageSize int `json:"total_preimage_size"` ++} + -+ function _setHandler(address tokenAddress, address handlerAddress) private { -+ require(handlerAddress != address(0), "Can't set handler to zero, use deactivateToken"); -+ TokenState storage tokenState = tokenStates[tokenAddress]; -+ tokenState.handler = handlerAddress; -+ } ++type trackingOracle struct { ++ po PreimageOracle ++ totalPreimageSize int ++ numPreimageRequests int ++} + -+ function removeToken(address tokenAddress) external onlyOwner { -+ _removeToken(tokenAddress); -+ } ++func (d *trackingOracle) Hint(v []byte) { ++ d.po.Hint(v) ++} + -+ function _removeToken(address tokenAddress) private { -+ _deactivateToken(tokenAddress); -+ TokenState storage tokenState = tokenStates[tokenAddress]; -+ tokenState.handler = address(0); -+ emit TokenRemoved(tokenAddress); -+ } ++func (d *trackingOracle) GetPreimage(k [32]byte) []byte { ++ d.numPreimageRequests++ ++ preimage := d.po.GetPreimage(k) ++ d.totalPreimageSize += len(preimage) ++ return preimage ++}
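The wrapping above is a plain decorator: `NewInstrumentedState` now interposes a `trackingOracle` between the VM and the caller's `PreimageOracle`, and `GetDebugInfo` reads the counters back out. A sketch of how the pieces are meant to fit together, following the usage in `cannon/cmd/run.go`; state and oracle construction are elided.

```go
package main

import (
	"fmt"
	"os"

	"github.com/ethereum-optimism/optimism/cannon/mipsevm"
)

// runWithDebugInfo steps the instrumented VM to completion and then reads
// the counters gathered by the trackingOracle added in this diff.
func runWithDebugInfo(state *mipsevm.State, oracle mipsevm.PreimageOracle) error {
	us := mipsevm.NewInstrumentedState(state, oracle, os.Stdout, os.Stderr)
	for !state.Exited {
		if _, err := us.Step(false); err != nil {
			return err
		}
	}
	info := us.GetDebugInfo()
	fmt.Printf("pages=%d preimage_requests=%d preimage_bytes=%d\n",
		info.Pages, info.NumPreimageRequests, info.TotalPreimageSize)
	return nil
}

func main() {
	// wiring up a real State and PreimageOracle is beyond this sketch
}
```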
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+32
+
-232
+ +
+ +
+
+
diff --git OP/cannon/mipsevm/mips.go CELO/cannon/mipsevm/mips.go +index 9758c1fd75e505d3862b380057ccc58cd9c1e31f..e25bee13a275cc2290cad6e7156294b25ce5ebd0 100644 +--- OP/cannon/mipsevm/mips.go ++++ CELO/cannon/mipsevm/mips.go +@@ -3,17 +3,9 @@ + import ( + "encoding/binary" + "fmt" +- "io" +-) +  +-const ( +- sysMmap = 4090 +- sysBrk = 4045 +- sysClone = 4120 +- sysExitGroup = 4246 +- sysRead = 4003 +- sysWrite = 4004 +- sysFcntl = 4055 ++ "github.com/ethereum/go-ethereum/common" ++ "github.com/ethereum/go-ethereum/common/hexutil" + ) +  + func (m *InstrumentedState) readPreimage(key [32]byte, offset uint32) (dat [32]byte, datLen uint32) { +@@ -43,29 +35,17 @@ } + } +  + func (m *InstrumentedState) handleSyscall() error { +- syscallNum := m.state.Registers[2] // v0 ++ syscallNum, a0, a1, a2 := getSyscallArgs(&m.state.Registers) + -+ function _sell(address tokenAddress) private onlyWhenNotFrozen nonReentrant { -+ IERC20 token = IERC20(tokenAddress); + v0 := uint32(0) + v1 := uint32(0) +  +- a0 := m.state.Registers[4] +- a1 := m.state.Registers[5] +- a2 := m.state.Registers[6] +- + //fmt.Printf("syscall: %d\n", syscallNum) + switch syscallNum { + case sysMmap: +- sz := a1 +- if sz&PageAddrMask != 0 { // adjust size to align with page size +- sz += PageSize - (sz & PageAddrMask) +- } +- if a0 == 0 { +- v0 = m.state.Heap +- //fmt.Printf("mmap heap 0x%x size 0x%x\n", v0, sz) +- m.state.Heap += sz +- } else { +- v0 = a0 +- //fmt.Printf("mmap hint 0x%x size 0x%x\n", v0, sz) +- } ++ var newHeap uint32 ++ v0, v1, newHeap = handleSysMmap(a0, a1, m.state.Heap) ++ m.state.Heap = newHeap + case sysBrk: + v0 = 0x40000000 + case sysClone: // clone (not supported) +@@ -75,107 +55,22 @@ m.state.Exited = true + m.state.ExitCode = uint8(a0) + return nil + case sysRead: +- // args: a0 = fd, a1 = addr, a2 = count +- // returns: v0 = read, v1 = err code +- switch a0 { +- case fdStdin: +- // leave v0 and v1 zero: read nothing, no error +- case fdPreimageRead: // pre-image oracle +- effAddr := a1 & 0xFFffFFfc +- m.trackMemAccess(effAddr) +- mem := m.state.Memory.GetMemory(effAddr) +- dat, datLen := m.readPreimage(m.state.PreimageKey, m.state.PreimageOffset) +- //fmt.Printf("reading pre-image data: addr: %08x, offset: %d, datLen: %d, data: %x, key: %s count: %d\n", a1, m.state.PreimageOffset, datLen, dat[:datLen], m.state.PreimageKey, a2) +- alignment := a1 & 3 +- space := 4 - alignment +- if space < datLen { +- datLen = space +- } +- if a2 < datLen { +- datLen = a2 +- } +- var outMem [4]byte +- binary.BigEndian.PutUint32(outMem[:], mem) +- copy(outMem[alignment:], dat[:datLen]) +- m.state.Memory.SetMemory(effAddr, binary.BigEndian.Uint32(outMem[:])) +- m.state.PreimageOffset += datLen +- v0 = datLen +- //fmt.Printf("read %d pre-image bytes, new offset: %d, eff addr: %08x mem: %08x\n", datLen, m.state.PreimageOffset, effAddr, outMem) +- case fdHintRead: // hint response +- // don't actually read into memory, just say we read it all, we ignore the result anyway +- v0 = a2 +- default: +- v0 = 0xFFffFFff +- v1 = MipsEBADF +- } ++ var newPreimageOffset uint32 ++ v0, v1, newPreimageOffset = handleSysRead(a0, a1, a2, m.state.PreimageKey, m.state.PreimageOffset, m.readPreimage, m.state.Memory, m.trackMemAccess) ++ m.state.PreimageOffset = newPreimageOffset + case sysWrite: +- // args: a0 = fd, a1 = addr, a2 = count +- // returns: v0 = written, v1 = err code +- switch a0 { +- case fdStdout: +- _, _ = io.Copy(m.stdOut, m.state.Memory.ReadMemoryRange(a1, a2)) +- v0 = a2 +- case fdStderr: +- _, _ = io.Copy(m.stdErr, 
m.state.Memory.ReadMemoryRange(a1, a2)) +- v0 = a2 +- case fdHintWrite: +- hintData, _ := io.ReadAll(m.state.Memory.ReadMemoryRange(a1, a2)) +- m.state.LastHint = append(m.state.LastHint, hintData...) +- for len(m.state.LastHint) >= 4 { // process while there is enough data to check if there are any hints +- hintLen := binary.BigEndian.Uint32(m.state.LastHint[:4]) +- if hintLen <= uint32(len(m.state.LastHint[4:])) { +- hint := m.state.LastHint[4 : 4+hintLen] // without the length prefix +- m.state.LastHint = m.state.LastHint[4+hintLen:] +- m.preimageOracle.Hint(hint) +- } else { +- break // stop processing hints if there is incomplete data buffered +- } +- } +- v0 = a2 +- case fdPreimageWrite: +- effAddr := a1 & 0xFFffFFfc +- m.trackMemAccess(effAddr) +- mem := m.state.Memory.GetMemory(effAddr) +- key := m.state.PreimageKey +- alignment := a1 & 3 +- space := 4 - alignment +- if space < a2 { +- a2 = space +- } +- copy(key[:], key[a2:]) +- var tmp [4]byte +- binary.BigEndian.PutUint32(tmp[:], mem) +- copy(key[32-a2:], tmp[alignment:]) +- m.state.PreimageKey = key +- m.state.PreimageOffset = 0 +- //fmt.Printf("updating pre-image key: %s\n", m.state.PreimageKey) +- v0 = a2 +- default: +- v0 = 0xFFffFFff +- v1 = MipsEBADF +- } ++ var newLastHint hexutil.Bytes ++ var newPreimageKey common.Hash ++ var newPreimageOffset uint32 ++ v0, v1, newLastHint, newPreimageKey, newPreimageOffset = handleSysWrite(a0, a1, a2, m.state.LastHint, m.state.PreimageKey, m.state.PreimageOffset, m.preimageOracle, m.state.Memory, m.trackMemAccess, m.stdOut, m.stdErr) ++ m.state.LastHint = newLastHint ++ m.state.PreimageKey = newPreimageKey ++ m.state.PreimageOffset = newPreimageOffset + case sysFcntl: +- // args: a0 = fd, a1 = cmd +- if a1 == 3 { // F_GETFL: get file descriptor flags +- switch a0 { +- case fdStdin, fdPreimageRead, fdHintRead: +- v0 = 0 // O_RDONLY +- case fdStdout, fdStderr, fdPreimageWrite, fdHintWrite: +- v0 = 1 // O_WRONLY +- default: +- v0 = 0xFFffFFff +- v1 = MipsEBADF +- } +- } else { +- v0 = 0xFFffFFff +- v1 = MipsEINVAL // cmd not recognized by this kernel +- } ++ v0, v1 = handleSysFcntl(a0, a1) + } +- m.state.Registers[2] = v0 +- m.state.Registers[7] = v1 +  +- m.state.PC = m.state.NextPC +- m.state.NextPC = m.state.NextPC + 4 ++ handleSyscallUpdates(&m.state.Cpu, &m.state.Registers, v0, v1) + return nil + } +  +@@ -184,7 +79,7 @@ if !m.debugEnabled { + return + } + m.debug.stack = append(m.debug.stack, target) +- m.debug.caller = append(m.debug.caller, m.state.PC) ++ m.debug.caller = append(m.debug.caller, m.state.Cpu.PC) + } +  + func (m *InstrumentedState) popStack() { +@@ -192,7 +87,7 @@ if !m.debugEnabled { + return + } + if len(m.debug.stack) != 0 { +- fn := m.debug.meta.LookupSymbol(m.state.PC) ++ fn := m.debug.meta.LookupSymbol(m.state.Cpu.PC) + topFn := m.debug.meta.LookupSymbol(m.debug.stack[len(m.debug.stack)-1]) + if fn != topFn { + // most likely the function was inlined. Snap back to the last return. +@@ -209,12 +104,12 @@ m.debug.stack = m.debug.stack[:len(m.debug.stack)-1] + m.debug.caller = m.debug.caller[:len(m.debug.caller)-1] + } + } else { +- fmt.Printf("ERROR: stack underflow at pc=%x. step=%d\n", m.state.PC, m.state.Step) ++ fmt.Printf("ERROR: stack underflow at pc=%x. step=%d\n", m.state.Cpu.PC, m.state.Step) + } + } +  + func (m *InstrumentedState) Traceback() { +- fmt.Printf("traceback at pc=%x. step=%d\n", m.state.PC, m.state.Step) ++ fmt.Printf("traceback at pc=%x. 
step=%d\n", m.state.Cpu.PC, m.state.Step) + for i := len(m.debug.stack) - 1; i >= 0; i-- { + s := m.debug.stack[i] + idx := len(m.debug.stack) - i - 1 +@@ -222,108 +117,13 @@ fmt.Printf("\t%d %x in %s caller=%08x\n", idx, s, m.debug.meta.LookupSymbol(s), m.debug.caller[i]) + } + } +  +-func (m *InstrumentedState) handleBranch(opcode uint32, insn uint32, rtReg uint32, rs uint32) error { +- if m.state.NextPC != m.state.PC+4 { +- panic("branch in delay slot") +- } +- +- shouldBranch := false +- if opcode == 4 || opcode == 5 { // beq/bne +- rt := m.state.Registers[rtReg] +- shouldBranch = (rs == rt && opcode == 4) || (rs != rt && opcode == 5) +- } else if opcode == 6 { +- shouldBranch = int32(rs) <= 0 // blez +- } else if opcode == 7 { +- shouldBranch = int32(rs) > 0 // bgtz +- } else if opcode == 1 { +- // regimm +- rtv := (insn >> 16) & 0x1F +- if rtv == 0 { // bltz +- shouldBranch = int32(rs) < 0 +- } +- if rtv == 1 { // bgez +- shouldBranch = int32(rs) >= 0 +- } +- } +- +- prevPC := m.state.PC +- m.state.PC = m.state.NextPC // execute the delay slot first +- if shouldBranch { +- m.state.NextPC = prevPC + 4 + (signExtend(insn&0xFFFF, 16) << 2) // then continue with the instruction the branch jumps to. +- } else { +- m.state.NextPC = m.state.NextPC + 4 // branch not taken +- } +- return nil +-} +- +-func (m *InstrumentedState) handleHiLo(fun uint32, rs uint32, rt uint32, storeReg uint32) error { +- val := uint32(0) +- switch fun { +- case 0x10: // mfhi +- val = m.state.HI +- case 0x11: // mthi +- m.state.HI = rs +- case 0x12: // mflo +- val = m.state.LO +- case 0x13: // mtlo +- m.state.LO = rs +- case 0x18: // mult +- acc := uint64(int64(int32(rs)) * int64(int32(rt))) +- m.state.HI = uint32(acc >> 32) +- m.state.LO = uint32(acc) +- case 0x19: // multu +- acc := uint64(uint64(rs) * uint64(rt)) +- m.state.HI = uint32(acc >> 32) +- m.state.LO = uint32(acc) +- case 0x1a: // div +- m.state.HI = uint32(int32(rs) % int32(rt)) +- m.state.LO = uint32(int32(rs) / int32(rt)) +- case 0x1b: // divu +- m.state.HI = rs % rt +- m.state.LO = rs / rt +- } +- +- if storeReg != 0 { +- m.state.Registers[storeReg] = val +- } +- +- m.state.PC = m.state.NextPC +- m.state.NextPC = m.state.NextPC + 4 +- return nil +-} +- +-func (m *InstrumentedState) handleJump(linkReg uint32, dest uint32) error { +- if m.state.NextPC != m.state.PC+4 { +- panic("jump in delay slot") +- } +- prevPC := m.state.PC +- m.state.PC = m.state.NextPC +- m.state.NextPC = dest +- if linkReg != 0 { +- m.state.Registers[linkReg] = prevPC + 8 // set the link-register to the instr after the delay slot instruction. 
+- } +- return nil +-} +- +-func (m *InstrumentedState) handleRd(storeReg uint32, val uint32, conditional bool) error { +- if storeReg >= 32 { +- panic("invalid register") +- } +- if storeReg != 0 && conditional { +- m.state.Registers[storeReg] = val +- } +- m.state.PC = m.state.NextPC +- m.state.NextPC = m.state.NextPC + 4 +- return nil +-} +- + func (m *InstrumentedState) mipsStep() error { + if m.state.Exited { + return nil + } + m.state.Step += 1 + // instruction fetch +- insn := m.state.Memory.GetMemory(m.state.PC) ++ insn := m.state.Memory.GetMemory(m.state.Cpu.PC) + opcode := insn >> 26 // 6-bits +  + // j-type j/jal +@@ -333,9 +133,9 @@ if opcode == 3 { + linkReg = 31 + } + // Take top 4 bits of the next PC (its 256 MB region), and concatenate with the 26-bit offset +- target := (m.state.NextPC & 0xF0000000) | ((insn & 0x03FFFFFF) << 2) ++ target := (m.state.Cpu.NextPC & 0xF0000000) | ((insn & 0x03FFFFFF) << 2) + m.pushStack(target) +- return m.handleJump(linkReg, target) ++ return handleJump(&m.state.Cpu, &m.state.Registers, linkReg, target) + } +  + // register fetch +@@ -369,7 +169,7 @@ rdReg = rtReg + } +  + if (opcode >= 4 && opcode < 8) || opcode == 1 { +- return m.handleBranch(opcode, insn, rtReg, rs) ++ return handleBranch(&m.state.Cpu, &m.state.Registers, opcode, insn, rtReg, rs) + } +  + storeAddr := uint32(0xFF_FF_FF_FF) +@@ -401,14 +201,14 @@ if fun == 9 { + linkReg = rdReg + } + m.popStack() +- return m.handleJump(linkReg, rs) ++ return handleJump(&m.state.Cpu, &m.state.Registers, linkReg, rs) + } +  + if fun == 0xa { // movz +- return m.handleRd(rdReg, rs, rt == 0) ++ return handleRd(&m.state.Cpu, &m.state.Registers, rdReg, rs, rt == 0) + } + if fun == 0xb { // movn +- return m.handleRd(rdReg, rs, rt != 0) ++ return handleRd(&m.state.Cpu, &m.state.Registers, rdReg, rs, rt != 0) + } +  + // syscall (can read and write) +@@ -419,7 +219,7 @@ + // lo and hi registers + // can write back + if fun >= 0x10 && fun < 0x1c { +- return m.handleHiLo(fun, rs, rt, rdReg) ++ return handleHiLo(&m.state.Cpu, &m.state.Registers, fun, rs, rt, rdReg) + } + } +  +@@ -435,5 +235,5 @@ m.state.Memory.SetMemory(storeAddr, val) + } +  + // write back the value to destination register +- return m.handleRd(rdReg, val, true) ++ return handleRd(&m.state.Cpu, &m.state.Registers, rdReg, val, true) + }
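A useful side effect of this refactor: because `handleJump`, `handleBranch`, `handleHiLo`, and `handleRd` now take a `*CpuScalars` and a register file instead of hanging off `InstrumentedState`, single instructions can be exercised with no VM or memory at all. A test-style sketch (in-package, since the handlers are unexported):

```go
package mipsevm

import "testing"

// A jal-style jump: the delay slot executes first (PC advances to NextPC),
// NextPC takes the jump target, and $ra (register 31) receives the address
// after the delay slot, prevPC + 8.
func TestHandleJumpSketch(t *testing.T) {
	cpu := CpuScalars{PC: 0x100, NextPC: 0x104}
	var registers [32]uint32
	if err := handleJump(&cpu, &registers, 31, 0x2000); err != nil {
		t.Fatal(err)
	}
	if cpu.PC != 0x104 || cpu.NextPC != 0x2000 || registers[31] != 0x108 {
		t.Fatalf("unexpected result: %+v ra=%#x", cpu, registers[31])
	}
}
```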
+ [file diffstat: OP → CELO, +95 -0]
diff --git OP/cannon/mipsevm/mips_instructions.go CELO/cannon/mipsevm/mips_instructions.go +index cd3920d10eb5b47d41cd17f89a1cd96ad04a5f81..285ed26b6e1b3f7708210d2d2ae1e23850a091fe 100644 +--- OP/cannon/mipsevm/mips_instructions.go ++++ CELO/cannon/mipsevm/mips_instructions.go +@@ -174,3 +174,98 @@ } else { + return dat & mask + } + } + -+ TokenState storage tokenState = tokenStates[tokenAddress]; -+ require(tokenState.handler != address(0), "Handler has to be set to sell token"); -+ require(FixidityLib.unwrap(tokenState.maxSlippage) != 0, "Max slippage has to be set to sell token"); -+ FixidityLib.Fraction memory balanceToProcess = -+ FixidityLib.newFixed(token.balanceOf(address(this)) - tokenState.toDistribute); ++func handleBranch(cpu *CpuScalars, registers *[32]uint32, opcode uint32, insn uint32, rtReg uint32, rs uint32) error { ++ if cpu.NextPC != cpu.PC+4 { ++ panic("branch in delay slot") ++ } + -+ uint256 balanceToBurn = (burnFraction.multiply(balanceToProcess).fromFixed()); ++ shouldBranch := false ++ if opcode == 4 || opcode == 5 { // beq/bne ++ rt := registers[rtReg] ++ shouldBranch = (rs == rt && opcode == 4) || (rs != rt && opcode == 5) ++ } else if opcode == 6 { ++ shouldBranch = int32(rs) <= 0 // blez ++ } else if opcode == 7 { ++ shouldBranch = int32(rs) > 0 // bgtz ++ } else if opcode == 1 { ++ // regimm ++ rtv := (insn >> 16) & 0x1F ++ if rtv == 0 { // bltz ++ shouldBranch = int32(rs) < 0 ++ } ++ if rtv == 1 { // bgez ++ shouldBranch = int32(rs) >= 0 ++ } ++ } + -+ tokenState.toDistribute = tokenState.toDistribute + balanceToProcess.fromFixed() - balanceToBurn; ++ prevPC := cpu.PC ++ cpu.PC = cpu.NextPC // execute the delay slot first ++ if shouldBranch { ++ cpu.NextPC = prevPC + 4 + (signExtend(insn&0xFFFF, 16) << 2) // then continue with the instruction the branch jumps to. 
++ } else { ++ cpu.NextPC = cpu.NextPC + 4 // branch not taken ++ } ++ return nil ++} + -+ // small numbers cause rounding errors and zero case should be skipped -+ if (balanceToBurn < MIN_BURN) { -+ return; -+ } ++func handleHiLo(cpu *CpuScalars, registers *[32]uint32, fun uint32, rs uint32, rt uint32, storeReg uint32) error { ++ val := uint32(0) ++ switch fun { ++ case 0x10: // mfhi ++ val = cpu.HI ++ case 0x11: // mthi ++ cpu.HI = rs ++ case 0x12: // mflo ++ val = cpu.LO ++ case 0x13: // mtlo ++ cpu.LO = rs ++ case 0x18: // mult ++ acc := uint64(int64(int32(rs)) * int64(int32(rt))) ++ cpu.HI = uint32(acc >> 32) ++ cpu.LO = uint32(acc) ++ case 0x19: // multu ++ acc := uint64(uint64(rs) * uint64(rt)) ++ cpu.HI = uint32(acc >> 32) ++ cpu.LO = uint32(acc) ++ case 0x1a: // div ++ cpu.HI = uint32(int32(rs) % int32(rt)) ++ cpu.LO = uint32(int32(rs) / int32(rt)) ++ case 0x1b: // divu ++ cpu.HI = rs % rt ++ cpu.LO = rs / rt ++ } + -+ if (dailySellLimitHit(tokenAddress, balanceToBurn)) { -+ // in case the limit is hit, burn the max possible -+ balanceToBurn = tokenState.currentDaySellLimit; -+ emit DailyLimitHit(tokenAddress, balanceToBurn); -+ } ++ if storeReg != 0 { ++ registers[storeReg] = val ++ } + -+ token.transfer(tokenState.handler, balanceToBurn); -+ IFeeHandlerSeller handler = IFeeHandlerSeller(tokenState.handler); ++ cpu.PC = cpu.NextPC ++ cpu.NextPC = cpu.NextPC + 4 ++ return nil ++} + -+ uint256 celoReceived = handler.sell( -+ tokenAddress, -+ registry.getAddressForOrDie(GOLD_TOKEN_REGISTRY_ID), -+ balanceToBurn, -+ FixidityLib.unwrap(tokenState.maxSlippage) -+ ); ++func handleJump(cpu *CpuScalars, registers *[32]uint32, linkReg uint32, dest uint32) error { ++ if cpu.NextPC != cpu.PC+4 { ++ panic("jump in delay slot") ++ } ++ prevPC := cpu.PC ++ cpu.PC = cpu.NextPC ++ cpu.NextPC = dest ++ if linkReg != 0 { ++ registers[linkReg] = prevPC + 8 // set the link-register to the instr after the delay slot instruction. ++ } ++ return nil ++} + -+ celoToBeBurned = celoToBeBurned + celoReceived; -+ tokenState.pastBurn = tokenState.pastBurn + balanceToBurn; -+ updateLimits(tokenAddress, balanceToBurn); ++func handleRd(cpu *CpuScalars, registers *[32]uint32, storeReg uint32, val uint32, conditional bool) error { ++ if storeReg >= 32 { ++ panic("invalid register") ++ } ++ if storeReg != 0 && conditional { ++ registers[storeReg] = val ++ } ++ cpu.PC = cpu.NextPC ++ cpu.NextPC = cpu.NextPC + 4 ++ return nil ++}
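The extracted handlers keep the MIPS delay-slot semantics: on a taken branch, the instruction at the old NextPC still executes before control reaches the target. A standalone sketch of that PC bookkeeping, using a local stand-in for CpuScalars rather than the real cannon/mipsevm types:

package main

import "fmt"

// Minimal local copy of the PC/NextPC pair, only to illustrate the
// bookkeeping; the real CpuScalars lives in cannon/mipsevm.
type CpuScalars struct{ PC, NextPC uint32 }

func signExtend(v uint32, bits uint) uint32 {
	if v&(1<<(bits-1)) != 0 {
		return v | (^uint32(0) << bits)
	}
	return v
}

// takeBranch mirrors handleBranch's taken path: the delay-slot instruction
// (at the old NextPC) runs first, then control lands on the target.
func takeBranch(cpu *CpuScalars, imm16 uint32) {
	prevPC := cpu.PC
	cpu.PC = cpu.NextPC                                    // execute the delay slot first
	cpu.NextPC = prevPC + 4 + (signExtend(imm16, 16) << 2) // then the branch target
}

func main() {
	cpu := &CpuScalars{PC: 0x1000, NextPC: 0x1004}
	takeBranch(cpu, 0xFFFF) // imm = -1: target is the branch instruction itself
	fmt.Printf("PC=%08x NextPC=%08x\n", cpu.PC, cpu.NextPC) // PC=00001004 NextPC=00001000
}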
+ [file diffstat: (new) → CELO, +195 -0]
diff --git OP/cannon/mipsevm/mips_syscalls.go CELO/cannon/mipsevm/mips_syscalls.go +new file mode 100644 +index 0000000000000000000000000000000000000000..098cf0cd7cfad28128dd889ee1a1648ea155af12 +--- /dev/null ++++ CELO/cannon/mipsevm/mips_syscalls.go +@@ -0,0 +1,195 @@ ++package mipsevm + -+ emit SoldAndBurnedToken(tokenAddress, balanceToBurn); -+ } ++import ( ++ "encoding/binary" ++ "io" + -+ /** -+ * @dev Distributes the available tokens for the specified token address to the fee beneficiary. -+ * @param tokenAddress The address of the token for which to distribute the available tokens. -+ */ -+ function distribute(address tokenAddress) external { -+ return _distribute(tokenAddress); -+ } ++ "github.com/ethereum/go-ethereum/common" ++ "github.com/ethereum/go-ethereum/common/hexutil" ++) + -+ function _distribute(address tokenAddress) private onlyWhenNotFrozen nonReentrant { -+ require(feeBeneficiary != address(0), "Can't distribute to the zero address"); -+ IERC20 token = IERC20(tokenAddress); -+ uint256 tokenBalance = token.balanceOf(address(this)); ++const ( ++ sysMmap = 4090 ++ sysBrk = 4045 ++ sysClone = 4120 ++ sysExitGroup = 4246 ++ sysRead = 4003 ++ sysWrite = 4004 ++ sysFcntl = 4055 ++) + -+ TokenState storage tokenState = tokenStates[tokenAddress]; -+ require( -+ tokenState.handler != address(0) || tokenAddress == registry.getAddressForOrDie(GOLD_TOKEN_REGISTRY_ID), -+ "Handler has to be set to sell token" -+ ); ++const ( ++ fdStdin = 0 ++ fdStdout = 1 ++ fdStderr = 2 ++ fdHintRead = 3 ++ fdHintWrite = 4 ++ fdPreimageRead = 5 ++ fdPreimageWrite = 6 ++) + -+ // safty check to avoid a revert due balance -+ uint256 balanceToDistribute = Math.min(tokenBalance, tokenState.toDistribute); ++const ( ++ MipsEBADF = 0x9 ++ MipsEINVAL = 0x16 ++) + -+ if (balanceToDistribute == 0) { -+ // don't distribute with zero balance -+ return; -+ } ++type PreimageReader func(key [32]byte, offset uint32) (dat [32]byte, datLen uint32) ++type MemTracker func(addr uint32) + -+ token.transfer(feeBeneficiary, balanceToDistribute); -+ tokenState.toDistribute = tokenState.toDistribute - balanceToDistribute; -+ } ++func getSyscallArgs(registers *[32]uint32) (syscallNum, a0, a1, a2 uint32) { ++ syscallNum = registers[2] // v0 + -+ /** -+ * @notice Returns the storage, major, minor, and patch version of the contract. -+ * @return Storage version of the contract. -+ * @return Major version of the contract. -+ * @return Minor version of the contract. -+ * @return Patch version of the contract. -+ */ -+ function getVersionNumber() external pure returns (uint256, uint256, uint256, uint256) { -+ return (1, 1, 0, 0); -+ } ++ a0 = registers[4] ++ a1 = registers[5] ++ a2 = registers[6] + -+ /** -+ * @notice Allows owner to set max slippage for a token. -+ * @param token Address of the token to set. -+ * @param newMax New sllipage to set, as Fixidity fraction. 
-+ */ -+ function setMaxSplippage(address token, uint256 newMax) external onlyOwner { -+ _setMaxSplippage(token, newMax); -+ } ++ return syscallNum, a0, a1, a2 ++} + -+ function _setMaxSplippage(address token, uint256 newMax) private { -+ TokenState storage tokenState = tokenStates[token]; -+ require(newMax != 0, "Cannot set max slippage to zero"); -+ tokenState.maxSlippage = FixidityLib.wrap(newMax); -+ require( -+ FixidityLib.lte(tokenState.maxSlippage, FixidityLib.fixed1()), "Splippage must be less than or equal to 1" -+ ); -+ emit MaxSlippageSet(token, newMax); -+ } ++func handleSysMmap(a0, a1, heap uint32) (v0, v1, newHeap uint32) { ++ v1 = uint32(0) ++ newHeap = heap + -+ /** -+ * @notice Allows owner to set the daily burn limit for a token. -+ * @param token Address of the token to set. -+ * @param newLimit The new limit to set, in the token units. -+ */ -+ function setDailySellLimit(address token, uint256 newLimit) external onlyOwner { -+ _setDailySellLimit(token, newLimit); -+ } ++ sz := a1 ++ if sz&PageAddrMask != 0 { // adjust size to align with page size ++ sz += PageSize - (sz & PageAddrMask) ++ } ++ if a0 == 0 { ++ v0 = heap ++ //fmt.Printf("mmap heap 0x%x size 0x%x\n", v0, sz) ++ newHeap += sz ++ } else { ++ v0 = a0 ++ //fmt.Printf("mmap hint 0x%x size 0x%x\n", v0, sz) ++ } + -+ function _setDailySellLimit(address token, uint256 newLimit) private { -+ TokenState storage tokenState = tokenStates[token]; -+ tokenState.dailySellLimit = newLimit; -+ emit DailyLimitSet(token, newLimit); -+ } ++ return v0, v1, newHeap ++} + -+ /** -+ * @dev Burns CELO tokens according to burnFraction. -+ */ -+ function burnCelo() external { -+ return _burnCelo(); -+ } ++func handleSysRead(a0, a1, a2 uint32, preimageKey [32]byte, preimageOffset uint32, preimageReader PreimageReader, memory *Memory, memTracker MemTracker) (v0, v1, newPreimageOffset uint32) { ++ // args: a0 = fd, a1 = addr, a2 = count ++ // returns: v0 = read, v1 = err code ++ v0 = uint32(0) ++ v1 = uint32(0) ++ newPreimageOffset = preimageOffset ++ ++ switch a0 { ++ case fdStdin: ++ // leave v0 and v1 zero: read nothing, no error ++ case fdPreimageRead: // pre-image oracle ++ effAddr := a1 & 0xFFffFFfc ++ memTracker(effAddr) ++ mem := memory.GetMemory(effAddr) ++ dat, datLen := preimageReader(preimageKey, preimageOffset) ++ //fmt.Printf("reading pre-image data: addr: %08x, offset: %d, datLen: %d, data: %x, key: %s count: %d\n", a1, m.state.PreimageOffset, datLen, dat[:datLen], m.state.PreimageKey, a2) ++ alignment := a1 & 3 ++ space := 4 - alignment ++ if space < datLen { ++ datLen = space ++ } ++ if a2 < datLen { ++ datLen = a2 ++ } ++ var outMem [4]byte ++ binary.BigEndian.PutUint32(outMem[:], mem) ++ copy(outMem[alignment:], dat[:datLen]) ++ memory.SetMemory(effAddr, binary.BigEndian.Uint32(outMem[:])) ++ newPreimageOffset += datLen ++ v0 = datLen ++ //fmt.Printf("read %d pre-image bytes, new offset: %d, eff addr: %08x mem: %08x\n", datLen, m.state.PreimageOffset, effAddr, outMem) ++ case fdHintRead: // hint response ++ // don't actually read into memory, just say we read it all, we ignore the result anyway ++ v0 = a2 ++ default: ++ v0 = 0xFFffFFff ++ v1 = MipsEBADF ++ } + -+ /** -+ * @dev Distributes the available tokens for all registered tokens to the feeBeneficiary. 
-+ */
-+ function distributeAll() external {
-+ return _distributeAll();
-+ }
+
-+ function _distributeAll() private {
-+ for (uint256 i = 0; i < EnumerableSet.length(activeTokens); i++) {
-+ address token = activeTokens.at(i);
-+ _distribute(token);
-+ }
-+ // distribute Celo
-+ _distribute(registry.getAddressForOrDie(GOLD_TOKEN_REGISTRY_ID));
-+ }
+
-+ /**
-+ * @dev Sells and distributes the available tokens for all registered tokens, then burns Celo.
-+ */
-+ function handleAll() external {
-+ return _handleAll();
-+ }
+
-+ function _handleAll() private {
-+ for (uint256 i = 0; i < EnumerableSet.length(activeTokens); i++) {
-+ // calling _handle here would trigger Celo burns and distributions
-+ // that can instead be batched once at the end
-+ address token = activeTokens.at(i);
-+ _sell(token);
-+ }
-+ _distributeAll(); // distributes Celo as well
-+ _burnCelo();
-+ }
+
-+ /**
-+ * @dev Sells the token and distributes the proceeds to the feeBeneficiary.
-+ */ -+ function handle(address tokenAddress) external { -+ return _handle(tokenAddress); -+ } ++ if a1 == 3 { // F_GETFL: get file descriptor flags ++ switch a0 { ++ case fdStdin, fdPreimageRead, fdHintRead: ++ v0 = 0 // O_RDONLY ++ case fdStdout, fdStderr, fdPreimageWrite, fdHintWrite: ++ v0 = 1 // O_WRONLY ++ default: ++ v0 = 0xFFffFFff ++ v1 = MipsEBADF ++ } ++ } else { ++ v0 = 0xFFffFFff ++ v1 = MipsEINVAL // cmd not recognized by this kernel ++ } + -+ function _handle(address tokenAddress) private { -+ // Celo doesn't have to be exchanged for anything -+ if (tokenAddress != registry.getAddressForOrDie(GOLD_TOKEN_REGISTRY_ID)) { -+ _sell(tokenAddress); -+ } -+ _burnCelo(); -+ _distribute(tokenAddress); -+ _distribute(registry.getAddressForOrDie(GOLD_TOKEN_REGISTRY_ID)); -+ } ++ return v0, v1 ++} + -+ /** -+ * @notice Burns all the Celo balance of this contract. -+ */ -+ function _burnCelo() private { -+ TokenState storage tokenState = tokenStates[registry.getAddressForOrDie(GOLD_TOKEN_REGISTRY_ID)]; -+ ICeloToken celo = ICeloToken(registry.getAddressForOrDie(GOLD_TOKEN_REGISTRY_ID)); ++func handleSyscallUpdates(cpu *CpuScalars, registers *[32]uint32, v0, v1 uint32) { ++ registers[2] = v0 ++ registers[7] = v1 + -+ uint256 balanceOfCelo = address(this).balance; ++ cpu.PC = cpu.NextPC ++ cpu.NextPC = cpu.NextPC + 4 ++}
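handleSysWrite buffers hint-fd writes and only dispatches complete, length-prefixed hints to the oracle. A self-contained sketch of that framing with toy data (the encoding/binary calls are the same ones the diff uses):

package main

import (
	"encoding/binary"
	"fmt"
)

// splitHints mirrors the fdHintWrite loop above: each hint is a big-endian
// uint32 length prefix followed by that many bytes; incomplete data stays
// buffered until more arrives.
func splitHints(buf []byte) (hints [][]byte, rest []byte) {
	for len(buf) >= 4 {
		hintLen := binary.BigEndian.Uint32(buf[:4])
		if hintLen > uint32(len(buf[4:])) {
			break // incomplete hint, keep buffering
		}
		hints = append(hints, buf[4:4+hintLen]) // without the length prefix
		buf = buf[4+hintLen:]
	}
	return hints, buf
}

func main() {
	var buf []byte
	buf = binary.BigEndian.AppendUint32(buf, 3)
	buf = append(buf, 'a', 'b', 'c')
	buf = binary.BigEndian.AppendUint32(buf, 9) // claims 9 bytes, only 2 present
	buf = append(buf, 'x', 'y')
	hints, rest := splitHints(buf)
	fmt.Printf("hints=%q buffered=%d bytes\n", hints, len(rest)) // hints=["abc"] buffered=6 bytes
}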
+ [file diffstat: OP → CELO, +1 -1]
diff --git OP/cannon/mipsevm/open_mips_tests/test/fcntl.asm CELO/cannon/mipsevm/open_mips_tests/test/fcntl.asm +index 451e90a117884ddc7d2eef98419cb79a381cfea2..5f597bbe0c43319b48c6b2d9da4d39c0e6cc87fe 100644 +--- OP/cannon/mipsevm/open_mips_tests/test/fcntl.asm ++++ CELO/cannon/mipsevm/open_mips_tests/test/fcntl.asm +@@ -5,7 +5,7 @@ .global test + .ent test +  + test: +- # fnctl(0, 3) ++ # fcntl(0, 3) + li $v0, 4055 + li $a0, 0x0 + li $a1, 0x3
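The test drives syscall 4055 with a0=0 and a1=3, i.e. fcntl(stdin, F_GETFL); per handleSysFcntl above it should return 0 (O_RDONLY) with no error. A standalone Go mirror of that case, with the fd constants inlined from mips_syscalls.go:

package main

import "fmt"

// fcntlGetFl mirrors handleSysFcntl's F_GETFL branch: read-only fds report
// 0 (O_RDONLY), write-only fds report 1 (O_WRONLY), anything else is EBADF.
func fcntlGetFl(fd uint32) (v0, v1 uint32) {
	switch fd {
	case 0, 3, 5: // stdin, hint read, pre-image read
		return 0, 0
	case 1, 2, 4, 6: // stdout, stderr, hint write, pre-image write
		return 1, 0
	default:
		return 0xFFffFFff, 0x9 // MipsEBADF
	}
}

func main() {
	v0, v1 := fcntlGetFl(0) // the fcntl.asm case: fcntl(0, 3)
	fmt.Println(v0, v1)     // 0 0
}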
+ [file diffstat: OP → CELO, +6 -4]
diff --git OP/cannon/mipsevm/patch.go CELO/cannon/mipsevm/patch.go +index 64a05e9611a45bc6740c51a8033afe547b830674..47abb41e0915919afee71eea5d9b3659a50f1b63 100644 +--- OP/cannon/mipsevm/patch.go ++++ CELO/cannon/mipsevm/patch.go +@@ -12,10 +12,12 @@ const HEAP_START = 0x05000000 +  + func LoadELF(f *elf.File) (*State, error) { + s := &State{ +- PC: uint32(f.Entry), +- NextPC: uint32(f.Entry + 4), +- HI: 0, +- LO: 0, ++ Cpu: CpuScalars{ ++ PC: uint32(f.Entry), ++ NextPC: uint32(f.Entry + 4), ++ LO: 0, ++ HI: 0, ++ }, + Heap: HEAP_START, + Registers: [32]uint32{}, + Memory: NewMemory(),
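LoadELF now seeds the nested CpuScalars from the ELF entry point: PC at the entry, NextPC one instruction later. A minimal standalone sketch using the standard library's debug/elf (the file path is a placeholder):

package main

import (
	"debug/elf"
	"fmt"
	"log"
)

func main() {
	f, err := elf.Open("bin/hello.elf") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Same derivation as LoadELF above: PC at the entry, NextPC = PC + 4.
	pc := uint32(f.Entry)
	fmt.Printf("Cpu: PC=%08x NextPC=%08x LO=0 HI=0\n", pc, pc+4)
}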
+ [file diffstat: OP → CELO, +71 -9]
diff --git OP/cannon/mipsevm/state.go CELO/cannon/mipsevm/state.go +index d8a5dcfe9ae258bae62fa66e58c01f5db8d64ad4..c8a681f23e37eb9436722f89e4c454fa8a115f13 100644 +--- OP/cannon/mipsevm/state.go ++++ CELO/cannon/mipsevm/state.go +@@ -2,6 +2,7 @@ package mipsevm +  + import ( + "encoding/binary" ++ "encoding/json" + "fmt" +  + "github.com/ethereum/go-ethereum/common" +@@ -12,17 +13,22 @@ + // StateWitnessSize is the size of the state witness encoding in bytes. + var StateWitnessSize = 226 +  ++type CpuScalars struct { ++ PC uint32 `json:"pc"` ++ NextPC uint32 `json:"nextPC"` ++ LO uint32 `json:"lo"` ++ HI uint32 `json:"hi"` ++} + -+ uint256 balanceToProcess = balanceOfCelo - tokenState.toDistribute - celoToBeBurned; -+ uint256 currentBalanceToBurn = FixidityLib.newFixed(balanceToProcess).multiply(burnFraction).fromFixed(); -+ uint256 totalBalanceToBurn = currentBalanceToBurn + celoToBeBurned; -+ celo.burn(totalBalanceToBurn); + type State struct { + Memory *Memory `json:"memory"` +  + PreimageKey common.Hash `json:"preimageKey"` + PreimageOffset uint32 `json:"preimageOffset"` // note that the offset includes the 8-byte length prefix +  +- PC uint32 `json:"pc"` +- NextPC uint32 `json:"nextPC"` +- LO uint32 `json:"lo"` +- HI uint32 `json:"hi"` +- Heap uint32 `json:"heap"` // to handle mmap growth ++ Cpu CpuScalars `json:"cpu"` ++ ++ Heap uint32 `json:"heap"` // to handle mmap growth +  + ExitCode uint8 `json:"exit"` + Exited bool `json:"exited"` +@@ -42,6 +48,62 @@ // and should only be read when len(LastHint) > 4 && uint32(LastHint[:4]) <= len(LastHint[4:]) + LastHint hexutil.Bytes `json:"lastHint,omitempty"` + } +  ++type stateMarshaling struct { ++ Memory *Memory `json:"memory"` ++ PreimageKey common.Hash `json:"preimageKey"` ++ PreimageOffset uint32 `json:"preimageOffset"` ++ PC uint32 `json:"pc"` ++ NextPC uint32 `json:"nextPC"` ++ LO uint32 `json:"lo"` ++ HI uint32 `json:"hi"` ++ Heap uint32 `json:"heap"` ++ ExitCode uint8 `json:"exit"` ++ Exited bool `json:"exited"` ++ Step uint64 `json:"step"` ++ Registers [32]uint32 `json:"registers"` ++ LastHint hexutil.Bytes `json:"lastHint,omitempty"` ++} + -+ celoToBeBurned = 0; -+ tokenState.toDistribute = tokenState.toDistribute + balanceToProcess - currentBalanceToBurn; -+ } ++func (s *State) MarshalJSON() ([]byte, error) { // nosemgrep ++ sm := &stateMarshaling{ ++ Memory: s.Memory, ++ PreimageKey: s.PreimageKey, ++ PreimageOffset: s.PreimageOffset, ++ PC: s.Cpu.PC, ++ NextPC: s.Cpu.NextPC, ++ LO: s.Cpu.LO, ++ HI: s.Cpu.HI, ++ Heap: s.Heap, ++ ExitCode: s.ExitCode, ++ Exited: s.Exited, ++ Step: s.Step, ++ Registers: s.Registers, ++ LastHint: s.LastHint, ++ } ++ return json.Marshal(sm) ++} + -+ /** -+ * @param token The address of the token to query. -+ * @return The amount burned for a token. -+ */ -+ function getPastBurnForToken(address token) external view returns (uint256) { -+ return tokenStates[token].pastBurn; -+ } ++func (s *State) UnmarshalJSON(data []byte) error { ++ sm := new(stateMarshaling) ++ if err := json.Unmarshal(data, sm); err != nil { ++ return err ++ } ++ s.Memory = sm.Memory ++ s.PreimageKey = sm.PreimageKey ++ s.PreimageOffset = sm.PreimageOffset ++ s.Cpu.PC = sm.PC ++ s.Cpu.NextPC = sm.NextPC ++ s.Cpu.LO = sm.LO ++ s.Cpu.HI = sm.HI ++ s.Heap = sm.Heap ++ s.ExitCode = sm.ExitCode ++ s.Exited = sm.Exited ++ s.Step = sm.Step ++ s.Registers = sm.Registers ++ s.LastHint = sm.LastHint ++ return nil ++} + -+ /** -+ * @param token The address of the token to query. -+ * @param amountToBurn The amount of the token to burn. 
-+ * @return Returns true if burning amountToBurn would exceed the daily limit. -+ */ -+ function dailySellLimitHit(address token, uint256 amountToBurn) public returns (bool) { -+ TokenState storage tokenState = tokenStates[token]; + func (s *State) GetStep() uint64 { return s.Step } +  + func (s *State) VMStatus() uint8 { +@@ -54,10 +116,10 @@ memRoot := s.Memory.MerkleRoot() + out = append(out, memRoot[:]...) + out = append(out, s.PreimageKey[:]...) + out = binary.BigEndian.AppendUint32(out, s.PreimageOffset) +- out = binary.BigEndian.AppendUint32(out, s.PC) +- out = binary.BigEndian.AppendUint32(out, s.NextPC) +- out = binary.BigEndian.AppendUint32(out, s.LO) +- out = binary.BigEndian.AppendUint32(out, s.HI) ++ out = binary.BigEndian.AppendUint32(out, s.Cpu.PC) ++ out = binary.BigEndian.AppendUint32(out, s.Cpu.NextPC) ++ out = binary.BigEndian.AppendUint32(out, s.Cpu.LO) ++ out = binary.BigEndian.AppendUint32(out, s.Cpu.HI) + out = binary.BigEndian.AppendUint32(out, s.Heap) + out = append(out, s.ExitCode) + if s.Exited {
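The codec change keeps the on-disk JSON flat (pc/nextPC/lo/hi at the top level) while the in-memory State nests them in CpuScalars, so existing state files stay readable. A generic sketch of that flatten-on-marshal pattern with toy types, not the real State:

package main

import (
	"encoding/json"
	"fmt"
)

type cpu struct{ PC, NextPC uint32 }

type state struct {
	Cpu  cpu
	Heap uint32
}

// stateJSON is the flat wire form, mirroring stateMarshaling above.
type stateJSON struct {
	PC     uint32 `json:"pc"`
	NextPC uint32 `json:"nextPC"`
	Heap   uint32 `json:"heap"`
}

func (s *state) MarshalJSON() ([]byte, error) {
	return json.Marshal(&stateJSON{PC: s.Cpu.PC, NextPC: s.Cpu.NextPC, Heap: s.Heap})
}

func (s *state) UnmarshalJSON(b []byte) error {
	var sj stateJSON
	if err := json.Unmarshal(b, &sj); err != nil {
		return err
	}
	s.Cpu = cpu{PC: sj.PC, NextPC: sj.NextPC}
	s.Heap = sj.Heap
	return nil
}

func main() {
	b, _ := json.Marshal(&state{Cpu: cpu{PC: 4, NextPC: 8}, Heap: 0x05000000})
	fmt.Println(string(b)) // {"pc":4,"nextPC":8,"heap":83886080} — flat, no "cpu" object

	var round state
	_ = json.Unmarshal(b, &round)
	fmt.Println(round.Cpu.PC, round.Cpu.NextPC) // 4 8
}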
+ [file diffstat: OP → CELO, +79 -22]
diff --git OP/cannon/mipsevm/state_test.go CELO/cannon/mipsevm/state_test.go +index b430398c389b1c8405955c49a9ac3b324b308fea..dfb90674ec78ae902fcc339e2988ecc83bc2cf8d 100644 +--- OP/cannon/mipsevm/state_test.go ++++ CELO/cannon/mipsevm/state_test.go +@@ -44,7 +44,7 @@ //state, err := LoadELF(elfProgram) + //require.NoError(t, err, "must load ELF into state") + programMem, err := os.ReadFile(fn) + require.NoError(t, err) +- state := &State{PC: 0, NextPC: 4, Memory: NewMemory()} ++ state := &State{Cpu: CpuScalars{PC: 0, NextPC: 4}, Memory: NewMemory()} + err = state.Memory.SetMemoryRange(0, bytes.NewReader(programMem)) + require.NoError(t, err, "load program into state") +  +@@ -54,7 +54,7 @@ + us := NewInstrumentedState(state, oracle, os.Stdout, os.Stderr) +  + for i := 0; i < 1000; i++ { +- if us.state.PC == endAddr { ++ if us.state.Cpu.PC == endAddr { + break + } + if exitGroup && us.state.Exited { +@@ -65,11 +65,11 @@ require.NoError(t, err) + } +  + if exitGroup { +- require.NotEqual(t, uint32(endAddr), us.state.PC, "must not reach end") ++ require.NotEqual(t, uint32(endAddr), us.state.Cpu.PC, "must not reach end") + require.True(t, us.state.Exited, "must set exited state") + require.Equal(t, uint8(1), us.state.ExitCode, "must exit with 1") + } else { +- require.Equal(t, uint32(endAddr), us.state.PC, "must reach end") ++ require.Equal(t, uint32(endAddr), us.state.Cpu.PC, "must reach end") + done, result := state.Memory.GetMemory(baseAddrEnd+4), state.Memory.GetMemory(baseAddrEnd+8) + // inspect test result + require.Equal(t, done, uint32(1), "must be done") +@@ -127,15 +127,7 @@ } + } +  + func TestHello(t *testing.T) { +- elfProgram, err := elf.Open("../example/bin/hello.elf") +- require.NoError(t, err, "open ELF file") +- +- state, err := LoadELF(elfProgram) +- require.NoError(t, err, "load ELF into state") +- +- err = PatchGo(elfProgram, state) +- require.NoError(t, err, "apply Go runtime patches") +- require.NoError(t, PatchStack(state), "add initial stack") ++ state := loadELFProgram(t, "../example/bin/hello.elf") +  + var stdOutBuf, stdErrBuf bytes.Buffer + us := NewInstrumentedState(state, nil, io.MultiWriter(&stdOutBuf, os.Stdout), io.MultiWriter(&stdErrBuf, os.Stderr)) +@@ -225,15 +217,7 @@ return oracle, fmt.Sprintf("computing %d * %d + %d\nclaim %d is good!\n", s, a, b, s*a+b), "started!" + } +  + func TestClaim(t *testing.T) { +- elfProgram, err := elf.Open("../example/bin/claim.elf") +- require.NoError(t, err, "open ELF file") +- +- state, err := LoadELF(elfProgram) +- require.NoError(t, err, "load ELF into state") +- +- err = PatchGo(elfProgram, state) +- require.NoError(t, err, "apply Go runtime patches") +- require.NoError(t, PatchStack(state), "add initial stack") ++ state := loadELFProgram(t, "../example/bin/claim.elf") +  + oracle, expectedStdOut, expectedStdErr := claimTestOracle(t) +  +@@ -255,6 +239,44 @@ require.Equal(t, expectedStdOut, stdOutBuf.String(), "stdout") + require.Equal(t, expectedStdErr, stdErrBuf.String(), "stderr") + } +  ++func TestAlloc(t *testing.T) { ++ t.Skip("TODO(client-pod#906): Currently fails on Single threaded Cannon. 
Re-enable for the MT FPVM") ++ ++ state := loadELFProgram(t, "../example/bin/alloc.elf") ++ const numAllocs = 100 // where each alloc is a 32 MiB chunk ++ oracle := allocOracle(t, numAllocs) ++ ++ // completes in ~870 M steps ++ us := NewInstrumentedState(state, oracle, os.Stdout, os.Stderr) ++ for i := 0; i < 20_000_000_000; i++ { ++ if us.state.Exited { ++ break ++ } ++ _, err := us.Step(false) ++ require.NoError(t, err) ++ if state.Step%10_000_000 == 0 { ++ t.Logf("Completed %d steps", state.Step) ++ } ++ } ++ t.Logf("Completed in %d steps", state.Step) ++ require.True(t, state.Exited, "must complete program") ++ require.Equal(t, uint8(0), state.ExitCode, "exit with 0") ++ require.Less(t, state.Memory.PageCount()*PageSize, 1*1024*1024*1024, "must not allocate more than 1 GiB") ++} + -+ if (tokenState.dailySellLimit == 0) { -+ // if no limit set, assume uncapped -+ return false; -+ } ++func loadELFProgram(t *testing.T, name string) *State { ++ elfProgram, err := elf.Open(name) ++ require.NoError(t, err, "open ELF file") + -+ uint256 currentDay = block.timestamp / 1 days; -+ // Pattern borrowed from Reserve.sol -+ if (currentDay > lastLimitDay) { -+ lastLimitDay = currentDay; -+ tokenState.currentDaySellLimit = tokenState.dailySellLimit; -+ } ++ state, err := LoadELF(elfProgram) ++ require.NoError(t, err, "load ELF into state") + -+ return amountToBurn >= tokenState.currentDaySellLimit; -+ } ++ err = PatchGo(elfProgram, state) ++ require.NoError(t, err, "apply Go runtime patches") ++ require.NoError(t, PatchStack(state), "add initial stack") ++ return state ++} + -+ /** -+ * @notice Updates the current day limit for a token. -+ * @param token The address of the token to query. -+ * @param amountBurned the amount of the token that was burned. -+ */ -+ function updateLimits(address token, uint256 amountBurned) private { -+ TokenState storage tokenState = tokenStates[token]; + func staticOracle(t *testing.T, preimageData []byte) *testOracle { + return &testOracle{ + hint: func(v []byte) {}, +@@ -289,6 +311,18 @@ }, + } + } +  ++func allocOracle(t *testing.T, numAllocs int) *testOracle { ++ return &testOracle{ ++ hint: func(v []byte) {}, ++ getPreimage: func(k [32]byte) []byte { ++ if k != preimage.LocalIndexKey(0).PreimageKey() { ++ t.Fatalf("invalid preimage request for %x", k) ++ } ++ return binary.LittleEndian.AppendUint64(nil, uint64(numAllocs)) ++ }, ++ } ++} + -+ if (tokenState.dailySellLimit == 0) { -+ // if no limit set, assume uncapped -+ return; -+ } -+ tokenState.currentDaySellLimit = tokenState.currentDaySellLimit - amountBurned; -+ emit DailySellLimitUpdated(amountBurned); -+ } + func selectOracleFixture(t *testing.T, programName string) PreimageOracle { + if strings.HasPrefix(programName, "oracle_kzg") { + precompile := common.BytesToAddress([]byte{0xa}) +@@ -301,3 +335,26 @@ } else { + return nil + } + } + -+ /** -+ * @notice Allows owner to transfer tokens of this contract. It's meant for governance to -+ * trigger use cases not contemplated in this contract. -+ * @param token The address of the token to transfer. -+ * @param recipient The address of the recipient to transfer the tokens to. -+ * @param value The amount of tokens to transfer. -+ * @return A boolean indicating whether the transfer was successful or not. 
-+ */ -+ function transfer(address token, address recipient, uint256 value) external onlyOwner returns (bool) { -+ return IERC20(token).transfer(recipient, value); -+ } ++func TestStateJSONCodec(t *testing.T) { ++ elfProgram, err := elf.Open("../example/bin/hello.elf") ++ require.NoError(t, err, "open ELF file") ++ state, err := LoadELF(elfProgram) ++ require.NoError(t, err, "load ELF into state") ++ ++ stateJSON, err := state.MarshalJSON() ++ require.NoError(t, err) ++ ++ newState := new(State) ++ require.NoError(t, newState.UnmarshalJSON(stateJSON)) ++ ++ require.Equal(t, state.PreimageKey, newState.PreimageKey) ++ require.Equal(t, state.PreimageOffset, newState.PreimageOffset) ++ require.Equal(t, state.Cpu, newState.Cpu) ++ require.Equal(t, state.Heap, newState.Heap) ++ require.Equal(t, state.ExitCode, newState.ExitCode) ++ require.Equal(t, state.Exited, newState.Exited) ++ require.Equal(t, state.Memory.MerkleRoot(), newState.Memory.MerkleRoot()) ++ require.Equal(t, state.Registers, newState.Registers) ++ require.Equal(t, state.Step, newState.Step) +}
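allocOracle above serves the allocation count as the local pre-image at index 0, encoded as a little-endian uint64. A tiny sketch of that payload, assuming only the encoding shown in the test:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Same encoding as allocOracle: numAllocs = 100 as 8 little-endian bytes.
	payload := binary.LittleEndian.AppendUint64(nil, 100)
	fmt.Printf("% x\n", payload)                     // 64 00 00 00 00 00 00 00
	fmt.Println(binary.LittleEndian.Uint64(payload)) // 100, as the guest reads it back
}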
@@ -9526,9 +40507,166 @@
+ [file diffstat: OP → CELO, +16 -12]
diff --git OP/docker-bake.hcl CELO/docker-bake.hcl +index 977aaf2ddbf95367b536285885aaa82e43afcf52..3fe9fabaf1727ae04b7697b69c925b236e068ebe 100644 +--- OP/docker-bake.hcl ++++ CELO/docker-bake.hcl +@@ -61,6 +61,10 @@ variable "OP_PROGRAM_VERSION" { + default = "${GIT_VERSION}" + } +  ++variable "OP_SUPERVISOR_VERSION" { ++ default = "${GIT_VERSION}" ++} ++ + variable "CANNON_VERSION" { + default = "${GIT_VERSION}" + } +@@ -186,30 +190,30 @@ platforms = split(",", PLATFORMS) + tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-program:${tag}"] + } +  +-target "cannon" { ++target "op-supervisor" { + dockerfile = "ops/docker/op-stack-go/Dockerfile" + context = "." + args = { + GIT_COMMIT = "${GIT_COMMIT}" + GIT_DATE = "${GIT_DATE}" +- CANNON_VERSION = "${CANNON_VERSION}" ++ OP_SUPERVISOR_VERSION = "${OP_SUPERVISOR_VERSION}" + } +- target = "cannon-target" ++ target = "op-supervisor-target" + platforms = split(",", PLATFORMS) +- tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/cannon:${tag}"] ++ tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-supervisor:${tag}"] + } +  +-target "proxyd" { +- dockerfile = "./proxyd/Dockerfile" +- context = "./" ++target "cannon" { ++ dockerfile = "ops/docker/op-stack-go/Dockerfile" ++ context = "." + args = { +- // proxyd dockerfile has no _ in the args +- GITCOMMIT = "${GIT_COMMIT}" +- GITDATE = "${GIT_DATE}" +- GITVERSION = "${GIT_VERSION}" ++ GIT_COMMIT = "${GIT_COMMIT}" ++ GIT_DATE = "${GIT_DATE}" ++ CANNON_VERSION = "${CANNON_VERSION}" + } ++ target = "cannon-target" + platforms = split(",", PLATFORMS) +- tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/proxyd:${tag}"] ++ tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/cannon:${tag}"] + } +  + target "chain-mon" {
+ [file diffstat: OP → CELO, +4 -4]
diff --git OP/docs/fault-proof-alpha/README.md CELO/docs/fault-proof-alpha/README.md +index f46e51ba7dd2650af174e433d578111b6475602b..0e647cf60d9dfe12985e551d13503c27fdf03437 100644 +--- OP/docs/fault-proof-alpha/README.md ++++ CELO/docs/fault-proof-alpha/README.md +@@ -16,10 +16,10 @@ + ### Contents +  + * Specifications +- * [Generic Fault Proof System](../../specs/fault-proof.md) +- * [Generic Dispute Game Interface](../../specs/dispute-game-interface.md) +- * [Fault Dispute Game](../../specs/fault-dispute-game.md) +- * [Cannon VM](../../specs/cannon-fault-proof-vm.md) ++ * [Generic Fault Proof System](https://github.com/ethereum-optimism/specs/blob/main/specs/fault-proof/index.md) ++ * [Generic Dispute Game Interface](https://github.com/ethereum-optimism/specs/blob/main/specs/fault-proof/stage-one/dispute-game-interface.md) ++ * [Fault Dispute Game](https://github.com/ethereum-optimism/specs/blob/main/specs/fault-proof/stage-one/fault-dispute-game.md) ++ * [Cannon VM](https://github.com/ethereum-optimism/specs/blob/main/specs/fault-proof/cannon-fault-proof-vm.md) + * [Deployment Details](./deployments.md) + * [Manual Usage](./manual.md) + * [Creating Traces with Cannon](./cannon.md)
+ [file header]
@@ -9537,13 +40675,13 @@
- [old badge: (new)]
+ [new badge: OP → CELO]
@@ -9553,120 +40691,411 @@
- [old diffstat: +92 -0]
+ [new diffstat: +3 -3]
diff --git OP/docs/fault-proof-alpha/immunefi.md CELO/docs/fault-proof-alpha/immunefi.md +index f3ad942950b02f30486c1cfc0c4b184513fd8e0d..a8becf158f5f8bc560f980dc1a60890a5bdbadc9 100644 +--- OP/docs/fault-proof-alpha/immunefi.md ++++ CELO/docs/fault-proof-alpha/immunefi.md +@@ -88,13 +88,13 @@ See our bounty program on [Immunefi][immunefi] for information regarding reward sizes. +  + <!-- LINKS --> + [cannon]: https://github.com/ethereum-optimism/optimism/tree/develop/cannon +-[cannon-vm-specs]: https://github.com/ethereum-optimism/optimism/blob/develop/specs/cannon-fault-proof-vm.md ++[cannon-vm-specs]: https://github.com/ethereum-optimism/specs/blob/main/specs/fault-proof/cannon-fault-proof-vm.md + [dispute-game]: https://github.com/ethereum-optimism/optimism/tree/develop/packages/contracts-bedrock/src/dispute +-[fault-dispute-specs]: https://github.com/ethereum-optimism/optimism/blob/develop/specs/fault-dispute-game.md ++[fault-dispute-specs]: https://github.com/ethereum-optimism/specs/blob/main/specs/fault-proof/stage-one/fault-dispute-game.md + [cannon-contracts]: https://github.com/ethereum-optimism/optimism/tree/develop/packages/contracts-bedrock/src/cannon + [op-program]: https://github.com/ethereum-optimism/optimism/tree/develop/op-program + [op-challenger]: https://github.com/ethereum-optimism/optimism/tree/develop/op-challenger + [alphabet-vm]: https://github.com/ethereum-optimism/optimism/blob/c1cbacef0097c28f999e3655200e6bd0d4dba9f2/packages/contracts-bedrock/test/FaultDisputeGame.t.sol#L977-L1005 +-[fault-proof-specs]: https://github.com/ethereum-optimism/optimism/blob/develop/specs/fault-proof.md ++[fault-proof-specs]: https://github.com/ethereum-optimism/specs/blob/main/specs/fault-proof/index.md + [immunefi]: https://immunefi.com/bounty/optimism/ + [invalid-proposal-doc]: https://github.com/ethereum-optimism/optimism/blob/develop/docs/fault-proof-alpha/invalid-proposals.md
+ [file diffstat: OP → CELO, +3 -3]
diff --git OP/docs/fault-proof-alpha/manual.md CELO/docs/fault-proof-alpha/manual.md +index 36580defa0d69433f436c8a8b0ccbf8100740671..0963cdf44f94be51d297f503597a926af6961732 100644 +--- OP/docs/fault-proof-alpha/manual.md ++++ CELO/docs/fault-proof-alpha/manual.md +@@ -30,7 +30,7 @@ arbitrary hash can be used for claim values. For more advanced cases [cannon can be used](./cannon.md) to generate a + trace, including the claim values to use at specific steps. Note that it is not valid to create a game that disputes an + output root, using the final hash from a trace that confirms the output root is valid. To dispute an output root + successfully, the trace must resolve that the disputed output root is invalid. This is indicated by the first byte of +-the claim value being set to the invalid [VM status](../../specs/cannon-fault-proof-vm.md#state-hash) (`0x01`). ++the claim value being set to the invalid [VM status](https://github.com/ethereum-optimism/specs/blob/main/specs/fault-proof/cannon-fault-proof-vm.md#state-hash) (`0x01`). +  + The game can then be created by calling the `create` method on the `DisputeGameFactory` contract. This requires three + parameters: +@@ -94,9 +94,9 @@ #### Populating the Pre-image Oracle +  + When the instruction to be executed as part of a `step` call reads from some pre-image data, that data must be loaded + into the pre-image oracle prior to calling `step`. +-For [local pre-image keys](../../specs/fault-proof.md#type-1-local-key), the pre-image must be populated via ++For [local pre-image keys](https://github.com/ethereum-optimism/specs/blob/main/specs/fault-proof/index.md#type-1-local-key), the pre-image must be populated via + the `FaultDisputeGame` contract by calling the `addLocalData` function. +-For [global keccak256 keys](../../specs/fault-proof.md#type-2-global-keccak256-key), the data should be added directly ++For [global keccak256 keys](https://github.com/ethereum-optimism/specs/blob/main/specs/fault-proof/index.md#type-2-global-keccak256-key), the data should be added directly + to the pre-image oracle contract. +  + ### Resolving a Game
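The surrounding text notes that a dispute is signaled by the claim's first byte carrying the invalid VM status (0x01). A sketch of that construction, assuming the spec's definition of the state hash as the keccak256 of the encoded state witness with the status code stamped into byte 0 (status names here are per the linked Cannon FPVM spec: 0 valid, 1 invalid, 2 unfinished, 3 panic):

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

// stateHash hashes the witness and overwrites byte 0 with the VM status,
// so on-chain code can read the outcome without decoding the witness.
func stateHash(witness []byte, status uint8) [32]byte {
	h := sha3.NewLegacyKeccak256()
	h.Write(witness)
	var out [32]byte
	copy(out[:], h.Sum(nil))
	out[0] = status
	return out
}

func main() {
	hash := stateHash([]byte("example witness"), 0x01) // 0x01 = invalid: disputes the output root
	fmt.Printf("%x\n", hash)
}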
+ [file diffstat: OP → CELO, +2 -2]
diff --git OP/docs/postmortems/2022-02-02-inflation-vuln.md CELO/docs/postmortems/2022-02-02-inflation-vuln.md +index 11574eed70c2c75b8fa06e13745ffb380628dea7..a755b0fdfe38f07bdf15eda8e9d5580a2bf1d588 100644 +--- OP/docs/postmortems/2022-02-02-inflation-vuln.md ++++ CELO/docs/postmortems/2022-02-02-inflation-vuln.md +@@ -5,7 +5,7 @@ It also details our response, lessons learned, and subsequent changes to our processes. +  + ## Incident Summary +  +-A vulnerability in Optimism’s fork of [Geth](https://github.com/ethereum/go-ethereum) (which we refer to as [L2Geth](../../l2geth/README.md)) was reported ++A vulnerability in Optimism’s fork of [Geth](https://github.com/ethereum/go-ethereum) (which we refer to as [L2Geth](https://github.com/ethereum-optimism/optimism-legacy/blob/8205f678b7b4ac4625c2afe351b9c82ffaa2e795/l2geth/README.md)) was reported + to us by [Jay Freeman](https://twitter.com/saurik) (AKA saurik) on February 2nd, 2022. If exploited, + this vulnerability would allow anyone to mint an unbounded amount of ETH on Optimism. +  +@@ -133,7 +133,7 @@ the PR (36,311 lines added, 47,430 lines removed), which consumed the attention of our entire + engineering team with a sense of urgency for several months. +  + An additional factor contributing to this bug was the significant complexity of the +-[L2Geth](https://github.com/ethereum-optimism/optimism/tree/master/l2geth) codebase, which is a fork ++[L2Geth](https://github.com/ethereum-optimism/optimism-legacy/blob/8205f678b7b4ac4625c2afe351b9c82ffaa2e795/l2geth) codebase, which is a fork + of [Geth](https://github.com/ethereum/go-ethereum). Geth itself is already a very complex codebase. + The changes introduced to L2Geth in order to support the OVM made it much more complex, such that + very few people properly understood how it worked.
+ [file diffstat: (new) → CELO, binary file]
diff --git OP/packages/contracts-bedrock/src/celo/FeeHandlerSeller.sol CELO/packages/contracts-bedrock/src/celo/FeeHandlerSeller.sol +
diff --git OP/docs/security-reviews/2024_05-FaultProofs-Sherlock.pdf CELO/docs/security-reviews/2024_05-FaultProofs-Sherlock.pdf new file mode 100644 -index 0000000000000000000000000000000000000000..4d22125af4d647021d77e0ed4b59d09049dd6bac ---- /dev/null -+++ CELO/packages/contracts-bedrock/src/celo/FeeHandlerSeller.sol -@@ -0,0 +1,92 @@ -+// SPDX-License-Identifier: LGPL-3.0-only -+pragma solidity ^0.8.15; -+ -+import "./common/FixidityLib.sol"; -+import "../../lib/openzeppelin-contracts/contracts/access/Ownable.sol"; -+import "../../lib/openzeppelin-contracts/contracts/token/ERC20/IERC20.sol"; -+import "./UsingRegistry.sol"; -+import "./common/Initializable.sol"; -+ -+// Abstract class for a FeeHandlerSeller, as defined in CIP-52 -+// https://github.com/celo-org/celo-proposals/blob/master/CIPs/cip-0052.md -+abstract contract FeeHandlerSeller is Ownable, Initializable, UsingRegistry { -+ using FixidityLib for FixidityLib.Fraction; -+ -+ // Address of the token -+ // Minimal number of reports in SortedOracles contract -+ mapping(address => uint256) public minimumReports; -+ -+ event MinimumReportsSet(address tokenAddress, uint256 minimumReports); -+ event TokenSold(address soldTokenAddress, address boughtTokenAddress, uint256 amount); -+ -+ constructor(bool testingDeployment) Initializable(testingDeployment) { } -+ -+ function initialize( -+ address _registryAddress, -+ address[] calldata tokenAddresses, -+ uint256[] calldata newMininumReports -+ ) -+ external -+ initializer -+ { -+ _transferOwnership(msg.sender); -+ setRegistry(_registryAddress); -+ -+ for (uint256 i = 0; i < tokenAddresses.length; i++) { -+ _setMinimumReports(tokenAddresses[i], newMininumReports[i]); -+ } -+ } -+ -+ /** -+ * @notice Allows owner to set the minimum number of reports required. -+ * @param newMininumReports The new update minimum number of reports required. -+ */ -+ function setMinimumReports(address tokenAddress, uint256 newMininumReports) public onlyOwner { -+ _setMinimumReports(tokenAddress, newMininumReports); -+ } -+ -+ function _setMinimumReports(address tokenAddress, uint256 newMininumReports) internal { -+ minimumReports[tokenAddress] = newMininumReports; -+ emit MinimumReportsSet(tokenAddress, newMininumReports); -+ } -+ -+ /** -+ * @dev Calculates the minimum amount of tokens that should be received for the specified -+ * amount with the given mid-price and maximum slippage. -+ * @param midPriceNumerator The numerator of the mid-price for the token pair. -+ * @param midPriceDenominator The denominator of the mid-price for the token pair. -+ * @param amount The amount of tokens to be exchanged. -+ * @param maxSlippage The maximum slippage percentage as a fraction of the mid-price. -+ * @return The minimum amount of tokens that should be received as a uint256 value. -+ */ -+ function calculateMinAmount( -+ uint256 midPriceNumerator, -+ uint256 midPriceDenominator, -+ uint256 amount, -+ uint256 maxSlippage // as fraction -+ ) -+ public -+ pure -+ returns (uint256) -+ { -+ FixidityLib.Fraction memory maxSlippageFraction = FixidityLib.wrap(maxSlippage); -+ -+ FixidityLib.Fraction memory price = FixidityLib.newFixedFraction(midPriceNumerator, midPriceDenominator); -+ FixidityLib.Fraction memory amountFraction = FixidityLib.newFixed(amount); -+ FixidityLib.Fraction memory totalAmount = price.multiply(amountFraction); -+ -+ return totalAmount.subtract(price.multiply(maxSlippageFraction).multiply(amountFraction)).fromFixed(); -+ } -+ -+ /** -+ * @notice Allows owner to transfer tokens of this contract. 
It's meant for governance to -+ * trigger use cases not contemplated in this contract. -+ * @param token The address of the token to transfer. -+ * @param amount The amount of tokens to transfer. -+ * @param to The address of the recipient to transfer the tokens to. -+ * @return A boolean indicating whether the transfer was successful or not. -+ */ -+ function transfer(address token, uint256 amount, address to) external onlyOwner returns (bool) { -+ return IERC20(token).transfer(to, amount); -+ } -+}
+index 0000000000000000000000000000000000000000..ba6da27b4b26446123f40e618a0aba5f83cf7446 +Binary files /dev/null and CELO/docs/security-reviews/2024_05-FaultProofs-Sherlock.pdf differ
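For intuition, FeeHandlerSeller.calculateMinAmount above reduces to price × amount × (1 − maxSlippage). A float sketch of the same arithmetic (the contract itself uses FixidityLib fixed-point fractions, not floats):

package main

import "fmt"

func main() {
	midPrice := 2.0 / 1.0 // midPriceNumerator / midPriceDenominator
	amount := 100.0
	maxSlippage := 0.05 // as a fraction of the mid-price

	// totalAmount - price*maxSlippage*amount, as in calculateMinAmount.
	minAmount := midPrice*amount - midPrice*maxSlippage*amount
	fmt.Println(minAmount) // 190: at most 5% worse than the 200 mid-price quote
}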
+ [file diffstat: OP → CELO, +1 -1]
diff --git OP/op-batcher/batcher/channel_builder.go CELO/op-batcher/batcher/channel_builder.go +index 1a4ed28bb3f380ed0283cfc7bdce22622c0187e4..9e496b290d9254ba5147a8e0650189790c142b9b 100644 +--- OP/op-batcher/batcher/channel_builder.go ++++ CELO/op-batcher/batcher/channel_builder.go +@@ -82,7 +82,7 @@ // total amount of output data of all frames created yet + outputBytes int + } +  +-// newChannelBuilder creates a new channel builder or returns an error if the ++// NewChannelBuilder creates a new channel builder or returns an error if the + // channel out could not be created. + // it acts as a factory for either a span or singular channel out + func NewChannelBuilder(cfg ChannelConfig, rollupCfg rollup.Config, latestL1OriginBlockNum uint64) (*ChannelBuilder, error) {
+ [file diffstat: OP → CELO, +13 -12]
diff --git OP/op-batcher/batcher/driver.go CELO/op-batcher/batcher/driver.go +index 8d8e73bee2b4ae673736c4085ba25677a4a2e2a5..b01bb51484c7bcd64cd0963700977ca4320b5747 100644 +--- OP/op-batcher/batcher/driver.go ++++ CELO/op-batcher/batcher/driver.go +@@ -167,7 +167,7 @@ l.Log.Warn("Found L2 reorg", "block_number", i) + l.lastStoredBlock = eth.BlockID{} + return err + } else if err != nil { +- l.Log.Warn("failed to load block into state", "err", err) ++ l.Log.Warn("Failed to load block into state", "err", err) + return err + } + l.lastStoredBlock = eth.ToBlockID(block) +@@ -203,7 +203,7 @@ if err := l.state.AddL2Block(block); err != nil { + return nil, fmt.Errorf("adding L2 block to state: %w", err) + } +  +- l.Log.Info("added L2 block to local state", "block", eth.ToBlockID(block), "tx_count", len(block.Transactions()), "time", block.Time()) ++ l.Log.Info("Added L2 block to local state", "block", eth.ToBlockID(block), "tx_count", len(block.Transactions()), "time", block.Time()) + return block, nil + } +  +@@ -233,7 +233,7 @@ if l.lastStoredBlock == (eth.BlockID{}) { + l.Log.Info("Starting batch-submitter work at safe-head", "safe", syncStatus.SafeL2) + l.lastStoredBlock = syncStatus.SafeL2.ID() + } else if l.lastStoredBlock.Number < syncStatus.SafeL2.Number { +- l.Log.Warn("last submitted block lagged behind L2 safe head: batch submission will continue from the safe head now", "last", l.lastStoredBlock, "safe", syncStatus.SafeL2) ++ l.Log.Warn("Last submitted block lagged behind L2 safe head: batch submission will continue from the safe head now", "last", l.lastStoredBlock, "safe", syncStatus.SafeL2) + l.lastStoredBlock = syncStatus.SafeL2.ID() + } +  +@@ -276,10 +276,10 @@ go func() { + for { + select { + case r := <-receiptsCh: +- l.Log.Info("handling receipt", "id", r.ID) ++ l.Log.Info("Handling receipt", "id", r.ID) + l.handleReceipt(r) + case <-receiptLoopDone: +- l.Log.Info("receipt processing loop done") ++ l.Log.Info("Receipt processing loop done") + return + } + } +@@ -382,7 +382,7 @@ } + err := l.publishTxToL1(l.killCtx, queue, receiptsCh) + if err != nil { + if err != io.EOF { +- l.Log.Error("error publishing tx to l1", "err", err) ++ l.Log.Error("Error publishing tx to l1", "err", err) + } + return + } +@@ -442,10 +442,10 @@ // Collect next transaction data + txdata, err := l.state.TxData(l1tip.ID()) +  + if err == io.EOF { +- l.Log.Trace("no transaction data available") ++ l.Log.Trace("No transaction data available") + return err + } else if err != nil { +- l.Log.Error("unable to get tx data", "err", err) ++ l.Log.Error("Unable to get tx data", "err", err) + return err + } +  +@@ -497,7 +497,7 @@ } + } else { + // sanity check + if nf := len(txdata.frames); nf != 1 { +- l.Log.Crit("unexpected number of frames in calldata tx", "num_frames", nf) ++ l.Log.Crit("Unexpected number of frames in calldata tx", "num_frames", nf) + } + data := txdata.CallData() + // if plasma DA is enabled we post the txdata to the DA Provider and replace it with the commitment. 
+@@ -509,13 +509,14 @@ // requeue frame if we fail to post to the DA Provider so it can be retried + l.recordFailedTx(txdata.ID(), err) + return nil + } ++ l.Log.Info("Set plasma input", "commitment", comm, "tx", txdata.ID()) + // signal plasma commitment tx with TxDataVersion1 + data = comm.TxData() + } + candidate = l.calldataTxCandidate(data) + } +  +- intrinsicGas, err := core.IntrinsicGas(candidate.TxData, nil, false, true, true, false) ++ intrinsicGas, err := core.IntrinsicGas(candidate.TxData, nil, false, true, true, false, nil) + if err != nil { + // we log instead of return an error here because txmgr can do its own gas estimation + l.Log.Error("Failed to calculate intrinsic gas", "err", err) +@@ -534,7 +535,7 @@ return nil, fmt.Errorf("generating blobs for tx data: %w", err) + } + size := data.Len() + lastSize := len(data.frames[len(data.frames)-1].data) +- l.Log.Info("building Blob transaction candidate", ++ l.Log.Info("Building Blob transaction candidate", + "size", size, "last_size", lastSize, "num_blobs", len(blobs)) + l.Metr.RecordBlobUsedBytes(lastSize) + return &txmgr.TxCandidate{ +@@ -544,7 +545,7 @@ }, nil + } +  + func (l *BatchSubmitter) calldataTxCandidate(data []byte) *txmgr.TxCandidate { +- l.Log.Info("building Calldata transaction candidate", "size", len(data)) ++ l.Log.Info("Building Calldata transaction candidate", "size", len(data)) + return &txmgr.TxCandidate{ + To: &l.RollupConfig.BatchInboxAddress, + TxData: data,
@@ -9675,13 +41104,13 @@
- [old badge: (new)]
+ [new badge: OP → CELO]
@@ -9691,290 +41120,407 @@
- [old diffstat: +272 -0]
+ [new diffstat: +4 -4]
diff --git OP/packages/contracts-bedrock/src/celo/GoldToken.sol CELO/packages/contracts-bedrock/src/celo/GoldToken.sol -new file mode 100644 -index 0000000000000000000000000000000000000000..e7236678670a7bedf86f7769ef74888dc5f2488c ---- /dev/null -+++ CELO/packages/contracts-bedrock/src/celo/GoldToken.sol -@@ -0,0 +1,272 @@ -+// SPDX-License-Identifier: MIT -+pragma solidity ^0.8.15; -+ -+import "../../lib/openzeppelin-contracts/contracts/access/Ownable.sol"; -+import "../../lib/openzeppelin-contracts/contracts/token/ERC20/IERC20.sol"; -+ -+import "./UsingRegistry.sol"; -+import "./CalledByVm.sol"; -+import "./Initializable.sol"; -+import "./interfaces/ICeloToken.sol"; -+import "./common/interfaces/ICeloVersionedContract.sol"; -+ -+contract GoldToken is Initializable, CalledByVm, UsingRegistry, IERC20, ICeloToken, ICeloVersionedContract { -+ // Address of the TRANSFER precompiled contract. -+ // solhint-disable state-visibility -+ address constant TRANSFER = address(0xff - 2); -+ string constant NAME = "Celo native asset"; -+ string constant SYMBOL = "CELO"; -+ uint8 constant DECIMALS = 18; -+ uint256 internal totalSupply_; -+ // solhint-enable state-visibility -+ -+ mapping(address => mapping(address => uint256)) internal allowed; -+ -+ // Burn address is 0xdEaD because truffle is having buggy behaviour with the zero address -+ address constant BURN_ADDRESS = address(0x000000000000000000000000000000000000dEaD); -+ -+ event TransferComment(string comment); -+ -+ /** -+ * @notice Sets initialized == true on implementation contracts -+ * @param test Set to true to skip implementation initialization -+ */ -+ constructor(bool test) Initializable(test) { } -+ -+ /** -+ * @notice Returns the storage, major, minor, and patch version of the contract. -+ * @return Storage version of the contract. -+ * @return Major version of the contract. -+ * @return Minor version of the contract. -+ * @return Patch version of the contract. -+ */ -+ function getVersionNumber() external pure returns (uint256, uint256, uint256, uint256) { -+ return (1, 1, 2, 0); -+ } -+ -+ /** -+ * @notice Used in place of the constructor to allow the contract to be upgradable via proxy. -+ * @param registryAddress Address of the Registry contract. -+ */ -+ function initialize(address registryAddress) external initializer { -+ totalSupply_ = 0; -+ _transferOwnership(msg.sender); -+ setRegistry(registryAddress); -+ } -+ -+ /** -+ * @notice Transfers CELO from one address to another. -+ * @param to The address to transfer CELO to. -+ * @param value The amount of CELO to transfer. -+ * @return True if the transaction succeeds. -+ */ -+ // solhint-disable-next-line no-simple-event-func-name -+ function transfer(address to, uint256 value) external returns (bool) { -+ return _transferWithCheck(to, value); -+ } -+ -+ /** -+ * @notice Transfers CELO from one address to another with a comment. -+ * @param to The address to transfer CELO to. -+ * @param value The amount of CELO to transfer. -+ * @param comment The transfer comment -+ * @return True if the transaction succeeds. -+ */ -+ function transferWithComment(address to, uint256 value, string calldata comment) external returns (bool) { -+ bool succeeded = _transferWithCheck(to, value); -+ emit TransferComment(comment); -+ return succeeded; -+ } -+ -+ /** -+ * @notice This function allows a user to burn a specific amount of tokens. -+ * Burning is implemented by sending tokens to the burn address. -+ * @param value: The amount of CELO to burn. -+ * @return True if burn was successful. 
-+ */ -+ function burn(uint256 value) external returns (bool) { -+ // not using transferWithCheck as the burn address can potentially be the zero address -+ return _transfer(BURN_ADDRESS, value); -+ } -+ -+ /** -+ * @notice Approve a user to transfer CELO on behalf of another user. -+ * @param spender The address which is being approved to spend CELO. -+ * @param value The amount of CELO approved to the spender. -+ * @return True if the transaction succeeds. -+ */ -+ function approve(address spender, uint256 value) external returns (bool) { -+ require(spender != address(0), "cannot set allowance for 0"); -+ allowed[msg.sender][spender] = value; -+ emit Approval(msg.sender, spender, value); -+ return true; -+ } -+ -+ /** -+ * @notice Increases the allowance of another user. -+ * @param spender The address which is being approved to spend CELO. -+ * @param value The increment of the amount of CELO approved to the spender. -+ * @return True if the transaction succeeds. -+ */ -+ function increaseAllowance(address spender, uint256 value) external returns (bool) { -+ require(spender != address(0), "cannot set allowance for 0"); -+ uint256 oldValue = allowed[msg.sender][spender]; -+ uint256 newValue = oldValue + value; -+ allowed[msg.sender][spender] = newValue; -+ emit Approval(msg.sender, spender, newValue); -+ return true; -+ } -+ -+ /** -+ * @notice Decreases the allowance of another user. -+ * @param spender The address which is being approved to spend CELO. -+ * @param value The decrement of the amount of CELO approved to the spender. -+ * @return True if the transaction succeeds. -+ */ -+ function decreaseAllowance(address spender, uint256 value) external returns (bool) { -+ uint256 oldValue = allowed[msg.sender][spender]; -+ uint256 newValue = oldValue - value; -+ allowed[msg.sender][spender] = newValue; -+ emit Approval(msg.sender, spender, newValue); -+ return true; -+ } +
diff --git OP/op-batcher/batcher/service.go CELO/op-batcher/batcher/service.go +index 8ef40b346a0be3de0d5bbfcebd87038db319c032..1c35c1c4ea25f5dec961a8352f693ed619615084 100644 +--- OP/op-batcher/batcher/service.go ++++ CELO/op-batcher/batcher/service.go +@@ -248,7 +248,7 @@ "max_channel_duration", cc.MaxChannelDuration, + "channel_timeout", cc.ChannelTimeout, + "sub_safety_margin", cc.SubSafetyMargin) + if bs.UsePlasma { +- bs.Log.Warn("Plasma Mode is a Beta feature of the MIT licensed OP Stack. While it has received initial review from core contributors, it is still undergoing testing, and may have bugs or other issues.") ++ bs.Log.Warn("Alt-DA Mode is a Beta feature of the MIT licensed OP Stack. While it has received initial review from core contributors, it is still undergoing testing, and may have bugs or other issues.") + } + bs.ChannelConfig = cc + return nil +@@ -282,19 +282,19 @@ } +  + func (bs *BatcherService) initMetricsServer(cfg *CLIConfig) error { + if !cfg.MetricsConfig.Enabled { +- bs.Log.Info("metrics disabled") ++ bs.Log.Info("Metrics disabled") + return nil + } + m, ok := bs.Metrics.(opmetrics.RegistryMetricer) + if !ok { + return fmt.Errorf("metrics were enabled, but metricer %T does not expose registry for metrics-server", bs.Metrics) + } +- bs.Log.Debug("starting metrics server", "addr", cfg.MetricsConfig.ListenAddr, "port", cfg.MetricsConfig.ListenPort) ++ bs.Log.Debug("Starting metrics server", "addr", cfg.MetricsConfig.ListenAddr, "port", cfg.MetricsConfig.ListenPort) + metricsSrv, err := opmetrics.StartServer(m.Registry(), cfg.MetricsConfig.ListenAddr, cfg.MetricsConfig.ListenPort) + if err != nil { + return fmt.Errorf("failed to start metrics server: %w", err) + } +- bs.Log.Info("started metrics server", "addr", metricsSrv.Addr()) ++ bs.Log.Info("Started metrics server", "addr", metricsSrv.Addr()) + bs.metricsSrv = metricsSrv + return nil + }
+ [file diffstat: OP → CELO, +1 -1]
diff --git OP/op-batcher/batcher/tx_data.go CELO/op-batcher/batcher/tx_data.go +index 73e1adbbe179f7732241d23cfcaf953aa3379078..5937acf6f7974a920314c8678d956c5240dbc13b 100644 +--- OP/op-batcher/batcher/tx_data.go ++++ CELO/op-batcher/batcher/tx_data.go +@@ -62,7 +62,7 @@ } + return l + } +  +-// Frame returns the single frame of this tx data. ++// Frames returns the single frame of this tx data. + func (td *txData) Frames() []frameData { + return td.frames + }
+ [file diffstat: OP → CELO, +15 -3]
diff --git OP/op-batcher/flags/flags_test.go CELO/op-batcher/flags/flags_test.go +index af303724e25d4b2a1b36bfe40f0d40ee021316a2..486342375b1f89f0ffebd123ef215a7ae0f89a9f 100644 +--- OP/op-batcher/flags/flags_test.go ++++ CELO/op-batcher/flags/flags_test.go +@@ -5,6 +5,7 @@ "slices" + "strings" + "testing" +  ++ plasma "github.com/ethereum-optimism/optimism/op-plasma" + opservice "github.com/ethereum-optimism/optimism/op-service" + "github.com/ethereum-optimism/optimism/op-service/txmgr" +  +@@ -57,6 +58,14 @@ } + } +  + func TestHasEnvVar(t *testing.T) { ++ // known exceptions to the number of env vars ++ expEnvVars := map[string]int{ ++ plasma.EnabledFlagName: 2, ++ plasma.DaServerAddressFlagName: 2, ++ plasma.VerifyOnReadFlagName: 2, ++ plasma.DaServiceFlag: 2, ++ } + -+ /** -+ * @notice Transfers CELO from one address to another on behalf of a user. -+ * @param from The address to transfer CELO from. -+ * @param to The address to transfer CELO to. -+ * @param value The amount of CELO to transfer. -+ * @return True if the transaction succeeds. -+ */ -+ function transferFrom(address from, address to, uint256 value) external returns (bool) { -+ require(to != address(0), "transfer attempted to reserved address 0x0"); -+ require(value <= balanceOf(from), "transfer value exceeded balance of sender"); -+ require(value <= allowed[from][msg.sender], "transfer value exceeded sender's allowance for spender"); + for _, flag := range Flags { + flag := flag + flagName := flag.Names()[0] +@@ -65,9 +74,13 @@ t.Run(flagName, func(t *testing.T) { + envFlagGetter, ok := flag.(interface { + GetEnvVars() []string + }) +- envFlags := envFlagGetter.GetEnvVars() + require.True(t, ok, "must be able to cast the flag to an EnvVar interface") +- require.Equal(t, 1, len(envFlags), "flags should have exactly one env var") ++ envFlags := envFlagGetter.GetEnvVars() ++ if numEnvVars, ok := expEnvVars[flagName]; ok { ++ require.Equalf(t, numEnvVars, len(envFlags), "flags should have %d env vars", numEnvVars) ++ } else { ++ require.Equal(t, 1, len(envFlags), "flags should have exactly one env var") ++ } + }) + } + } +@@ -92,7 +105,6 @@ GetEnvVars() []string + }) + envFlags := envFlagGetter.GetEnvVars() + require.True(t, ok, "must be able to cast the flag to an EnvVar interface") +- require.Equal(t, 1, len(envFlags), "flags should have exactly one env var") + expectedEnvVar := opservice.FlagNameToEnvVarName(flagName, "OP_BATCHER") + require.Equal(t, expectedEnvVar, envFlags[0]) + })
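The test now permits the listed plasma flags to expose two env vars (a legacy alias plus the canonical name) while every other flag must expose exactly one. A minimal illustration of the GetEnvVars check with a hypothetical flag, assuming the urfave/cli v2 API these flags are built on:

package main

import (
	"fmt"

	"github.com/urfave/cli/v2"
)

func main() {
	// Hypothetical flag with an alias pair; the env var names are made up.
	var f cli.Flag = &cli.StringFlag{
		Name:    "da-server",
		EnvVars: []string{"OP_BATCHER_DA_SERVER", "OP_BATCHER_PLASMA_DA_SERVER"},
	}
	// Same cast the test performs before counting env vars.
	if getter, ok := f.(interface{ GetEnvVars() []string }); ok {
		fmt.Println(len(getter.GetEnvVars())) // 2: allowed only for known exceptions
	}
}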
+ (new) → CELO: +89 / -0
- [removed from the previous revision of this page: a fragment of a CELO native-token contract diff whose file header was not preserved; its lines were interleaved with the flags_test.go hunks above and the deploy config below, and are collected here]
-+ /**
-+ * @notice Transfers CELO from one address to another on behalf of a user.
-+ * @param from The address to transfer CELO from.
-+ * @param to The address to transfer CELO to.
-+ * @param value The amount of CELO to transfer.
-+ * @return True if the transaction succeeds.
-+ */
-+ function transferFrom(address from, address to, uint256 value) external returns (bool) {
-+ require(to != address(0), "transfer attempted to reserved address 0x0");
-+ require(value <= balanceOf(from), "transfer value exceeded balance of sender");
-+ require(value <= allowed[from][msg.sender], "transfer value exceeded sender's allowance for spender");
-+
-+ bool success;
-+ (success,) = TRANSFER.call{ value: 0, gas: gasleft() }(abi.encode(from, to, value));
-+ require(success, "CELO transfer failed");
-+
-+ allowed[from][msg.sender] = allowed[from][msg.sender] - value;
-+ emit Transfer(from, to, value);
-+ return true;
-+ }
-+
-+ /**
-+ * @notice Mints new CELO and gives it to 'to'.
-+ * @param to The account for which to mint tokens.
-+ * @param value The amount of CELO to mint.
-+ */
-+ function mint(address to, uint256 value) external onlyVm returns (bool) {
-+ if (value == 0) {
-+ return true;
-+ }
-+
-+ require(to != address(0), "mint attempted to reserved address 0x0");
-+ totalSupply_ = totalSupply_ + value;
-+
-+ bool success;
-+ (success,) = TRANSFER.call{ value: 0, gas: gasleft() }(abi.encode(address(0), to, value));
-+ require(success, "CELO transfer failed");
-+
-+ emit Transfer(address(0), to, value);
-+ return true;
-+ }
-+
-+ /**
-+ * @return The name of the CELO token.
-+ */
-+ function name() external pure returns (string memory) {
-+ return NAME;
-+ }
-+
-+ /**
-+ * @return The symbol of the CELO token.
-+ */
-+ function symbol() external pure returns (string memory) {
-+ return SYMBOL;
-+ }
-+
-+ /**
-+ * @return The number of decimal places to which CELO is divisible.
-+ */
-+ function decimals() external pure returns (uint8) {
-+ return DECIMALS;
-+ }
-+
-+ /**
-+ * @return The total amount of CELO in existence, including what the burn address holds.
-+ */
-+ function totalSupply() external view returns (uint256) {
-+ return totalSupply_;
-+ }
-+
-+ /**
-+ * @return The total amount of CELO in existence, not including what the burn address holds.
-+ */
-+ function circulatingSupply() external view returns (uint256) {
-+ return totalSupply_ - getBurnedAmount() - balanceOf(address(0));
-+ }
-+
-+ /**
-+ * @notice Gets the amount of owner's CELO allowed to be spent by spender.
-+ * @param owner The owner of the CELO.
-+ * @param spender The spender of the CELO.
-+ * @return The amount of CELO owner is allowing spender to spend.
-+ */
-+ function allowance(address owner, address spender) external view returns (uint256) {
-+ return allowed[owner][spender];
-+ }
-+
-+ /**
-+ * @notice Increases the variable for total amount of CELO in existence.
-+ * @param amount The amount to increase counter by
-+ */
-+ function increaseSupply(uint256 amount) external onlyVm {
-+ totalSupply_ = totalSupply_ + amount;
-+ }
-+
-+ /**
-+ * @notice Gets the amount of CELO that has been burned.
-+ * @return The total amount of Celo that has been sent to the burn address.
-+ */
-+ function getBurnedAmount() public view returns (uint256) {
-+ return balanceOf(BURN_ADDRESS);
-+ }
-+
-+ /**
-+ * @notice Gets the balance of the specified address.
-+ * @param owner The address to query the balance of.
-+ * @return The balance of the specified address.
-+ */
-+ function balanceOf(address owner) public view returns (uint256) {
-+ return owner.balance;
-+ }
-+
-+ /**
-+ * @notice internal CELO transfer from one address to another.
-+ * @param to The address to transfer CELO to.
-+ * @param value The amount of CELO to transfer.
-+ * @return True if the transaction succeeds.
-+ */
-+ function _transfer(address to, uint256 value) internal returns (bool) {
-+ require(value <= balanceOf(msg.sender), "transfer value exceeded balance of sender");
-+
-+ bool success;
-+ (success,) = TRANSFER.call{ value: 0, gas: gasleft() }(abi.encode(msg.sender, to, value));
-+ require(success, "CELO transfer failed");
-+ emit Transfer(msg.sender, to, value);
-+ return true;
-+ }
-+
-+ /**
-+ * @notice Internal CELO transfer from one address to another.
-+ * @param to The address to transfer CELO to. Zero address will revert.
-+ * @param value The amount of CELO to transfer.
-+ * @return True if the transaction succeeds.
-+ */
-+ function _transferWithCheck(address to, uint256 value) internal returns (bool) {
-+ require(to != address(0), "transfer attempted to reserved address 0x0");
-+ return _transfer(to, value);
-+ }

diff --git OP/op-chain-ops/cmd/celo-migrate/testdata/deploy-config-holesky-alfajores.json CELO/op-chain-ops/cmd/celo-migrate/testdata/deploy-config-holesky-alfajores.json
+new file mode 100644
+index 0000000000000000000000000000000000000000..6b9dbe97e0682fb1129d33992733460bafb86c21
+--- /dev/null
++++ CELO/op-chain-ops/cmd/celo-migrate/testdata/deploy-config-holesky-alfajores.json
+@@ -0,0 +1,89 @@
++{
++ "l1StartingBlockTag": "0xbbed3612407993e67f8ca7a423b181837ae164a531941e78f5ee48e766d39cad",
++
++ "l1ChainID": 17000,
++ "l2ChainID": 44787,
++ "l2BlockTime": 2,
++ "l1BlockTime": 12,
++
++ "maxSequencerDrift": 600,
++ "sequencerWindowSize": 3600,
++ "channelTimeout": 300,
++
++ "p2pSequencerAddress": "0x644C82d76A43Fe9c76eda0EEd0f0DC17235c3005",
++ "batchInboxAddress": "0xff00000000000000000000000000000000044787",
++ "batchSenderAddress": "0x1660B1F70De0f32490b50f976e8983213dCF7FD5",
++
++ "l2OutputOracleSubmissionInterval": 120,
++ "l2OutputOracleStartingBlockNumber": 0,
++ "l2OutputOracleStartingTimestamp": 1718312256,
++
++ "l2OutputOracleProposer": "0x1BA11Ec6581FC8C3e35D6E345aEC977796Ffe89b",
++ "l2OutputOracleChallenger": "0xc07C5A1fBF6c7BC6b4f321E7dd031c0E1E98d32d",
++
++ "finalizationPeriodSeconds": 12,
++
++ "proxyAdminOwner": "0xc07C5A1fBF6c7BC6b4f321E7dd031c0E1E98d32d",
++ "baseFeeVaultRecipient": "0xc07C5A1fBF6c7BC6b4f321E7dd031c0E1E98d32d",
++ "l1FeeVaultRecipient": "0xc07C5A1fBF6c7BC6b4f321E7dd031c0E1E98d32d",
++ "sequencerFeeVaultRecipient": "0xc07C5A1fBF6c7BC6b4f321E7dd031c0E1E98d32d",
++ "finalSystemOwner": "0xc07C5A1fBF6c7BC6b4f321E7dd031c0E1E98d32d",
++ "superchainConfigGuardian": "0xc07C5A1fBF6c7BC6b4f321E7dd031c0E1E98d32d",
++
++ "baseFeeVaultMinimumWithdrawalAmount": "0x8ac7230489e80000",
++ "l1FeeVaultMinimumWithdrawalAmount": "0x8ac7230489e80000",
++ "sequencerFeeVaultMinimumWithdrawalAmount": "0x8ac7230489e80000",
++ "baseFeeVaultWithdrawalNetwork": 0,
++ "l1FeeVaultWithdrawalNetwork": 0,
++ "sequencerFeeVaultWithdrawalNetwork": 0,
++
++ "gasPriceOracleOverhead": 0,
++ "gasPriceOracleScalar": 1000000,
++
++ "enableGovernance": false,
++ "governanceTokenSymbol": "OP",
++ "governanceTokenName": "Optimism",
++ "governanceTokenOwner": "0xc07C5A1fBF6c7BC6b4f321E7dd031c0E1E98d32d",
++
++ "l2GenesisBlockGasLimit": "0x1c9c380",
++ "l2GenesisBlockBaseFeePerGas": "0x3b9aca00",
++ "l2GenesisRegolithTimeOffset": "0x0",
++
++ "eip1559Denominator": 50,
++ "eip1559DenominatorCanyon": 250,
++ "eip1559Elasticity": 6,
++
++ "l2GenesisEcotoneTimeOffset": "0x0",
++ "l2GenesisDeltaTimeOffset": "0x0",
++ "l2GenesisCanyonTimeOffset": "0x0",
++
++ "systemConfigStartBlock": 0,
++
++ "requiredProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000",
++ "recommendedProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000",
++
++ "faultGameAbsolutePrestate": "0x03c7ae758795765c6664a5d39bf63841c71ff191e9189522bad8ebff5d4eca98",
++ "faultGameMaxDepth": 44,
++ "faultGameClockExtension": 0,
++ "faultGameMaxClockDuration": 600,
++ "faultGameGenesisBlock": 0,
++ "faultGameGenesisOutputRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
++ "faultGameSplitDepth": 14,
++ "faultGameWithdrawalDelay": 604800,
++
++ "preimageOracleMinProposalSize": 1800000,
++ "preimageOracleChallengePeriod": 86400,
++
++ "fundDevAccounts": false,
++ "useFaultProofs": false,
++ "proofMaturityDelaySeconds": 604800,
++ "disputeGameFinalityDelaySeconds": 302400,
++ "respectedGameType": 0,
++
++ "usePlasma": false,
++ "daCommitmentType": "KeccakCommitment",
++ "daChallengeWindow": 160,
++ "daResolveWindow": 160,
++ "daBondSize": 1000000,
++ "daResolverRefundPercentage": 0
++}
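For orientation, here is a minimal sketch of consuming a few fields of this deploy config from Go with encoding/json. The struct below is illustrative and covers only a handful of keys; the real op-chain-ops deploy config type is much larger:

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// deployConfig is an illustrative subset of the JSON above.
type deployConfig struct {
	L1ChainID   uint64 `json:"l1ChainID"`
	L2ChainID   uint64 `json:"l2ChainID"`
	L2BlockTime uint64 `json:"l2BlockTime"`
	UsePlasma   bool   `json:"usePlasma"`
}

func main() {
	raw, err := os.ReadFile("deploy-config-holesky-alfajores.json")
	if err != nil {
		panic(err)
	}
	var cfg deployConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("L1 %d -> L2 %d, block time %ds, plasma=%v\n",
		cfg.L1ChainID, cfg.L2ChainID, cfg.L2BlockTime, cfg.UsePlasma)
}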
+ (new) → CELO: +34 / -0
diff --git OP/op-chain-ops/cmd/celo-migrate/testdata/deployment-l1-holesky.json CELO/op-chain-ops/cmd/celo-migrate/testdata/deployment-l1-holesky.json +new file mode 100644 +index 0000000000000000000000000000000000000000..b37b79f4d4c8f1ee70c7aae7e7f351713226b8a3 +--- /dev/null ++++ CELO/op-chain-ops/cmd/celo-migrate/testdata/deployment-l1-holesky.json +@@ -0,0 +1,34 @@ ++{ ++ "AddressManager": "0x2d256f3b82f673Ee377C393fBF2Cf3DcA5D1D901", ++ "AnchorStateRegistry": "0x036fDE501893043825356Ce49dfd554809F07597", ++ "AnchorStateRegistryProxy": "0xe5077701c64782954d27384da76D95ABf320460f", ++ "DelayedWETH": "0x408Ad04Dd953958B080226025E17d6Ba12987EB7", ++ "DelayedWETHProxy": "0x27f7Ade64F031A39553Be8104bF8B0b410735845", ++ "DisputeGameFactory": "0xd7771F9687804Bba1D360B08AD9e4d8CB4523738", ++ "DisputeGameFactoryProxy": "0x193FdDF22D31c227f1Af1286cf2B051d701FF86E", ++ "L1CrossDomainMessenger": "0x1e3513a619AA4f2550CDD95709B92C1FE0397184", ++ "L1CrossDomainMessengerProxy": "0x35841aC1f5FdC5b812562adB17F6A0B9A178F643", ++ "L1ERC721Bridge": "0x695b01393f0539ec64AC316d4998E4130309efB0", ++ "L1ERC721BridgeProxy": "0x2b9C1e5b9a0D01256388cc4A0F8F290E839F2d82", ++ "L1StandardBridge": "0x2d1A818544b657Bc5d1E8c8B80F953bd0CA1C9B2", ++ "L1StandardBridgeProxy": "0xD10A531CB9b80BD507501F34D87Ad4083E9b7F98", ++ "L2OutputOracle": "0x04CD14625ff0Da62d6E0820a816b4dD3eCd0FF27", ++ "L2OutputOracleProxy": "0x5636f9D582DB69EAf1Eb9f05B0738225C91fBC1E", ++ "Mips": "0x60E1b8b535626Fc9fFCdf6147B45879634645771", ++ "OptimismMintableERC20Factory": "0x3fcd69a03857aA6e79AE9408fc7c887EE70FC145", ++ "OptimismMintableERC20FactoryProxy": "0x23c80F2503b93a58EC620D20b6b9B6AB8cCa2a12", ++ "OptimismPortal": "0xdF803FAC1d84a31Ff5aee841f11659f9a3787CE5", ++ "OptimismPortal2": "0x60bc423dDf0B24fa5104EcacAC5000674Ac3EBfB", ++ "OptimismPortalProxy": "0xa292B051eA58e2558243f4A9f74262B1796c9648", ++ "PreimageOracle": "0xEC19353B7364Fb85b9b0A57EaEEC6aCeBbFb6a53", ++ "ProtocolVersions": "0x077d61D4fb3378025950Bb60AD69179B38921107", ++ "ProtocolVersionsProxy": "0x791D5101840A547F1EE91148d34E061412A57ECD", ++ "ProxyAdmin": "0x4ddC758DA1697Ad58D86D03150872c042390dCa2", ++ "SafeProxyFactory": "0xa6B71E26C5e0845f74c812102Ca7114b6a896AB2", ++ "SafeSingleton": "0xd9Db270c1B5E3Bd161E8c8503c55cEABeE709552", ++ "SuperchainConfig": "0xA4f7dB67A6e098613B107be3F8441475Ec30FCC2", ++ "SuperchainConfigProxy": "0xB21214DA32a85A0d43372310D62095cf91d67765", ++ "SystemConfig": "0xeFA98Ba3ada6c6AC4bB84074820685E1F01C835d", ++ "SystemConfigProxy": "0x733043Aa78d25F6759d9e6Ce2B2897bE6d630E08", ++ "SystemOwnerSafe": "0xD2a6B91aB77691D6F8688eAFA7a5f188bc5baA3a" +}
@@ -9982,9 +41528,985 @@
+ (new) → CELO: +36 / -0
diff --git OP/op-chain-ops/cmd/celo-migrate/testdata/rollup-config.json CELO/op-chain-ops/cmd/celo-migrate/testdata/rollup-config.json +new file mode 100644 +index 0000000000000000000000000000000000000000..8dfd1f25e28d86be0d6188ab982c2439b077a2f0 +--- /dev/null ++++ CELO/op-chain-ops/cmd/celo-migrate/testdata/rollup-config.json +@@ -0,0 +1,36 @@ ++{ ++ "genesis": { ++ "l1": { ++ "hash": "0xbbed3612407993e67f8ca7a423b181837ae164a531941e78f5ee48e766d39cad", ++ "number": 1729797 ++ }, ++ "l2": { ++ "hash": "0x2664d0a1f45dc9a010e553e815a25f33c6d949cbb0d38e179c6209fc0486aa41", ++ "number": 23912613 ++ }, ++ "l2_time": 1718312256, ++ "system_config": { ++ "batcherAddr": "0x1660b1f70de0f32490b50f976e8983213dcf7fd5", ++ "overhead": "0x0000000000000000000000000000000000000000000000000000000000000000", ++ "scalar": "0x00000000000000000000000000000000000000000000000000000000000f4240", ++ "gasLimit": 30000000 ++ } ++ }, ++ "block_time": 2, ++ "max_sequencer_drift": 600, ++ "seq_window_size": 3600, ++ "channel_timeout": 300, ++ "l1_chain_id": 17000, ++ "l2_chain_id": 44787, ++ "regolith_time": 0, ++ "cel2_time": 0, ++ "canyon_time": 0, ++ "delta_time": 0, ++ "ecotone_time": 0, ++ "batch_inbox_address": "0xff00000000000000000000000000000000044787", ++ "deposit_contract_address": "0xa292b051ea58e2558243f4a9f74262b1796c9648", ++ "l1_system_config_address": "0x733043aa78d25f6759d9e6ce2b2897be6d630e08", ++ "protocol_versions_address": "0x0000000000000000000000000000000000000000", ++ "da_challenge_contract_address": "0x0000000000000000000000000000000000000000" ++} ++
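One detail worth noting in this rollup config: the batch_inbox_address ends in the decimal digits of the L2 chain ID (0xff…044787 for chain 44787), a convention many OP Stack chains follow for inbox addresses. A quick sanity check of that correspondence; the convention is stated here as an observation, not a consensus rule:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Values taken from the rollup config above.
	inbox := "0xff00000000000000000000000000000000044787"
	chainID := uint64(44787)

	// The inbox address embeds the chain ID as literal decimal digits.
	suffix := strconv.FormatUint(chainID, 10)
	fmt.Println(strings.HasSuffix(inbox, suffix)) // true
}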
+ OP → CELO: +10 / -1
diff --git OP/op-challenger/game/fault/contracts/oracle.go CELO/op-challenger/game/fault/contracts/oracle.go +index be2520bef8b5b139b8151cb33a30922f0e38325d..bc0ff338615c94df26b2a37da86f23bd3b2ef8a2 100644 +--- OP/op-challenger/game/fault/contracts/oracle.go ++++ CELO/op-challenger/game/fault/contracts/oracle.go +@@ -129,8 +129,17 @@ } + } +  + func (c *PreimageOracleContract) InitLargePreimage(uuid *big.Int, partOffset uint32, claimedSize uint32) (txmgr.TxCandidate, error) { ++ bond, err := c.GetMinBondLPP(context.Background()) ++ if err != nil { ++ return txmgr.TxCandidate{}, fmt.Errorf("failed to get min bond for large preimage proposal: %w", err) ++ } + call := c.contract.Call(methodInitLPP, uuid, partOffset, claimedSize) +- return call.ToTxCandidate() ++ candidate, err := call.ToTxCandidate() ++ if err != nil { ++ return txmgr.TxCandidate{}, fmt.Errorf("failed to create initLPP tx candidate: %w", err) ++ } ++ candidate.Value = bond ++ return candidate, nil + } +  + func (c *PreimageOracleContract) AddLeaves(uuid *big.Int, startingBlockIndex *big.Int, input []byte, commitments []common.Hash, finalize bool) (txmgr.TxCandidate, error) {
+ OP → CELO: +3 / -0
diff --git OP/op-challenger/game/fault/contracts/oracle_test.go CELO/op-challenger/game/fault/contracts/oracle_test.go +index 480d738bd478b674bdd48449766613969b5daea7..7bb8158ef6a50b65efde7bb76f90ae069a4305fc 100644 +--- OP/op-challenger/game/fault/contracts/oracle_test.go ++++ CELO/op-challenger/game/fault/contracts/oracle_test.go +@@ -164,6 +164,8 @@ + uuid := big.NewInt(123) + partOffset := uint32(1) + claimedSize := uint32(2) ++ bond := big.NewInt(42984) ++ stubRpc.SetResponse(oracleAddr, methodMinBondSizeLPP, rpcblock.Latest, nil, []interface{}{bond}) + stubRpc.SetResponse(oracleAddr, methodInitLPP, rpcblock.Latest, []interface{}{ + uuid, + partOffset, +@@ -173,6 +175,7 @@ + tx, err := oracle.InitLargePreimage(uuid, partOffset, claimedSize) + require.NoError(t, err) + stubRpc.VerifyTxCandidate(tx) ++ require.Truef(t, bond.Cmp(tx.Value) == 0, "Expected bond %v got %v", bond, tx.Value) + } +  + func TestPreimageOracleContract_AddLeaves(t *testing.T) {
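The new assertion compares the bond with bond.Cmp(tx.Value) == 0 rather than pointer or struct equality. A tiny standalone illustration of why Cmp is the right tool for *big.Int:

package main

import (
	"fmt"
	"math/big"
)

// Two *big.Int values with the same numeric value are distinct pointers, so
// == compares identities, not numbers; Cmp compares the numbers themselves.
func main() {
	a := big.NewInt(42984)
	b, _ := new(big.Int).SetString("42984", 10)
	fmt.Println(a == b)        // false: different pointers
	fmt.Println(a.Cmp(b) == 0) // true: same numeric value
}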
+ OP → CELO: +0 / -5
diff --git OP/op-challenger/game/fault/preimages/large.go CELO/op-challenger/game/fault/preimages/large.go +index da28fdcf16fdbd9d692389aa8c3c49dab9f09c21..fe311bdfb1504ba3b465ccc2c5708478f424922f 100644 +--- OP/op-challenger/game/fault/preimages/large.go ++++ CELO/op-challenger/game/fault/preimages/large.go +@@ -164,11 +164,6 @@ candidate, err := p.contract.InitLargePreimage(uuid, partOffset, claimedSize) + if err != nil { + return fmt.Errorf("failed to create pre-image oracle tx: %w", err) + } +- bond, err := p.contract.GetMinBondLPP(context.Background()) +- if err != nil { +- return fmt.Errorf("failed to get min bond for large preimage proposal: %w", err) +- } +- candidate.Value = bond + if err := p.txSender.SendAndWaitSimple("init large preimage", candidate); err != nil { + return fmt.Errorf("failed to populate pre-image oracle: %w", err) + }
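This removal is the other half of the oracle.go change above: the bond lookup and the candidate.Value assignment move out of the preimage uploader and into the contract binding, so InitLargePreimage now returns a fully funded candidate. A condensed sketch of the resulting shape, using a stand-in TxCandidate and hypothetical hooks in place of the real contract plumbing:

package main

import (
	"context"
	"fmt"
	"math/big"
)

// TxCandidate is a stand-in for txmgr.TxCandidate; only Value matters here.
type TxCandidate struct {
	TxData []byte
	Value  *big.Int
}

// initLargePreimage mirrors the refactor: fetch the required bond, build the
// candidate, and attach the bond before returning, so no caller forgets it.
func initLargePreimage(ctx context.Context, minBond func(context.Context) (*big.Int, error), build func() (TxCandidate, error)) (TxCandidate, error) {
	bond, err := minBond(ctx)
	if err != nil {
		return TxCandidate{}, fmt.Errorf("failed to get min bond for large preimage proposal: %w", err)
	}
	candidate, err := build()
	if err != nil {
		return TxCandidate{}, fmt.Errorf("failed to create initLPP tx candidate: %w", err)
	}
	candidate.Value = bond // the bond travels with the candidate
	return candidate, nil
}

func main() {
	candidate, err := initLargePreimage(
		context.Background(),
		func(context.Context) (*big.Int, error) { return big.NewInt(42984), nil },
		func() (TxCandidate, error) { return TxCandidate{TxData: []byte{0x01}}, nil },
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(candidate.Value) // 42984
}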
+ OP → (deleted): +0 / -127
diff --git OP/op-challenger/game/fault/trace/asterisc/executor.go CELO/op-challenger/game/fault/trace/asterisc/executor.go +deleted file mode 100644 +index b84d5e444568a1b3449ec7831d4b1674890301ca..0000000000000000000000000000000000000000 +--- OP/op-challenger/game/fault/trace/asterisc/executor.go ++++ /dev/null +@@ -1,127 +0,0 @@ +-package asterisc +- +-import ( +- "context" +- "fmt" +- "math" +- "os" +- "path/filepath" +- "strconv" +- "strings" +- "time" +- +- "github.com/ethereum-optimism/optimism/op-challenger/config" +- "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/utils" +- "github.com/ethereum/go-ethereum/log" +-) +- +-type Executor struct { +- logger log.Logger +- metrics AsteriscMetricer +- l1 string +- l1Beacon string +- l2 string +- inputs utils.LocalGameInputs +- asterisc string +- server string +- network string +- rollupConfig string +- l2Genesis string +- absolutePreState string +- snapshotFreq uint +- infoFreq uint +- selectSnapshot utils.SnapshotSelect +- cmdExecutor utils.CmdExecutor +-} +- +-func NewExecutor(logger log.Logger, m AsteriscMetricer, cfg *config.Config, prestate string, inputs utils.LocalGameInputs) *Executor { +- return &Executor{ +- logger: logger, +- metrics: m, +- l1: cfg.L1EthRpc, +- l1Beacon: cfg.L1Beacon, +- l2: cfg.L2Rpc, +- inputs: inputs, +- asterisc: cfg.AsteriscBin, +- server: cfg.AsteriscServer, +- network: cfg.AsteriscNetwork, +- rollupConfig: cfg.AsteriscRollupConfigPath, +- l2Genesis: cfg.AsteriscL2GenesisPath, +- absolutePreState: prestate, +- snapshotFreq: cfg.AsteriscSnapshotFreq, +- infoFreq: cfg.AsteriscInfoFreq, +- selectSnapshot: utils.FindStartingSnapshot, +- cmdExecutor: utils.RunCmd, +- } +-} +- +-// GenerateProof executes asterisc to generate a proof at the specified trace index. +-// The proof is stored at the specified directory. +-func (e *Executor) GenerateProof(ctx context.Context, dir string, i uint64) error { +- return e.generateProof(ctx, dir, i, i) +-} +- +-// generateProof executes asterisc from the specified starting trace index until the end trace index. +-// The proof is stored at the specified directory. +-func (e *Executor) generateProof(ctx context.Context, dir string, begin uint64, end uint64, extraAsteriscArgs ...string) error { +- snapshotDir := filepath.Join(dir, utils.SnapsDir) +- start, err := e.selectSnapshot(e.logger, snapshotDir, e.absolutePreState, begin) +- if err != nil { +- return fmt.Errorf("find starting snapshot: %w", err) +- } +- proofDir := filepath.Join(dir, proofsDir) +- dataDir := utils.PreimageDir(dir) +- lastGeneratedState := filepath.Join(dir, utils.FinalState) +- args := []string{ +- "run", +- "--input", start, +- "--output", lastGeneratedState, +- "--meta", "", +- "--info-at", "%" + strconv.FormatUint(uint64(e.infoFreq), 10), +- "--proof-at", "=" + strconv.FormatUint(end, 10), +- "--proof-fmt", filepath.Join(proofDir, "%d.json.gz"), +- "--snapshot-at", "%" + strconv.FormatUint(uint64(e.snapshotFreq), 10), +- "--snapshot-fmt", filepath.Join(snapshotDir, "%d.json.gz"), +- } +- if end < math.MaxUint64 { +- args = append(args, "--stop-at", "="+strconv.FormatUint(end+1, 10)) +- } +- args = append(args, extraAsteriscArgs...) 
+- args = append(args, +- "--", +- e.server, "--server", +- "--l1", e.l1, +- "--l1.beacon", e.l1Beacon, +- "--l2", e.l2, +- "--datadir", dataDir, +- "--l1.head", e.inputs.L1Head.Hex(), +- "--l2.head", e.inputs.L2Head.Hex(), +- "--l2.outputroot", e.inputs.L2OutputRoot.Hex(), +- "--l2.claim", e.inputs.L2Claim.Hex(), +- "--l2.blocknumber", e.inputs.L2BlockNumber.Text(10), +- ) +- if e.network != "" { +- args = append(args, "--network", e.network) +- } +- if e.rollupConfig != "" { +- args = append(args, "--rollup.config", e.rollupConfig) +- } +- if e.l2Genesis != "" { +- args = append(args, "--l2.genesis", e.l2Genesis) +- } +- +- if err := os.MkdirAll(snapshotDir, 0755); err != nil { +- return fmt.Errorf("could not create snapshot directory %v: %w", snapshotDir, err) +- } +- if err := os.MkdirAll(dataDir, 0755); err != nil { +- return fmt.Errorf("could not create preimage cache directory %v: %w", dataDir, err) +- } +- if err := os.MkdirAll(proofDir, 0755); err != nil { +- return fmt.Errorf("could not create proofs directory %v: %w", proofDir, err) +- } +- e.logger.Info("Generating trace", "proof", end, "cmd", e.asterisc, "args", strings.Join(args, ", ")) +- execStart := time.Now() +- err = e.cmdExecutor(ctx, e.logger.New("proof", end), e.asterisc, args...) +- e.metrics.RecordAsteriscExecutionTime(time.Since(execStart).Seconds()) +- return err +-}
+ OP → CELO: +10 / -18
diff --git OP/op-challenger/game/fault/trace/asterisc/provider.go CELO/op-challenger/game/fault/trace/asterisc/provider.go +index cbf7b6241ae2d6f363eb7e5f246772da74ff57ea..5c481777b6ce1e15710f8313ebd4ea7ce1f2f588 100644 +--- OP/op-challenger/game/fault/trace/asterisc/provider.go ++++ CELO/op-challenger/game/fault/trace/asterisc/provider.go +@@ -12,6 +12,7 @@ "path/filepath" +  + "github.com/ethereum-optimism/optimism/op-challenger/config" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/utils" ++ "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/vm" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" + "github.com/ethereum-optimism/optimism/op-program/host/kvstore" + "github.com/ethereum-optimism/optimism/op-service/ioutil" +@@ -19,15 +20,6 @@ "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + ) +  +-const ( +- proofsDir = "proofs" +- diskStateCache = "state.json.gz" +-) +- +-type AsteriscMetricer interface { +- RecordAsteriscExecutionTime(t float64) +-} +- + type AsteriscTraceProvider struct { + logger log.Logger + dir string +@@ -43,14 +35,14 @@ // Cached as an optimisation to avoid repeatedly attempting to execute beyond the end of the trace. + lastStep uint64 + } +  +-func NewTraceProvider(logger log.Logger, m AsteriscMetricer, cfg *config.Config, prestateProvider types.PrestateProvider, asteriscPrestate string, localInputs utils.LocalGameInputs, dir string, gameDepth types.Depth) *AsteriscTraceProvider { ++func NewTraceProvider(logger log.Logger, m vm.Metricer, cfg vm.Config, prestateProvider types.PrestateProvider, asteriscPrestate string, localInputs utils.LocalGameInputs, dir string, gameDepth types.Depth) *AsteriscTraceProvider { + return &AsteriscTraceProvider{ + logger: logger, + dir: dir, + prestate: asteriscPrestate, +- generator: NewExecutor(logger, m, cfg, asteriscPrestate, localInputs), ++ generator: vm.NewExecutor(logger, m, cfg, asteriscPrestate, localInputs), + gameDepth: gameDepth, +- preimageLoader: utils.NewPreimageLoader(kvstore.NewDiskKV(utils.PreimageDir(dir)).Get), ++ preimageLoader: utils.NewPreimageLoader(kvstore.NewDiskKV(vm.PreimageDir(dir)).Get), + PrestateProvider: prestateProvider, + } + } +@@ -116,7 +108,7 @@ // If the last step is tracked, set i to the last step to generate or load the final proof + if p.lastStep != 0 && i > p.lastStep { + i = p.lastStep + } +- path := filepath.Join(p.dir, proofsDir, fmt.Sprintf("%d.json.gz", i)) ++ path := filepath.Join(p.dir, utils.ProofsDir, fmt.Sprintf("%d.json.gz", i)) + file, err := ioutil.OpenDecompressed(path) + if errors.Is(err, os.ErrNotExist) { + if err := p.generator.GenerateProof(ctx, p.dir, i); err != nil { +@@ -167,7 +159,7 @@ return &proof, nil + } +  + func (c *AsteriscTraceProvider) finalState() (*VMState, error) { +- state, err := parseState(filepath.Join(c.dir, utils.FinalState)) ++ state, err := parseState(filepath.Join(c.dir, vm.FinalState)) + if err != nil { + return nil, fmt.Errorf("cannot read final state: %w", err) + } +@@ -180,21 +172,21 @@ type AsteriscTraceProviderForTest struct { + *AsteriscTraceProvider + } +  +-func NewTraceProviderForTest(logger log.Logger, m AsteriscMetricer, cfg *config.Config, localInputs utils.LocalGameInputs, dir string, gameDepth types.Depth) *AsteriscTraceProviderForTest { ++func NewTraceProviderForTest(logger log.Logger, m vm.Metricer, cfg *config.Config, localInputs utils.LocalGameInputs, dir string, gameDepth types.Depth) *AsteriscTraceProviderForTest { + p := 
&AsteriscTraceProvider{ + logger: logger, + dir: dir, + prestate: cfg.AsteriscAbsolutePreState, +- generator: NewExecutor(logger, m, cfg, cfg.AsteriscAbsolutePreState, localInputs), ++ generator: vm.NewExecutor(logger, m, cfg.Asterisc, cfg.AsteriscAbsolutePreState, localInputs), + gameDepth: gameDepth, +- preimageLoader: utils.NewPreimageLoader(kvstore.NewDiskKV(utils.PreimageDir(dir)).Get), ++ preimageLoader: utils.NewPreimageLoader(kvstore.NewDiskKV(vm.PreimageDir(dir)).Get), + } + return &AsteriscTraceProviderForTest{p} + } +  + func (p *AsteriscTraceProviderForTest) FindStep(ctx context.Context, start uint64, preimage utils.PreimageOpt) (uint64, error) { + // Run asterisc to find the step that meets the preimage conditions +- if err := p.generator.(*Executor).generateProof(ctx, p.dir, start, math.MaxUint64, preimage()...); err != nil { ++ if err := p.generator.(*vm.Executor).DoGenerateProof(ctx, p.dir, start, math.MaxUint64, preimage()...); err != nil { + return 0, fmt.Errorf("generate asterisc trace (until preimage read): %w", err) + } + // Load the step from the state asterisc finished with
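Taken together with the deleted cannon and asterisc executors elsewhere in this section, these hunks show the providers switching to a single shared executor in a new vm package, configured through vm.Config, with the begin/end entry point used by the *ForTest providers exported as DoGenerateProof. A rough sketch of what such a consolidated executor could look like; the field and method shapes are assumptions drawn from the deleted code, not the real vm package:

package vm

import (
	"context"

	"github.com/ethereum/go-ethereum/log"
)

// Config groups the per-VM settings the deleted executors read from
// *config.Config directly (illustrative subset).
type Config struct {
	VMBin        string // path to the cannon or asterisc binary
	Server       string // path to the op-program server binary
	Network      string
	RollupConfig string
	L2Genesis    string
	SnapshotFreq uint
	InfoFreq     uint
}

// Inputs stands in for utils.LocalGameInputs.
type Inputs struct {
	L1Head, L2Head, L2OutputRoot, L2Claim string
	L2BlockNumber                         uint64
}

// Metricer replaces the per-VM CannonMetricer/AsteriscMetricer interfaces;
// the method shape here is an assumption.
type Metricer interface {
	RecordVmExecutionTime(vmType string, seconds float64)
}

// Executor is the single executor both trace providers now share.
type Executor struct {
	logger   log.Logger
	metrics  Metricer
	cfg      Config
	prestate string
	inputs   Inputs
}

func NewExecutor(logger log.Logger, m Metricer, cfg Config, prestate string, inputs Inputs) *Executor {
	return &Executor{logger: logger, metrics: m, cfg: cfg, prestate: prestate, inputs: inputs}
}

// GenerateProof runs the VM for a single trace index.
func (e *Executor) GenerateProof(ctx context.Context, dir string, i uint64) error {
	return e.DoGenerateProof(ctx, dir, i, i)
}

// DoGenerateProof is the exported begin/end form the tests call.
func (e *Executor) DoGenerateProof(ctx context.Context, dir string, begin, end uint64, extraArgs ...string) error {
	// assemble the VM command line and run it, as the deleted executors did
	return nil
}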
+ OP → CELO: +5 / -4
diff --git OP/op-challenger/game/fault/trace/asterisc/provider_test.go CELO/op-challenger/game/fault/trace/asterisc/provider_test.go +index 950a5ad70375a6acbbf0aa7dcf6ac785d74358d6..939a27decc304e256a04b8b7118b3cd1e9dc4844 100644 +--- OP/op-challenger/game/fault/trace/asterisc/provider_test.go ++++ CELO/op-challenger/game/fault/trace/asterisc/provider_test.go +@@ -12,6 +12,7 @@ "path/filepath" + "testing" +  + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/utils" ++ "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/vm" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" + "github.com/ethereum-optimism/optimism/op-service/ioutil" + "github.com/ethereum-optimism/optimism/op-service/testlog" +@@ -205,12 +206,12 @@ srcDir := filepath.Join("test_data", "proofs") + entries, err := testData.ReadDir(srcDir) + require.NoError(t, err) + dataDir := t.TempDir() +- require.NoError(t, os.Mkdir(filepath.Join(dataDir, proofsDir), 0o777)) ++ require.NoError(t, os.Mkdir(filepath.Join(dataDir, utils.ProofsDir), 0o777)) + for _, entry := range entries { + path := filepath.Join(srcDir, entry.Name()) + file, err := testData.ReadFile(path) + require.NoErrorf(t, err, "reading %v", path) +- proofFile := filepath.Join(dataDir, proofsDir, entry.Name()+".gz") ++ proofFile := filepath.Join(dataDir, utils.ProofsDir, entry.Name()+".gz") + err = ioutil.WriteCompressedBytes(proofFile, file, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0o644) + require.NoErrorf(t, err, "writing %v", path) + } +@@ -241,7 +242,7 @@ var data []byte + var err error + if e.finalState != nil && e.finalState.Step <= i { + // Requesting a trace index past the end of the trace +- proofFile = filepath.Join(dir, utils.FinalState) ++ proofFile = filepath.Join(dir, vm.FinalState) + data, err = json.Marshal(e.finalState) + if err != nil { + return err +@@ -249,7 +250,7 @@ } + return ioutil.WriteCompressedBytes(proofFile, data, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0o644) + } + if e.proof != nil { +- proofFile = filepath.Join(dir, proofsDir, fmt.Sprintf("%d.json.gz", i)) ++ proofFile = filepath.Join(dir, utils.ProofsDir, fmt.Sprintf("%d.json.gz", i)) + data, err = json.Marshal(e.proof) + if err != nil { + return err
+ OP → CELO: +6 / -5
diff --git OP/op-challenger/game/fault/trace/asterisc/state.go CELO/op-challenger/game/fault/trace/asterisc/state.go +index b766cda6f50caa4b37307e2185bbbf16c66b1726..ac269d192924b053edccf6532b400c0e7790e9dd 100644 +--- OP/op-challenger/game/fault/trace/asterisc/state.go ++++ CELO/op-challenger/game/fault/trace/asterisc/state.go +@@ -7,6 +7,7 @@ "io" +  + "github.com/ethereum-optimism/optimism/cannon/mipsevm" + "github.com/ethereum-optimism/optimism/op-service/ioutil" ++ "github.com/ethereum/go-ethereum/common" + ) +  + var asteriscWitnessLen = 362 +@@ -14,11 +15,11 @@ + // The state struct will be read from json. + // other fields included in json are specific to FPVM implementation, and not required for trace provider. + type VMState struct { +- PC uint64 `json:"pc"` +- Exited bool `json:"exited"` +- Step uint64 `json:"step"` +- Witness []byte `json:"witness"` +- StateHash [32]byte `json:"stateHash"` ++ PC uint64 `json:"pc"` ++ Exited bool `json:"exited"` ++ Step uint64 `json:"step"` ++ Witness []byte `json:"witness"` ++ StateHash common.Hash `json:"stateHash"` + } +  + func (state *VMState) validateStateHash() error {
+ OP → CELO: +1 / -34
diff --git OP/op-challenger/game/fault/trace/asterisc/test_data/state.json CELO/op-challenger/game/fault/trace/asterisc/test_data/state.json +index a1bf2e5b412e4b2d14002e7f9ae052047a283b20..00dfc2d666c84b89561d6eea4f4fdc67e1159faa 100644 +--- OP/op-challenger/game/fault/trace/asterisc/test_data/state.json ++++ CELO/op-challenger/game/fault/trace/asterisc/test_data/state.json +@@ -3,38 +3,5 @@ "pc": 0, + "exited": false, + "step": 0, + "witness": "wOSi8Cm62dDmKt1OGwxlLrSznk6zE4ghp7evP1rfrXYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIGCAAAAAAAAAAAAAAAAB/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=", +- "stateHash": [ +- 3, +- 33, +- 111, +- 220, +- 74, +- 123, +- 253, +- 76, +- 113, +- 96, +- 250, +- 148, +- 109, +- 27, +- 254, +- 69, +- 29, +- 19, +- 255, +- 50, +- 218, +- 73, +- 102, +- 9, +- 254, +- 24, +- 53, +- 82, +- 130, +- 185, +- 16, +- 198 +- ] ++ "stateHash": "0x03216fdc4a7bfd4c7160fa946d1bfe451d13ff32da496609fe18355282b910c6" + }
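The fixture rewrite above is a direct consequence of the VMState change in the previous hunk: a [32]byte field marshals as a 32-element JSON array of numbers, while common.Hash implements encoding.TextMarshaler and marshals as a single 0x-prefixed hex string. A small demonstration:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	var raw [32]byte
	raw[0] = 0x03

	asArray, _ := json.Marshal(raw)                       // [3,0,0,...,0]
	asHash, _ := json.Marshal(common.BytesToHash(raw[:])) // "0x0300...0000"

	fmt.Println(string(asArray))
	fmt.Println(string(asHash))
}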
+ OP → (deleted): +0 / -127
diff --git OP/op-challenger/game/fault/trace/cannon/executor.go CELO/op-challenger/game/fault/trace/cannon/executor.go +deleted file mode 100644 +index 688fd87d7fa2c7daf0d5ed8619aec2ad04f4d025..0000000000000000000000000000000000000000 +--- OP/op-challenger/game/fault/trace/cannon/executor.go ++++ /dev/null +@@ -1,127 +0,0 @@ +-package cannon +- +-import ( +- "context" +- "fmt" +- "math" +- "os" +- "path/filepath" +- "strconv" +- "strings" +- "time" +- +- "github.com/ethereum-optimism/optimism/op-challenger/config" +- "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/utils" +- "github.com/ethereum/go-ethereum/log" +-) +- +-type Executor struct { +- logger log.Logger +- metrics CannonMetricer +- l1 string +- l1Beacon string +- l2 string +- inputs utils.LocalGameInputs +- cannon string +- server string +- network string +- rollupConfig string +- l2Genesis string +- absolutePreState string +- snapshotFreq uint +- infoFreq uint +- selectSnapshot utils.SnapshotSelect +- cmdExecutor utils.CmdExecutor +-} +- +-func NewExecutor(logger log.Logger, m CannonMetricer, cfg *config.Config, prestate string, inputs utils.LocalGameInputs) *Executor { +- return &Executor{ +- logger: logger, +- metrics: m, +- l1: cfg.L1EthRpc, +- l1Beacon: cfg.L1Beacon, +- l2: cfg.L2Rpc, +- inputs: inputs, +- cannon: cfg.CannonBin, +- server: cfg.CannonServer, +- network: cfg.CannonNetwork, +- rollupConfig: cfg.CannonRollupConfigPath, +- l2Genesis: cfg.CannonL2GenesisPath, +- absolutePreState: prestate, +- snapshotFreq: cfg.CannonSnapshotFreq, +- infoFreq: cfg.CannonInfoFreq, +- selectSnapshot: utils.FindStartingSnapshot, +- cmdExecutor: utils.RunCmd, +- } +-} +- +-// GenerateProof executes cannon to generate a proof at the specified trace index. +-// The proof is stored at the specified directory. +-func (e *Executor) GenerateProof(ctx context.Context, dir string, i uint64) error { +- return e.generateProof(ctx, dir, i, i) +-} +- +-// generateProof executes cannon from the specified starting trace index until the end trace index. +-// The proof is stored at the specified directory. +-func (e *Executor) generateProof(ctx context.Context, dir string, begin uint64, end uint64, extraCannonArgs ...string) error { +- snapshotDir := filepath.Join(dir, utils.SnapsDir) +- start, err := e.selectSnapshot(e.logger, snapshotDir, e.absolutePreState, begin) +- if err != nil { +- return fmt.Errorf("find starting snapshot: %w", err) +- } +- proofDir := filepath.Join(dir, utils.ProofsDir) +- dataDir := utils.PreimageDir(dir) +- lastGeneratedState := filepath.Join(dir, utils.FinalState) +- args := []string{ +- "run", +- "--input", start, +- "--output", lastGeneratedState, +- "--meta", "", +- "--info-at", "%" + strconv.FormatUint(uint64(e.infoFreq), 10), +- "--proof-at", "=" + strconv.FormatUint(end, 10), +- "--proof-fmt", filepath.Join(proofDir, "%d.json.gz"), +- "--snapshot-at", "%" + strconv.FormatUint(uint64(e.snapshotFreq), 10), +- "--snapshot-fmt", filepath.Join(snapshotDir, "%d.json.gz"), +- } +- if end < math.MaxUint64 { +- args = append(args, "--stop-at", "="+strconv.FormatUint(end+1, 10)) +- } +- args = append(args, extraCannonArgs...) 
+- args = append(args, +- "--", +- e.server, "--server", +- "--l1", e.l1, +- "--l1.beacon", e.l1Beacon, +- "--l2", e.l2, +- "--datadir", dataDir, +- "--l1.head", e.inputs.L1Head.Hex(), +- "--l2.head", e.inputs.L2Head.Hex(), +- "--l2.outputroot", e.inputs.L2OutputRoot.Hex(), +- "--l2.claim", e.inputs.L2Claim.Hex(), +- "--l2.blocknumber", e.inputs.L2BlockNumber.Text(10), +- ) +- if e.network != "" { +- args = append(args, "--network", e.network) +- } +- if e.rollupConfig != "" { +- args = append(args, "--rollup.config", e.rollupConfig) +- } +- if e.l2Genesis != "" { +- args = append(args, "--l2.genesis", e.l2Genesis) +- } +- +- if err := os.MkdirAll(snapshotDir, 0755); err != nil { +- return fmt.Errorf("could not create snapshot directory %v: %w", snapshotDir, err) +- } +- if err := os.MkdirAll(dataDir, 0755); err != nil { +- return fmt.Errorf("could not create preimage cache directory %v: %w", dataDir, err) +- } +- if err := os.MkdirAll(proofDir, 0755); err != nil { +- return fmt.Errorf("could not create proofs directory %v: %w", proofDir, err) +- } +- e.logger.Info("Generating trace", "proof", end, "cmd", e.cannon, "args", strings.Join(args, ", ")) +- execStart := time.Now() +- err = e.cmdExecutor(ctx, e.logger.New("proof", end), e.cannon, args...) +- e.metrics.RecordCannonExecutionTime(time.Since(execStart).Seconds()) +- return err +-}
@@ -9993,13 +42515,13 @@
- (new) → CELO
+ OP → (deleted)
@@ -10009,46 +42531,255 @@
- +18 / -0
+ +0 / -227
diff --git OP/packages/contracts-bedrock/src/celo/Initializable.sol CELO/packages/contracts-bedrock/src/celo/Initializable.sol -new file mode 100644 -index 0000000000000000000000000000000000000000..7929728eef4ed9063c81aea6f2a0a1758d4ef728 ---- /dev/null -+++ CELO/packages/contracts-bedrock/src/celo/Initializable.sol -@@ -0,0 +1,18 @@ -+// SPDX-License-Identifier: LGPL-3.0-only -+pragma solidity ^0.8.15; -+ -+contract Initializable { -+ bool public initialized; -+ -+ modifier initializer() { -+ require(!initialized, "contract already initialized"); -+ initialized = true; -+ _; -+ } -+ -+ constructor(bool testingDeployment) { -+ if (!testingDeployment) { -+ initialized = true; -+ } -+ } -+}
+
diff --git OP/op-challenger/game/fault/trace/cannon/executor_test.go CELO/op-challenger/game/fault/trace/cannon/executor_test.go +deleted file mode 100644 +index 4f20c426e7780951c1797b76015c2e181a293040..0000000000000000000000000000000000000000 +--- OP/op-challenger/game/fault/trace/cannon/executor_test.go ++++ /dev/null +@@ -1,227 +0,0 @@ +-package cannon +- +-import ( +- "context" +- "fmt" +- "math" +- "math/big" +- "os" +- "path/filepath" +- "testing" +- "time" +- +- "github.com/ethereum-optimism/optimism/op-challenger/config" +- "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/utils" +- "github.com/ethereum-optimism/optimism/op-challenger/metrics" +- "github.com/ethereum-optimism/optimism/op-service/testlog" +- "github.com/ethereum/go-ethereum/common" +- "github.com/ethereum/go-ethereum/log" +- "github.com/stretchr/testify/require" +-) +- +-const execTestCannonPrestate = "/foo/pre.json" +- +-func TestGenerateProof(t *testing.T) { +- input := "starting.json" +- tempDir := t.TempDir() +- dir := filepath.Join(tempDir, "gameDir") +- cfg := config.NewConfig(common.Address{0xbb}, "http://localhost:8888", "http://localhost:9000", "http://localhost:9096", "http://localhost:9095", tempDir, config.TraceTypeCannon) +- cfg.L2Rpc = "http://localhost:9999" +- prestate := "pre.json" +- cfg.CannonBin = "./bin/cannon" +- cfg.CannonServer = "./bin/op-program" +- cfg.CannonSnapshotFreq = 500 +- cfg.CannonInfoFreq = 900 +- +- inputs := utils.LocalGameInputs{ +- L1Head: common.Hash{0x11}, +- L2Head: common.Hash{0x22}, +- L2OutputRoot: common.Hash{0x33}, +- L2Claim: common.Hash{0x44}, +- L2BlockNumber: big.NewInt(3333), +- } +- captureExec := func(t *testing.T, cfg config.Config, proofAt uint64) (string, string, map[string]string) { +- m := &cannonDurationMetrics{} +- executor := NewExecutor(testlog.Logger(t, log.LevelInfo), m, &cfg, prestate, inputs) +- executor.selectSnapshot = func(logger log.Logger, dir string, absolutePreState string, i uint64) (string, error) { +- return input, nil +- } +- var binary string +- var subcommand string +- args := make(map[string]string) +- executor.cmdExecutor = func(ctx context.Context, l log.Logger, b string, a ...string) error { +- binary = b +- subcommand = a[0] +- for i := 1; i < len(a); { +- if a[i] == "--" { +- // Skip over the divider between cannon and server program +- i += 1 +- continue +- } +- args[a[i]] = a[i+1] +- i += 2 +- } +- return nil +- } +- err := executor.GenerateProof(context.Background(), dir, proofAt) +- require.NoError(t, err) +- require.Equal(t, 1, m.executionTimeRecordCount, "Should record cannon execution time") +- return binary, subcommand, args +- } +- +- t.Run("Network", func(t *testing.T) { +- cfg.CannonNetwork = "mainnet" +- cfg.CannonRollupConfigPath = "" +- cfg.CannonL2GenesisPath = "" +- binary, subcommand, args := captureExec(t, cfg, 150_000_000) +- require.DirExists(t, filepath.Join(dir, utils.PreimagesDir)) +- require.DirExists(t, filepath.Join(dir, utils.ProofsDir)) +- require.DirExists(t, filepath.Join(dir, utils.SnapsDir)) +- require.Equal(t, cfg.CannonBin, binary) +- require.Equal(t, "run", subcommand) +- require.Equal(t, input, args["--input"]) +- require.Contains(t, args, "--meta") +- require.Equal(t, "", args["--meta"]) +- require.Equal(t, filepath.Join(dir, utils.FinalState), args["--output"]) +- require.Equal(t, "=150000000", args["--proof-at"]) +- require.Equal(t, "=150000001", args["--stop-at"]) +- require.Equal(t, "%500", args["--snapshot-at"]) +- require.Equal(t, "%900", args["--info-at"]) +- // 
Slight quirk of how we pair off args +- // The server binary winds up as the key and the first arg --server as the value which has no value +- // Then everything else pairs off correctly again +- require.Equal(t, "--server", args[cfg.CannonServer]) +- require.Equal(t, cfg.L1EthRpc, args["--l1"]) +- require.Equal(t, cfg.L1Beacon, args["--l1.beacon"]) +- require.Equal(t, cfg.L2Rpc, args["--l2"]) +- require.Equal(t, filepath.Join(dir, utils.PreimagesDir), args["--datadir"]) +- require.Equal(t, filepath.Join(dir, utils.ProofsDir, "%d.json.gz"), args["--proof-fmt"]) +- require.Equal(t, filepath.Join(dir, utils.SnapsDir, "%d.json.gz"), args["--snapshot-fmt"]) +- require.Equal(t, cfg.CannonNetwork, args["--network"]) +- require.NotContains(t, args, "--rollup.config") +- require.NotContains(t, args, "--l2.genesis") +- +- // Local game inputs +- require.Equal(t, inputs.L1Head.Hex(), args["--l1.head"]) +- require.Equal(t, inputs.L2Head.Hex(), args["--l2.head"]) +- require.Equal(t, inputs.L2OutputRoot.Hex(), args["--l2.outputroot"]) +- require.Equal(t, inputs.L2Claim.Hex(), args["--l2.claim"]) +- require.Equal(t, "3333", args["--l2.blocknumber"]) +- }) +- +- t.Run("RollupAndGenesis", func(t *testing.T) { +- cfg.CannonNetwork = "" +- cfg.CannonRollupConfigPath = "rollup.json" +- cfg.CannonL2GenesisPath = "genesis.json" +- _, _, args := captureExec(t, cfg, 150_000_000) +- require.NotContains(t, args, "--network") +- require.Equal(t, cfg.CannonRollupConfigPath, args["--rollup.config"]) +- require.Equal(t, cfg.CannonL2GenesisPath, args["--l2.genesis"]) +- }) +- +- t.Run("NoStopAtWhenProofIsMaxUInt", func(t *testing.T) { +- cfg.CannonNetwork = "mainnet" +- cfg.CannonRollupConfigPath = "rollup.json" +- cfg.CannonL2GenesisPath = "genesis.json" +- _, _, args := captureExec(t, cfg, math.MaxUint64) +- // stop-at would need to be one more than the proof step which would overflow back to 0 +- // so expect that it will be omitted. We'll ultimately want cannon to execute until the program exits. 
+- require.NotContains(t, args, "--stop-at") +- }) +-} +- +-func TestRunCmdLogsOutput(t *testing.T) { +- bin := "/bin/echo" +- if _, err := os.Stat(bin); err != nil { +- t.Skip(bin, " not available", err) +- } +- ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) +- defer cancel() +- logger, logs := testlog.CaptureLogger(t, log.LevelInfo) +- err := utils.RunCmd(ctx, logger, bin, "Hello World") +- require.NoError(t, err) +- levelFilter := testlog.NewLevelFilter(log.LevelInfo) +- msgFilter := testlog.NewMessageFilter("Hello World") +- require.NotNil(t, logs.FindLog(levelFilter, msgFilter)) +-} +- +-func TestFindStartingSnapshot(t *testing.T) { +- logger := testlog.Logger(t, log.LevelInfo) +- +- withSnapshots := func(t *testing.T, files ...string) string { +- dir := t.TempDir() +- for _, file := range files { +- require.NoError(t, os.WriteFile(fmt.Sprintf("%v/%v", dir, file), nil, 0o644)) +- } +- return dir +- } +- +- t.Run("UsePrestateWhenSnapshotsDirDoesNotExist", func(t *testing.T) { +- dir := t.TempDir() +- snapshot, err := utils.FindStartingSnapshot(logger, filepath.Join(dir, "doesNotExist"), execTestCannonPrestate, 1200) +- require.NoError(t, err) +- require.Equal(t, execTestCannonPrestate, snapshot) +- }) +- +- t.Run("UsePrestateWhenSnapshotsDirEmpty", func(t *testing.T) { +- dir := withSnapshots(t) +- snapshot, err := utils.FindStartingSnapshot(logger, dir, execTestCannonPrestate, 1200) +- require.NoError(t, err) +- require.Equal(t, execTestCannonPrestate, snapshot) +- }) +- +- t.Run("UsePrestateWhenNoSnapshotBeforeTraceIndex", func(t *testing.T) { +- dir := withSnapshots(t, "100.json", "200.json") +- snapshot, err := utils.FindStartingSnapshot(logger, dir, execTestCannonPrestate, 99) +- require.NoError(t, err) +- require.Equal(t, execTestCannonPrestate, snapshot) +- +- snapshot, err = utils.FindStartingSnapshot(logger, dir, execTestCannonPrestate, 100) +- require.NoError(t, err) +- require.Equal(t, execTestCannonPrestate, snapshot) +- }) +- +- t.Run("UseClosestAvailableSnapshot", func(t *testing.T) { +- dir := withSnapshots(t, "100.json.gz", "123.json.gz", "250.json.gz") +- +- snapshot, err := utils.FindStartingSnapshot(logger, dir, execTestCannonPrestate, 101) +- require.NoError(t, err) +- require.Equal(t, filepath.Join(dir, "100.json.gz"), snapshot) +- +- snapshot, err = utils.FindStartingSnapshot(logger, dir, execTestCannonPrestate, 123) +- require.NoError(t, err) +- require.Equal(t, filepath.Join(dir, "100.json.gz"), snapshot) +- +- snapshot, err = utils.FindStartingSnapshot(logger, dir, execTestCannonPrestate, 124) +- require.NoError(t, err) +- require.Equal(t, filepath.Join(dir, "123.json.gz"), snapshot) +- +- snapshot, err = utils.FindStartingSnapshot(logger, dir, execTestCannonPrestate, 256) +- require.NoError(t, err) +- require.Equal(t, filepath.Join(dir, "250.json.gz"), snapshot) +- }) +- +- t.Run("IgnoreDirectories", func(t *testing.T) { +- dir := withSnapshots(t, "100.json.gz") +- require.NoError(t, os.Mkdir(filepath.Join(dir, "120.json.gz"), 0o777)) +- snapshot, err := utils.FindStartingSnapshot(logger, dir, execTestCannonPrestate, 150) +- require.NoError(t, err) +- require.Equal(t, filepath.Join(dir, "100.json.gz"), snapshot) +- }) +- +- t.Run("IgnoreUnexpectedFiles", func(t *testing.T) { +- dir := withSnapshots(t, ".file", "100.json.gz", "foo", "bar.json.gz") +- snapshot, err := utils.FindStartingSnapshot(logger, dir, execTestCannonPrestate, 150) +- require.NoError(t, err) +- require.Equal(t, filepath.Join(dir, "100.json.gz"), snapshot) +- }) +-} 
+- +-type cannonDurationMetrics struct { +- metrics.NoopMetricsImpl +- executionTimeRecordCount int +-} +- +-func (c *cannonDurationMetrics) RecordCannonExecutionTime(_ float64) { +- c.executionTimeRecordCount++ +-}
@@ -10057,13 +42788,13 @@
- (new) → CELO
+ OP → CELO
@@ -10073,113 +42804,230 @@
- +85 / -0
+ +11 / -9
diff --git OP/packages/contracts-bedrock/src/celo/MentoFeeHandlerSeller.sol CELO/packages/contracts-bedrock/src/celo/MentoFeeHandlerSeller.sol -new file mode 100644 -index 0000000000000000000000000000000000000000..e5a9ff455f391f797bbc2ace5101c0ef58c3c192 ---- /dev/null -+++ CELO/packages/contracts-bedrock/src/celo/MentoFeeHandlerSeller.sol -@@ -0,0 +1,85 @@ -+// SPDX-License-Identifier: MIT -+pragma solidity ^0.8.15; -+ -+import "../../lib/openzeppelin-contracts/contracts/access/Ownable.sol"; -+import "../../lib/openzeppelin-contracts/contracts/token/ERC20/IERC20.sol"; -+ -+import "./interfaces/IStableTokenMento.sol"; -+ -+import "./common/interfaces/IFeeHandlerSeller.sol"; -+import "./stability/interfaces/ISortedOracles.sol"; -+import "./common/FixidityLib.sol"; -+import "./common/Initializable.sol"; -+ -+import "./FeeHandlerSeller.sol"; -+ -+// An implementation of FeeHandlerSeller supporting interfaces compatible with -+// Mento -+// See https://github.com/celo-org/celo-proposals/blob/master/CIPs/cip-0052.md -+contract MentoFeeHandlerSeller is FeeHandlerSeller { -+ using FixidityLib for FixidityLib.Fraction; -+ -+ /** -+ * @notice Sets initialized == true on implementation contracts. -+ * @param test Set to true to skip implementation initialisation. -+ */ -+ constructor(bool test) FeeHandlerSeller(test) { } -+ -+ // without this line the contract can't receive native Celo transfers -+ receive() external payable { } -+ -+ /** -+ * @notice Returns the storage, major, minor, and patch version of the contract. -+ * @return Storage version of the contract. -+ * @return Major version of the contract. -+ * @return Minor version of the contract. -+ * @return Patch version of the contract. -+ */ -+ function getVersionNumber() external pure returns (uint256, uint256, uint256, uint256) { -+ return (1, 1, 0, 0); -+ } -+ -+ function sell( -+ address sellTokenAddress, -+ address buyTokenAddress, -+ uint256 amount, -+ uint256 maxSlippage // as fraction, -+ ) -+ external -+ returns (uint256) -+ { -+ require( -+ buyTokenAddress == registry.getAddressForOrDie(GOLD_TOKEN_REGISTRY_ID), "Buy token can only be gold token" -+ ); -+ -+ IStableTokenMento stableToken = IStableTokenMento(sellTokenAddress); -+ require(amount <= stableToken.balanceOf(address(this)), "Balance of token to burn not enough"); -+ -+ address exchangeAddress = registry.getAddressForOrDie(stableToken.getExchangeRegistryId()); -+ -+ IExchange exchange = IExchange(exchangeAddress); -+ -+ uint256 minAmount = 0; -+ -+ ISortedOracles sortedOracles = getSortedOracles(); -+ -+ require( -+ sortedOracles.numRates(sellTokenAddress) >= minimumReports[sellTokenAddress], -+ "Number of reports for token not enough" -+ ); -+ -+ (uint256 rateNumerator, uint256 rateDenominator) = sortedOracles.medianRate(sellTokenAddress); -+ minAmount = calculateMinAmount(rateNumerator, rateDenominator, amount, maxSlippage); -+ -+ // TODO an upgrade would be to compare using routers as well -+ stableToken.approve(exchangeAddress, amount); -+ exchange.sell(amount, minAmount, false); -+ -+ IERC20 goldToken = getGoldToken(); -+ uint256 celoAmount = goldToken.balanceOf(address(this)); -+ goldToken.transfer(msg.sender, celoAmount); -+ -+ emit TokenSold(sellTokenAddress, buyTokenAddress, amount); -+ return celoAmount; -+ } -+}
+
diff --git OP/op-challenger/game/fault/trace/cannon/prestate_test.go CELO/op-challenger/game/fault/trace/cannon/prestate_test.go +index 1297da54bd887103ee88ece84d693207d645a07e..a8616f1711808af738ff7ec8e401677dd4c1c556 100644 +--- OP/op-challenger/game/fault/trace/cannon/prestate_test.go ++++ CELO/op-challenger/game/fault/trace/cannon/prestate_test.go +@@ -44,15 +44,17 @@ state := mipsevm.State{ + Memory: mipsevm.NewMemory(), + PreimageKey: common.HexToHash("cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"), + PreimageOffset: 0, +- PC: 0, +- NextPC: 1, +- LO: 0, +- HI: 0, +- Heap: 0, +- ExitCode: 0, +- Exited: false, +- Step: 0, +- Registers: [32]uint32{}, ++ Cpu: mipsevm.CpuScalars{ ++ PC: 0, ++ NextPC: 1, ++ LO: 0, ++ HI: 0, ++ }, ++ Heap: 0, ++ ExitCode: 0, ++ Exited: false, ++ Step: 0, ++ Registers: [32]uint32{}, + } + expected, err := state.EncodeWitness().StateHash() + require.NoError(t, err)
+ OP → CELO: +9 / -12
diff --git OP/op-challenger/game/fault/trace/cannon/provider.go CELO/op-challenger/game/fault/trace/cannon/provider.go +index 704d88e2e6a413ca1c8795d3eede39b9357d2ecb..c565b9b39b75df40161b8a7dc679a2292469ca97 100644 +--- OP/op-challenger/game/fault/trace/cannon/provider.go ++++ CELO/op-challenger/game/fault/trace/cannon/provider.go +@@ -12,6 +12,7 @@ "path/filepath" +  + "github.com/ethereum-optimism/optimism/op-challenger/config" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/utils" ++ "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/vm" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" + "github.com/ethereum-optimism/optimism/op-program/host/kvstore" + "github.com/ethereum-optimism/optimism/op-service/ioutil" +@@ -21,10 +22,6 @@ "github.com/ethereum/go-ethereum/log" +  + "github.com/ethereum-optimism/optimism/cannon/mipsevm" + ) +- +-type CannonMetricer interface { +- RecordCannonExecutionTime(t float64) +-} +  + type CannonTraceProvider struct { + logger log.Logger +@@ -41,14 +38,14 @@ // Cached as an optimisation to avoid repeatedly attempting to execute beyond the end of the trace. + lastStep uint64 + } +  +-func NewTraceProvider(logger log.Logger, m CannonMetricer, cfg *config.Config, prestateProvider types.PrestateProvider, prestate string, localInputs utils.LocalGameInputs, dir string, gameDepth types.Depth) *CannonTraceProvider { ++func NewTraceProvider(logger log.Logger, m vm.Metricer, cfg vm.Config, prestateProvider types.PrestateProvider, prestate string, localInputs utils.LocalGameInputs, dir string, gameDepth types.Depth) *CannonTraceProvider { + return &CannonTraceProvider{ + logger: logger, + dir: dir, + prestate: prestate, +- generator: NewExecutor(logger, m, cfg, prestate, localInputs), ++ generator: vm.NewExecutor(logger, m, cfg, prestate, localInputs), + gameDepth: gameDepth, +- preimageLoader: utils.NewPreimageLoader(kvstore.NewDiskKV(utils.PreimageDir(dir)).Get), ++ preimageLoader: utils.NewPreimageLoader(kvstore.NewDiskKV(vm.PreimageDir(dir)).Get), + PrestateProvider: prestateProvider, + } + } +@@ -170,7 +167,7 @@ return &proof, nil + } +  + func (c *CannonTraceProvider) finalState() (*mipsevm.State, error) { +- state, err := parseState(filepath.Join(c.dir, utils.FinalState)) ++ state, err := parseState(filepath.Join(c.dir, vm.FinalState)) + if err != nil { + return nil, fmt.Errorf("cannot read final state: %w", err) + } +@@ -183,21 +180,21 @@ type CannonTraceProviderForTest struct { + *CannonTraceProvider + } +  +-func NewTraceProviderForTest(logger log.Logger, m CannonMetricer, cfg *config.Config, localInputs utils.LocalGameInputs, dir string, gameDepth types.Depth) *CannonTraceProviderForTest { ++func NewTraceProviderForTest(logger log.Logger, m vm.Metricer, cfg *config.Config, localInputs utils.LocalGameInputs, dir string, gameDepth types.Depth) *CannonTraceProviderForTest { + p := &CannonTraceProvider{ + logger: logger, + dir: dir, + prestate: cfg.CannonAbsolutePreState, +- generator: NewExecutor(logger, m, cfg, cfg.CannonAbsolutePreState, localInputs), ++ generator: vm.NewExecutor(logger, m, cfg.Cannon, cfg.CannonAbsolutePreState, localInputs), + gameDepth: gameDepth, +- preimageLoader: utils.NewPreimageLoader(kvstore.NewDiskKV(utils.PreimageDir(dir)).Get), ++ preimageLoader: utils.NewPreimageLoader(kvstore.NewDiskKV(vm.PreimageDir(dir)).Get), + } + return &CannonTraceProviderForTest{p} + } +  + func (p *CannonTraceProviderForTest) FindStep(ctx context.Context, start uint64, preimage 
utils.PreimageOpt) (uint64, error) { + // Run cannon to find the step that meets the preimage conditions +- if err := p.generator.(*Executor).generateProof(ctx, p.dir, start, math.MaxUint64, preimage()...); err != nil { ++ if err := p.generator.(*vm.Executor).DoGenerateProof(ctx, p.dir, start, math.MaxUint64, preimage()...); err != nil { + return 0, fmt.Errorf("generate cannon trace (until preimage read): %w", err) + } + // Load the step from the state cannon finished with
+ OP → CELO: +2 / -1
diff --git OP/op-challenger/game/fault/trace/cannon/provider_test.go CELO/op-challenger/game/fault/trace/cannon/provider_test.go +index 388bd151991bc48bee6f9d2e11d09b91b060c3fb..94277ed88399b1ef6e90860f3b693ff47ffa657b 100644 +--- OP/op-challenger/game/fault/trace/cannon/provider_test.go ++++ CELO/op-challenger/game/fault/trace/cannon/provider_test.go +@@ -13,6 +13,7 @@ "testing" +  + "github.com/ethereum-optimism/optimism/cannon/mipsevm" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/utils" ++ "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/vm" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" + "github.com/ethereum-optimism/optimism/op-service/ioutil" + "github.com/ethereum-optimism/optimism/op-service/testlog" +@@ -257,7 +258,7 @@ var data []byte + var err error + if e.finalState != nil && e.finalState.Step <= i { + // Requesting a trace index past the end of the trace +- proofFile = filepath.Join(dir, utils.FinalState) ++ proofFile = filepath.Join(dir, vm.FinalState) + data, err = json.Marshal(e.finalState) + if err != nil { + return err
@@ -10188,370 +43036,294 @@
- (new) → CELO: +336 / -0
+ OP → CELO
diff --git OP/packages/contracts-bedrock/src/celo/StableTokenV2.sol CELO/packages/contracts-bedrock/src/celo/StableTokenV2.sol -new file mode 100644 -index 0000000000000000000000000000000000000000..68632df65abc9d352de50b7f273afc491ff8a1b2 ---- /dev/null -+++ CELO/packages/contracts-bedrock/src/celo/StableTokenV2.sol -@@ -0,0 +1,336 @@ -+// SPDX-License-Identifier: MIT -+pragma solidity ^0.8.15; -+ -+import { ERC20PermitUpgradeable } from -+ "@openzeppelin/contracts-upgradeable/token/ERC20/extensions/draft-ERC20PermitUpgradeable.sol"; -+import { ERC20Upgradeable } from "@openzeppelin/contracts-upgradeable/token/ERC20/ERC20Upgradeable.sol"; -+import { OwnableUpgradeable } from "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol"; -+ -+import { IStableTokenV2 } from "./interfaces/IStableToken.sol"; -+import { CalledByVm } from "./CalledByVm.sol"; -+ -+/** -+ * @title ERC20 token with minting and burning permissioned to a broker and validators. -+ */ -+contract StableTokenV2 is IStableTokenV2, ERC20PermitUpgradeable, CalledByVm, OwnableUpgradeable { -+ address public validators; -+ address public broker; -+ address public exchange; -+ -+ event TransferComment(string comment); -+ event BrokerUpdated(address broker); -+ event ValidatorsUpdated(address validators); -+ event ExchangeUpdated(address exchange); -+ -+ /** -+ * @dev Restricts a function so it can only be executed by an address that's allowed to mint. -+ * Currently that's the broker, validators, or exchange. -+ */ -+ modifier onlyMinter() { -+ address sender = _msgSender(); -+ require(sender == broker || sender == validators || sender == exchange, "StableTokenV2: not allowed to mint"); -+ _; -+ } -+ -+ /** -+ * @dev Restricts a function so it can only be executed by an address that's allowed to burn. -+ * Currently that's the broker or exchange. -+ */ -+ modifier onlyBurner() { -+ address sender = _msgSender(); -+ require(sender == broker || sender == exchange, "StableTokenV2: not allowed to burn"); -+ _; -+ } -+ -+ /** -+ * @notice The constructor for the StableTokenV2 contract. -+ * @dev Should be called with disable=true in deployments when -+ * it's accessed through a Proxy. -+ * Call this with disable=false during testing, when used -+ * without a proxy. -+ * @param disable Set to true to run `_disableInitializers()` inherited from -+ * openzeppelin-contracts-upgradeable/Initializable.sol -+ */ -+ constructor(bool disable) { -+ if (disable) { -+ _disableInitializers(); -+ } -+ } -+ -+ /** -+ * @notice Initializes a StableTokenV2. -+ * It keeps the same signature as the original initialize() function -+ * in legacy/StableToken.sol -+ * @param _name The name of the stable token (English) -+ * @param _symbol A short symbol identifying the token (e.g. "cUSD") -+ * @param initialBalanceAddresses Array of addresses with an initial balance. -+ * @param initialBalanceValues Array of balance values corresponding to initialBalanceAddresses. 
-+ * deprecated-param exchangeIdentifier String identifier of exchange in registry (for specific fiat pairs) -+ */ -+ function initialize( -+ // slither-disable-start shadowing-local -+ string calldata _name, -+ string calldata _symbol, -+ // slither-disable-end shadowing-local -+ address[] calldata initialBalanceAddresses, -+ uint256[] calldata initialBalanceValues -+ ) -+ external -+ initializer -+ { -+ __ERC20_init_unchained(_name, _symbol); -+ __ERC20Permit_init(_symbol); -+ _transferOwnership(_msgSender()); -+ -+ require(initialBalanceAddresses.length == initialBalanceValues.length, "Array length mismatch"); -+ for (uint256 i = 0; i < initialBalanceAddresses.length; i += 1) { -+ _mint(initialBalanceAddresses[i], initialBalanceValues[i]); -+ } -+ } -+ -+ /** -+ * @notice Initializes a StableTokenV2 contract -+ * when upgrading from legacy/StableToken.sol. -+ * It sets the addresses that were previously read from the Registry. -+ * It runs the ERC20PermitUpgradeable initializer. -+ * @dev This function is only callable once. -+ * @param _broker The address of the Broker contract. -+ * @param _validators The address of the Validators contract. -+ * @param _exchange The address of the Exchange contract. -+ */ -+ function initializeV2( -+ address _broker, -+ address _validators, -+ address _exchange -+ ) -+ external -+ reinitializer(2) -+ onlyOwner -+ { -+ _setBroker(_broker); -+ _setValidators(_validators); -+ _setExchange(_exchange); -+ __ERC20Permit_init(symbol()); -+ } -+ -+ /** -+ * @notice Sets the address of the Broker contract. -+ * @dev This function is only callable by the owner. -+ * @param _broker The address of the Broker contract. -+ */ -+ function setBroker(address _broker) external onlyOwner { -+ _setBroker(_broker); -+ } -+ -+ /** -+ * @notice Sets the address of the Validators contract. -+ * @dev This function is only callable by the owner. -+ * @param _validators The address of the Validators contract. -+ */ -+ function setValidators(address _validators) external onlyOwner { -+ _setValidators(_validators); -+ } -+ -+ /** -+ * @notice Sets the address of the Exchange contract. -+ * @dev This function is only callable by the owner. -+ * @param _exchange The address of the Exchange contract. -+ */ -+ function setExchange(address _exchange) external onlyOwner { -+ _setExchange(_exchange); -+ } -+ -+ /** -+ * @notice Transfer token for a specified address -+ * @param to The address to transfer to. -+ * @param value The amount to be transferred. -+ * @param comment The transfer comment. -+ * @return True if the transaction succeeds. -+ */ -+ function transferWithComment(address to, uint256 value, string calldata comment) external returns (bool) { -+ emit TransferComment(comment); -+ return transfer(to, value); -+ } -+ -+ /** -+ * @notice Mints new StableToken and gives it to 'to'. -+ * @param to The account for which to mint tokens. -+ * @param value The amount of StableToken to mint. -+ */ -+ function mint(address to, uint256 value) external onlyMinter returns (bool) { -+ _mint(to, value); -+ return true; -+ } -+ -+ /** -+ * @notice Burns StableToken from the balance of msg.sender. -+ * @param value The amount of StableToken to burn. -+ */ -+ function burn(uint256 value) external onlyBurner returns (bool) { -+ _burn(msg.sender, value); -+ return true; -+ } -+ -+ /** -+ * @notice Set the address of the Broker contract and emit an event -+ * @param _broker The address of the Broker contract. 
-+ */ -+ function _setBroker(address _broker) internal { -+ broker = _broker; -+ emit BrokerUpdated(_broker); -+ } -+ -+ /** -+ * @notice Set the address of the Validators contract and emit an event -+ * @param _validators The address of the Validators contract. -+ */ -+ function _setValidators(address _validators) internal { -+ validators = _validators; -+ emit ValidatorsUpdated(_validators); -+ } -+ -+ /** -+ * @notice Set the address of the Exchange contract and emit an event -+ * @param _exchange The address of the Exchange contract. -+ */ -+ function _setExchange(address _exchange) internal { -+ exchange = _exchange; -+ emit ExchangeUpdated(_exchange); -+ } -+ -+ /// @inheritdoc ERC20Upgradeable -+ function transferFrom( -+ address from, -+ address to, -+ uint256 amount -+ ) -+ public -+ override(ERC20Upgradeable, IStableTokenV2) -+ returns (bool) -+ { -+ return ERC20Upgradeable.transferFrom(from, to, amount); -+ } -+ -+ /// @inheritdoc ERC20Upgradeable -+ function transfer(address to, uint256 amount) public override(ERC20Upgradeable, IStableTokenV2) returns (bool) { -+ return ERC20Upgradeable.transfer(to, amount); -+ } -+ -+ /// @inheritdoc ERC20Upgradeable -+ function balanceOf(address account) public view override(ERC20Upgradeable, IStableTokenV2) returns (uint256) { -+ return ERC20Upgradeable.balanceOf(account); -+ } -+ -+ /// @inheritdoc ERC20Upgradeable -+ function approve( -+ address spender, -+ uint256 amount -+ ) -+ public -+ override(ERC20Upgradeable, IStableTokenV2) -+ returns (bool) -+ { -+ return ERC20Upgradeable.approve(spender, amount); -+ } +
+ + +
+ +
+ +
+2
+
-2
+ +
+ +
+ +
diff --git OP/op-challenger/game/fault/trace/outputs/output_asterisc.go CELO/op-challenger/game/fault/trace/outputs/output_asterisc.go +index a6bbf39e7288fc290f3beea3c419f70acecb0c5d..ac129dbb26c82c791c5b79529ceefcabcfdd6e62 100644 +--- OP/op-challenger/game/fault/trace/outputs/output_asterisc.go ++++ CELO/op-challenger/game/fault/trace/outputs/output_asterisc.go +@@ -5,12 +5,12 @@ "context" + "fmt" + "path/filepath" +  +- "github.com/ethereum-optimism/optimism/op-challenger/config" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/asterisc" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/split" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/utils" ++ "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/vm" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" + "github.com/ethereum-optimism/optimism/op-challenger/metrics" + "github.com/ethereum-optimism/optimism/op-service/eth" +@@ -21,7 +21,7 @@ + func NewOutputAsteriscTraceAccessor( + logger log.Logger, + m metrics.Metricer, +- cfg *config.Config, ++ cfg vm.Config, + l2Client utils.L2HeaderSource, + prestateProvider types.PrestateProvider, + asteriscPrestate string,
+ + + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+2
+
-2
+ +
+ +
+
+
diff --git OP/op-challenger/game/fault/trace/outputs/output_cannon.go CELO/op-challenger/game/fault/trace/outputs/output_cannon.go +index 7a2b3d36a975a2fdc3c73ffc1aaf13dc9beba01b..ecc710380bfb82e4ec9d7d7e39112acbb8c1cc57 100644 +--- OP/op-challenger/game/fault/trace/outputs/output_cannon.go ++++ CELO/op-challenger/game/fault/trace/outputs/output_cannon.go +@@ -5,12 +5,12 @@ "context" + "fmt" + "path/filepath" +  +- "github.com/ethereum-optimism/optimism/op-challenger/config" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/cannon" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/split" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/utils" ++ "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/vm" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" + "github.com/ethereum-optimism/optimism/op-challenger/metrics" + "github.com/ethereum-optimism/optimism/op-service/eth" +@@ -21,7 +21,7 @@ + func NewOutputCannonTraceAccessor( + logger log.Logger, + m metrics.Metricer, +- cfg *config.Config, ++ cfg vm.Config, + l2Client utils.L2HeaderSource, + prestateProvider types.PrestateProvider, + cannonPrestate string,
+
+ + +
+ + +
+
+
+ + (new) + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+126
+
-0
+ +
+ +
+
+
diff --git OP/op-challenger/game/fault/trace/vm/executor.go CELO/op-challenger/game/fault/trace/vm/executor.go +new file mode 100644 +index 0000000000000000000000000000000000000000..564ef6ac8d20105afbbf90cae3eff662db8a255b +--- /dev/null ++++ CELO/op-challenger/game/fault/trace/vm/executor.go +@@ -0,0 +1,126 @@ ++package vm + -+ /// @inheritdoc ERC20Upgradeable -+ function allowance( -+ address owner, -+ address spender -+ ) -+ public -+ view -+ override(ERC20Upgradeable, IStableTokenV2) -+ returns (uint256) -+ { -+ return ERC20Upgradeable.allowance(owner, spender); -+ } ++import ( ++ "context" ++ "fmt" ++ "math" ++ "os" ++ "path/filepath" ++ "strconv" ++ "strings" ++ "time" + -+ /// @inheritdoc ERC20Upgradeable -+ function totalSupply() public view override(ERC20Upgradeable, IStableTokenV2) returns (uint256) { -+ return ERC20Upgradeable.totalSupply(); -+ } ++ "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/utils" ++ "github.com/ethereum/go-ethereum/log" ++) + -+ /// @inheritdoc ERC20PermitUpgradeable -+ function permit( -+ address owner, -+ address spender, -+ uint256 value, -+ uint256 deadline, -+ uint8 v, -+ bytes32 r, -+ bytes32 s -+ ) -+ public -+ override(ERC20PermitUpgradeable, IStableTokenV2) -+ { -+ ERC20PermitUpgradeable.permit(owner, spender, value, deadline, v, r, s); -+ } ++type Metricer interface { ++ RecordVmExecutionTime(vmType string, t time.Duration) ++} + -+ /** -+ * @notice Reserve balance for making payments for gas in this StableToken currency. -+ * @param from The account to reserve balance from -+ * @param value The amount of balance to reserve -+ * @dev Note that this function is called by the protocol when paying for tx fees in this -+ * currency. After the tx is executed, gas is refunded to the sender and credited to the -+ * various tx fee recipients via a call to `creditGasFees`. -+ */ -+ function debitGasFees(address from, uint256 value) external onlyVm { -+ _burn(from, value); -+ } ++type Config struct { ++ VmType string ++ L1 string ++ L1Beacon string ++ L2 string ++ VmBin string // Path to the vm executable to run when generating trace data ++ Server string // Path to the executable that provides the pre-image oracle server ++ Network string ++ RollupConfigPath string ++ L2GenesisPath string ++ SnapshotFreq uint // Frequency of snapshots to create when executing (in VM instructions) ++ InfoFreq uint // Frequency of progress log messages (in VM instructions) ++} + -+ /** -+ * @notice Alternative function to credit balance after making payments -+ * for gas in this StableToken currency. -+ * @param from The account to debit balance from -+ * @param feeRecipient Coinbase address -+ * @param gatewayFeeRecipient Gateway address -+ * @param communityFund Community fund address -+ * @param refund amount to be refunded by the VM -+ * @param tipTxFee Coinbase fee -+ * @param baseTxFee Community fund fee -+ * @param gatewayFee Gateway fee -+ * @dev Note that this function is called by the protocol when paying for tx fees in this -+ * currency. Before the tx is executed, gas is debited from the sender via a call to -+ * `debitGasFees`. 
-+ */ -+ function creditGasFees( -+ address from, -+ address feeRecipient, -+ address gatewayFeeRecipient, -+ address communityFund, -+ uint256 refund, -+ uint256 tipTxFee, -+ uint256 gatewayFee, -+ uint256 baseTxFee -+ ) -+ external -+ onlyVm -+ { -+ // slither-disable-next-line uninitialized-local -+ uint256 amountToBurn; -+ _mint(from, refund + tipTxFee + gatewayFee + baseTxFee); ++type Executor struct { ++ cfg Config ++ logger log.Logger ++ metrics Metricer ++ absolutePreState string ++ inputs utils.LocalGameInputs ++ selectSnapshot SnapshotSelect ++ cmdExecutor CmdExecutor ++} + -+ if (feeRecipient != address(0)) { -+ _transfer(from, feeRecipient, tipTxFee); -+ } else if (tipTxFee > 0) { -+ amountToBurn += tipTxFee; -+ } ++func NewExecutor(logger log.Logger, m Metricer, cfg Config, prestate string, inputs utils.LocalGameInputs) *Executor { ++ return &Executor{ ++ cfg: cfg, ++ logger: logger, ++ metrics: m, ++ inputs: inputs, ++ absolutePreState: prestate, ++ selectSnapshot: FindStartingSnapshot, ++ cmdExecutor: RunCmd, ++ } ++} + -+ if (gatewayFeeRecipient != address(0)) { -+ _transfer(from, gatewayFeeRecipient, gatewayFee); -+ } else if (gatewayFee > 0) { -+ amountToBurn += gatewayFee; -+ } ++// GenerateProof executes vm to generate a proof at the specified trace index. ++// The proof is stored at the specified directory. ++func (e *Executor) GenerateProof(ctx context.Context, dir string, i uint64) error { ++ return e.DoGenerateProof(ctx, dir, i, i) ++} + -+ if (communityFund != address(0)) { -+ _transfer(from, communityFund, baseTxFee); -+ } else if (baseTxFee > 0) { -+ amountToBurn += baseTxFee; -+ } ++// DoGenerateProof executes vm from the specified starting trace index until the end trace index. ++// The proof is stored at the specified directory. ++func (e *Executor) DoGenerateProof(ctx context.Context, dir string, begin uint64, end uint64, extraVmArgs ...string) error { ++ snapshotDir := filepath.Join(dir, SnapsDir) ++ start, err := e.selectSnapshot(e.logger, snapshotDir, e.absolutePreState, begin) ++ if err != nil { ++ return fmt.Errorf("find starting snapshot: %w", err) ++ } ++ proofDir := filepath.Join(dir, utils.ProofsDir) ++ dataDir := PreimageDir(dir) ++ lastGeneratedState := filepath.Join(dir, FinalState) ++ args := []string{ ++ "run", ++ "--input", start, ++ "--output", lastGeneratedState, ++ "--meta", "", ++ "--info-at", "%" + strconv.FormatUint(uint64(e.cfg.InfoFreq), 10), ++ "--proof-at", "=" + strconv.FormatUint(end, 10), ++ "--proof-fmt", filepath.Join(proofDir, "%d.json.gz"), ++ "--snapshot-at", "%" + strconv.FormatUint(uint64(e.cfg.SnapshotFreq), 10), ++ "--snapshot-fmt", filepath.Join(snapshotDir, "%d.json.gz"), ++ } ++ if end < math.MaxUint64 { ++ args = append(args, "--stop-at", "="+strconv.FormatUint(end+1, 10)) ++ } ++ args = append(args, extraVmArgs...) 
++ args = append(args, ++ "--", ++ e.cfg.Server, "--server", ++ "--l1", e.cfg.L1, ++ "--l1.beacon", e.cfg.L1Beacon, ++ "--l2", e.cfg.L2, ++ "--datadir", dataDir, ++ "--l1.head", e.inputs.L1Head.Hex(), ++ "--l2.head", e.inputs.L2Head.Hex(), ++ "--l2.outputroot", e.inputs.L2OutputRoot.Hex(), ++ "--l2.claim", e.inputs.L2Claim.Hex(), ++ "--l2.blocknumber", e.inputs.L2BlockNumber.Text(10), ++ ) ++ if e.cfg.Network != "" { ++ args = append(args, "--network", e.cfg.Network) ++ } ++ if e.cfg.RollupConfigPath != "" { ++ args = append(args, "--rollup.config", e.cfg.RollupConfigPath) ++ } ++ if e.cfg.L2GenesisPath != "" { ++ args = append(args, "--l2.genesis", e.cfg.L2GenesisPath) ++ } + -+ if (amountToBurn > 0) { -+ _burn(from, amountToBurn); -+ } -+ } ++ if err := os.MkdirAll(snapshotDir, 0755); err != nil { ++ return fmt.Errorf("could not create snapshot directory %v: %w", snapshotDir, err) ++ } ++ if err := os.MkdirAll(dataDir, 0755); err != nil { ++ return fmt.Errorf("could not create preimage cache directory %v: %w", dataDir, err) ++ } ++ if err := os.MkdirAll(proofDir, 0755); err != nil { ++ return fmt.Errorf("could not create proofs directory %v: %w", proofDir, err) ++ } ++ e.logger.Info("Generating trace", "proof", end, "cmd", e.cfg.VmBin, "args", strings.Join(args, ", ")) ++ execStart := time.Now() ++ err = e.cmdExecutor(ctx, e.logger.New("proof", end), e.cfg.VmBin, args...) ++ e.metrics.RecordVmExecutionTime(e.cfg.VmType, time.Since(execStart)) ++ return err +}
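
The new `vm` package above extracts the Cannon- and Asterisc-specific execution plumbing into a single generic `Executor`: `vm.Config` carries the FPVM binary, the pre-image server binary, and the snapshot/info frequencies, and `DoGenerateProof` assembles the `run` command line from them. Below is a minimal sketch of how a caller might wire this up; the endpoints, binary paths, and input hashes are placeholders, not values from this diff.

```go
package main

import (
	"context"
	"math/big"
	"time"

	"github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/utils"
	"github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/vm"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
)

// noopMetrics satisfies vm.Metricer without recording anything.
type noopMetrics struct{}

func (noopMetrics) RecordVmExecutionTime(vmType string, t time.Duration) {}

func main() {
	cfg := vm.Config{
		VmType:       "cannon",                // label used when recording execution time
		L1:           "http://localhost:8545", // placeholder RPC endpoints
		L1Beacon:     "http://localhost:5052",
		L2:           "http://localhost:9545",
		VmBin:        "./bin/cannon",     // hypothetical path to the FPVM binary
		Server:       "./bin/op-program", // hypothetical pre-image server binary
		Network:      "op-mainnet",
		SnapshotFreq: 10_000,
		InfoFreq:     10_000_000,
	}
	inputs := utils.LocalGameInputs{
		L1Head:        common.Hash{0x11},
		L2Head:        common.Hash{0x22},
		L2OutputRoot:  common.Hash{0x33},
		L2Claim:       common.Hash{0x44},
		L2BlockNumber: big.NewInt(1234),
	}
	exec := vm.NewExecutor(log.Root(), noopMetrics{}, cfg, "prestate.json", inputs)
	// Runs the VM from the nearest snapshot and writes the proof for trace
	// index 42 under /tmp/game-data/proofs.
	if err := exec.GenerateProof(context.Background(), "/tmp/game-data", 42); err != nil {
		log.Crit("proof generation failed", "err", err)
	}
}
```

Because the executor no longer takes the op-challenger-wide `*config.Config`, the same accessor code now serves both Cannon and Asterisc, as the `output_cannon.go` and `output_asterisc.go` hunks above show.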
@@ -10559,9 +43331,216 @@
+ +
+
+
+ + (new) + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+43
+
-38
+ +
+ +
+
+
diff --git OP/op-challenger/game/fault/trace/asterisc/executor_test.go CELO/op-challenger/game/fault/trace/vm/executor_test.go +rename from op-challenger/game/fault/trace/asterisc/executor_test.go +rename to op-challenger/game/fault/trace/vm/executor_test.go +index 7ce44f304375184fb17b40c5a04ef709d83a6365..00078bd2078e958157897d3b4144fc97c1c8b760 100644 +--- OP/op-challenger/game/fault/trace/asterisc/executor_test.go ++++ CELO/op-challenger/game/fault/trace/vm/executor_test.go +@@ -1,4 +1,4 @@ +-package asterisc ++package vm +  + import ( + "context" +@@ -6,8 +6,8 @@ "math" + "math/big" + "path/filepath" + "testing" ++ "time" +  +- "github.com/ethereum-optimism/optimism/op-challenger/config" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/utils" + "github.com/ethereum-optimism/optimism/op-challenger/metrics" + "github.com/ethereum-optimism/optimism/op-service/testlog" +@@ -20,13 +20,18 @@ func TestGenerateProof(t *testing.T) { + input := "starting.json" + tempDir := t.TempDir() + dir := filepath.Join(tempDir, "gameDir") +- cfg := config.NewConfig(common.Address{0xbb}, "http://localhost:8888", "http://localhost:9000", "http://localhost:9096", "http://localhost:9095", tempDir, config.TraceTypeAsterisc) +- cfg.L2Rpc = "http://localhost:9999" ++ cfg := Config{ ++ VmType: "test", ++ L1: "http://localhost:8888", ++ L1Beacon: "http://localhost:9000", ++ L2: "http://localhost:9999", ++ VmBin: "./bin/testvm", ++ Server: "./bin/testserver", ++ Network: "op-test", ++ SnapshotFreq: 500, ++ InfoFreq: 900, ++ } + prestate := "pre.json" +- cfg.AsteriscBin = "./bin/asterisc" +- cfg.AsteriscServer = "./bin/op-program" +- cfg.AsteriscSnapshotFreq = 500 +- cfg.AsteriscInfoFreq = 900 +  + inputs := utils.LocalGameInputs{ + L1Head: common.Hash{0x11}, +@@ -35,9 +40,9 @@ L2OutputRoot: common.Hash{0x33}, + L2Claim: common.Hash{0x44}, + L2BlockNumber: big.NewInt(3333), + } +- captureExec := func(t *testing.T, cfg config.Config, proofAt uint64) (string, string, map[string]string) { +- m := &asteriscDurationMetrics{} +- executor := NewExecutor(testlog.Logger(t, log.LevelInfo), m, &cfg, prestate, inputs) ++ captureExec := func(t *testing.T, cfg Config, proofAt uint64) (string, string, map[string]string) { ++ m := &stubVmMetrics{} ++ executor := NewExecutor(testlog.Logger(t, log.LevelInfo), m, cfg, prestate, inputs) + executor.selectSnapshot = func(logger log.Logger, dir string, absolutePreState string, i uint64) (string, error) { + return input, nil + } +@@ -49,7 +54,7 @@ binary = b + subcommand = a[0] + for i := 1; i < len(a); { + if a[i] == "--" { +- // Skip over the divider between asterisc and server program ++ // Skip over the divider between vm and server program + i += 1 + continue + } +@@ -60,24 +65,24 @@ return nil + } + err := executor.GenerateProof(context.Background(), dir, proofAt) + require.NoError(t, err) +- require.Equal(t, 1, m.executionTimeRecordCount, "Should record asterisc execution time") ++ require.Equal(t, 1, m.executionTimeRecordCount, "Should record vm execution time") + return binary, subcommand, args + } +  + t.Run("Network", func(t *testing.T) { +- cfg.AsteriscNetwork = "mainnet" +- cfg.AsteriscRollupConfigPath = "" +- cfg.AsteriscL2GenesisPath = "" ++ cfg.Network = "mainnet" ++ cfg.RollupConfigPath = "" ++ cfg.L2GenesisPath = "" + binary, subcommand, args := captureExec(t, cfg, 150_000_000) +- require.DirExists(t, filepath.Join(dir, utils.PreimagesDir)) +- require.DirExists(t, filepath.Join(dir, proofsDir)) +- require.DirExists(t, filepath.Join(dir, 
utils.SnapsDir)) +- require.Equal(t, cfg.AsteriscBin, binary) ++ require.DirExists(t, filepath.Join(dir, PreimagesDir)) ++ require.DirExists(t, filepath.Join(dir, utils.ProofsDir)) ++ require.DirExists(t, filepath.Join(dir, SnapsDir)) ++ require.Equal(t, cfg.VmBin, binary) + require.Equal(t, "run", subcommand) + require.Equal(t, input, args["--input"]) + require.Contains(t, args, "--meta") + require.Equal(t, "", args["--meta"]) +- require.Equal(t, filepath.Join(dir, utils.FinalState), args["--output"]) ++ require.Equal(t, filepath.Join(dir, FinalState), args["--output"]) + require.Equal(t, "=150000000", args["--proof-at"]) + require.Equal(t, "=150000001", args["--stop-at"]) + require.Equal(t, "%500", args["--snapshot-at"]) +@@ -85,14 +90,14 @@ require.Equal(t, "%900", args["--info-at"]) + // Slight quirk of how we pair off args + // The server binary winds up as the key and the first arg --server as the value which has no value + // Then everything else pairs off correctly again +- require.Equal(t, "--server", args[cfg.AsteriscServer]) +- require.Equal(t, cfg.L1EthRpc, args["--l1"]) ++ require.Equal(t, "--server", args[cfg.Server]) ++ require.Equal(t, cfg.L1, args["--l1"]) + require.Equal(t, cfg.L1Beacon, args["--l1.beacon"]) +- require.Equal(t, cfg.L2Rpc, args["--l2"]) +- require.Equal(t, filepath.Join(dir, utils.PreimagesDir), args["--datadir"]) +- require.Equal(t, filepath.Join(dir, proofsDir, "%d.json.gz"), args["--proof-fmt"]) +- require.Equal(t, filepath.Join(dir, utils.SnapsDir, "%d.json.gz"), args["--snapshot-fmt"]) +- require.Equal(t, cfg.AsteriscNetwork, args["--network"]) ++ require.Equal(t, cfg.L2, args["--l2"]) ++ require.Equal(t, filepath.Join(dir, PreimagesDir), args["--datadir"]) ++ require.Equal(t, filepath.Join(dir, utils.ProofsDir, "%d.json.gz"), args["--proof-fmt"]) ++ require.Equal(t, filepath.Join(dir, SnapsDir, "%d.json.gz"), args["--snapshot-fmt"]) ++ require.Equal(t, cfg.Network, args["--network"]) + require.NotContains(t, args, "--rollup.config") + require.NotContains(t, args, "--l2.genesis") +  +@@ -105,19 +110,19 @@ require.Equal(t, "3333", args["--l2.blocknumber"]) + }) +  + t.Run("RollupAndGenesis", func(t *testing.T) { +- cfg.AsteriscNetwork = "" +- cfg.AsteriscRollupConfigPath = "rollup.json" +- cfg.AsteriscL2GenesisPath = "genesis.json" ++ cfg.Network = "" ++ cfg.RollupConfigPath = "rollup.json" ++ cfg.L2GenesisPath = "genesis.json" + _, _, args := captureExec(t, cfg, 150_000_000) + require.NotContains(t, args, "--network") +- require.Equal(t, cfg.AsteriscRollupConfigPath, args["--rollup.config"]) +- require.Equal(t, cfg.AsteriscL2GenesisPath, args["--l2.genesis"]) ++ require.Equal(t, cfg.RollupConfigPath, args["--rollup.config"]) ++ require.Equal(t, cfg.L2GenesisPath, args["--l2.genesis"]) + }) +  + t.Run("NoStopAtWhenProofIsMaxUInt", func(t *testing.T) { +- cfg.AsteriscNetwork = "mainnet" +- cfg.AsteriscRollupConfigPath = "rollup.json" +- cfg.AsteriscL2GenesisPath = "genesis.json" ++ cfg.Network = "mainnet" ++ cfg.RollupConfigPath = "rollup.json" ++ cfg.L2GenesisPath = "genesis.json" + _, _, args := captureExec(t, cfg, math.MaxUint64) + // stop-at would need to be one more than the proof step which would overflow back to 0 + // so expect that it will be omitted. We'll ultimately want asterisc to execute until the program exits. 
+@@ -125,11 +130,11 @@ require.NotContains(t, args, "--stop-at") + }) + } +  +-type asteriscDurationMetrics struct { ++type stubVmMetrics struct { + metrics.NoopMetricsImpl + executionTimeRecordCount int + } +  +-func (c *asteriscDurationMetrics) RecordAsteriscExecutionTime(_ float64) { ++func (c *stubVmMetrics) RecordVmExecutionTime(_ string, _ time.Duration) { + c.executionTimeRecordCount++ + }
+
+ + +
+ @@ -10576,7 +43555,7 @@
@@ -10586,221 +43565,143 @@
-
+193
-
-0
+
+4
+
-4
-
diff --git OP/packages/contracts-bedrock/src/celo/UniswapFeeHandlerSeller.sol CELO/packages/contracts-bedrock/src/celo/UniswapFeeHandlerSeller.sol
-new file mode 100644
-index 0000000000000000000000000000000000000000..54ce14eaf37cfd30695729e4a2990b294d589b86
---- /dev/null
-+++ CELO/packages/contracts-bedrock/src/celo/UniswapFeeHandlerSeller.sol
-@@ -0,0 +1,193 @@
-+// SPDX-License-Identifier: MIT
-+pragma solidity ^0.8.15;
-+
-+import "../../lib/openzeppelin-contracts/contracts/utils/math/Math.sol";
-+import "../../lib/openzeppelin-contracts/contracts/access/Ownable.sol";
-+import "../../lib/openzeppelin-contracts/contracts/utils/structs/EnumerableSet.sol";
-+import "../../lib/openzeppelin-contracts/contracts/token/ERC20/IERC20.sol";
-+
-+import "./UsingRegistry.sol";
-+
-+import "./common/interfaces/IFeeHandlerSeller.sol";
-+import "./stability/interfaces/ISortedOracles.sol";
-+import "./common/FixidityLib.sol";
-+import "./common/Initializable.sol";
-+import "./FeeHandlerSeller.sol";
-+
-+import "./uniswap/interfaces/IUniswapV2RouterMin.sol";
-+import "./uniswap/interfaces/IUniswapV2FactoryMin.sol";
-+
-+// An implementation of FeeHandlerSeller supporting interfaces compatible with
-+// Uniswap V2 API
-+// See https://github.com/celo-org/celo-proposals/blob/master/CIPs/cip-0052.md
-+contract UniswapFeeHandlerSeller is FeeHandlerSeller {
-+ using FixidityLib for FixidityLib.Fraction;
-+ using EnumerableSet for EnumerableSet.AddressSet;
-+
-+ uint256 constant MAX_TIMESTAMP_BLOCK_EXCHANGE = 20;
-+ uint256 constant MAX_NUMBER_ROUTERS_PER_TOKEN = 3;
-+ mapping(address => EnumerableSet.AddressSet) private routerAddresses;
-+
-+ event ReceivedQuote(address indexed tokenAddress, address indexed router, uint256 quote);
-+ event RouterUsed(address router);
-+ event RouterAddressSet(address token, address router);
-+ event RouterAddressRemoved(address token, address router);
-+
-+ /**
-+ * @notice Sets initialized == true on implementation contracts.
-+ * @param test Set to true to skip implementation initialisation.
-+ */
-+ constructor(bool test) FeeHandlerSeller(test) { }
-+
-+ // without this line the contract can't receive native Celo transfers
-+ receive() external payable { }
-+
-+ /**
-+ * @notice Returns the storage, major, minor, and patch version of the contract.
-+ * @return Storage version of the contract.
-+ * @return Major version of the contract.
-+ * @return Minor version of the contract.
-+ * @return Patch version of the contract.
-+ */
-+ function getVersionNumber() external pure returns (uint256, uint256, uint256, uint256) {
-+ return (1, 1, 0, 0);
-+ }
-+
-+ /**
-+ * @notice Allows owner to set the router for a token.
-+ * @param token Address of the token to set.
-+ * @param router The new router.
-+ */
-+ function setRouter(address token, address router) external onlyOwner {
-+ _setRouter(token, router);
-+ }
-+
-+ function _setRouter(address token, address router) private {
-+ require(router != address(0), "Router can't be address zero");
-+ routerAddresses[token].add(router);
-+ require(routerAddresses[token].values().length <= MAX_NUMBER_ROUTERS_PER_TOKEN, "Max number of routers reached");
-+ emit RouterAddressSet(token, router);
-+ }
-+
-+ /**
-+ * @notice Allows owner to remove a router for a token.
-+ * @param token Address of the token.
-+ * @param router Address of the router to remove. 
-+ */
-+ function removeRouter(address token, address router) external onlyOwner {
-+ routerAddresses[token].remove(router);
-+ emit RouterAddressRemoved(token, router);
-+ }
-+
-+ /**
-+ * @notice Get the list of routers for a token.
-+ * @param token The address of the token to query.
-+ * @return An array of all the allowed routers.
-+ */
-+ function getRoutersForToken(address token) external view returns (address[] memory) {
-+ return routerAddresses[token].values();
-+ }
-+
-+ /**
-+ * @dev Calculates the minimum amount of tokens that can be received for a given amount of sell tokens,
-+ * taking into account the slippage and the rates of the sell token and CELO token on the Uniswap V2 pair.
-+ * @param sellTokenAddress The address of the sell token.
-+ * @param maxSlippage The maximum slippage allowed.
-+ * @param amount The amount of sell tokens to be traded.
-+ * @param bestRouter The Uniswap V2 router with the best price.
-+ * @return The minimum amount of tokens that can be received.
-+ */
-+ function calculateAllMinAmount(
-+ address sellTokenAddress,
-+ uint256 maxSlippage,
-+ uint256 amount,
-+ IUniswapV2RouterMin bestRouter
-+ )
-+ private
-+ view
-+ returns (uint256)
-+ {
-+ ISortedOracles sortedOracles = getSortedOracles();
-+ uint256 minReports = minimumReports[sellTokenAddress];
-+
-+ require(sortedOracles.numRates(sellTokenAddress) >= minReports, "Number of reports for token not enough");
-+
-+ uint256 minimalSortedOracles = 0;
-+ // if minimumReports for this token is zero, assume the check is not needed
-+ if (minReports > 0) {
-+ (uint256 rateNumerator, uint256 rateDenominator) = sortedOracles.medianRate(sellTokenAddress);
-+
-+ minimalSortedOracles = calculateMinAmount(rateNumerator, rateDenominator, amount, maxSlippage);
-+ }
-+
-+ IERC20 celoToken = getGoldToken();
-+ address pair = IUniswapV2FactoryMin(bestRouter.factory()).getPair(sellTokenAddress, address(celoToken));
-+ uint256 minAmountPair =
-+ calculateMinAmount(IERC20(sellTokenAddress).balanceOf(pair), celoToken.balanceOf(pair), amount, maxSlippage);
-+
-+ return Math.max(minAmountPair, minimalSortedOracles);
-+ }
-+
-+ // This function explicitly defines a few variables because it was getting a "stack too deep" error
-+ function sell(
-+ address sellTokenAddress,
-+ address buyTokenAddress,
-+ uint256 amount,
-+ uint256 maxSlippage // as fraction,
-+ )
-+ external
-+ returns (uint256)
-+ {
-+ require(
-+ buyTokenAddress == registry.getAddressForOrDie(GOLD_TOKEN_REGISTRY_ID), "Buy token can only be gold token"
-+ );
-+
-+ require(routerAddresses[sellTokenAddress].values().length > 0, "routerAddresses should be non empty");
-+
-+ // An improvement to this function would be to allow the user to pass a path as argument
-+ // and if it generates a better outcome than the ones enabled, that path gets used
-+ // and the user gets a reward
-+
-+ IERC20 celoToken = getGoldToken();
-+
-+ IUniswapV2RouterMin bestRouter;
-+ uint256 bestRouterQuote = 0;
-+
-+ address[] memory path = new address[](2);
-+
-+ path[0] = sellTokenAddress;
-+ path[1] = address(celoToken);
-+
-+ for (uint256 i = 0; i < routerAddresses[sellTokenAddress].values().length; i++) {
-+ address poolAddress = routerAddresses[sellTokenAddress].at(i);
-+ IUniswapV2RouterMin router = IUniswapV2RouterMin(poolAddress);
-+
-+ // Using the second return value because it's the last argument,
-+ // the previous values show how many tokens are exchanged in each path
-+ // so the first value would be equivalent to balanceToBurn
-+ uint256 wouldGet = 
router.getAmountsOut(amount, path)[1]; -+ -+ emit ReceivedQuote(sellTokenAddress, poolAddress, wouldGet); -+ if (wouldGet > bestRouterQuote) { -+ bestRouterQuote = wouldGet; -+ bestRouter = router; -+ } -+ } -+ -+ require(bestRouterQuote != 0, "Can't exchange with zero quote"); -+ -+ uint256 minAmount = 0; -+ minAmount = calculateAllMinAmount(sellTokenAddress, maxSlippage, amount, bestRouter); -+ -+ IERC20(sellTokenAddress).approve(address(bestRouter), amount); -+ bestRouter.swapExactTokensForTokens( -+ amount, minAmount, path, address(this), block.timestamp + MAX_TIMESTAMP_BLOCK_EXCHANGE -+ ); -+ -+ uint256 celoAmount = celoToken.balanceOf(address(this)); -+ celoToken.transfer(msg.sender, celoAmount); -+ emit RouterUsed(address(bestRouter)); -+ emit TokenSold(sellTokenAddress, buyTokenAddress, amount); -+ return celoAmount; -+ } -+}
+
diff --git OP/op-challenger/game/fault/trace/utils/executor.go CELO/op-challenger/game/fault/trace/vm/prestates.go +rename from op-challenger/game/fault/trace/utils/executor.go +rename to op-challenger/game/fault/trace/vm/prestates.go +index f3c5feac8311aa4f5510a0563f4ee23eb8e4c8b7..c51dda7de1e5bf1346d1657be0d235ad4f473255 100644 +--- OP/op-challenger/game/fault/trace/utils/executor.go ++++ CELO/op-challenger/game/fault/trace/vm/prestates.go +@@ -1,4 +1,4 @@ +-package utils ++package vm +  + import ( + "context" +@@ -10,7 +10,7 @@ "path/filepath" + "regexp" + "strconv" +  +- oplog "github.com/ethereum-optimism/optimism/op-service/log" ++ log2 "github.com/ethereum-optimism/optimism/op-service/log" + "github.com/ethereum/go-ethereum/log" + ) +  +@@ -31,10 +31,10 @@ } +  + func RunCmd(ctx context.Context, l log.Logger, binary string, args ...string) error { + cmd := exec.CommandContext(ctx, binary, args...) +- stdOut := oplog.NewWriter(l, log.LevelInfo) ++ stdOut := log2.NewWriter(l, log.LevelInfo) + defer stdOut.Close() + // Keep stdErr at info level because FPVM uses stderr for progress messages +- stdErr := oplog.NewWriter(l, log.LevelInfo) ++ stdErr := log2.NewWriter(l, log.LevelInfo) + defer stdErr.Close() + cmd.Stdout = stdOut + cmd.Stderr = stdErr
+ + + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+8
+
-8
+ +
+ +
+
+
diff --git OP/op-conductor/conductor/service.go CELO/op-conductor/conductor/service.go +index 02c46a468bc78bb806d6fbc9a04e1cb4e87cbea5..a1ccef871538aa5426392d532c0e418f695a30a1 100644 +--- OP/op-conductor/conductor/service.go ++++ CELO/op-conductor/conductor/service.go +@@ -311,7 +311,7 @@ // Start implements cliapp.Lifecycle. + func (oc *OpConductor) Start(ctx context.Context) error { + oc.log.Info("starting OpConductor") +  +- if err := oc.hmon.Start(); err != nil { ++ if err := oc.hmon.Start(ctx); err != nil { + return errors.Wrap(err, "failed to start health monitor") + } +  +@@ -453,18 +453,18 @@ return oc.cons.LeaderWithID() + } +  + // AddServerAsVoter adds a server as a voter to the cluster. +-func (oc *OpConductor) AddServerAsVoter(_ context.Context, id string, addr string) error { +- return oc.cons.AddVoter(id, addr) ++func (oc *OpConductor) AddServerAsVoter(_ context.Context, id string, addr string, version uint64) error { ++ return oc.cons.AddVoter(id, addr, version) + } +  + // AddServerAsNonvoter adds a server as a non-voter to the cluster. non-voter will not participate in leader election. +-func (oc *OpConductor) AddServerAsNonvoter(_ context.Context, id string, addr string) error { +- return oc.cons.AddNonVoter(id, addr) ++func (oc *OpConductor) AddServerAsNonvoter(_ context.Context, id string, addr string, version uint64) error { ++ return oc.cons.AddNonVoter(id, addr, version) + } +  + // RemoveServer removes a server from the cluster. +-func (oc *OpConductor) RemoveServer(_ context.Context, id string) error { +- return oc.cons.RemoveServer(id) ++func (oc *OpConductor) RemoveServer(_ context.Context, id string, version uint64) error { ++ return oc.cons.RemoveServer(id, version) + } +  + // TransferLeader transfers leadership to another server. +@@ -488,7 +488,7 @@ return oc.healthy.Load() + } +  + // ClusterMembership returns current cluster's membership information. +-func (oc *OpConductor) ClusterMembership(_ context.Context) ([]*consensus.ServerInfo, error) { ++func (oc *OpConductor) ClusterMembership(_ context.Context) (*consensus.ClusterMembership, error) { + return oc.cons.ClusterMembership() + } +
@@ -10809,13 +43710,13 @@
- (new) + OP
@@ -10825,154 +43726,125 @@
-
+126
-
-0
+
+1
+
-1
-
diff --git OP/packages/contracts-bedrock/src/celo/UsingRegistry.sol CELO/packages/contracts-bedrock/src/celo/UsingRegistry.sol -new file mode 100644 -index 0000000000000000000000000000000000000000..0764125d65c19d7a1834b599a34bf2e4d0dafbf6 ---- /dev/null -+++ CELO/packages/contracts-bedrock/src/celo/UsingRegistry.sol -@@ -0,0 +1,126 @@ -+// SPDX-License-Identifier: MIT -+pragma solidity ^0.8.15; -+ -+import "../../lib/openzeppelin-contracts/contracts/access/Ownable.sol"; -+import "../../lib/openzeppelin-contracts/contracts/token/ERC20/IERC20.sol"; -+ -+import "./interfaces/IAccounts.sol"; -+import "./interfaces/IFeeCurrencyWhitelist.sol"; -+import "./interfaces/IFreezer.sol"; -+import "./interfaces/ICeloRegistry.sol"; -+ -+import "./governance/interfaces/IElection.sol"; -+import "./governance/interfaces/IGovernance.sol"; -+import "./governance/interfaces/ILockedGold.sol"; -+import "./governance/interfaces/IValidators.sol"; -+ -+import "./identity/interfaces/IRandom.sol"; -+import "./identity/interfaces/IAttestations.sol"; -+ -+import "./stability/interfaces/ISortedOracles.sol"; -+ -+import "./mento/interfaces/IExchange.sol"; -+import "./mento/interfaces/IReserve.sol"; -+import "./mento/interfaces/IStableToken.sol"; -+ -+contract UsingRegistry is Ownable { -+ event RegistrySet(address indexed registryAddress); -+ -+ // solhint-disable state-visibility -+ bytes32 constant ACCOUNTS_REGISTRY_ID = keccak256(abi.encodePacked("Accounts")); -+ bytes32 constant ATTESTATIONS_REGISTRY_ID = keccak256(abi.encodePacked("Attestations")); -+ bytes32 constant DOWNTIME_SLASHER_REGISTRY_ID = keccak256(abi.encodePacked("DowntimeSlasher")); -+ bytes32 constant DOUBLE_SIGNING_SLASHER_REGISTRY_ID = keccak256(abi.encodePacked("DoubleSigningSlasher")); -+ bytes32 constant ELECTION_REGISTRY_ID = keccak256(abi.encodePacked("Election")); -+ bytes32 constant EXCHANGE_REGISTRY_ID = keccak256(abi.encodePacked("Exchange")); -+ bytes32 constant FEE_CURRENCY_WHITELIST_REGISTRY_ID = keccak256(abi.encodePacked("FeeCurrencyWhitelist")); -+ bytes32 constant FREEZER_REGISTRY_ID = keccak256(abi.encodePacked("Freezer")); -+ bytes32 constant GOLD_TOKEN_REGISTRY_ID = keccak256(abi.encodePacked("GoldToken")); -+ bytes32 constant GOVERNANCE_REGISTRY_ID = keccak256(abi.encodePacked("Governance")); -+ bytes32 constant GOVERNANCE_SLASHER_REGISTRY_ID = keccak256(abi.encodePacked("GovernanceSlasher")); -+ bytes32 constant LOCKED_GOLD_REGISTRY_ID = keccak256(abi.encodePacked("LockedGold")); -+ bytes32 constant RESERVE_REGISTRY_ID = keccak256(abi.encodePacked("Reserve")); -+ bytes32 constant RANDOM_REGISTRY_ID = keccak256(abi.encodePacked("Random")); -+ bytes32 constant SORTED_ORACLES_REGISTRY_ID = keccak256(abi.encodePacked("SortedOracles")); -+ bytes32 constant STABLE_TOKEN_REGISTRY_ID = keccak256(abi.encodePacked("StableToken")); -+ bytes32 constant VALIDATORS_REGISTRY_ID = keccak256(abi.encodePacked("Validators")); -+ // solhint-enable state-visibility -+ -+ ICeloRegistry public registry; -+ -+ modifier onlyRegisteredContract(bytes32 identifierHash) { -+ require(registry.getAddressForOrDie(identifierHash) == msg.sender, "only registered contract"); -+ _; -+ } -+ -+ modifier onlyRegisteredContracts(bytes32[] memory identifierHashes) { -+ require(registry.isOneOf(identifierHashes, msg.sender), "only registered contracts"); -+ _; -+ } -+ -+ /** -+ * @notice Updates the address pointing to a Registry contract. -+ * @param registryAddress The address of a registry contract for routing to other contracts. 
-+ */ -+ function setRegistry(address registryAddress) public onlyOwner { -+ require(registryAddress != address(0), "Cannot register the null address"); -+ registry = ICeloRegistry(registryAddress); -+ emit RegistrySet(registryAddress); -+ } -+ -+ function getAccounts() internal view returns (IAccounts) { -+ return IAccounts(registry.getAddressForOrDie(ACCOUNTS_REGISTRY_ID)); -+ } -+ -+ function getAttestations() internal view returns (IAttestations) { -+ return IAttestations(registry.getAddressForOrDie(ATTESTATIONS_REGISTRY_ID)); -+ } -+ -+ function getElection() internal view returns (IElection) { -+ return IElection(registry.getAddressForOrDie(ELECTION_REGISTRY_ID)); -+ } -+ -+ function getExchange() internal view returns (IExchange) { -+ return IExchange(registry.getAddressForOrDie(EXCHANGE_REGISTRY_ID)); -+ } -+ -+ function getFeeCurrencyWhitelistRegistry() internal view returns (IFeeCurrencyWhitelist) { -+ return IFeeCurrencyWhitelist(registry.getAddressForOrDie(FEE_CURRENCY_WHITELIST_REGISTRY_ID)); -+ } -+ -+ function getFreezer() internal view returns (IFreezer) { -+ return IFreezer(registry.getAddressForOrDie(FREEZER_REGISTRY_ID)); -+ } -+ -+ function getGoldToken() internal view returns (IERC20) { -+ return IERC20(registry.getAddressForOrDie(GOLD_TOKEN_REGISTRY_ID)); -+ } -+ -+ function getGovernance() internal view returns (IGovernance) { -+ return IGovernance(registry.getAddressForOrDie(GOVERNANCE_REGISTRY_ID)); -+ } -+ -+ function getLockedGold() internal view returns (ILockedGold) { -+ return ILockedGold(registry.getAddressForOrDie(LOCKED_GOLD_REGISTRY_ID)); -+ } -+ -+ function getRandom() internal view returns (IRandom) { -+ return IRandom(registry.getAddressForOrDie(RANDOM_REGISTRY_ID)); -+ } -+ -+ function getReserve() internal view returns (IReserve) { -+ return IReserve(registry.getAddressForOrDie(RESERVE_REGISTRY_ID)); -+ } -+ -+ function getSortedOracles() internal view returns (ISortedOracles) { -+ return ISortedOracles(registry.getAddressForOrDie(SORTED_ORACLES_REGISTRY_ID)); -+ } -+ -+ function getStableToken() internal view returns (IStableToken) { -+ return IStableToken(registry.getAddressForOrDie(STABLE_TOKEN_REGISTRY_ID)); -+ } +
diff --git OP/op-conductor/conductor/service_test.go CELO/op-conductor/conductor/service_test.go +index f6d84d6db5fc64e2d6cf632270e74d0e79878018..4e19925baa4b163fbc416b1e896d60276e811867 100644 +--- OP/op-conductor/conductor/service_test.go ++++ CELO/op-conductor/conductor/service_test.go +@@ -122,7 +122,7 @@ conductor.retryBackoff = func() time.Duration { return 0 } // disable retry backoff for tests + s.conductor = conductor +  + s.healthUpdateCh = make(chan error, 1) +- s.hmon.EXPECT().Start().Return(nil) ++ s.hmon.EXPECT().Start(mock.Anything).Return(nil) + s.conductor.healthUpdateCh = s.healthUpdateCh +  + s.leaderUpdateCh = make(chan bool, 1)
+
+ + +
+ + +
+
+
+ + OP + +
+ +
+ + CELO + +
+
+
+ +
+ +
+ +
+16
+
-6
+ +
+ +
+
+
diff --git OP/op-conductor/consensus/iface.go CELO/op-conductor/consensus/iface.go +index 15096c2e8ae122e0c9f56bed36bf1a949d31e54f..69b9506c50b26656788e10d3184445f0860ec689 100644 +--- OP/op-conductor/consensus/iface.go ++++ CELO/op-conductor/consensus/iface.go +@@ -25,6 +25,12 @@ } + return "ServerSuffrage" + } +  ++// ClusterMembership defines a versioned list of servers in the cluster. ++type ClusterMembership struct { ++ Servers []ServerInfo `json:"servers"` ++ Version uint64 `json:"version"` ++} + -+ function getValidators() internal view returns (IValidators) { -+ return IValidators(registry.getAddressForOrDie(VALIDATORS_REGISTRY_ID)); -+ } -+}
+ // ServerInfo defines the server information. + type ServerInfo struct { + ID string `json:"id"` +@@ -37,13 +43,17 @@ // + //go:generate mockery --name Consensus --output mocks/ --with-expecter=true + type Consensus interface { + // AddVoter adds a voting member into the cluster, voter is eligible to become leader. +- AddVoter(id, addr string) error ++ // If version is non-zero, this will only be applied if the current cluster version matches the expected version. ++ AddVoter(id, addr string, version uint64) error + // AddNonVoter adds a non-voting member into the cluster, non-voter is not eligible to become leader. +- AddNonVoter(id, addr string) error ++ // If version is non-zero, this will only be applied if the current cluster version matches the expected version. ++ AddNonVoter(id, addr string, version uint64) error + // DemoteVoter demotes a voting member into a non-voting member, if leader is being demoted, it will cause a new leader election. +- DemoteVoter(id string) error ++ // If version is non-zero, this will only be applied if the current cluster version matches the expected version. ++ DemoteVoter(id string, version uint64) error + // RemoveServer removes a member (both voter or non-voter) from the cluster, if leader is being removed, it will cause a new leader election. +- RemoveServer(id string) error ++ // If version is non-zero, this will only be applied if the current cluster version matches the expected version. ++ RemoveServer(id string, version uint64) error + // LeaderCh returns a channel that will be notified when leadership status changes (true = leader, false = follower) + LeaderCh() <-chan bool + // Leader returns if it is the leader of the cluster. +@@ -56,8 +66,8 @@ // TransferLeader triggers leadership transfer to another member in the cluster. + TransferLeader() error + // TransferLeaderTo triggers leadership transfer to a specific member in the cluster. + TransferLeaderTo(id, addr string) error +- // ClusterMembership returns the current cluster membership configuration. +- ClusterMembership() ([]*ServerInfo, error) ++ // ClusterMembership returns the current cluster membership configuration and associated version. ++ ClusterMembership() (*ClusterMembership, error) +  + // CommitPayload commits latest unsafe payload to the FSM in a strongly consistent fashion. + CommitUnsafePayload(payload *eth.ExecutionPayloadEnvelope) error
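
The net effect of the conductor changes is an optimistic-concurrency API: `ClusterMembership()` now returns the raft configuration index as `Version`, and each mutating call only applies if that version still matches (a version of 0 keeps the old unconditional behaviour). A sketch of the read-then-act pattern this enables; the package and helper function are hypothetical, and `cons` is any `consensus.Consensus` implementation.

```go
package clusterops

import (
	"fmt"

	"github.com/ethereum-optimism/optimism/op-conductor/consensus"
)

// removeServerSafely removes a server only if the cluster configuration has
// not changed since we inspected it.
func removeServerSafely(cons consensus.Consensus, id string) error {
	// Read the membership and its version in a single call.
	m, err := cons.ClusterMembership()
	if err != nil {
		return err
	}
	for _, srv := range m.Servers {
		if srv.ID == id {
			// Passing m.Version makes raft reject the change if another
			// membership update landed in between; passing 0 would apply
			// it unconditionally, as before this change.
			return cons.RemoveServer(id, m.Version)
		}
	}
	return fmt.Errorf("server %s is not in the cluster", id)
}
```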
@@ -10981,13 +43853,13 @@ @@ -10997,43 +43869,269 @@
-
+2
-
-1
+
+51
+
-47
-
diff --git OP/packages/contracts-bedrock/src/universal/OptimismMintableERC20.sol CELO/packages/contracts-bedrock/src/universal/OptimismMintableERC20.sol
-index 43ddd65424f895b007d2eab8f5907b281c714f74..915dcefc1761de10fdd1e851387f956150de51d7 100644
---- OP/packages/contracts-bedrock/src/universal/OptimismMintableERC20.sol
-+++ CELO/packages/contracts-bedrock/src/universal/OptimismMintableERC20.sol
-@@ -5,6 +5,7 @@ import { ERC20 } from "@openzeppelin/contracts/token/ERC20/ERC20.sol";
- import { IERC165 } from "@openzeppelin/contracts/utils/introspection/IERC165.sol";
- import { ILegacyMintableERC20, IOptimismMintableERC20 } from "src/universal/IOptimismMintableERC20.sol";
- import { ISemver } from "src/universal/ISemver.sol";
-+import { FeeCurrency } from "src/celo/FeeCurrency.sol";
+ 
- /// @title OptimismMintableERC20
- /// @notice OptimismMintableERC20 is a standard extension of the base ERC20 token contract designed
-@@ -12,7 +13,7 @@ /// to allow the StandardBridge contracts to mint and burn tokens. This makes it possible to
- /// use an OptimismMintableERC20 as the L2 representation of an L1 token, or vice-versa.
- /// Designed to be backwards compatible with the older StandardL2ERC20 token which was only
- /// meant for use on L2.
--contract OptimismMintableERC20 is IOptimismMintableERC20, ILegacyMintableERC20, ERC20, ISemver {
-+contract OptimismMintableERC20 is IOptimismMintableERC20, ILegacyMintableERC20, ERC20, ISemver, FeeCurrency {
- /// @notice Address of the corresponding version of this token on the remote chain.
- address public immutable REMOTE_TOKEN;
-
diff --git OP/op-conductor/consensus/mocks/Consensus.go CELO/op-conductor/consensus/mocks/Consensus.go +index 02d65869c06a8d4837ebcaca1d08ef095d6d53df..ca1397a690e1f93f66f5df38ad3f7e613eae46eb 100644 +--- OP/op-conductor/consensus/mocks/Consensus.go ++++ CELO/op-conductor/consensus/mocks/Consensus.go +@@ -22,17 +22,17 @@ func (_m *Consensus) EXPECT() *Consensus_Expecter { + return &Consensus_Expecter{mock: &_m.Mock} + }   - /// @title OptimismMintableERC20 - /// @notice OptimismMintableERC20 is a standard extension of the base ERC20 token contract designed -@@ -12,7 +13,7 @@ /// to allow the StandardBridge contracts to mint and burn tokens. This makes it possible to - /// use an OptimismMintablERC20 as the L2 representation of an L1 token, or vice-versa. - /// Designed to be backwards compatible with the older StandardL2ERC20 token which was only - /// meant for use on L2. --contract OptimismMintableERC20 is IOptimismMintableERC20, ILegacyMintableERC20, ERC20, ISemver { -+contract OptimismMintableERC20 is IOptimismMintableERC20, ILegacyMintableERC20, ERC20, ISemver, FeeCurrency { - /// @notice Address of the corresponding version of this token on the remote chain. - address public immutable REMOTE_TOKEN; -
+-// AddNonVoter provides a mock function with given fields: id, addr +-func (_m *Consensus) AddNonVoter(id string, addr string) error { +- ret := _m.Called(id, addr) ++// AddNonVoter provides a mock function with given fields: id, addr, version ++func (_m *Consensus) AddNonVoter(id string, addr string, version uint64) error { ++ ret := _m.Called(id, addr, version) +  + if len(ret) == 0 { + panic("no return value specified for AddNonVoter") + } +  + var r0 error +- if rf, ok := ret.Get(0).(func(string, string) error); ok { +- r0 = rf(id, addr) ++ if rf, ok := ret.Get(0).(func(string, string, uint64) error); ok { ++ r0 = rf(id, addr, version) + } else { + r0 = ret.Error(0) + } +@@ -48,13 +48,14 @@ + // AddNonVoter is a helper method to define mock.On call + // - id string + // - addr string +-func (_e *Consensus_Expecter) AddNonVoter(id interface{}, addr interface{}) *Consensus_AddNonVoter_Call { +- return &Consensus_AddNonVoter_Call{Call: _e.mock.On("AddNonVoter", id, addr)} ++// - version uint64 ++func (_e *Consensus_Expecter) AddNonVoter(id interface{}, addr interface{}, version interface{}) *Consensus_AddNonVoter_Call { ++ return &Consensus_AddNonVoter_Call{Call: _e.mock.On("AddNonVoter", id, addr, version)} + } +  +-func (_c *Consensus_AddNonVoter_Call) Run(run func(id string, addr string)) *Consensus_AddNonVoter_Call { ++func (_c *Consensus_AddNonVoter_Call) Run(run func(id string, addr string, version uint64)) *Consensus_AddNonVoter_Call { + _c.Call.Run(func(args mock.Arguments) { +- run(args[0].(string), args[1].(string)) ++ run(args[0].(string), args[1].(string), args[2].(uint64)) + }) + return _c + } +@@ -64,22 +65,22 @@ _c.Call.Return(_a0) + return _c + } +  +-func (_c *Consensus_AddNonVoter_Call) RunAndReturn(run func(string, string) error) *Consensus_AddNonVoter_Call { ++func (_c *Consensus_AddNonVoter_Call) RunAndReturn(run func(string, string, uint64) error) *Consensus_AddNonVoter_Call { + _c.Call.Return(run) + return _c + } +  +-// AddVoter provides a mock function with given fields: id, addr +-func (_m *Consensus) AddVoter(id string, addr string) error { +- ret := _m.Called(id, addr) ++// AddVoter provides a mock function with given fields: id, addr, version ++func (_m *Consensus) AddVoter(id string, addr string, version uint64) error { ++ ret := _m.Called(id, addr, version) +  + if len(ret) == 0 { + panic("no return value specified for AddVoter") + } +  + var r0 error +- if rf, ok := ret.Get(0).(func(string, string) error); ok { +- r0 = rf(id, addr) ++ if rf, ok := ret.Get(0).(func(string, string, uint64) error); ok { ++ r0 = rf(id, addr, version) + } else { + r0 = ret.Error(0) + } +@@ -95,13 +96,14 @@ + // AddVoter is a helper method to define mock.On call + // - id string + // - addr string +-func (_e *Consensus_Expecter) AddVoter(id interface{}, addr interface{}) *Consensus_AddVoter_Call { +- return &Consensus_AddVoter_Call{Call: _e.mock.On("AddVoter", id, addr)} ++// - version uint64 ++func (_e *Consensus_Expecter) AddVoter(id interface{}, addr interface{}, version interface{}) *Consensus_AddVoter_Call { ++ return &Consensus_AddVoter_Call{Call: _e.mock.On("AddVoter", id, addr, version)} + } +  +-func (_c *Consensus_AddVoter_Call) Run(run func(id string, addr string)) *Consensus_AddVoter_Call { ++func (_c *Consensus_AddVoter_Call) Run(run func(id string, addr string, version uint64)) *Consensus_AddVoter_Call { + _c.Call.Run(func(args mock.Arguments) { +- run(args[0].(string), args[1].(string)) ++ run(args[0].(string), args[1].(string), args[2].(uint64)) + }) + return _c 
+ } +@@ -111,29 +113,29 @@ _c.Call.Return(_a0) + return _c + } +  +-func (_c *Consensus_AddVoter_Call) RunAndReturn(run func(string, string) error) *Consensus_AddVoter_Call { ++func (_c *Consensus_AddVoter_Call) RunAndReturn(run func(string, string, uint64) error) *Consensus_AddVoter_Call { + _c.Call.Return(run) + return _c + } +  + // ClusterMembership provides a mock function with given fields: +-func (_m *Consensus) ClusterMembership() ([]*consensus.ServerInfo, error) { ++func (_m *Consensus) ClusterMembership() (*consensus.ClusterMembership, error) { + ret := _m.Called() +  + if len(ret) == 0 { + panic("no return value specified for ClusterMembership") + } +  +- var r0 []*consensus.ServerInfo ++ var r0 *consensus.ClusterMembership + var r1 error +- if rf, ok := ret.Get(0).(func() ([]*consensus.ServerInfo, error)); ok { ++ if rf, ok := ret.Get(0).(func() (*consensus.ClusterMembership, error)); ok { + return rf() + } +- if rf, ok := ret.Get(0).(func() []*consensus.ServerInfo); ok { ++ if rf, ok := ret.Get(0).(func() *consensus.ClusterMembership); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { +- r0 = ret.Get(0).([]*consensus.ServerInfo) ++ r0 = ret.Get(0).(*consensus.ClusterMembership) + } + } +  +@@ -163,12 +165,12 @@ }) + return _c + } +  +-func (_c *Consensus_ClusterMembership_Call) Return(_a0 []*consensus.ServerInfo, _a1 error) *Consensus_ClusterMembership_Call { ++func (_c *Consensus_ClusterMembership_Call) Return(_a0 *consensus.ClusterMembership, _a1 error) *Consensus_ClusterMembership_Call { + _c.Call.Return(_a0, _a1) + return _c + } +  +-func (_c *Consensus_ClusterMembership_Call) RunAndReturn(run func() ([]*consensus.ServerInfo, error)) *Consensus_ClusterMembership_Call { ++func (_c *Consensus_ClusterMembership_Call) RunAndReturn(run func() (*consensus.ClusterMembership, error)) *Consensus_ClusterMembership_Call { + _c.Call.Return(run) + return _c + } +@@ -219,17 +221,17 @@ _c.Call.Return(run) + return _c + } +  +-// DemoteVoter provides a mock function with given fields: id +-func (_m *Consensus) DemoteVoter(id string) error { +- ret := _m.Called(id) ++// DemoteVoter provides a mock function with given fields: id, version ++func (_m *Consensus) DemoteVoter(id string, version uint64) error { ++ ret := _m.Called(id, version) +  + if len(ret) == 0 { + panic("no return value specified for DemoteVoter") + } +  + var r0 error +- if rf, ok := ret.Get(0).(func(string) error); ok { +- r0 = rf(id) ++ if rf, ok := ret.Get(0).(func(string, uint64) error); ok { ++ r0 = rf(id, version) + } else { + r0 = ret.Error(0) + } +@@ -244,13 +246,14 @@ } +  + // DemoteVoter is a helper method to define mock.On call + // - id string +-func (_e *Consensus_Expecter) DemoteVoter(id interface{}) *Consensus_DemoteVoter_Call { +- return &Consensus_DemoteVoter_Call{Call: _e.mock.On("DemoteVoter", id)} ++// - version uint64 ++func (_e *Consensus_Expecter) DemoteVoter(id interface{}, version interface{}) *Consensus_DemoteVoter_Call { ++ return &Consensus_DemoteVoter_Call{Call: _e.mock.On("DemoteVoter", id, version)} + } +  +-func (_c *Consensus_DemoteVoter_Call) Run(run func(id string)) *Consensus_DemoteVoter_Call { ++func (_c *Consensus_DemoteVoter_Call) Run(run func(id string, version uint64)) *Consensus_DemoteVoter_Call { + _c.Call.Run(func(args mock.Arguments) { +- run(args[0].(string)) ++ run(args[0].(string), args[1].(uint64)) + }) + return _c + } +@@ -260,7 +263,7 @@ _c.Call.Return(_a0) + return _c + } +  +-func (_c *Consensus_DemoteVoter_Call) RunAndReturn(run func(string) error) 
*Consensus_DemoteVoter_Call { ++func (_c *Consensus_DemoteVoter_Call) RunAndReturn(run func(string, uint64) error) *Consensus_DemoteVoter_Call { + _c.Call.Return(run) + return _c + } +@@ -461,17 +464,17 @@ _c.Call.Return(run) + return _c + } +  +-// RemoveServer provides a mock function with given fields: id +-func (_m *Consensus) RemoveServer(id string) error { +- ret := _m.Called(id) ++// RemoveServer provides a mock function with given fields: id, version ++func (_m *Consensus) RemoveServer(id string, version uint64) error { ++ ret := _m.Called(id, version) +  + if len(ret) == 0 { + panic("no return value specified for RemoveServer") + } +  + var r0 error +- if rf, ok := ret.Get(0).(func(string) error); ok { +- r0 = rf(id) ++ if rf, ok := ret.Get(0).(func(string, uint64) error); ok { ++ r0 = rf(id, version) + } else { + r0 = ret.Error(0) + } +@@ -486,13 +489,14 @@ } +  + // RemoveServer is a helper method to define mock.On call + // - id string +-func (_e *Consensus_Expecter) RemoveServer(id interface{}) *Consensus_RemoveServer_Call { +- return &Consensus_RemoveServer_Call{Call: _e.mock.On("RemoveServer", id)} ++// - version uint64 ++func (_e *Consensus_Expecter) RemoveServer(id interface{}, version interface{}) *Consensus_RemoveServer_Call { ++ return &Consensus_RemoveServer_Call{Call: _e.mock.On("RemoveServer", id, version)} + } +  +-func (_c *Consensus_RemoveServer_Call) Run(run func(id string)) *Consensus_RemoveServer_Call { ++func (_c *Consensus_RemoveServer_Call) Run(run func(id string, version uint64)) *Consensus_RemoveServer_Call { + _c.Call.Run(func(args mock.Arguments) { +- run(args[0].(string)) ++ run(args[0].(string), args[1].(uint64)) + }) + return _c + } +@@ -502,7 +506,7 @@ _c.Call.Return(_a0) + return _c + } +  +-func (_c *Consensus_RemoveServer_Call) RunAndReturn(run func(string) error) *Consensus_RemoveServer_Call { ++func (_c *Consensus_RemoveServer_Call) RunAndReturn(run func(string, uint64) error) *Consensus_RemoveServer_Call { + _c.Call.Return(run) + return _c + }
@@ -11042,13 +44140,13 @@ @@ -11058,274 +44156,357 @@
-
+2
-
-0
+
+25
+
-22
-
diff --git OP/packages/contracts-bedrock/test/L2Genesis.t.sol CELO/packages/contracts-bedrock/test/L2Genesis.t.sol -index f851f62d634d87b5b688acc59c208b8a4c9f4c90..01451960d5bff86df1eec270067fcc2d75291cf7 100644 ---- OP/packages/contracts-bedrock/test/L2Genesis.t.sol -+++ CELO/packages/contracts-bedrock/test/L2Genesis.t.sol -@@ -181,6 +181,7 @@ - /// @notice Tests the number of accounts in the genesis setup - function _test_allocs_size(string memory _path) internal { - genesis.cfg().setFundDevAccounts(false); -+ genesis.cfg().setDeployCeloContracts(true); - genesis.runWithLatestLocal(_dummyL1Deps()); - genesis.writeGenesisAllocs(_path); +
diff --git OP/op-conductor/consensus/raft.go CELO/op-conductor/consensus/raft.go +index 2c3f79fe29469655dd4c070e0ec4929525f3d164..b80c39be06c75994912a9b47fa6194b90119141d 100644 +--- OP/op-conductor/consensus/raft.go ++++ CELO/op-conductor/consensus/raft.go +@@ -112,27 +112,36 @@ }, nil + }   -@@ -190,6 +191,7 @@ expected += 21; // predeploy implementations (excl. legacy erc20-style eth and legacy message sender) - expected += 256; // precompiles - expected += 12; // preinstalls - expected += 1; // 4788 deployer account -+ expected += 16; // Celo contracts - // 16 prefunded dev accounts are excluded - assertEq(expected, getJSONKeyCount(_path), "key count check"); -
-
- - - - -
- -
- - - - -
- -
-
-
- - -
-
- -
-
-
- - -
- -
-
-
- - -
-
- -
-
-
- - -
- -
-
-
- - -
-
- -
-
-
- - - + // AddNonVoter implements Consensus, it tries to add a non-voting member into the cluster. +-func (rc *RaftConsensus) AddNonVoter(id string, addr string) error { +- if err := rc.r.AddNonvoter(raft.ServerID(id), raft.ServerAddress(addr), 0, defaultTimeout).Error(); err != nil { +- rc.log.Error("failed to add non-voter", "id", id, "addr", addr, "err", err) ++func (rc *RaftConsensus) AddNonVoter(id string, addr string, version uint64) error { ++ if err := rc.r.AddNonvoter(raft.ServerID(id), raft.ServerAddress(addr), version, defaultTimeout).Error(); err != nil { ++ rc.log.Error("failed to add non-voter", "id", id, "addr", addr, "version", version, "err", err) + return err + } + return nil + } +  + // AddVoter implements Consensus, it tries to add a voting member into the cluster. +-func (rc *RaftConsensus) AddVoter(id string, addr string) error { +- if err := rc.r.AddVoter(raft.ServerID(id), raft.ServerAddress(addr), 0, defaultTimeout).Error(); err != nil { +- rc.log.Error("failed to add voter", "id", id, "addr", addr, "err", err) ++func (rc *RaftConsensus) AddVoter(id string, addr string, version uint64) error { ++ if err := rc.r.AddVoter(raft.ServerID(id), raft.ServerAddress(addr), version, defaultTimeout).Error(); err != nil { ++ rc.log.Error("failed to add voter", "id", id, "addr", addr, "version", version, "err", err) + return err + } + return nil + } +  + // DemoteVoter implements Consensus, it tries to demote a voting member into a non-voting member in the cluster. +-func (rc *RaftConsensus) DemoteVoter(id string) error { +- if err := rc.r.DemoteVoter(raft.ServerID(id), 0, defaultTimeout).Error(); err != nil { +- rc.log.Error("failed to demote voter", "id", id, "err", err) ++func (rc *RaftConsensus) DemoteVoter(id string, version uint64) error { ++ if err := rc.r.DemoteVoter(raft.ServerID(id), version, defaultTimeout).Error(); err != nil { ++ rc.log.Error("failed to demote voter", "id", id, "version", version, "err", err) ++ return err ++ } ++ return nil ++} ++ ++// RemoveServer implements Consensus, it tries to remove a member (both voter or non-voter) from the cluster, if leader is being removed, it will cause a new leader election. ++func (rc *RaftConsensus) RemoveServer(id string, version uint64) error { ++ if err := rc.r.RemoveServer(raft.ServerID(id), version, defaultTimeout).Error(); err != nil { ++ rc.log.Error("failed to remove voter", "id", id, "version", version, "err", err) + return err + } + return nil +@@ -156,15 +165,6 @@ + // LeaderCh implements Consensus, it returns a channel that will be notified when leadership status changes (true = leader, false = follower). + func (rc *RaftConsensus) LeaderCh() <-chan bool { + return rc.r.LeaderCh() +-} +- +-// RemoveServer implements Consensus, it tries to remove a member (both voter or non-voter) from the cluster, if leader is being removed, it will cause a new leader election. +-func (rc *RaftConsensus) RemoveServer(id string) error { +- if err := rc.r.RemoveServer(raft.ServerID(id), 0, defaultTimeout).Error(); err != nil { +- rc.log.Error("failed to remove voter", "id", id, "err", err) +- return err +- } +- return nil + } +  + // ServerID implements Consensus, it returns the server ID of the current server. +@@ -232,19 +232,22 @@ return rc.unsafeTracker.UnsafeHead(), nil + } +  + // ClusterMembership implements Consensus, it returns the current cluster membership configuration. 
+-func (rc *RaftConsensus) ClusterMembership() ([]*ServerInfo, error) { ++func (rc *RaftConsensus) ClusterMembership() (*ClusterMembership, error) { + var future raft.ConfigurationFuture + if future = rc.r.GetConfiguration(); future.Error() != nil { + return nil, future.Error() + } +  +- var servers []*ServerInfo ++ var servers []ServerInfo + for _, srv := range future.Configuration().Servers { +- servers = append(servers, &ServerInfo{ ++ servers = append(servers, ServerInfo{ + ID: string(srv.ID), + Addr: string(srv.Address), + Suffrage: ServerSuffrage(srv.Suffrage), + }) + } +- return servers, nil ++ return &ClusterMembership{ ++ Servers: servers, ++ Version: future.Index(), ++ }, nil + } - -
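The net effect of the raft changes: every membership mutation (AddVoter, AddNonVoter, DemoteVoter, RemoveServer) now takes the raft configuration version the caller observed, and ClusterMembership returns that version next to the server list. A minimal sketch of the intended call pattern, assuming the op-conductor Consensus interface mirrors the receiver methods above (the helper is illustrative):

    package example

    import "github.com/ethereum-optimism/optimism/op-conductor/consensus"

    // addVoterChecked reads the membership (servers plus the raft configuration
    // index it was read at) and passes that version back in with the change, so
    // raft rejects the AddVoter if the configuration moved on in the meantime.
    func addVoterChecked(cons consensus.Consensus, id, addr string) error {
        membership, err := cons.ClusterMembership()
        if err != nil {
            return err
        }
        return cons.AddVoter(id, addr, membership.Version)
    }

The old code hard-wired 0, which tells hashicorp/raft to skip this optimistic-concurrency check entirely.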
diff --git OP/op-service/oppprof/cli.go CELO/op-service/oppprof/cli.go +index 710cbeaaf7641e58f32e2021dbaac98975e5be32..d6ccb8566960cba5d4558d8effaf4e02db442885 100644 +--- OP/op-service/oppprof/cli.go ++++ CELO/op-service/oppprof/cli.go +@@ -22,6 +22,7 @@ defaultListenAddr = "0.0.0.0" + defaultListenPort = 6060 + ) +  ++var ErrInvalidPort = errors.New("invalid pprof port") + var allowedProfileTypes = []profileType{"cpu", "heap", "goroutine", "threadcreate", "block", "mutex", "allocs"} +  + type profileType string +@@ -122,7 +123,7 @@ return nil + } +  + if m.ListenPort < 0 || m.ListenPort > math.MaxUint16 { +- return errors.New("invalid pprof port") ++ return ErrInvalidPort + } +  + return nil
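The pprof change is behavior-preserving: the inline error becomes the exported sentinel ErrInvalidPort, so callers can match it with errors.Is instead of comparing strings (op-service/rpc below gets the same treatment). A sketch, assuming Check is the CLIConfig method the hunk patches:

    package example

    import (
        "errors"
        "fmt"

        "github.com/ethereum-optimism/optimism/op-service/oppprof"
    )

    // validatePprof matches the exported sentinel instead of comparing
    // error strings, which is the point of lifting the error out.
    func validatePprof(cfg oppprof.CLIConfig) error {
        err := cfg.Check()
        if errors.Is(err, oppprof.ErrInvalidPort) {
            return fmt.Errorf("pprof listen port out of range: %w", err)
        }
        return err
    }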
diff --git OP/op-chain-ops/genesis/genesis.go CELO/op-chain-ops/genesis/genesis.go -index bbfb15c346496a1f19cccd5915f4e457bd3f8a2b..93953a06089cda171548e38d9538ee6ead06a2b1 100644 ---- OP/op-chain-ops/genesis/genesis.go -+++ CELO/op-chain-ops/genesis/genesis.go -@@ -68,6 +68,7 @@ CancunTime: config.EcotoneTime(block.Time()), - EcotoneTime: config.EcotoneTime(block.Time()), - FjordTime: config.FjordTime(block.Time()), - InteropTime: config.InteropTime(block.Time()), -+ Cel2Time: config.RegolithTime(block.Time()), - Optimism: &params.OptimismConfig{ - EIP1559Denominator: eip1559Denom, - EIP1559Elasticity: eip1559Elasticity,
diff --git OP/op-service/predeploys/addresses.go CELO/op-service/predeploys/addresses.go +index 3602bb5448d3f79114baef4506265ae87a34df7f..c55dcfbebcac462c9773cd1372d137cffc85b9cd 100644 +--- OP/op-service/predeploys/addresses.go ++++ CELO/op-service/predeploys/addresses.go +@@ -38,6 +38,17 @@ SenderCreator_v060 = "0x7fc98430eaedbb6070b35b39d798725049088348" + EntryPoint_v060 = "0x5FF137D4b0FDCD49DcA30c7CF57E578a026d2789" + SenderCreator_v070 = "0xEFC2c1444eBCC4Db75e7613d20C6a62fF67A167C" + EntryPoint_v070 = "0x0000000071727De22E5E9d8BAf0edAc6f37da032" ++ ++ // Celo ++ CeloRegistry = "0x000000000000000000000000000000000000ce10" ++ GoldToken = "0x471ece3750da237f93b8e339c536989b8978a438" ++ FeeHandler = "0xcd437749e43a154c07f3553504c68fbfd56b8778" ++ FeeCurrencyWhitelist = "0xbb024e9cdcb2f9e34d893630d19611b8a5381b3c" ++ MentoFeeHandlerSeller = "0x4efa274b7e33476c961065000d58ee09f7921a74" ++ UniswapFeeHandlerSeller = "0xd3aee28548dbb65df03981f0dc0713bfcbd10a97" ++ SortedOracles = "0xefb84935239dacdecf7c5ba76d8de40b077b7b33" ++ AddressSortedLinkedListWithMedian = "0xED477A99035d0c1e11369F1D7A4e587893cc002B" ++ FeeCurrency = "0x4200000000000000000000000000000000001022" + ) +  + var ( +@@ -76,6 +87,19 @@ EntryPoint_v070Addr = common.HexToAddress(EntryPoint_v070) +  + Predeploys = make(map[string]*Predeploy) + PredeploysByAddress = make(map[common.Address]*Predeploy) ++ ++ // Celo ++ CeloRegistryAddr = common.HexToAddress(CeloRegistry) ++ GoldTokenAddr = common.HexToAddress(GoldToken) ++ FeeHandlerAddr = common.HexToAddress(FeeHandler) ++ FeeCurrencyWhitelistAddr = common.HexToAddress(FeeCurrencyWhitelist) ++ MentoFeeHandlerSellerAddr = common.HexToAddress(MentoFeeHandlerSeller) ++ UniswapFeeHandlerSellerAddr = common.HexToAddress(UniswapFeeHandlerSeller) ++ SortedOraclesAddr = common.HexToAddress(SortedOracles) ++ AddressSortedLinkedListWithMedianAddr = common.HexToAddress(AddressSortedLinkedListWithMedian) ++ FeeCurrencyAddr = common.HexToAddress(FeeCurrency) ++ ++ CeloPredeploys = make(map[string]*Predeploy) + ) +  + func init() { +@@ -155,6 +179,20 @@ } + Predeploys["EntryPoint_v070"] = &Predeploy{ + Address: EntryPoint_v070Addr, + ProxyDisabled: true, ++ } ++ ++ // Celo ++ CeloPredeploys["CeloRegistry"] = &Predeploy{Address: CeloRegistryAddr} ++ CeloPredeploys["GoldToken"] = &Predeploy{Address: GoldTokenAddr} ++ CeloPredeploys["FeeHandler"] = &Predeploy{Address: FeeHandlerAddr} ++ CeloPredeploys["FeeCurrencyWhitelist"] = &Predeploy{Address: FeeCurrencyWhitelistAddr} ++ CeloPredeploys["MentoFeeHandlerSeller"] = &Predeploy{Address: MentoFeeHandlerSellerAddr} ++ CeloPredeploys["UniswapFeeHandlerSeller"] = &Predeploy{Address: UniswapFeeHandlerSellerAddr} ++ CeloPredeploys["SortedOracles"] = &Predeploy{Address: SortedOraclesAddr} ++ CeloPredeploys["AddressSortedLinkedListWithMedian"] = &Predeploy{Address: AddressSortedLinkedListWithMedianAddr} ++ CeloPredeploys["FeeCurrency"] = &Predeploy{Address: FeeCurrencyAddr} ++ for key, predeploy := range CeloPredeploys { ++ Predeploys[key] = predeploy + } +  + for _, predeploy := range Predeploys {
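Because the Celo predeploys are registered in CeloPredeploys and then copied into the generic Predeploys map, existing lookups by name or by address pick them up without changes. A sketch (the helper is illustrative):

    package example

    import (
        "fmt"

        "github.com/ethereum-optimism/optimism/op-service/predeploys"
    )

    // printFeeCurrencyWhitelist looks a Celo predeploy up by name and by address.
    func printFeeCurrencyWhitelist() {
        byName := predeploys.Predeploys["FeeCurrencyWhitelist"]
        fmt.Println("FeeCurrencyWhitelist at", byName.Address)

        // init() fills PredeploysByAddress from Predeploys, Celo entries included.
        byAddr := predeploys.PredeploysByAddress[predeploys.FeeCurrencyWhitelistAddr]
        fmt.Println(byName == byAddr) // true: both point at the same *Predeploy
    }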
diff --git OP/op-chain-ops/genesis/testdata/test-deploy-config-full.json CELO/op-chain-ops/genesis/testdata/test-deploy-config-full.json -index c0aefac625ff5b0a11560dd40ee2ed22c53a7416..09415e40bfd164a02482b916eb2ac2ca8be04479 100644 ---- OP/op-chain-ops/genesis/testdata/test-deploy-config-full.json -+++ CELO/op-chain-ops/genesis/testdata/test-deploy-config-full.json -@@ -92,5 +92,6 @@ "daCommitmentType": "KeccakCommtiment", - "daChallengeProxy": "0x0000000000000000000000000000000000000000", - "daChallengeWindow": 0, - "daResolveWindow": 0, -- "daResolverRefundPercentage": 0 -+ "daResolverRefundPercentage": 0, -+ "deployCeloContracts": false - }
diff --git OP/op-service/rethdb-reader/Cargo.lock CELO/op-service/rethdb-reader/Cargo.lock +index 5b4c003d5b0c01c5cdd4778209878c0a5841242b..0b66063c3372b4df7728c8e213dc6a3a5c971345 100644 +--- OP/op-service/rethdb-reader/Cargo.lock ++++ CELO/op-service/rethdb-reader/Cargo.lock +@@ -1020,16 +1020,15 @@ ] +  + [[package]] + name = "curve25519-dalek" +-version = "4.1.2" ++version = "4.1.3" + source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348" ++checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" + dependencies = [ + "cfg-if", + "cpufeatures", + "curve25519-dalek-derive", + "digest 0.10.7", + "fiat-crypto", +- "platforms", + "rustc_version 0.4.0", + "subtle", + "zeroize", +@@ -2649,12 +2648,6 @@ name = "pkg-config" + version = "0.3.30" + source = "registry+https://github.com/rust-lang/crates.io-index" + checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" +- +-[[package]] +-name = "platforms" +-version = "3.3.0" +-source = "registry+https://github.com/rust-lang/crates.io-index" +-checksum = "626dec3cac7cc0e1577a2ec3fc496277ec2baa084bebad95bb6fdbfae235f84c" +  + [[package]] + name = "polyval"
diff --git OP/op-chain-ops/justfile CELO/op-chain-ops/justfile -index 9775f535d227d4d2dacfa690fe318b2f1b78f23f..b0ec8ee270a586b46ea9383d29bf5041e46273bd 100644 ---- OP/op-chain-ops/justfile -+++ CELO/op-chain-ops/justfile -@@ -23,3 +23,18 @@ build_abi SystemConfig - #build_abi ISemver - build_abi ProxyAdmin - build_abi StorageSetter -+ -+bindings-celo-migrate: -+ #!/usr/bin/env bash -+ set -euxo pipefail -+ -+ build_abi() { -+ local lowercase=$(echo "$2" | awk '{print tolower($0)}') -+ abigen \ -+ --abi "{{abis}}/$1.json" \ -+ --pkg bindings \ -+ --out "cmd/celo-migrate/bindings/$lowercase.go" \ -+ --type $2 -+ } -+ -+ build_abi GoldToken CeloToken
diff --git OP/op-service/rpc/cli.go CELO/op-service/rpc/cli.go +index 866dfd0336d72f1dd2f8161e4b96d234fdd318bb..99cb2dd6c9b225559b8bbbba18abf99089e2bbf6 100644 +--- OP/op-service/rpc/cli.go ++++ CELO/op-service/rpc/cli.go +@@ -14,6 +14,8 @@ PortFlagName = "rpc.port" + EnableAdminFlagName = "rpc.enable-admin" + ) +  ++var ErrInvalidPort = errors.New("invalid RPC port") ++
diff --git OP/op-e2e/actions/l2_batcher.go CELO/op-e2e/actions/l2_batcher.go -index 310a6cade9516d26a8e55cc3273aca494eb7368b..fd249e58fe6ab2638ba2fec7b8abda9c6c796aa7 100644 ---- OP/op-e2e/actions/l2_batcher.go -+++ CELO/op-e2e/actions/l2_batcher.go -@@ -277,7 +277,7 @@ for _, opt := range txOpts { - opt(rawTx) - } -  -- gas, err := core.IntrinsicGas(rawTx.Data, nil, false, true, true, false) -+ gas, err := core.IntrinsicGas(rawTx.Data, nil, false, true, true, false, nil) - require.NoError(t, err, "need to compute intrinsic gas") - rawTx.Gas = gas - txData = rawTx -@@ -468,7 +468,7 @@ GasTipCap: gasTipCap, - GasFeeCap: gasFeeCap, - Data: outputFrame, - } -- gas, err := core.IntrinsicGas(rawTx.Data, nil, false, true, true, false) -+ gas, err := core.IntrinsicGas(rawTx.Data, nil, false, true, true, false, nil) - require.NoError(t, err, "need to compute intrinsic gas") - rawTx.Gas = gas -
diff --git OP/op-service/safego/nocopy.go CELO/op-service/safego/nocopy.go +new file mode 100644 +index 0000000000000000000000000000000000000000..1b7e6e89f9cc8520565a6f54cab602b9406fed02 +--- /dev/null ++++ CELO/op-service/safego/nocopy.go +@@ -0,0 +1,29 @@ ++package safego ++ ++// NoCopy is a super simple safety util taken from the Go atomic lib. ++// ++// NoCopy may be added to structs which must not be copied ++// after the first use. ++// ++// The NoCopy struct is empty, so should be a zero-cost util at runtime. ++// ++// See https://golang.org/issues/8005#issuecomment-190753527 ++// for details. ++// ++// Note that it must not be embedded, due to the Lock and Unlock methods. ++// ++// Like: ++// ``` ++// ++// type Example { ++// V uint64 ++// _ NoCopy ++// } ++// ++// Then run: `go vet -copylocks .` ++// ``` ++type NoCopy struct{} ++ ++// Lock is a no-op used by -copylocks checker from `go vet`. ++func (*NoCopy) Lock() {} ++func (*NoCopy) Unlock() {}
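A sketch of the pattern the file's own comment describes; Counter is hypothetical, and `go vet -copylocks` is what reports the copy:

    package example

    import "github.com/ethereum-optimism/optimism/op-service/safego"

    // Counter is a hypothetical struct that must not be copied after first use.
    // The zero-size NoCopy field adds nothing at runtime, but its Lock/Unlock
    // methods give the vet copylocks analyzer something to detect.
    type Counter struct {
        N uint64
        _ safego.NoCopy
    }

    // readByValue copies its argument; `go vet -copylocks ./...` flags the
    // copy because Counter contains safego.NoCopy.
    func readByValue(c Counter) uint64 { return c.N }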
diff --git OP/op-e2e/actions/l2_batcher_test.go CELO/op-e2e/actions/l2_batcher_test.go -index 3a137ce992af6458cc10688ec0a2b8fdfaf697b2..071785076538788662f8c5244eb2ec665c700704 100644 ---- OP/op-e2e/actions/l2_batcher_test.go -+++ CELO/op-e2e/actions/l2_batcher_test.go -@@ -496,7 +496,7 @@ signer := types.LatestSigner(sd.L2Cfg.Config) - data := make([]byte, 120_000) // very large L2 txs, as large as the tx-pool will accept - _, err := rng.Read(data[:]) // fill with random bytes, to make compression ineffective - require.NoError(t, err) -- gas, err := core.IntrinsicGas(data, nil, false, true, true, false) -+ gas, err := core.IntrinsicGas(data, nil, false, true, true, false, nil) - require.NoError(t, err) - if gas > engine.engineApi.RemainingBlockGas() { - break
diff --git OP/op-service/sources/rollupclient.go CELO/op-service/sources/rollupclient.go +index 96cd166732a9686e7f875f451b6510a29513d67a..14c38d35b4e8e5f96161edd73e7163f3d0d6b479 100644 +--- OP/op-service/sources/rollupclient.go ++++ CELO/op-service/sources/rollupclient.go +@@ -3,10 +3,9 @@ + import ( + "context" +  +- "golang.org/x/exp/slog" +- + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" ++ "golang.org/x/exp/slog" +  + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-service/client" +@@ -69,6 +68,10 @@ } +  + func (r *RollupClient) PostUnsafePayload(ctx context.Context, payload *eth.ExecutionPayloadEnvelope) error { + return r.rpc.CallContext(ctx, nil, "admin_postUnsafePayload", payload) ++} ++ ++func (r *RollupClient) OverrideLeader(ctx context.Context) error { ++ return r.rpc.CallContext(ctx, nil, "admin_overrideLeader") + } +  + func (r *RollupClient) SetLogLevel(ctx context.Context, lvl slog.Level) error {
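OverrideLeader is a thin wrapper over the new admin_overrideLeader method, presumably paired with the op-conductor changes for manually forcing sequencer leadership during failover. A sketch (the helper is illustrative):

    package example

    import (
        "context"

        "github.com/ethereum-optimism/optimism/op-service/sources"
    )

    // forceLeadership asks the node to consider itself leader via the new RPC.
    func forceLeadership(ctx context.Context, rc *sources.RollupClient) error {
        return rc.OverrideLeader(ctx)
    }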
diff --git OP/op-e2e/actions/span_batch_test.go CELO/op-e2e/actions/span_batch_test.go -index 6ccb76a461bd6d9eab10aaf4ac2a3187f8239dbb..39dd3f817a7aef9febeb32da30d217f7eb898a34 100644 ---- OP/op-e2e/actions/span_batch_test.go -+++ CELO/op-e2e/actions/span_batch_test.go -@@ -524,7 +524,7 @@ signer := types.LatestSigner(sd.L2Cfg.Config) - data := make([]byte, rand.Intn(100)) - _, err := crand.Read(data[:]) // fill with random bytes - require.NoError(t, err) -- gas, err := core.IntrinsicGas(data, nil, false, true, true, false) -+ gas, err := core.IntrinsicGas(data, nil, false, true, true, false, nil) - require.NoError(t, err) - baseFee := seqEngine.l2Chain.CurrentBlock().BaseFee - nonce, err := cl.PendingNonceAt(t.Ctx(), addrs[userIdx]) -@@ -663,7 +663,7 @@ signer := types.LatestSigner(sdDeltaActivated.L2Cfg.Config) - data := make([]byte, rand.Intn(100)) - _, err := crand.Read(data[:]) // fill with random bytes - require.NoError(t, err) -- gas, err := core.IntrinsicGas(data, nil, false, true, true, false) -+ gas, err := core.IntrinsicGas(data, nil, false, true, true, false, nil) - require.NoError(t, err) - baseFee := seqEngine.l2Chain.CurrentBlock().BaseFee - nonce, err := seqEngCl.PendingNonceAt(t.Ctx(), addrs[userIdx])
diff --git OP/op-service/testutils/metrics.go CELO/op-service/testutils/metrics.go +index 79e575cab1e246181d3c492a6ceb687e315dd08d..19e2baf85f25df00e55333c28b0fe75bfcde4cc7 100644 +--- OP/op-service/testutils/metrics.go ++++ CELO/op-service/testutils/metrics.go +@@ -69,3 +69,6 @@ + func (n *TestRPCMetrics) RecordRPCClientResponse(method string, err error) {} +  + func (t *TestDerivationMetrics) SetDerivationIdle(idle bool) {} ++ ++func (t *TestDerivationMetrics) RecordPipelineReset() { ++}
diff --git OP/op-e2e/actions/sync_test.go CELO/op-e2e/actions/sync_test.go -index e7521bdd8c94b0b800ca469b661074d03385f112..19e9f3098439b1dece9c6338702ee892261b00d1 100644 ---- OP/op-e2e/actions/sync_test.go -+++ CELO/op-e2e/actions/sync_test.go -@@ -896,7 +896,7 @@ // Create valid TX - aliceNonce, err := seqEng.EthClient().PendingNonceAt(t.Ctx(), dp.Addresses.Alice) - require.NoError(t, err) - data := make([]byte, rand.Intn(100)) -- gas, err := core.IntrinsicGas(data, nil, false, true, true, false) -+ gas, err := core.IntrinsicGas(data, nil, false, true, true, false, nil) - require.NoError(t, err) - baseFee := seqEng.l2Chain.CurrentBlock().BaseFee - tx := types.MustSignNewTx(dp.Secrets.Alice, signer, &types.DynamicFeeTx{
diff --git OP/op-service/testutils/mock_emitter.go CELO/op-service/testutils/mock_emitter.go +new file mode 100644 +index 0000000000000000000000000000000000000000..e9b502dfee38e8783574fb8e8089fa956ef0c49d +--- /dev/null ++++ CELO/op-service/testutils/mock_emitter.go +@@ -0,0 +1,41 @@ ++package testutils ++ ++import ( ++ "github.com/stretchr/testify/mock" ++ ++ "github.com/ethereum-optimism/optimism/op-node/rollup" ++) ++ ++type MockEmitter struct { ++ mock.Mock ++} ++ ++func (m *MockEmitter) Emit(ev rollup.Event) { ++ m.Mock.MethodCalled("Emit", ev) ++} ++ ++func (m *MockEmitter) ExpectOnce(expected rollup.Event) { ++ m.Mock.On("Emit", expected).Once() ++} ++ ++func (m *MockEmitter) ExpectMaybeRun(fn func(ev rollup.Event)) { ++ m.Mock.On("Emit", mock.Anything).Maybe().Run(func(args mock.Arguments) { ++ fn(args.Get(0).(rollup.Event)) ++ }) ++} ++ ++func (m *MockEmitter) ExpectOnceType(typ string) { ++ m.Mock.On("Emit", mock.AnythingOfType(typ)).Once() ++} ++ ++func (m *MockEmitter) ExpectOnceRun(fn func(ev rollup.Event)) { ++ m.Mock.On("Emit", mock.Anything).Once().Run(func(args mock.Arguments) { ++ fn(args.Get(0).(rollup.Event)) ++ }) ++} ++ ++func (m *MockEmitter) AssertExpectations(t mock.TestingT) { ++ m.Mock.AssertExpectations(t) ++} ++ ++var _ rollup.EventEmitter = (*MockEmitter)(nil)
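A sketch of how the mock might be used in a test; fakeEvent is hypothetical and stands in for a real rollup event value:

    package example

    import (
        "testing"

        "github.com/ethereum-optimism/optimism/op-node/rollup"
        "github.com/ethereum-optimism/optimism/op-service/testutils"
    )

    // fakeEvent is a hypothetical event; real tests use the rollup package's
    // own event values.
    type fakeEvent struct{}

    func (fakeEvent) String() string { return "fake-event" }

    func TestEmitsEvent(t *testing.T) {
        em := &testutils.MockEmitter{}
        em.ExpectOnce(fakeEvent{})

        // Normally the component under test holds `em` as its rollup.EventEmitter
        // and emits during operation; we emit directly to keep the sketch short.
        var emitter rollup.EventEmitter = em
        emitter.Emit(fakeEvent{})

        em.AssertExpectations(t)
    }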
diff --git OP/op-e2e/brotli_batcher_test.go CELO/op-e2e/brotli_batcher_test.go -index 97211c471ba05df73b58aa7ea504b30dd3e11b0b..1ebb27efb5ad775b24cf3f566827e6e158df1121 100644 ---- OP/op-e2e/brotli_batcher_test.go -+++ CELO/op-e2e/brotli_batcher_test.go -@@ -85,7 +85,7 @@ receipt := SendL2Tx(t, cfg, l2Seq, ethPrivKey, func(opts *TxOpts) { - opts.Value = big.NewInt(1_000_000_000) - opts.Nonce = 1 // Already have deposit - opts.ToAddr = &common.Address{0xff, 0xff} -- opts.Gas, err = core.IntrinsicGas(opts.Data, nil, false, true, true, false) -+ opts.Gas, err = core.IntrinsicGas(opts.Data, nil, false, true, true, false, nil) - require.NoError(t, err) - opts.VerifyOnClients(l2Verif) - })
diff --git OP/op-service/txmgr/send_state.go CELO/op-service/txmgr/send_state.go +index 4066a0caa1499386659f7e0530158292a39d4828..6053d3f3c5e39c80cd7a05fe3d9543714dc863fa 100644 +--- OP/op-service/txmgr/send_state.go ++++ CELO/op-service/txmgr/send_state.go +@@ -16,7 +16,7 @@ // declaration once op-geth is updated to this version. + ErrAlreadyReserved = errors.New("address already reserved") +  + // Returned by CriticalError when the system is unable to get the tx into the mempool in the +- // alloted time ++ // allotted time + ErrMempoolDeadlineExpired = errors.New("failed to get tx into the mempool") + ) +  +@@ -125,7 +125,7 @@ case s.nonceTooLowCount >= s.safeAbortNonceTooLowCount: + // we have exceeded the nonce too low count + return core.ErrNonceTooLow + case s.successFullPublishCount == 0 && s.now().After(s.txInMempoolDeadline): +- // unable to get the tx into the mempool in the alloted time ++ // unable to get the tx into the mempool in the allotted time + return ErrMempoolDeadlineExpired + case s.alreadyReserved: + // incompatible tx type in mempool
diff --git OP/op-e2e/celo/run_all_tests.sh CELO/op-e2e/celo/run_all_tests.sh -new file mode 100755 -index 0000000000000000000000000000000000000000..272dea975368579a77069d98e2f9fa97261eaf72 ---- /dev/null -+++ CELO/op-e2e/celo/run_all_tests.sh -@@ -0,0 +1,56 @@ -+#!/bin/bash -+set -eo pipefail -+ -+SCRIPT_DIR=$(readlink -f "$(dirname "$0")") -+TEST_GLOB=$1 -+cd "$SCRIPT_DIR" || exit 1 -+source "$SCRIPT_DIR/shared.sh" -+ -+## Start geth -+cd "$SCRIPT_DIR/../.." || exit 1 -+trap 'cd "$SCRIPT_DIR/../.." && make devnet-down' EXIT # kill bg job at exit -+make devnet-up -+ -+# Wait for geth to be ready -+for _ in {1..10} -+do -+ if cast block &> /dev/null -+ then -+ break -+ fi -+ sleep 0.2 -+done -+ -+## Run tests -+echo Geth ready, start tests -+failures=0 -+tests=0 -+cd "$SCRIPT_DIR" || exit 1 -+for f in test_*"$TEST_GLOB"* -+do -+ echo -e "\nRun $f" -+ if "./$f" -+ then -+ tput setaf 2 || true -+ echo "PASS $f" -+ else -+ tput setaf 1 || true -+ echo "FAIL $f ❌" -+ ((failures++)) || true -+ fi -+ tput sgr0 || true -+ ((tests++)) || true -+done -+ -+## Final summary -+echo -+if [[ $failures -eq 0 ]] -+then -+ tput setaf 2 || true -+ echo All tests succeeded! -+else -+ tput setaf 1 || true -+ echo $failures/$tests failed. -+fi -+tput sgr0 || true -+exit $failures
diff --git OP/op-service/txmgr/txmgr.go CELO/op-service/txmgr/txmgr.go +index 015d6edc767f34abfa620a358185a1f92d58c416..2e5a87d073da35dc7b2657077b0a4a0c5ee2da02 100644 +--- OP/op-service/txmgr/txmgr.go ++++ CELO/op-service/txmgr/txmgr.go +@@ -10,6 +10,7 @@ "sync" + "sync/atomic" + "time" +  ++ "github.com/ethereum-optimism/optimism/op-service/errutil" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" +@@ -272,7 +273,7 @@ Data: candidate.TxData, + Value: candidate.Value, + }) + if err != nil { +- return nil, fmt.Errorf("failed to estimate gas: %w", err) ++ return nil, fmt.Errorf("failed to estimate gas: %w", errutil.TryAddRevertReason(err)) + } + gasLimit = gas + }
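Gas-estimation failures are now routed through errutil.TryAddRevertReason; going by the name, it surfaces any revert data attached to the RPC error in the message and otherwise passes the error through unchanged. The same call pattern outside txmgr (the helper is illustrative):

    package example

    import (
        "fmt"

        "github.com/ethereum-optimism/optimism/op-service/errutil"
    )

    // wrapEstimateGasErr mirrors the txmgr change above for any RPC error.
    func wrapEstimateGasErr(err error) error {
        if err == nil {
            return nil
        }
        return fmt.Errorf("failed to estimate gas: %w", errutil.TryAddRevertReason(err))
    }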
diff --git OP/op-e2e/celo/shared.sh CELO/op-e2e/celo/shared.sh -index 0000000000000000000000000000000000000000..5c09d3c03a4dfb41f197c8f369258963bd4d3519 ---- /dev/null -+++ CELO/op-e2e/celo/shared.sh -@@ -0,0 +1,11 @@ -+#!/bin/bash -+#shellcheck disable=SC2034 # unused vars make sense in a shared file -+ -+export ETH_RPC_URL=http://127.0.0.1:9545 -+export ETH_RPC_URL_L1=http://127.0.0.1:8545 -+ -+export ACC_PRIVKEY=ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 -+export ACC_ADDR=$(cast wallet address $ACC_PRIVKEY) -+export REGISTRY_ADDR=0x000000000000000000000000000000000000ce10 -+export TOKEN_ADDR=0x471ece3750da237f93b8e339c536989b8978a438 -+export FEE_CURRENCY_DIRECTORY_ADDR=0x71FFbD48E34bdD5a87c3c683E866dc63b8B2a685
diff --git OP/op-supervisor/.gitignore CELO/op-supervisor/.gitignore +new file mode 100644 +index 0000000000000000000000000000000000000000..ba077a4031add5b3a04384f8b9cfc414efbf47dd ++++ CELO/op-supervisor/.gitignore +@@ -0,0 +1 @@ ++bin
diff --git OP/op-e2e/celo/test_token_duality.sh CELO/op-e2e/celo/test_token_duality.sh -new file mode 100755 -index 0000000000000000000000000000000000000000..355afef8c7ca71f39454d1e452c60d2046b6ebf8 ---- /dev/null -+++ CELO/op-e2e/celo/test_token_duality.sh -@@ -0,0 +1,12 @@ -+#!/bin/bash -+#shellcheck disable=SC2086 -+set -eo pipefail -+ -+source shared.sh -+ -+# Send token and check balance -+balance_before=$(cast balance 0x000000000000000000000000000000000000dEaD) -+cast send --private-key $ACC_PRIVKEY $TOKEN_ADDR 'transfer(address to, uint256 value) returns (bool)' 0x000000000000000000000000000000000000dEaD 100 -+balance_after=$(cast balance 0x000000000000000000000000000000000000dEaD) -+echo "Balance change: $balance_before -> $balance_after" -+[[ $((balance_before + 100)) -eq $balance_after ]] || (echo "Balance did not change as expected"; exit 1)
diff --git OP/op-supervisor/Makefile CELO/op-supervisor/Makefile +new file mode 100644 +index 0000000000000000000000000000000000000000..1f58b6f02384365cfa7429c8355c6da984e9cae8 +--- /dev/null ++++ CELO/op-supervisor/Makefile +@@ -0,0 +1,24 @@ ++GITCOMMIT ?= $(shell git rev-parse HEAD) ++GITDATE ?= $(shell git show -s --format='%ct') ++VERSION ?= v0.0.0 ++ ++LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT) ++LDFLAGSSTRING +=-X main.GitDate=$(GITDATE) ++LDFLAGSSTRING +=-X main.Version=$(VERSION) ++LDFLAGSSTRING +=-X main.Meta=$(VERSION_META) ++LDFLAGS := -ldflags "$(LDFLAGSSTRING)" ++ ++op-supervisor: ++ env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) go build -v $(LDFLAGS) -o ./bin/op-supervisor ./cmd ++ ++clean: ++ rm bin/op-supervisor ++ ++test: ++ go test -v ./... ++ ++.PHONY: \ ++ op-supervisor \ ++ clean \ ++ test
diff --git OP/op-e2e/celo/test_weth_bridge.sh CELO/op-e2e/celo/test_weth_bridge.sh -new file mode 100755 -index 0000000000000000000000000000000000000000..a25195d416b313be687deef5033a6d019b9d0dee --- /dev/null -+++ CELO/op-e2e/celo/test_weth_bridge.sh -@@ -0,0 +1,42 @@ -+#!/bin/bash -+#shellcheck disable=SC2086 -+set -eo pipefail -+set -x -+ -+source shared.sh -+SCRIPT_DIR=$(readlink -f "$(dirname "$0")") -+CONTRACTS_DIR=$SCRIPT_DIR/../../packages/contracts-bedrock -+ -+# Deploy WETH -+L1_WETH=$( -+ ETH_RPC_URL=$ETH_RPC_URL_L1 forge create --private-key=$ACC_PRIVKEY --root $CONTRACTS_DIR $CONTRACTS_DIR/src/dispute/weth/WETH98.sol:WETH98 --json | jq .deployedTo -r -+) -+ -+# create ERC20 token on L2 -+L2_TOKEN=$( -+ cast send --private-key $ACC_PRIVKEY 0x4200000000000000000000000000000000000012 "createOptimismMintableERC20(address,string,string)" $L1_WETH "Wrapped Ether" "WETH" --json \ -+ | jq -r '.logs[0].topics[2]' | cast parse-bytes32-address -+) -+ -+# Wrap some ETH -+ETH_RPC_URL=$ETH_RPC_URL_L1 cast send --private-key $ACC_PRIVKEY $L1_WETH --value 1ether -+# Approve transfer to bridge -+L1_BRIDGE_ADDR=$(cast call 0x4200000000000000000000000000000000000010 'otherBridge() returns (address)') -+ETH_RPC_URL=$ETH_RPC_URL_L1 cast send --private-key $ACC_PRIVKEY $L1_WETH 'approve(address, uint256) returns (bool)' $L1_BRIDGE_ADDR 1ether -+# Bridge to L2 -+ETH_RPC_URL=$ETH_RPC_URL_L1 cast send --private-key $ACC_PRIVKEY $L1_BRIDGE_ADDR 'bridgeERC20(address _localToken, address _remoteToken, uint256 _amount, uint32 _minGasLimit, bytes calldata _extraData)' $L1_WETH $L2_TOKEN 0.3ether 50000 0x --gas-limit 6000000 -+ -+# Setup up oracle and FeeCurrencyDirectory -+ORACLE=$(forge create --private-key=$ACC_PRIVKEY --root $CONTRACTS_DIR $CONTRACTS_DIR/src/celo/testing/MockSortedOracles.sol:MockSortedOracles --json | jq .deployedTo -r) -+cast send --private-key $ACC_PRIVKEY $ORACLE 'setMedianRate(address, uint256)' $L2_TOKEN 100000000000000000 -+cast send --private-key $ACC_PRIVKEY $FEE_CURRENCY_DIRECTORY_ADDR 'setCurrencyConfig(address, address, uint256)' $L2_TOKEN $ORACLE 60000 -+ -+# Check balance from bridging (we intentionally don't do this right after bridging, since it takes a bit) -+L2_BALANCE=$(cast call $L2_TOKEN 'balanceOf(address) returns (uint256)' $ACC_ADDR) -+echo L2 balance: $L2_BALANCE -+[[ $(echo $L2_BALANCE | awk '{print $1}') -gt 0 ]] || (echo "Bridging to L2 failed!"; exit 1) -+ -+# Send fee currency tx! -+#TXHASH=$(~/op-geth/e2e_test/js-tests/send_tx.mjs 901 $ACC_PRIVKEY $L2_TOKEN) -+#cast receipt $TXHASH -+echo You can use privkey $ACC_PRIVKEY to pay for txs with $L2_TOKEN, now.
diff --git OP/op-supervisor/cmd/main.go CELO/op-supervisor/cmd/main.go +new file mode 100644 +index 0000000000000000000000000000000000000000..01444e01b92578b3579c1cbefae10c4a93d5c399 +--- /dev/null ++++ CELO/op-supervisor/cmd/main.go +@@ -0,0 +1,57 @@ ++package main ++ ++import ( ++ "context" ++ "os" ++ ++ "github.com/ethereum-optimism/optimism/op-supervisor/config" ++ "github.com/urfave/cli/v2" ++ ++ "github.com/ethereum/go-ethereum/log" ++ ++ opservice "github.com/ethereum-optimism/optimism/op-service" ++ "github.com/ethereum-optimism/optimism/op-service/cliapp" ++ oplog "github.com/ethereum-optimism/optimism/op-service/log" ++ "github.com/ethereum-optimism/optimism/op-service/metrics/doc" ++ "github.com/ethereum-optimism/optimism/op-service/opio" ++ "github.com/ethereum-optimism/optimism/op-supervisor/flags" ++ "github.com/ethereum-optimism/optimism/op-supervisor/metrics" ++ "github.com/ethereum-optimism/optimism/op-supervisor/supervisor" ++) ++ ++var ( ++ Version = "v0.0.1" ++ GitCommit = "" ++ GitDate = "" ++) ++ ++func main() { ++ ctx := opio.WithInterruptBlocker(context.Background()) ++ err := run(ctx, os.Args, fromConfig) ++ if err != nil { ++ log.Crit("Application failed", "message", err) ++ } ++} ++ ++func run(ctx context.Context, args []string, fn supervisor.MainFn) error { ++ oplog.SetupDefaults() ++ ++ app := cli.NewApp() ++ app.Flags = cliapp.ProtectFlags(flags.Flags) ++ app.Version = opservice.FormatVersion(Version, GitCommit, GitDate, "") ++ app.Name = "op-supervisor" ++ app.Usage = "op-supervisor monitors cross-L2 interop messaging" ++ app.Description = "The op-supervisor monitors cross-L2 interop messaging by pre-fetching events and then resolving the cross-L2 dependencies to answer safety queries." ++ app.Action = cliapp.LifecycleCmd(supervisor.Main(app.Version, fn)) ++ app.Commands = []*cli.Command{ ++ { ++ Name: "doc", ++ Subcommands: doc.NewSubcommands(metrics.NewMetrics("default")), ++ }, ++ } ++ return app.RunContext(ctx, args) ++} ++ ++func fromConfig(ctx context.Context, cfg *config.Config, logger log.Logger) (cliapp.Lifecycle, error) { ++ return supervisor.SupervisorFromConfig(ctx, cfg, logger) ++}
diff --git OP/op-e2e/eip4844_test.go CELO/op-e2e/eip4844_test.go -index 5b5cc1d5308edf3d0f2fa927da4838c169128225..c6f481ab67fe7709a7ee9c17c2157134111f81bf 100644 ---- OP/op-e2e/eip4844_test.go -+++ CELO/op-e2e/eip4844_test.go -@@ -102,7 +102,7 @@ opts.Nonce = 1 // Already have deposit - opts.ToAddr = &common.Address{0xff, 0xff} - // put some random data in the tx to make it fill up 6 blobs (multi-blob case) - opts.Data = testutils.RandomData(rand.New(rand.NewSource(420)), 400) -- opts.Gas, err = core.IntrinsicGas(opts.Data, nil, false, true, true, false) -+ opts.Gas, err = core.IntrinsicGas(opts.Data, nil, false, true, true, false, nil) - require.NoError(t, err) - opts.VerifyOnClients(l2Verif) - })
diff --git OP/op-supervisor/cmd/main_test.go CELO/op-supervisor/cmd/main_test.go +new file mode 100644 +index 0000000000000000000000000000000000000000..6a463a81275aaef7f6644b80f8b42766c87a845f +--- /dev/null ++++ CELO/op-supervisor/cmd/main_test.go +@@ -0,0 +1,130 @@ ++package main ++ ++import ( ++ "context" ++ "errors" ++ "fmt" ++ "testing" ++ ++ "github.com/ethereum-optimism/optimism/op-supervisor/config" ++ "github.com/stretchr/testify/require" ++ ++ "github.com/ethereum/go-ethereum/log" ++ ++ "github.com/ethereum-optimism/optimism/op-service/cliapp" ++) ++ ++var ( ++ ValidL2RPCs = []string{"http://localhost:8545"} ++ ValidDatadir = "./supervisor_test_datadir" ++) ++ ++func TestLogLevel(t *testing.T) { ++ t.Run("RejectInvalid", func(t *testing.T) { ++ verifyArgsInvalid(t, "unknown level: foo", addRequiredArgs("--log.level=foo")) ++ }) ++ ++ for _, lvl := range []string{"trace", "debug", "info", "error", "crit"} { ++ lvl := lvl ++ t.Run("AcceptValid_"+lvl, func(t *testing.T) { ++ logger, _, err := dryRunWithArgs(addRequiredArgs("--log.level", lvl)) ++ require.NoError(t, err) ++ require.NotNil(t, logger) ++ }) ++ } ++} ++ ++func TestDefaultCLIOptionsMatchDefaultConfig(t *testing.T) { ++ cfg := configForArgs(t, addRequiredArgs()) ++ defaultCfgTempl := config.NewConfig(ValidL2RPCs, ValidDatadir) ++ defaultCfg := *defaultCfgTempl ++ defaultCfg.Version = Version ++ require.Equal(t, defaultCfg, *cfg) ++} ++ ++func TestL2RPCs(t *testing.T) { ++ t.Run("Required", func(t *testing.T) { ++ verifyArgsInvalid(t, "flag l2-rpcs is required", addRequiredArgsExcept("--l2-rpcs")) ++ }) ++ ++ t.Run("Valid", func(t *testing.T) { ++ url1 := "http://example.com:1234" ++ url2 := "http://foobar.com:1234" ++ cfg := configForArgs(t, addRequiredArgsExcept("--l2-rpcs", "--l2-rpcs="+url1+","+url2)) ++ require.Equal(t, []string{url1, url2}, cfg.L2RPCs) ++ }) ++} ++ ++func TestDatadir(t *testing.T) { ++ t.Run("Required", func(t *testing.T) { ++ verifyArgsInvalid(t, "flag datadir is required", addRequiredArgsExcept("--datadir")) ++ }) ++ ++ t.Run("Valid", func(t *testing.T) { ++ dir := "foo" ++ cfg := configForArgs(t, addRequiredArgsExcept("--datadir", "--datadir", dir)) ++ require.Equal(t, dir, cfg.Datadir) ++ }) ++} ++ ++func TestMockRun(t *testing.T) { ++ t.Run("Valid", func(t *testing.T) { ++ cfg := configForArgs(t, addRequiredArgs("--mock-run")) ++ require.Equal(t, true, cfg.MockRun) ++ }) ++} ++ ++func verifyArgsInvalid(t *testing.T, messageContains string, cliArgs []string) { ++ _, _, err := dryRunWithArgs(cliArgs) ++ require.ErrorContains(t, err, messageContains) ++} ++ ++func configForArgs(t *testing.T, cliArgs []string) *config.Config { ++ _, cfg, err := dryRunWithArgs(cliArgs) ++ require.NoError(t, err) ++ return cfg ++} ++ ++func dryRunWithArgs(cliArgs []string) (log.Logger, *config.Config, error) { ++ cfg := new(config.Config) ++ var logger log.Logger ++ fullArgs := append([]string{"op-supervisor"}, cliArgs...) ++ testErr := errors.New("dry-run") ++ err := run(context.Background(), fullArgs, func(ctx context.Context, config *config.Config, log log.Logger) (cliapp.Lifecycle, error) { ++ logger = log ++ cfg = config ++ return nil, testErr ++ }) ++ if errors.Is(err, testErr) { // expected error ++ err = nil ++ } ++ return logger, cfg, err ++} ++ ++func addRequiredArgs(args ...string) []string { ++ req := requiredArgs() ++ combined := toArgList(req) ++ return append(combined, args...)
++} ++ ++func addRequiredArgsExcept(name string, optionalArgs ...string) []string { ++ req := requiredArgs() ++ delete(req, name) ++ return append(toArgList(req), optionalArgs...) ++} ++ ++func toArgList(req map[string]string) []string { ++ var combined []string ++ for name, value := range req { ++ combined = append(combined, fmt.Sprintf("%s=%s", name, value)) ++ } ++ return combined ++} ++ ++func requiredArgs() map[string]string { ++ args := map[string]string{ ++ "--l2-rpcs": ValidL2RPCs[0], ++ "--datadir": ValidDatadir, ++ } ++ return args ++}
diff --git OP/op-e2e/setup.go CELO/op-e2e/setup.go -index 2b255879180c15fbf4f70f898903d03eae68a23a..78120858c516c40ba48d44752e9e0ac6a12f9a27 100644 ---- OP/op-e2e/setup.go -+++ CELO/op-e2e/setup.go -@@ -539,6 +539,7 @@ DeltaTime: cfg.DeployConfig.DeltaTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)), - EcotoneTime: cfg.DeployConfig.EcotoneTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)), - FjordTime: cfg.DeployConfig.FjordTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)), - InteropTime: cfg.DeployConfig.InteropTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)), -+ Cel2Time: cfg.DeployConfig.RegolithTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)), - ProtocolVersionsAddress: cfg.L1Deployments.ProtocolVersionsProxy, - } - }
diff --git OP/op-supervisor/config/config.go CELO/op-supervisor/config/config.go +new file mode 100644 +index 0000000000000000000000000000000000000000..dbf723d55d3b4e1033258a1280d817ec50d71863 +--- /dev/null ++++ CELO/op-supervisor/config/config.go +@@ -0,0 +1,58 @@ ++package config ++ ++import ( ++ "errors" ++ ++ oplog "github.com/ethereum-optimism/optimism/op-service/log" ++ opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" ++ "github.com/ethereum-optimism/optimism/op-service/oppprof" ++ oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" ++) ++ ++var ( ++ ErrMissingL2RPC = errors.New("must specify at least one L2 RPC") ++ ErrMissingDatadir = errors.New("must specify datadir") ++) ++ ++type Config struct { ++ Version string ++ ++ LogConfig oplog.CLIConfig ++ MetricsConfig opmetrics.CLIConfig ++ PprofConfig oppprof.CLIConfig ++ RPC oprpc.CLIConfig ++ ++ // MockRun runs the service with a mock backend ++ MockRun bool ++ ++ L2RPCs []string ++ Datadir string ++} ++ ++func (c *Config) Check() error { ++ var result error ++ result = errors.Join(result, c.MetricsConfig.Check()) ++ result = errors.Join(result, c.PprofConfig.Check()) ++ result = errors.Join(result, c.RPC.Check()) ++ if len(c.L2RPCs) == 0 { ++ result = errors.Join(result, ErrMissingL2RPC) ++ } ++ if c.Datadir == "" { ++ result = errors.Join(result, ErrMissingDatadir) ++ } ++ return result ++} ++ ++// NewConfig creates a new config using default values whenever possible. ++// Required options with no suitable default are passed as parameters. ++func NewConfig(l2RPCs []string, datadir string) *Config { ++ return &Config{ ++ LogConfig: oplog.DefaultCLIConfig(), ++ MetricsConfig: opmetrics.DefaultCLIConfig(), ++ PprofConfig: oppprof.DefaultCLIConfig(), ++ RPC: oprpc.DefaultCLIConfig(), ++ MockRun: false, ++ L2RPCs: l2RPCs, ++ Datadir: datadir, ++ } ++}
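A sketch of building and validating the supervisor config (the helper is illustrative):

    package example

    import (
        "fmt"

        "github.com/ethereum-optimism/optimism/op-supervisor/config"
    )

    // buildConfig fills defaults via NewConfig, then validates. Because Check
    // joins its errors, a config missing both L2 RPCs and the datadir would
    // report both problems in one error.
    func buildConfig() (*config.Config, error) {
        cfg := config.NewConfig([]string{"http://localhost:9545"}, "./supervisor_data")
        if err := cfg.Check(); err != nil {
            return nil, fmt.Errorf("invalid op-supervisor config: %w", err)
        }
        return cfg, nil
    }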
diff --git OP/op-e2e/system_test.go CELO/op-e2e/system_test.go -index 4dcea8bd96c368c7565a60c500245594c630b25d..b1b2b857f98eec8a52842a6025bf47915ac94deb 100644 ---- OP/op-e2e/system_test.go -+++ CELO/op-e2e/system_test.go -@@ -1280,8 +1280,16 @@ require.Nil(t, err, "reading gpo decimals") -  - require.Equal(t, decimals.Uint64(), uint64(6), "wrong gpo decimals") -  -+ // Celo changes the base fee recipient -+ var baseFeeRecipient common.Address -+ if sys.RollupConfig.Cel2Time == nil { -+ baseFeeRecipient = predeploys.BaseFeeVaultAddr -+ } else { -+ baseFeeRecipient = predeploys.FeeHandlerAddr -+ } +
diff --git OP/op-supervisor/config/config_test.go CELO/op-supervisor/config/config_test.go +new file mode 100644 +index 0000000000000000000000000000000000000000..ef84e4be81bd7d18993a59fadbbe14fd55b23918 +--- /dev/null ++++ CELO/op-supervisor/config/config_test.go +@@ -0,0 +1,52 @@ ++package config + - // BaseFee Recipient -- baseFeeRecipientStartBalance, err := l2Seq.BalanceAt(context.Background(), predeploys.BaseFeeVaultAddr, big.NewInt(rpc.EarliestBlockNumber.Int64())) -+ baseFeeRecipientStartBalance, err := l2Seq.BalanceAt(context.Background(), baseFeeRecipient, big.NewInt(rpc.EarliestBlockNumber.Int64())) - require.Nil(t, err) -  - // L1Fee Recipient -@@ -1324,7 +1332,7 @@ - endBalance, err := l2Seq.BalanceAt(context.Background(), fromAddr, header.Number) - require.Nil(t, err) -  -- baseFeeRecipientEndBalance, err := l2Seq.BalanceAt(context.Background(), predeploys.BaseFeeVaultAddr, header.Number) -+ baseFeeRecipientEndBalance, err := l2Seq.BalanceAt(context.Background(), baseFeeRecipient, header.Number) - require.Nil(t, err) -  - l1Header, err := l1.HeaderByNumber(context.Background(), nil)
diff --git OP/op-node/rollup/types.go CELO/op-node/rollup/types.go -index 816181687567a074ad8aeb4666fa6e4c1b82d372..8a42d37ebeb98452b43d470cfb3ce501ceb6be87 100644 ---- OP/op-node/rollup/types.go -+++ CELO/op-node/rollup/types.go -@@ -92,6 +92,7 @@ // a pre-mainnet Bedrock change that addresses findings of the Sherlock contest related to deposit attributes. - // "Regolith" is the loose deposited rock that sits on top of Bedrock. - // Active if RegolithTime != nil && L2 block timestamp >= *RegolithTime, inactive otherwise. - RegolithTime *uint64 `json:"regolith_time,omitempty"` -+ Cel2Time *uint64 `json:"cel2_time,omitempty"` -  - // CanyonTime sets the activation time of the Canyon network upgrade. - // Active if CanyonTime != nil && L2 block timestamp >= *CanyonTime, inactive otherwise. -@@ -599,6 +600,7 @@ "ecotone_time", fmtForkTimeOrUnset(c.EcotoneTime), - "fjord_time", fmtForkTimeOrUnset(c.FjordTime), - "interop_time", fmtForkTimeOrUnset(c.InteropTime), - "plasma_mode", c.PlasmaConfig != nil, -+ "cel2_time", fmtForkTimeOrUnset(c.Cel2Time), - ) - } -
diff --git OP/op-supervisor/flags/flags.go CELO/op-supervisor/flags/flags.go +new file mode 100644 +index 0000000000000000000000000000000000000000..1759381694acb2da176f63ecda219404dff1a934 +--- /dev/null ++++ CELO/op-supervisor/flags/flags.go +@@ -0,0 +1,83 @@ ++package flags ++ ++import ( ++ "fmt" ++ ++ "github.com/ethereum-optimism/optimism/op-supervisor/config" ++ "github.com/urfave/cli/v2" ++ ++ opservice "github.com/ethereum-optimism/optimism/op-service" ++ oplog "github.com/ethereum-optimism/optimism/op-service/log" ++ opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" ++ "github.com/ethereum-optimism/optimism/op-service/oppprof" ++ oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" ++) ++ ++const EnvVarPrefix = "OP_SUPERVISOR" ++ ++func prefixEnvVars(name string) []string { ++ return opservice.PrefixEnvVar(EnvVarPrefix, name) ++} ++ ++var ( ++ L2RPCsFlag = &cli.StringSliceFlag{ ++ Name: "l2-rpcs", ++ Usage: "L2 RPC sources.", ++ EnvVars: prefixEnvVars("L2_RPCS"), ++ } ++ DataDirFlag = &cli.PathFlag{ ++ Name: "datadir", ++ Usage: "Directory to store data generated as part of responding to games", ++ EnvVars: prefixEnvVars("DATADIR"), ++ } ++ MockRunFlag = &cli.BoolFlag{ ++ Name: "mock-run", ++ Usage: "Mock run, no actual backend used, just presenting the service", ++ EnvVars: prefixEnvVars("MOCK_RUN"), ++ Hidden: true, // this is for testing only ++ } ++) ++ ++var requiredFlags = []cli.Flag{ ++ L2RPCsFlag, ++ DataDirFlag, ++} ++ ++var optionalFlags = []cli.Flag{ ++ MockRunFlag, ++} ++ ++func init() { ++ optionalFlags = append(optionalFlags, oprpc.CLIFlags(EnvVarPrefix)...) ++ optionalFlags = append(optionalFlags, oplog.CLIFlags(EnvVarPrefix)...) ++ optionalFlags = append(optionalFlags, opmetrics.CLIFlags(EnvVarPrefix)...) ++ optionalFlags = append(optionalFlags, oppprof.CLIFlags(EnvVarPrefix)...) ++ ++ Flags = append(Flags, requiredFlags...) ++ Flags = append(Flags, optionalFlags...) ++} ++ ++// Flags contains the list of configuration options available to the binary. ++var Flags []cli.Flag ++ ++func CheckRequired(ctx *cli.Context) error { ++ for _, f := range requiredFlags { ++ if !ctx.IsSet(f.Names()[0]) { ++ return fmt.Errorf("flag %s is required", f.Names()[0]) ++ } ++ } ++ return nil ++} ++ ++func ConfigFromCLI(ctx *cli.Context, version string) *config.Config { ++ return &config.Config{ ++ Version: version, ++ LogConfig: oplog.ReadCLIConfig(ctx), ++ MetricsConfig: opmetrics.ReadCLIConfig(ctx), ++ PprofConfig: oppprof.ReadCLIConfig(ctx), ++ RPC: oprpc.ReadCLIConfig(ctx), ++ MockRun: ctx.Bool(MockRunFlag.Name), ++ L2RPCs: ctx.StringSlice(L2RPCsFlag.Name), ++ Datadir: ctx.Path(DataDirFlag.Name), ++ } ++}
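Each flag is also settable through an environment variable; given opservice.PrefixEnvVar's usual naming, that should be OP_SUPERVISOR_ plus the upper-cased flag name, e.g. OP_SUPERVISOR_L2_RPCS and OP_SUPERVISOR_DATADIR. A sketch that lists the registered flag names (the helper is illustrative):

    package example

    import (
        "fmt"

        "github.com/ethereum-optimism/optimism/op-supervisor/flags"
    )

    // printFlagNames lists every CLI option op-supervisor registers,
    // required ones (l2-rpcs, datadir) included.
    func printFlagNames() {
        for _, f := range flags.Flags {
            fmt.Println(f.Names()[0])
        }
    }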