diff --git a/.github/workflows/arm_deb_packager.yml b/.github/workflows/arm_deb_packager.yml new file mode 100644 index 00000000..64d451c6 --- /dev/null +++ b/.github/workflows/arm_deb_packager.yml @@ -0,0 +1,89 @@ +name: arm_deb_packager + + +on: + push: + branches: + - 'main' + paths: + - '**' + tags: + - 'v*.*.*' + - 'v*.*.*-*' + +jobs: + build: + permissions: + id-token: write + contents: write + runs-on: + labels: arm-runner-2204 + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Set up Go + uses: actions/setup-go@master + with: + go-version: 1.22.x + # Variables + - name: Adding TAG to ENV + run: echo "GIT_TAG=`echo $(git describe --tags --abbrev=0)`" >> $GITHUB_ENV + - name: adding version + run: | + NUMERIC_VERSION=$( echo ${{ env.GIT_TAG }} | sed 's/[^0-9.]//g' ) + echo "VERSION=$NUMERIC_VERSION" >> $GITHUB_ENV + + - name: go mod download + run: go mod download + + - name: Build the binary + run: make build + + - name: Build the rust binary + run: | + export BUILD_SCRIPT_DISABLED=1 + cargo build --release --bin cdk + + - name: making directory structure + run: mkdir -p packaging/deb/cdk/usr/bin/ packaging/deb/cdk/DEBIAN/ + - name: copying necessary binary for arm64 + run: cp -rp target/cdk-node packaging/deb/cdk/usr/bin/cdk-node + - name: copying rust binary for arm64 + run: cp -rp target/release/cdk packaging/deb/cdk/usr/bin/cdk + + # Control file creation + - name: Create control file + run: | + echo "Package: cdk" >> packaging/deb/cdk/DEBIAN/control + echo "Version: ${{ env.VERSION }}" >> packaging/deb/cdk/DEBIAN/control + echo "Section: base" >> packaging/deb/cdk/DEBIAN/control + echo "Priority: optional" >> packaging/deb/cdk/DEBIAN/control + echo "Architecture: arm64" >> packaging/deb/cdk/DEBIAN/control + echo "Maintainer: devops@polygon.technology" >> packaging/deb/cdk/DEBIAN/control + echo "Description: cdk binary package" >> packaging/deb/cdk/DEBIAN/control + + - name: Creating package for binary for cdk ${{ env.ARCH }} + run: cp -rp 
packaging/deb/cdk packaging/deb/cdk-${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + + - name: Running package build + run: dpkg-deb --build --root-owner-group packaging/deb/cdk-${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: arm64 + + - name: create checksum for the arm64 package + run: cd packaging/deb/ && sha256sum cdk-${{ env.GIT_TAG }}-${{ env.ARCH }}.deb > cdk-${{ env.GIT_TAG }}-${{ env.ARCH }}.deb.checksum + env: + ARCH: arm64 + + - name: Release cdk Packages + uses: softprops/action-gh-release@v2 + with: + tag_name: ${{ env.GIT_TAG }} + prerelease: true + files: | + packaging/deb/cdk**.deb + packaging/deb/cdk**.deb.checksum diff --git a/.github/workflows/arm_rpm_packager.yml b/.github/workflows/arm_rpm_packager.yml new file mode 100644 index 00000000..614b80f2 --- /dev/null +++ b/.github/workflows/arm_rpm_packager.yml @@ -0,0 +1,103 @@ +name: arm_rpm_packager + +on: + push: + branches: + - 'main' + paths: + - '**' + tags: + - 'v*.*.*' + - 'v*.*.*-*' + +jobs: + build: + permissions: + id-token: write + contents: write + runs-on: + labels: arm-runner-2204 + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Set up Go + uses: actions/setup-go@master + with: + go-version: 1.22.x + - name: Adding TAG to ENV + run: echo "GIT_TAG=`echo $(git describe --tags --abbrev=0)`" >> $GITHUB_ENV + + - name: Adding a TAG.1 to the env + run: echo "GIT_TAG1=`echo $(git describe --tags --abbrev=0)`" | sed 's/-/./g' >> $GITHUB_ENV + + - name: Download deps for project + run: go mod download + + - name: Building cdk-node for amd64 + run: make build + + - name: Building the cdk + run: | + export BUILD_SCRIPT_DISABLED=1 + cargo build --release --bin cdk + + - name: Installing some dependencies + run: sudo apt-get update && sudo apt-get install -y rpm + + - name: Setup rpm package for binary + run: | + mkdir -p packaging/rpm/SPECS + mkdir -p packaging/rpm/BUILD + mkdir -p packaging/rpm/RPMS + mkdir -p packaging/rpm/SRPMS + + touch 
packaging/rpm/SPECS/cdk.spec + echo "Name: cdk" >> packaging/rpm/SPECS/cdk.spec + echo "Version: ${{ env.GIT_TAG1 }}" >> packaging/rpm/SPECS/cdk.spec + echo "Release: 1%{?dist}" >> packaging/rpm/SPECS/cdk.spec + echo "License: GPL/AGPL" >> packaging/rpm/SPECS/cdk.spec + echo "BuildArch: aarch64" >> packaging/rpm/SPECS/cdk.spec + echo "Summary: cdk rpm package" >> packaging/rpm/SPECS/cdk.spec + + echo "%description" >> packaging/rpm/SPECS/cdk.spec + echo "cdk rpm package" >> packaging/rpm/SPECS/cdk.spec + + echo "%pre" >> packaging/rpm/SPECS/cdk.spec + echo "getent group cdk >/dev/null || groupadd -r cdk" >> packaging/rpm/SPECS/cdk.spec + echo "getent passwd cdk >/dev/null || useradd -s /bin/false -d /opt/cdk -r cdk -g cdk" >> packaging/rpm/SPECS/cdk.spec + + echo "%install" >> packaging/rpm/SPECS/cdk.spec + echo "mkdir -p %{buildroot}/usr/bin" >> packaging/rpm/SPECS/cdk.spec + echo "cp /home/runner/work/cdk/cdk/target/cdk-node %{buildroot}/usr/bin/cdk-node" >> packaging/rpm/SPECS/cdk.spec + echo "cp /home/runner/work/cdk/cdk/target/release/cdk %{buildroot}/usr/bin/cdk" >> packaging/rpm/SPECS/cdk.spec + + echo "%files" >> packaging/rpm/SPECS/cdk.spec + echo "/usr/bin/cdk" >> packaging/rpm/SPECS/cdk.spec + echo "/usr/bin/cdk-node" >> packaging/rpm/SPECS/cdk.spec + + + - name: Construct rpm package + run: | + rpmbuild --define "_topdir /home/runner/work/cdk/cdk/packaging/rpm_build" \ + --define "_builddir %{_topdir}/BUILD" \ + --define "_rpmdir %{_topdir}/RPMS" \ + --define "_srcrpmdir %{_topdir}/SRPMS" \ + --define "__spec_install_post /bin/true" \ + -bb packaging/rpm/SPECS/cdk.spec + + - name: Rename file for post rpm build and for checksum + run: mv /home/runner/work/cdk/cdk/packaging/rpm_build/RPMS/aarch64/cdk-${{ env.GIT_TAG1 }}-1.aarch64.rpm /home/runner/work/cdk/cdk/packaging/rpm_build/RPMS/aarch64/cdk-${{ env.GIT_TAG1 }}.aarch64.rpm + + - name: Checksum for the rpm package + run: sha256sum /home/runner/work/cdk/cdk/packaging/rpm_build/RPMS/aarch64/cdk-${{ 
env.GIT_TAG1 }}.aarch64.rpm > /home/runner/work/cdk/cdk/packaging/rpm_build/RPMS/aarch64/cdk-${{ env.GIT_TAG1 }}.aarch64.rpm.checksum + + - name: Release cdk Packages + uses: softprops/action-gh-release@v2 + with: + tag_name: ${{ env.GIT_TAG }} + prerelease: true + files: | + packaging/rpm_build/RPMS/aarch64/cdk-**.rpm + packaging/rpm_build/RPMS/aarch64/cdk-**.rpm.checksum diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index bdad36b2..77255d39 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -12,11 +12,11 @@ jobs: runs-on: ubuntu-latest steps: - name: Install Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 with: go-version: 1.21.x - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index b22f8710..f388cdd0 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,10 +1,10 @@ name: Release on: - push: - tags: - # run only against tags that follow semver (https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string) - - 'v[0-9]+.[0-9]+.[0-9]+*' + push: + tags: + - 'v*.*.*' + - 'v*.*.*-*' concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} @@ -33,9 +33,6 @@ jobs: TAGS: ${{ steps.meta.outputs.tags }} VERSION: ${{ steps.meta.outputs.version }} steps: - - name: validate tag - run: echo ${{ github.ref_name }} | grep -qE '^v[0-9]+\.[0-9]+\.[0-9]+(-rc\.[0-9]+)?$' - - name: Prepare run: | platform=${{ matrix.platform }} diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index b20bb982..787d2301 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -13,7 +13,11 @@ jobs: matrix: go-version: [ 1.22.x ] goarch: [ "amd64" ] - e2e-group: [ "elderberry-validium", "elderberry-rollup" ] + e2e-group: + - 
"fork9-validium" + - "fork11-rollup" + - "fork12-validium" + - "fork12-rollup" runs-on: ubuntu-latest steps: - name: Checkout code @@ -35,7 +39,7 @@ jobs: run: | echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list sudo apt update - sudo apt install kurtosis-cli=0.90.1 + sudo apt install kurtosis-cli=1.3.0 kurtosis version - name: Disable kurtosis analytics @@ -48,6 +52,16 @@ jobs: pip3 install yq yq --version + - name: Install polycli + run: | + POLYCLI_VERSION="${{ vars.POLYCLI_VERSION }}" + tmp_dir=$(mktemp -d) + curl -L "https://github.com/0xPolygon/polygon-cli/releases/download/${POLYCLI_VERSION}/polycli_${POLYCLI_VERSION}_linux_amd64.tar.gz" | tar -xz -C "$tmp_dir" + mv "$tmp_dir"/* /usr/local/bin/polycli + rm -rf "$tmp_dir" + sudo chmod +x /usr/local/bin/polycli + /usr/local/bin/polycli version + - name: Install foundry uses: foundry-rs/foundry-toolchain@v1 @@ -56,6 +70,7 @@ jobs: with: repository: 0xPolygon/kurtosis-cdk path: "kurtosis-cdk" + ref: "v0.2.14" - name: Setup Bats and bats libs uses: bats-core/bats-action@2.0.0 diff --git a/.github/workflows/test-resequence.yml b/.github/workflows/test-resequence.yml index 9ac51af0..71ebc7d7 100644 --- a/.github/workflows/test-resequence.yml +++ b/.github/workflows/test-resequence.yml @@ -3,7 +3,7 @@ on: push: branches: # Disable test for the moment as it takes too long - - "this-test-is-disabled" + - "test_disabled" concurrency: @@ -34,7 +34,7 @@ jobs: uses: actions/checkout@v4 with: repository: 0xPolygon/kurtosis-cdk - ref: 3debe0a4dd000e02f7e6bde3247432211bf0336f + ref: a7a80b7b5d98a69a23415ab0018e556257a6dfb6 path: kurtosis-cdk - name: Install Kurtosis CDK tools @@ -51,7 +51,11 @@ jobs: - name: Install polycli run: | - tmp_dir=$(mktemp -d) && curl -L https://github.com/0xPolygon/polygon-cli/releases/download/v0.1.48/polycli_v0.1.48_linux_amd64.tar.gz | tar -xz -C "$tmp_dir" && mv "$tmp_dir"/* /usr/local/bin/polycli && rm -rf "$tmp_dir" + 
POLYCLI_VERSION="${{ vars.POLYCLI_VERSION }}" + tmp_dir=$(mktemp -d) + curl -L "https://github.com/0xPolygon/polygon-cli/releases/download/${POLYCLI_VERSION}/polycli_${POLYCLI_VERSION}_linux_amd64.tar.gz" | tar -xz -C "$tmp_dir" + mv "$tmp_dir"/* /usr/local/bin/polycli + rm -rf "$tmp_dir" sudo chmod +x /usr/local/bin/polycli /usr/local/bin/polycli version @@ -68,8 +72,11 @@ jobs: - name: Configure Kurtosis CDK working-directory: ./kurtosis-cdk run: | - /usr/local/bin/yq -i '.args.cdk_erigon_node_image = "jerrycgh/cdk-erigon:d5d04906f723f3f1d8c43c9e6baf3e18c27ff348"' params.yml /usr/local/bin/yq -i '.args.cdk_node_image = "cdk:local"' params.yml + /usr/local/bin/yq -i '.args.zkevm_rollup_fork_id = "12"' params.yml + /usr/local/bin/yq -i '.args.zkevm_prover_image = "hermeznetwork/zkevm-prover:v8.0.0-RC5-fork.12"' params.yml + /usr/local/bin/yq -i '.args.cdk_erigon_node_image = "jerrycgh/cdk-erigon:d5d04906f723f3f1d8c43c9e6baf3e18c27ff348"' params.yml + - name: Deploy Kurtosis CDK package working-directory: ./kurtosis-cdk diff --git a/.github/workflows/x86_deb_packager.yml b/.github/workflows/x86_deb_packager.yml new file mode 100644 index 00000000..2568861a --- /dev/null +++ b/.github/workflows/x86_deb_packager.yml @@ -0,0 +1,89 @@ +name: x86_deb_packager + + +on: + push: + branches: + - 'main' + paths: + - '**' + tags: + - 'v*.*.*' + - 'v*.*.*-*' + +jobs: + build: + permissions: + id-token: write + contents: write + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Set up Go + uses: actions/setup-go@master + with: + go-version: 1.22.x + # Variables + - name: Adding TAG to ENV + run: echo "GIT_TAG=`echo $(git describe --tags --abbrev=0)`" >> $GITHUB_ENV + - name: adding version + run: | + NUMERIC_VERSION=$( echo ${{ env.GIT_TAG }} | sed 's/[^0-9.]//g' ) + echo "VERSION=$NUMERIC_VERSION" >> $GITHUB_ENV + + - name: go mod download + run: go mod download + + - name: Build the binary + run: make build + + - 
name: Build the rust binary + run: | + export BUILD_SCRIPT_DISABLED=1 + cargo build --release --bin cdk + + - name: making directory structure + run: mkdir -p packaging/deb/cdk/usr/bin/ packaging/deb/cdk/DEBIAN/ + - name: copying necessary binary for amd64 + run: cp -rp target/cdk-node packaging/deb/cdk/usr/bin/cdk-node + - name: copying rust binary for amd64 + run: cp -rp target/release/cdk packaging/deb/cdk/usr/bin/cdk + + # Control file creation + - name: Create control file + run: | + echo "Package: cdk" >> packaging/deb/cdk/DEBIAN/control + echo "Version: ${{ env.VERSION }}" >> packaging/deb/cdk/DEBIAN/control + echo "Section: base" >> packaging/deb/cdk/DEBIAN/control + echo "Priority: optional" >> packaging/deb/cdk/DEBIAN/control + echo "Architecture: amd64" >> packaging/deb/cdk/DEBIAN/control + echo "Maintainer: devops@polygon.technology" >> packaging/deb/cdk/DEBIAN/control + echo "Description: cdk binary package" >> packaging/deb/cdk/DEBIAN/control + + - name: Creating package for binary for cdk ${{ env.ARCH }} + run: cp -rp packaging/deb/cdk packaging/deb/cdk-${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + + - name: Running package build + run: dpkg-deb --build --root-owner-group packaging/deb/cdk-${{ env.GIT_TAG }}-${{ env.ARCH }} + env: + ARCH: amd64 + + - name: Create checksum for the amd64 package + run: cd packaging/deb/ && sha256sum cdk-${{ env.GIT_TAG }}-${{ env.ARCH }}.deb > cdk-${{ env.GIT_TAG }}-${{ env.ARCH }}.deb.checksum + env: + ARCH: amd64 + + + - name: Release cdk Packages + uses: softprops/action-gh-release@v2 + with: + tag_name: ${{ env.GIT_TAG }} + prerelease: true + files: | + packaging/deb/cdk**.deb + packaging/deb/cdk**.deb.checksum diff --git a/.github/workflows/x86_rpm_packager.yml b/.github/workflows/x86_rpm_packager.yml new file mode 100644 index 00000000..9f06fb64 --- /dev/null +++ b/.github/workflows/x86_rpm_packager.yml @@ -0,0 +1,102 @@ +name: x86_rpm_packager + +on: + push: + branches: + - 'main' + paths: + - '**' + tags: + - 'v*.*.*' + - 
'v*.*.*-*' + +jobs: + build: + permissions: + id-token: write + contents: write + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Set up Go + uses: actions/setup-go@master + with: + go-version: 1.22.x + - name: Adding TAG to ENV + run: echo "GIT_TAG=`echo $(git describe --tags --abbrev=0)`" >> $GITHUB_ENV + + - name: Adding a TAG.1 to the env + run: echo "GIT_TAG1=`echo $(git describe --tags --abbrev=0)`" | sed 's/-/./g' >> $GITHUB_ENV + + - name: Download deps for project + run: go mod download + + - name: Building cdk-node for amd64 + run: make build + + - name: Building the cdk + run: | + export BUILD_SCRIPT_DISABLED=1 + cargo build --release --bin cdk + + - name: Installing some dependencies + run: sudo apt-get update && sudo apt-get install -y rpm + + - name: Setup rpm package for binary + run: | + mkdir -p packaging/rpm/SPECS + mkdir -p packaging/rpm/BUILD + mkdir -p packaging/rpm/RPMS + mkdir -p packaging/rpm/SRPMS + + touch packaging/rpm/SPECS/cdk.spec + echo "Name: cdk" >> packaging/rpm/SPECS/cdk.spec + echo "Version: ${{ env.GIT_TAG1 }}" >> packaging/rpm/SPECS/cdk.spec + echo "Release: 1%{?dist}" >> packaging/rpm/SPECS/cdk.spec + echo "License: GPL/AGPL" >> packaging/rpm/SPECS/cdk.spec + echo "BuildArch: x86_64" >> packaging/rpm/SPECS/cdk.spec + echo "Summary: cdk rpm package" >> packaging/rpm/SPECS/cdk.spec + + echo "%description" >> packaging/rpm/SPECS/cdk.spec + echo "cdk rpm package" >> packaging/rpm/SPECS/cdk.spec + + echo "%pre" >> packaging/rpm/SPECS/cdk.spec + echo "getent group cdk >/dev/null || groupadd -r cdk" >> packaging/rpm/SPECS/cdk.spec + echo "getent passwd cdk >/dev/null || useradd -s /bin/false -d /opt/cdk -r cdk -g cdk" >> packaging/rpm/SPECS/cdk.spec + + echo "%install" >> packaging/rpm/SPECS/cdk.spec + echo "mkdir -p %{buildroot}/usr/bin" >> packaging/rpm/SPECS/cdk.spec + echo "cp /home/runner/work/cdk/cdk/target/cdk-node %{buildroot}/usr/bin/cdk-node" >> 
packaging/rpm/SPECS/cdk.spec + echo "cp /home/runner/work/cdk/cdk/target/release/cdk %{buildroot}/usr/bin/cdk" >> packaging/rpm/SPECS/cdk.spec + + echo "%files" >> packaging/rpm/SPECS/cdk.spec + echo "/usr/bin/cdk" >> packaging/rpm/SPECS/cdk.spec + echo "/usr/bin/cdk-node" >> packaging/rpm/SPECS/cdk.spec + + + - name: Construct rpm package + run: | + rpmbuild --define "_topdir /home/runner/work/cdk/cdk/packaging/rpm_build" \ + --define "_builddir %{_topdir}/BUILD" \ + --define "_rpmdir %{_topdir}/RPMS" \ + --define "_srcrpmdir %{_topdir}/SRPMS" \ + --define "__spec_install_post /bin/true" \ + -bb packaging/rpm/SPECS/cdk.spec + + - name: Rename file for post rpm build and for checksum + run: mv /home/runner/work/cdk/cdk/packaging/rpm_build/RPMS/x86_64/cdk-${{ env.GIT_TAG1 }}-1.x86_64.rpm /home/runner/work/cdk/cdk/packaging/rpm_build/RPMS/x86_64/cdk-${{ env.GIT_TAG1 }}.x86_64.rpm + + - name: Checksum for the rpm package + run: sha256sum /home/runner/work/cdk/cdk/packaging/rpm_build/RPMS/x86_64/cdk-${{ env.GIT_TAG1 }}.x86_64.rpm > /home/runner/work/cdk/cdk/packaging/rpm_build/RPMS/x86_64/cdk-${{ env.GIT_TAG1 }}.x86_64.rpm.checksum + + - name: Release cdk Packages + uses: softprops/action-gh-release@v2 + with: + tag_name: ${{ env.GIT_TAG }} + prerelease: true + files: | + packaging/rpm_build/RPMS/x86_64/cdk-**.rpm + packaging/rpm_build/RPMS/x86_64/cdk-**.rpm.checksum diff --git a/.gitignore b/.gitignore index 958ed6ff..ce4e0058 100644 --- a/.gitignore +++ b/.gitignore @@ -8,4 +8,10 @@ target/ book/ index.html tmp -.vscode \ No newline at end of file +.vscode +coverage.out +coverage.html +.idea +.idea/* + +data diff --git a/Cargo.lock b/Cargo.lock index 3044ff93..b9956840 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -38,6 +38,18 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "ahash" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" +dependencies = 
[ + "cfg-if", + "once_cell", + "version_check", + "zerocopy", +] + [[package]] name = "aho-corasick" version = "1.1.3" @@ -47,6 +59,171 @@ dependencies = [ "memchr", ] +[[package]] +name = "alloy-json-rpc" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8fa8a1a3c4cbd221f2b8e3693aeb328fca79a757fe556ed08e47bbbc2a70db7" +dependencies = [ + "alloy-primitives", + "alloy-sol-types", + "serde", + "serde_json", + "thiserror", + "tracing", +] + +[[package]] +name = "alloy-primitives" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "260d3ff3bff0bb84599f032a2f2c6828180b0ea0cd41fdaf44f39cef3ba41861" +dependencies = [ + "alloy-rlp", + "bytes", + "cfg-if", + "const-hex", + "derive_more 1.0.0", + "hashbrown 0.14.5", + "hex-literal", + "indexmap 2.6.0", + "itoa", + "k256", + "keccak-asm", + "paste", + "proptest", + "rand", + "ruint", + "rustc-hash", + "serde", + "sha3", + "tiny-keccak", +] + +[[package]] +name = "alloy-rlp" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26154390b1d205a4a7ac7352aa2eb4f81f391399d4e2f546fb81a2f8bb383f62" +dependencies = [ + "arrayvec", + "bytes", +] + +[[package]] +name = "alloy-rpc-client" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "370143ed581aace6e663342d21d209c6b2e34ee6142f7d6675adb518deeaf0dc" +dependencies = [ + "alloy-json-rpc", + "alloy-primitives", + "alloy-transport", + "alloy-transport-http", + "futures", + "pin-project", + "reqwest 0.12.8", + "serde", + "serde_json", + "tokio", + "tokio-stream", + "tower 0.5.1", + "tracing", + "url", +] + +[[package]] +name = "alloy-sol-macro" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68e7f6e8fe5b443f82b3f1e15abfa191128f71569148428e49449d01f6f49e8b" +dependencies = [ + "alloy-sol-macro-expander", + "alloy-sol-macro-input", + "proc-macro-error2", + 
"proc-macro2", + "quote", + "syn 2.0.68", +] + +[[package]] +name = "alloy-sol-macro-expander" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b96ce28d2fde09abb6135f410c41fad670a3a770b6776869bd852f1df102e6f" +dependencies = [ + "alloy-sol-macro-input", + "const-hex", + "heck", + "indexmap 2.6.0", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.68", + "syn-solidity", + "tiny-keccak", +] + +[[package]] +name = "alloy-sol-macro-input" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "906746396a8296537745711630d9185746c0b50c033d5e9d18b0a6eba3d53f90" +dependencies = [ + "const-hex", + "dunce", + "heck", + "proc-macro2", + "quote", + "syn 2.0.68", + "syn-solidity", +] + +[[package]] +name = "alloy-sol-types" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d86a533ce22525969661b25dfe296c112d35eb6861f188fd284f8bd4bb3842ae" +dependencies = [ + "alloy-primitives", + "alloy-sol-macro", + "const-hex", +] + +[[package]] +name = "alloy-transport" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ac3e97dad3d31770db0fc89bd6a63b789fbae78963086733f960cf32c483904" +dependencies = [ + "alloy-json-rpc", + "base64 0.22.1", + "futures-util", + "futures-utils-wasm", + "serde", + "serde_json", + "thiserror", + "tokio", + "tower 0.5.1", + "tracing", + "url", +] + +[[package]] +name = "alloy-transport-http" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b367dcccada5b28987c2296717ee04b9a5637aacd78eacb1726ef211678b5212" +dependencies = [ + "alloy-json-rpc", + "alloy-transport", + "reqwest 0.12.8", + "serde_json", + "tower 0.5.1", + "tracing", + "url", +] + [[package]] name = "android-tzdata" version = "0.1.1" @@ -117,6 +294,130 @@ version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" +[[package]] +name = "ark-ff" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b3235cc41ee7a12aaaf2c575a2ad7b46713a8a50bda2fc3b003a04845c05dd6" +dependencies = [ + "ark-ff-asm 0.3.0", + "ark-ff-macros 0.3.0", + "ark-serialize 0.3.0", + "ark-std 0.3.0", + "derivative", + "num-bigint", + "num-traits", + "paste", + "rustc_version 0.3.3", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" +dependencies = [ + "ark-ff-asm 0.4.2", + "ark-ff-macros 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "digest 0.10.7", + "itertools 0.10.5", + "num-bigint", + "num-traits", + "paste", + "rustc_version 0.4.0", + "zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-asm" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" +dependencies = [ + "num-bigint", + "num-traits", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 1.0.109", +] 
+ +[[package]] +name = "ark-serialize" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6c2b318ee6e10f8c2853e73a83adc0ccb88995aa978d8a3408d492ab2ee671" +dependencies = [ + "ark-std 0.3.0", + "digest 0.9.0", +] + +[[package]] +name = "ark-serialize" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" +dependencies = [ + "ark-std 0.4.0", + "digest 0.10.7", + "num-bigint", +] + +[[package]] +name = "ark-std" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" +dependencies = [ + "num-traits", + "rand", +] + +[[package]] +name = "ark-std" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +dependencies = [ + "num-traits", + "rand", +] + [[package]] name = "arrayvec" version = "0.7.4" @@ -151,7 +452,7 @@ checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" dependencies = [ "futures", "pharos", - "rustc_version", + "rustc_version 0.4.0", ] [[package]] @@ -228,15 +529,6 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d86b93f97252c47b41663388e6d155714a9d0c398b99f1005cbc5f978b29f445" -[[package]] -name = "beef" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a8241f3ebb85c056b509d4327ad0358fbbba6ffb340bf388f26350aeda225b1" -dependencies = [ - "serde", -] - [[package]] name = "bit-set" version = "0.5.3" @@ -369,7 +661,7 @@ checksum = "2d886547e41f740c616ae73108f6eb70afe6d940c7bc697cb30f13daec073037" dependencies = [ "camino", "cargo-platform", - "semver", + "semver 1.0.23", "serde", "serde_json", "thiserror", @@ -390,14 +682,24 @@ dependencies = [ name = "cdk" version = 
"0.1.0" dependencies = [ + "alloy-json-rpc", + "alloy-rpc-client", + "alloy-transport-http", "anyhow", "cdk-config", "clap", + "colored", "dotenvy", "execute", + "reqwest 0.12.8", + "serde", + "serde_json", + "tempfile", + "tokio", "toml", "tracing", "tracing-subscriber", + "url", ] [[package]] @@ -439,7 +741,7 @@ dependencies = [ "iana-time-zone", "num-traits", "serde", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -500,7 +802,7 @@ checksum = "3b6be4a5df2098cd811f3194f64ddb96c267606bffd9689ac7b0160097b01ad3" dependencies = [ "bs58", "coins-core", - "digest", + "digest 0.10.7", "hmac", "k256", "serde", @@ -533,7 +835,7 @@ dependencies = [ "base64 0.21.7", "bech32", "bs58", - "digest", + "digest 0.10.7", "generic-array 0.14.7", "hex", "ripemd", @@ -550,6 +852,16 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" +[[package]] +name = "colored" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbf2150cce219b664a8a70df7a1f933836724b503f8a413af9365b4dcc4d90b8" +dependencies = [ + "lazy_static", + "windows-sys 0.48.0", +] + [[package]] name = "combine" version = "4.6.7" @@ -751,6 +1063,17 @@ dependencies = [ "serde", ] +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "derive_more" version = "0.99.18" @@ -762,6 +1085,36 @@ dependencies = [ "syn 2.0.68", ] +[[package]] +name = "derive_more" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" +dependencies = [ + "derive_more-impl", +] + +[[package]] +name = "derive_more-impl" +version = "1.0.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.68", + "unicode-xid", +] + +[[package]] +name = "digest" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +dependencies = [ + "generic-array 0.14.7", +] + [[package]] name = "digest" version = "0.10.7" @@ -835,7 +1188,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ "der", - "digest", + "digest 0.10.7", "elliptic-curve", "rfc6979", "signature", @@ -856,7 +1209,7 @@ checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" dependencies = [ "base16ct", "crypto-bigint", - "digest", + "digest 0.10.7", "ff", "generic-array 0.14.7", "group", @@ -927,7 +1280,7 @@ checksum = "1fda3bf123be441da5260717e0661c25a2fd9cb2b2c1d20bf2e05580047158ab" dependencies = [ "aes", "ctr", - "digest", + "digest 0.10.7", "hex", "hmac", "pbkdf2 0.11.0", @@ -1052,7 +1405,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "reqwest", + "reqwest 0.11.27", "serde", "serde_json", "syn 2.0.68", @@ -1114,8 +1467,8 @@ checksum = "e79e5973c26d4baf0ce55520bd732314328cabe53193286671b47144145b9649" dependencies = [ "chrono", "ethers-core", - "reqwest", - "semver", + "reqwest 0.11.27", + "semver 1.0.23", "serde", "serde_json", "thiserror", @@ -1139,7 +1492,7 @@ dependencies = [ "futures-locks", "futures-util", "instant", - "reqwest", + "reqwest 0.11.27", "serde", "serde_json", "thiserror", @@ -1171,7 +1524,7 @@ dependencies = [ "jsonwebtoken", "once_cell", "pin-project", - "reqwest", + "reqwest 0.11.27", "serde", "serde_json", "thiserror", @@ -1224,7 +1577,7 @@ dependencies = [ "path-slash", "rayon", "regex", - "semver", + "semver 1.0.23", "serde", 
"serde_json", "solang-parser", @@ -1290,6 +1643,17 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" +[[package]] +name = "fastrlp" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" +dependencies = [ + "arrayvec", + "auto_impl", + "bytes", +] + [[package]] name = "ff" version = "0.13.0" @@ -1334,6 +1698,21 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + [[package]] name = "form_urlencoded" version = "1.2.1" @@ -1468,6 +1847,12 @@ dependencies = [ "slab", ] +[[package]] +name = "futures-utils-wasm" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42012b0f064e01aa58b545fe3727f90f7dd4020f4a3ea735b50344965f5a57e9" + [[package]] name = "fxhash" version = "0.2.1" @@ -1522,15 +1907,15 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "gloo-net" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43aaa242d1239a8822c15c645f02166398da4f8b5c4bae795c1f5b44e9eee173" +checksum = "c06f627b1a58ca3d42b45d6104bf1e1a03799df472df00988b6ba21accc10580" dependencies = [ "futures-channel", "futures-core", "futures-sink", "gloo-utils", - 
"http 0.2.12", + "http 1.1.0", "js-sys", "pin-project", "serde", @@ -1589,7 +1974,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.2.6", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -1608,7 +1993,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.1.0", - "indexmap 2.2.6", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -1626,6 +2011,16 @@ name = "hashbrown" version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash", + "serde", +] + +[[package]] +name = "hashbrown" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" [[package]] name = "hashers" @@ -1653,6 +2048,15 @@ name = "hex" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +dependencies = [ + "serde", +] + +[[package]] +name = "hex-literal" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" [[package]] name = "hmac" @@ -1660,7 +2064,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest", + "digest 0.10.7", ] [[package]] @@ -1817,6 +2221,22 @@ dependencies = [ "tower-service", ] +[[package]] +name = "hyper-tls" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" +dependencies = [ + "bytes", + "http-body-util", + "hyper 1.4.1", + "hyper-util", + "native-tls", + "tokio", + "tokio-native-tls", + "tower-service", +] + [[package]] name = "hyper-util" 
version = "0.1.6" @@ -1832,7 +2252,7 @@ dependencies = [ "pin-project-lite", "socket2", "tokio", - "tower", + "tower 0.4.13", "tower-service", "tracing", ] @@ -1933,12 +2353,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.6" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" +checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown 0.14.5", + "hashbrown 0.15.0", "serde", ] @@ -1972,6 +2392,15 @@ version = "1.70.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + [[package]] name = "itertools" version = "0.11.0" @@ -2027,9 +2456,9 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.23.2" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b089779ad7f80768693755a031cc14a7766aba707cbe886674e3f79e9b7e47" +checksum = "126b48a5acc3c52fbd5381a77898cb60e145123179588a29e7ac48f9c06e401b" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -2045,9 +2474,9 @@ dependencies = [ [[package]] name = "jsonrpsee-client-transport" -version = "0.23.2" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08163edd8bcc466c33d79e10f695cdc98c00d1e6ddfb95cec41b6b0279dd5432" +checksum = "bf679a8e0e083c77997f7c4bb4ca826577105906027ae462aac70ff348d02c6a" dependencies = [ "base64 0.22.1", "futures-channel", @@ -2070,13 +2499,11 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.23.2" +version = "0.24.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "79712302e737d23ca0daa178e752c9334846b08321d439fd89af9a384f8c830b" +checksum = "b0e503369a76e195b65af35058add0e6900b794a4e9a9316900ddd3a87a80477" dependencies = [ - "anyhow", "async-trait", - "beef", "bytes", "futures-timer", "futures-util", @@ -2099,9 +2526,9 @@ dependencies = [ [[package]] name = "jsonrpsee-http-client" -version = "0.23.2" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d90064e04fb9d7282b1c71044ea94d0bbc6eff5621c66f1a0bce9e9de7cf3ac" +checksum = "f2c0caba4a6a8efbafeec9baa986aa22a75a96c29d3e4b0091b0098d6470efb5" dependencies = [ "async-trait", "base64 0.22.1", @@ -2117,16 +2544,16 @@ dependencies = [ "serde_json", "thiserror", "tokio", - "tower", + "tower 0.4.13", "tracing", "url", ] [[package]] name = "jsonrpsee-proc-macros" -version = "0.23.2" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7895f186d5921065d96e16bd795e5ca89ac8356ec423fafc6e3d7cf8ec11aee4" +checksum = "fc660a9389e2748e794a40673a4155d501f32db667757cdb80edeff0306b489b" dependencies = [ "heck", "proc-macro-crate", @@ -2137,11 +2564,10 @@ dependencies = [ [[package]] name = "jsonrpsee-server" -version = "0.23.2" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "654afab2e92e5d88ebd8a39d6074483f3f2bfdf91c5ac57fe285e7127cdd4f51" +checksum = "af6e6c9b6d975edcb443565d648b605f3e85a04ec63aa6941811a8894cc9cded" dependencies = [ - "anyhow", "futures-util", "http 1.1.0", "http-body 1.0.0", @@ -2159,17 +2585,16 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util", - "tower", + "tower 0.4.13", "tracing", ] [[package]] name = "jsonrpsee-types" -version = "0.23.2" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9c465fbe385238e861fdc4d1c85e04ada6c1fd246161d26385c1b311724d2af" +checksum = 
"d8fb16314327cbc94fdf7965ef7e4422509cd5597f76d137bd104eb34aeede67" dependencies = [ - "beef", "http 1.1.0", "serde", "serde_json", @@ -2178,9 +2603,9 @@ dependencies = [ [[package]] name = "jsonrpsee-wasm-client" -version = "0.23.2" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4727ac037f834c6f04c0912cada7532dbddb54e92fbc64e33d6cb8c24af313c9" +checksum = "e0da62b43702bd5640ea305d35df95da30abc878e79a7b4b01feda3beaf35d3c" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -2189,9 +2614,9 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.23.2" +version = "0.24.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c28759775f5cb2f1ea9667672d3fe2b0e701d1f4b7b67954e60afe7fd058b5e" +checksum = "39aabf5d6c6f22da8d5b808eea1fab0736059f11fb42f71f141b14f404e5046a" dependencies = [ "http 1.1.0", "jsonrpsee-client-transport", @@ -2237,6 +2662,16 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "keccak-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "505d1856a39b200489082f90d897c3f07c455563880bc5952e38eabf731c83b6" +dependencies = [ + "digest 0.10.7", + "sha3-asm", +] + [[package]] name = "lalrpop" version = "0.20.2" @@ -2246,7 +2681,7 @@ dependencies = [ "ascii-canvas", "bit-set", "ena", - "itertools", + "itertools 0.11.0", "lalrpop-util", "petgraph", "regex", @@ -2333,7 +2768,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" dependencies = [ "cfg-if", - "digest", + "digest 0.10.7", ] [[package]] @@ -2359,13 +2794,31 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.11" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" +checksum = 
"80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" dependencies = [ + "hermit-abi", "libc", "wasi", - "windows-sys 0.48.0", + "windows-sys 0.52.0", +] + +[[package]] +name = "native-tls" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" +dependencies = [ + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", ] [[package]] @@ -2490,12 +2943,50 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "openssl" +version = "0.10.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" +dependencies = [ + "bitflags 2.6.0", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.68", +] + [[package]] name = "openssl-probe" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +[[package]] +name = "openssl-sys" +version = "0.9.103" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + [[package]] name = "option-ext" version = "0.2.0" @@ -2554,7 +3045,7 @@ dependencies = [ "libc", "redox_syscall", "smallvec", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -2568,6 +3059,12 @@ dependencies = [ "subtle", ] +[[package]] +name = "paste" +version = "1.0.15" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + [[package]] name = "path-slash" version = "0.2.1" @@ -2580,7 +3077,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" dependencies = [ - "digest", + "digest 0.10.7", "hmac", "password-hash", "sha2", @@ -2592,7 +3089,7 @@ version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" dependencies = [ - "digest", + "digest 0.10.7", "hmac", ] @@ -2611,6 +3108,17 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +[[package]] +name = "pest" +version = "2.7.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdbef9d1d47087a895abd220ed25eb4ad973a5e26f6a4367b038c25e28dfc2d9" +dependencies = [ + "memchr", + "thiserror", + "ucd-trie", +] + [[package]] name = "petgraph" version = "0.6.5" @@ -2618,7 +3126,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.2.6", + "indexmap 2.6.0", ] [[package]] @@ -2628,7 +3136,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" dependencies = [ "futures", - "rustc_version", + "rustc_version 0.4.0", ] [[package]] @@ -2781,6 +3289,28 @@ dependencies = [ "toml_edit 0.21.1", ] +[[package]] +name = "proc-macro-error-attr2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + 
"quote", +] + +[[package]] +name = "proc-macro-error2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn 2.0.68", +] + [[package]] name = "proc-macro2" version = "1.0.86" @@ -2796,6 +3326,8 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d" dependencies = [ + "bit-set", + "bit-vec", "bitflags 2.6.0", "lazy_static", "num-traits", @@ -2803,9 +3335,17 @@ dependencies = [ "rand_chacha", "rand_xorshift", "regex-syntax 0.8.4", + "rusty-fork", + "tempfile", "unarray", ] +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + [[package]] name = "quote" version = "1.0.36" @@ -2830,6 +3370,7 @@ dependencies = [ "libc", "rand_chacha", "rand_core", + "serde", ] [[package]] @@ -2972,8 +3513,8 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "sync_wrapper", - "system-configuration", + "sync_wrapper 0.1.2", + "system-configuration 0.5.1", "tokio", "tokio-rustls 0.24.1", "tower-service", @@ -2985,6 +3526,49 @@ dependencies = [ "winreg", ] +[[package]] +name = "reqwest" +version = "0.12.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b" +dependencies = [ + "base64 0.22.1", + "bytes", + "encoding_rs", + "futures-core", + "futures-util", + "h2 0.4.5", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.4.1", + "hyper-rustls 0.27.2", + "hyper-tls", + "hyper-util", + "ipnet", + "js-sys", + "log", + "mime", + "native-tls", + "once_cell", + "percent-encoding", + "pin-project-lite", + "rustls-pemfile 2.1.2", + 
"serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper 1.0.1", + "system-configuration 0.6.1", + "tokio", + "tokio-native-tls", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "windows-registry", +] + [[package]] name = "rfc6979" version = "0.4.0" @@ -3031,7 +3615,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" dependencies = [ - "digest", + "digest 0.10.7", ] [[package]] @@ -3062,6 +3646,36 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "afab94fb28594581f62d981211a9a4d53cc8130bbcbbb89a0440d9b8e81a7746" +[[package]] +name = "ruint" +version = "1.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c3cc4c2511671f327125da14133d0c5c5d137f006a1017a16f557bc85b16286" +dependencies = [ + "alloy-rlp", + "ark-ff 0.3.0", + "ark-ff 0.4.2", + "bytes", + "fastrlp", + "num-bigint", + "num-traits", + "parity-scale-codec", + "primitive-types", + "proptest", + "rand", + "rlp", + "ruint-macro", + "serde", + "valuable", + "zeroize", +] + +[[package]] +name = "ruint-macro" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48fd7bd8a6377e15ad9d42a8ec25371b94ddc67abe7c8b9127bec79bebaaae18" + [[package]] name = "rustc-demangle" version = "0.1.24" @@ -3070,9 +3684,9 @@ checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustc-hash" -version = "1.1.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" [[package]] name = "rustc-hex" @@ -3080,13 +3694,22 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" +[[package]] +name = "rustc_version" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" +dependencies = [ + "semver 0.11.0", +] + [[package]] name = "rustc_version" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver", + "semver 1.0.23", ] [[package]] @@ -3221,6 +3844,18 @@ version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" +[[package]] +name = "rusty-fork" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +dependencies = [ + "fnv", + "quick-error", + "tempfile", + "wait-timeout", +] + [[package]] name = "ryu" version = "1.0.18" @@ -3252,7 +3887,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eca070c12893629e2cc820a9761bedf6ce1dcddc9852984d1dc734b8bd9bd024" dependencies = [ "cfg-if", - "derive_more", + "derive_more 0.99.18", "parity-scale-codec", "scale-info-derive", ] @@ -3344,6 +3979,15 @@ dependencies = [ "libc", ] +[[package]] +name = "semver" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" +dependencies = [ + "semver-parser", +] + [[package]] name = "semver" version = "1.0.23" @@ -3353,6 +3997,15 @@ dependencies = [ "serde", ] +[[package]] +name = "semver-parser" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" +dependencies = [ + "pest", +] + [[package]] name = 
"send_wrapper" version = "0.4.0" @@ -3367,18 +4020,18 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "serde" -version = "1.0.204" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.204" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", @@ -3387,20 +4040,21 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.120" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e0d21c9a8cae1235ad58a00c11cb40d4b1e5c784f1ef2c537876ed6ffd8b7c5" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "itoa", + "memchr", "ryu", "serde", ] [[package]] name = "serde_spanned" -version = "0.6.6" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79e674e01f999af37c49f70a6ede167a8a60b2503e56c5599532a65baa5969a0" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" dependencies = [ "serde", ] @@ -3419,15 +4073,15 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.8.3" +version = "3.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e73139bc5ec2d45e6c5fd85be5a46949c1c39a4c18e56915f5eb4c12f975e377" +checksum = "9720086b3357bcb44fce40117d769a4d068c70ecfa190850a980a71755f66fcc" dependencies = [ "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.2.6", + "indexmap 2.6.0", "serde", "serde_derive", 
"serde_json", @@ -3437,9 +4091,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.8.3" +version = "3.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b80d3d6b56b64335c0180e5ffde23b3c5e08c14c585b51a15bd0e95393f46703" +checksum = "5f1abbfe725f27678f4663bcacb75a83e829fd464c25d78dd038a3a29e307cec" dependencies = [ "darling", "proc-macro2", @@ -3455,7 +4109,7 @@ checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ "cfg-if", "cpufeatures", - "digest", + "digest 0.10.7", ] [[package]] @@ -3466,7 +4120,7 @@ checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if", "cpufeatures", - "digest", + "digest 0.10.7", ] [[package]] @@ -3475,10 +4129,20 @@ version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" dependencies = [ - "digest", + "digest 0.10.7", "keccak", ] +[[package]] +name = "sha3-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" +dependencies = [ + "cc", + "cfg-if", +] + [[package]] name = "sharded-slab" version = "0.1.7" @@ -3494,7 +4158,7 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ - "digest", + "digest 0.10.7", "rand_core", ] @@ -3563,7 +4227,7 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c425ce1c59f4b154717592f0bdf4715c3a1d55058883622d3157e1f0908a5b26" dependencies = [ - "itertools", + "itertools 0.11.0", "lalrpop", "lalrpop-util", "phf", @@ -3656,8 +4320,8 @@ dependencies = [ "fs2", "hex", "once_cell", - "reqwest", - "semver", + "reqwest 0.11.27", + "semver 1.0.23", "serde", "serde_json", "sha2", @@ -3688,12 
+4352,33 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn-solidity" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ab661c8148c2261222a4d641ad5477fd4bea79406a99056096a0b41b35617a5" +dependencies = [ + "paste", + "proc-macro2", + "quote", + "syn 2.0.68", +] + [[package]] name = "sync_wrapper" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +[[package]] +name = "sync_wrapper" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +dependencies = [ + "futures-core", +] + [[package]] name = "system-configuration" version = "0.5.1" @@ -3702,7 +4387,18 @@ checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ "bitflags 1.3.2", "core-foundation", - "system-configuration-sys", + "system-configuration-sys 0.5.0", +] + +[[package]] +name = "system-configuration" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +dependencies = [ + "bitflags 2.6.0", + "core-foundation", + "system-configuration-sys 0.6.0", ] [[package]] @@ -3715,6 +4411,16 @@ dependencies = [ "libc", ] +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "tap" version = "1.0.1" @@ -3723,14 +4429,15 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.10.1" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" +checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" dependencies = [ "cfg-if", "fastrand", + "once_cell", "rustix", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -3746,18 +4453,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.62" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2675633b1499176c2dff06b0856a27976a8f9d436737b4cf4f312d4d91d8bbb" +checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.62" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d20468752b09f49e909e55a5d338caa8bedf615594e9d80bc4c565d30faf798c" +checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", @@ -3831,32 +4538,41 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.38.0" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a" +checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" dependencies = [ "backtrace", "bytes", "libc", "mio", - "num_cpus", "pin-project-lite", "socket2", "tokio-macros", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "tokio-macros" -version = "2.3.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" +checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", "syn 2.0.68", ] +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.24.1" @@ -3921,21 +4637,21 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.14" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f49eb2ab21d2f26bd6db7bf383edc527a7ebaee412d17af4d40fdccd442f335" +checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.15", + "toml_edit 0.22.22", ] [[package]] name = "toml_datetime" -version = "0.6.6" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" +checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" dependencies = [ "serde", ] @@ -3946,22 +4662,22 @@ version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.2.6", + "indexmap 2.6.0", "toml_datetime", "winnow 0.5.40", ] [[package]] name = "toml_edit" -version = "0.22.15" +version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d59a3a72298453f564e2b111fa896f8d07fabb36f51f06d7e875fc5e0b5a3ef1" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.2.6", + "indexmap 2.6.0", "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.13", + "winnow 0.6.20", ] [[package]] @@ -3980,17 +4696,31 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +dependencies = [ + 
"futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper 0.1.2", + "tower-layer", + "tower-service", +] + [[package]] name = "tower-layer" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" [[package]] name = "tower-service" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" @@ -4121,6 +4851,12 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +[[package]] +name = "ucd-trie" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" + [[package]] name = "uint" version = "0.9.5" @@ -4218,12 +4954,27 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + [[package]] name = "version_check" version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "wait-timeout" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" +dependencies = [ + "libc", +] + [[package]] name = 
"walkdir" version = "2.5.0" @@ -4377,7 +5128,37 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.5", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-registry" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +dependencies = [ + "windows-result", + "windows-strings", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result", + "windows-targets 0.52.6", ] [[package]] @@ -4395,7 +5176,16 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.5", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", ] [[package]] @@ -4415,18 +5205,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies 
= [ - "windows_aarch64_gnullvm 0.52.5", - "windows_aarch64_msvc 0.52.5", - "windows_i686_gnu 0.52.5", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", "windows_i686_gnullvm", - "windows_i686_msvc 0.52.5", - "windows_x86_64_gnu 0.52.5", - "windows_x86_64_gnullvm 0.52.5", - "windows_x86_64_msvc 0.52.5", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", ] [[package]] @@ -4437,9 +5227,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" @@ -4449,9 +5239,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" @@ -4461,15 +5251,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" @@ -4479,9 +5269,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" @@ -4491,9 +5281,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" @@ -4503,9 +5293,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" @@ -4515,9 +5305,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" @@ -4530,9 +5320,9 @@ dependencies = [ 
[[package]] name = "winnow" -version = "0.6.13" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59b5e5f6c299a3c7890b876a2a587f3115162487e704907d9b6cd29473052ba1" +checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" dependencies = [ "memchr", ] @@ -4558,7 +5348,7 @@ dependencies = [ "js-sys", "log", "pharos", - "rustc_version", + "rustc_version 0.4.0", "send_wrapper 0.6.0", "thiserror", "wasm-bindgen", @@ -4581,11 +5371,45 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" +[[package]] +name = "zerocopy" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.68", +] + [[package]] name = "zeroize" version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.68", +] [[package]] name = "zip" diff --git a/Cargo.toml b/Cargo.toml index 0bb41677..debf9da1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,15 +8,15 @@ version = "0.1.0" edition = "2021" [workspace.dependencies] -serde = { version = "1.0.197", features = ["derive"] } -serde_json = "1.0.116" -serde_with = "3.7.0" -thiserror = 
"1.0.58" -toml = "0.8.12" +serde = { version = "1.0.210", features = ["derive"] } +serde_json = "1.0.128" +serde_with = "3.10.0" +thiserror = "1.0.64" +toml = "0.8.19" tracing = "0.1.40" tracing-appender = "0.2.3" tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } url = { version = "2.5.2", features = ["serde"] } ethers = "2.0.14" -jsonrpsee = { version = "0.23.2", features = ["full"] } +jsonrpsee = { version = "0.24.5", features = ["full"] } diff --git a/Dockerfile b/Dockerfile index f2970447..ac5e759b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,11 +9,12 @@ RUN go mod download # BUILD BINARY COPY . . -RUN make build +RUN make build-go # BUILD RUST BIN -FROM --platform=${BUILDPLATFORM} rust:slim-bullseye AS chef +FROM --platform=${BUILDPLATFORM} rust:slim-bookworm AS chef USER root +RUN apt-get update && apt-get install -y openssl pkg-config libssl-dev RUN cargo install cargo-chef WORKDIR /app @@ -41,7 +42,7 @@ RUN cargo build --release --bin cdk # CONTAINER FOR RUNNING BINARY FROM --platform=${BUILDPLATFORM} debian:bookworm-slim -RUN apt-get update && apt-get install -y ca-certificates postgresql-client +RUN apt-get update && apt-get install -y ca-certificates postgresql-client libssl-dev && rm -rf /var/lib/apt/lists/* COPY --from=builder /app/target/release/cdk /usr/local/bin/ COPY --from=build /go/src/github.com/0xPolygon/cdk/target/cdk-node /usr/local/bin/ diff --git a/Makefile b/Makefile index c653ec1e..2adb0c40 100644 --- a/Makefile +++ b/Makefile @@ -56,7 +56,14 @@ install-linter: check-go check-curl generate-code-from-proto: check-protoc .PHONY: build -build: ## Builds the binary locally into ./dist +build: build-rust build-go ## Builds the binaries locally into ./target + +.PHONY: build-rust +build-rust: + export BUILD_SCRIPT_DISABLED=1 && cargo build --release + +.PHONY: build-go +build-go: $(GOENVVARS) go build -ldflags "all=$(LDFLAGS)" -o $(GOBIN)/$(GOBINARY) $(GOCMD) .PHONY: build-docker diff --git a/README.md b/README.md index 
23a4e4e8..9c9480bd 100644 --- a/README.md +++ b/README.md @@ -31,6 +31,30 @@ Setup Kurtosis following this instructions https://github.com/0xPolygon/kurtosis - You can run locally against kurtosis-cdk environment using: [docs/local_debug.md](docs/local_debug.md) +### Build locally + +You can locally build a production release of CDK CLI + cdk-node with: + +``` +make build +``` + +### Run locally + +You can build and run a debug release locally using: + +``` +cargo run +``` + +It will build and run both binaries. +### Running with Kurtosis + +1. Run your kurtosis environment +2. build `cdk-erigon` and make it available in your system's PATH +3. Run `scripts/local_config` +4. cargo run -- --config ./tmp/cdk/local_config/test.kurtosis.toml --chain ./tmp/cdk/local_config/genesis.json erigon + ## Contributing Contributions are very welcomed, the guidelines are currently not available (WIP) diff --git a/aggoracle/chaingersender/evm.go b/aggoracle/chaingersender/evm.go index ee02771e..3659eb3f 100644 --- a/aggoracle/chaingersender/evm.go +++ b/aggoracle/chaingersender/evm.go @@ -9,7 +9,8 @@ import ( "github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/pessimisticglobalexitroot" cfgTypes "github.com/0xPolygon/cdk/config/types" "github.com/0xPolygon/cdk/log" - "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" + "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" + ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" @@ -26,12 +27,11 @@ type EthClienter interface { type EthTxManager interface { Remove(ctx context.Context, id common.Hash) error ResultsByStatus(ctx context.Context, - statuses []ethtxmanager.MonitoredTxStatus, - ) ([]ethtxmanager.MonitoredTxResult, error) - Result(ctx context.Context, id common.Hash) (ethtxmanager.MonitoredTxResult, error) + statuses []ethtxtypes.MonitoredTxStatus, + ) 
([]ethtxtypes.MonitoredTxResult, error) + Result(ctx context.Context, id common.Hash) (ethtxtypes.MonitoredTxResult, error) Add(ctx context.Context, to *common.Address, - forcedNonce *uint64, value *big.Int, data []byte, gasOffset uint64, @@ -103,7 +103,7 @@ func (c *EVMChainGERSender) UpdateGERWaitUntilMined(ctx context.Context, ger com if err != nil { return err } - id, err := c.ethTxMan.Add(ctx, &c.gerAddr, nil, big.NewInt(0), data, c.gasOffset, nil) + id, err := c.ethTxMan.Add(ctx, &c.gerAddr, big.NewInt(0), data, c.gasOffset, nil) if err != nil { return err } @@ -115,14 +115,14 @@ func (c *EVMChainGERSender) UpdateGERWaitUntilMined(ctx context.Context, ger com c.logger.Error("error calling ethTxMan.Result: ", err) } switch res.Status { - case ethtxmanager.MonitoredTxStatusCreated, - ethtxmanager.MonitoredTxStatusSent: + case ethtxtypes.MonitoredTxStatusCreated, + ethtxtypes.MonitoredTxStatusSent: continue - case ethtxmanager.MonitoredTxStatusFailed: + case ethtxtypes.MonitoredTxStatusFailed: return fmt.Errorf("tx %s failed", res.ID) - case ethtxmanager.MonitoredTxStatusMined, - ethtxmanager.MonitoredTxStatusSafe, - ethtxmanager.MonitoredTxStatusFinalized: + case ethtxtypes.MonitoredTxStatusMined, + ethtxtypes.MonitoredTxStatusSafe, + ethtxtypes.MonitoredTxStatusFinalized: return nil default: c.logger.Error("unexpected tx status: ", res.Status) diff --git a/aggoracle/e2e_test.go b/aggoracle/e2e_test.go index 25a8a96d..b1506032 100644 --- a/aggoracle/e2e_test.go +++ b/aggoracle/e2e_test.go @@ -8,7 +8,7 @@ import ( gerContractL1 "github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/globalexitrootnopush0" "github.com/0xPolygon/cdk/aggoracle" - "github.com/0xPolygon/cdk/test/helpers" + "github.com/0xPolygon/cdk/test/aggoraclehelpers" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethclient/simulated" @@ -16,7 +16,7 @@ import ( ) func TestEVM(t *testing.T) { - env := 
helpers.SetupAggoracleWithEVMChain(t) + env := aggoraclehelpers.SetupAggoracleWithEVMChain(t) runTest(t, env.GERL1Contract, env.AggOracleSender, env.L1Client, env.AuthL1) } diff --git a/aggoracle/oracle.go b/aggoracle/oracle.go index 1ba94d7a..874f7ada 100644 --- a/aggoracle/oracle.go +++ b/aggoracle/oracle.go @@ -6,6 +6,7 @@ import ( "math/big" "time" + "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/l1infotreesync" "github.com/0xPolygon/cdk/log" @@ -70,7 +71,7 @@ func (a *AggOracle) Start(ctx context.Context) { case errors.Is(err, l1infotreesync.ErrBlockNotProcessed): a.logger.Debugf("syncer is not ready for the block %d", blockNumToFetch) - case errors.Is(err, l1infotreesync.ErrNotFound): + case errors.Is(err, db.ErrNotFound): blockNumToFetch = 0 a.logger.Debugf("syncer has not found any GER until block %d", blockNumToFetch) diff --git a/aggregator/agglayer_client.go b/aggregator/agglayer/agglayer_client.go similarity index 87% rename from aggregator/agglayer_client.go rename to aggregator/agglayer/agglayer_client.go index 4726ccc1..a5222571 100644 --- a/aggregator/agglayer_client.go +++ b/aggregator/agglayer/agglayer_client.go @@ -1,4 +1,4 @@ -package aggregator +package agglayer import ( "context" @@ -13,6 +13,10 @@ import ( "github.com/ethereum/go-ethereum/common" ) +const errCodeAgglayerRateLimitExceeded int = -10007 + +var ErrAgglayerRateLimitExceeded = fmt.Errorf("agglayer rate limit exceeded") + // AgglayerClientInterface is the interface that defines the methods that the AggLayerClient will implement type AgglayerClientInterface interface { SendTx(signedTx SignedTx) (common.Hash, error) @@ -39,6 +43,9 @@ func (c *AggLayerClient) SendTx(signedTx SignedTx) (common.Hash, error) { } if response.Error != nil { + if response.Error.Code == errCodeAgglayerRateLimitExceeded { + return common.Hash{}, ErrAgglayerRateLimitExceeded + } return common.Hash{}, fmt.Errorf("%v %v", response.Error.Code, response.Error.Message) } 
diff --git a/aggregator/agglayer_tx.go b/aggregator/agglayer/agglayer_tx.go similarity index 98% rename from aggregator/agglayer_tx.go rename to aggregator/agglayer/agglayer_tx.go index 30a483ae..f024f570 100644 --- a/aggregator/agglayer_tx.go +++ b/aggregator/agglayer/agglayer_tx.go @@ -1,4 +1,4 @@ -package aggregator +package agglayer import ( "crypto/ecdsa" diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index a97d72f9..7106b615 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -16,6 +16,7 @@ import ( "github.com/0xPolygon/cdk-rpc/rpc" cdkTypes "github.com/0xPolygon/cdk-rpc/types" + "github.com/0xPolygon/cdk/aggregator/agglayer" ethmanTypes "github.com/0xPolygon/cdk/aggregator/ethmantypes" "github.com/0xPolygon/cdk/aggregator/prover" cdkcommon "github.com/0xPolygon/cdk/common" @@ -24,13 +25,15 @@ import ( "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/state" "github.com/0xPolygon/cdk/state/datastream" + "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" + ethtxlog "github.com/0xPolygon/zkevm-ethtx-manager/log" + ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" streamlog "github.com/0xPolygonHermez/zkevm-data-streamer/log" - "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" - ethtxlog "github.com/0xPolygonHermez/zkevm-ethtx-manager/log" synclog "github.com/0xPolygonHermez/zkevm-synchronizer-l1/log" "github.com/0xPolygonHermez/zkevm-synchronizer-l1/state/entities" "github.com/0xPolygonHermez/zkevm-synchronizer-l1/synchronizer" + "github.com/0xPolygonHermez/zkevm-synchronizer-l1/synchronizer/l1_check_block" "github.com/ethereum/go-ethereum/common" "go.uber.org/zap/zapcore" "google.golang.org/grpc" @@ -64,10 +67,10 @@ type Aggregator struct { cfg Config logger *log.Logger - state stateInterface - etherman etherman - ethTxManager *ethtxmanager.Client - streamClient *datastreamer.StreamClient + state StateInterface + etherman 
Etherman + ethTxManager EthTxManagerClient + streamClient StreamClient l1Syncr synchronizer.Synchronizer halted atomic.Bool @@ -95,7 +98,7 @@ type Aggregator struct { exit context.CancelFunc sequencerPrivateKey *ecdsa.PrivateKey - aggLayerClient AgglayerClientInterface + aggLayerClient agglayer.AgglayerClientInterface } // New creates a new aggregator. @@ -103,8 +106,8 @@ func New( ctx context.Context, cfg Config, logger *log.Logger, - stateInterface stateInterface, - etherman etherman) (*Aggregator, error) { + stateInterface StateInterface, + etherman Etherman) (*Aggregator, error) { var profitabilityChecker aggregatorTxProfitabilityChecker switch cfg.TxProfitabilityCheckerType { @@ -165,12 +168,12 @@ func New( } var ( - aggLayerClient AgglayerClientInterface + aggLayerClient agglayer.AgglayerClientInterface sequencerPrivateKey *ecdsa.PrivateKey ) if !cfg.SyncModeOnlyEnabled && cfg.SettlementBackend == AggLayer { - aggLayerClient = NewAggLayerClient(cfg.AggLayerURL) + aggLayerClient = agglayer.NewAggLayerClient(cfg.AggLayerURL) sequencerPrivateKey, err = newKeyFromKeystore(cfg.SequencerPrivateKey) if err != nil { @@ -422,6 +425,12 @@ func (a *Aggregator) handleReceivedDataStream( switch entry.Type { case datastreamer.EntryType(datastream.EntryType_ENTRY_TYPE_BATCH_START): + // Check currentStreamBatchRaw is empty as sanity check + if len(a.currentStreamBatchRaw.Blocks) > 0 { + a.logger.Errorf("currentStreamBatchRaw should be empty, "+ + "but it contains %v blocks", len(a.currentStreamBatchRaw.Blocks)) + a.resetCurrentBatchData() + } batch := &datastream.BatchStart{} err := proto.Unmarshal(entry.Data, batch) if err != nil { @@ -919,10 +928,11 @@ func (a *Aggregator) settleWithAggLayer( inputs ethmanTypes.FinalProofInputs) bool { proofStrNo0x := strings.TrimPrefix(inputs.FinalProof.Proof, "0x") proofBytes := common.Hex2Bytes(proofStrNo0x) - tx := Tx{ + + tx := agglayer.Tx{ LastVerifiedBatch: cdkTypes.ArgUint64(proof.BatchNumber - 1), NewVerifiedBatch: 
cdkTypes.ArgUint64(proof.BatchNumberFinal), - ZKP: ZKP{ + ZKP: agglayer.ZKP{ NewStateRoot: common.BytesToHash(inputs.NewStateRoot), NewLocalExitRoot: common.BytesToHash(inputs.NewLocalExitRoot), Proof: cdkTypes.ArgBytes(proofBytes), @@ -941,9 +951,12 @@ func (a *Aggregator) settleWithAggLayer( a.logger.Debug("final proof signedTx: ", signedTx.Tx.ZKP.Proof.Hex()) txHash, err := a.aggLayerClient.SendTx(*signedTx) if err != nil { - a.logger.Errorf("failed to send tx to the agglayer: %v", err) + if errors.Is(err, agglayer.ErrAgglayerRateLimitExceeded) { + a.logger.Errorf("%s. Config param VerifyProofInterval should match the agglayer configured rate limit.", err) + } else { + a.logger.Errorf("failed to send tx to the agglayer: %v", err) + } a.handleFailureToAddVerifyBatchToBeMonitored(ctx, proof) - return false } @@ -978,7 +991,7 @@ func (a *Aggregator) settleDirect( return false } - monitoredTxID, err := a.ethTxManager.Add(ctx, to, nil, big.NewInt(0), data, a.cfg.GasOffset, nil) + monitoredTxID, err := a.ethTxManager.Add(ctx, to, big.NewInt(0), data, a.cfg.GasOffset, nil) if err != nil { a.logger.Errorf("Error Adding TX to ethTxManager: %v", err) mTxLogger := ethtxmanager.CreateLogger(monitoredTxID, sender, to) @@ -989,8 +1002,8 @@ func (a *Aggregator) settleDirect( } // process monitored batch verifications before starting a next cycle - a.ethTxManager.ProcessPendingMonitoredTxs(ctx, func(result ethtxmanager.MonitoredTxResult) { - a.handleMonitoredTxResult(result) + a.ethTxManager.ProcessPendingMonitoredTxs(ctx, func(result ethtxtypes.MonitoredTxResult) { + a.handleMonitoredTxResult(result, proof.BatchNumber, proof.BatchNumberFinal) }) return true @@ -1011,7 +1024,7 @@ func (a *Aggregator) handleFailureToAddVerifyBatchToBeMonitored(ctx context.Cont // buildFinalProof builds and return the final proof for an aggregated/batch proof. 
func (a *Aggregator) buildFinalProof( - ctx context.Context, prover proverInterface, proof *state.Proof) (*prover.FinalProof, error) { + ctx context.Context, prover ProverInterface, proof *state.Proof) (*prover.FinalProof, error) { tmpLogger := a.logger.WithFields( "prover", prover.Name(), "proverId", prover.ID(), @@ -1057,7 +1070,7 @@ func (a *Aggregator) buildFinalProof( // build the final proof. If no proof is provided it looks for a previously // generated proof. If the proof is eligible, then the final proof generation // is triggered. -func (a *Aggregator) tryBuildFinalProof(ctx context.Context, prover proverInterface, proof *state.Proof) (bool, error) { +func (a *Aggregator) tryBuildFinalProof(ctx context.Context, prover ProverInterface, proof *state.Proof) (bool, error) { proverName := prover.Name() proverID := prover.ID() @@ -1243,7 +1256,7 @@ func (a *Aggregator) unlockProofsToAggregate(ctx context.Context, proof1 *state. } func (a *Aggregator) getAndLockProofsToAggregate( - ctx context.Context, prover proverInterface) (*state.Proof, *state.Proof, error) { + ctx context.Context, prover ProverInterface) (*state.Proof, *state.Proof, error) { tmpLogger := a.logger.WithFields( "prover", prover.Name(), "proverId", prover.ID(), @@ -1291,7 +1304,7 @@ func (a *Aggregator) getAndLockProofsToAggregate( return proof1, proof2, nil } -func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover proverInterface) (bool, error) { +func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover ProverInterface) (bool, error) { proverName := prover.Name() proverID := prover.ID() @@ -1456,7 +1469,7 @@ func (a *Aggregator) getVerifiedBatchAccInputHash(ctx context.Context, batchNumb } func (a *Aggregator) getAndLockBatchToProve( - ctx context.Context, prover proverInterface, + ctx context.Context, prover ProverInterface, ) (*state.Batch, []byte, *state.Proof, error) { proverID := prover.ID() proverName := prover.Name() @@ -1572,7 +1585,7 @@ func (a *Aggregator) 
getAndLockBatchToProve( return &dbBatch.Batch, dbBatch.Witness, proof, nil } -func (a *Aggregator) tryGenerateBatchProof(ctx context.Context, prover proverInterface) (bool, error) { +func (a *Aggregator) tryGenerateBatchProof(ctx context.Context, prover ProverInterface) (bool, error) { tmpLogger := a.logger.WithFields( "prover", prover.Name(), "proverId", prover.ID(), @@ -1875,13 +1888,7 @@ func (a *Aggregator) getWitness(batchNumber uint64, url string, fullWitness bool return nil, err } - witnessString := strings.TrimLeft(witness, "0x") - if len(witnessString)%2 != 0 { - witnessString = "0" + witnessString - } - bytes := common.Hex2Bytes(witnessString) - - return bytes, nil + return common.FromHex(witness), nil } func printInputProver(logger *log.Logger, inputProver *prover.StatelessInputProver) { @@ -1934,57 +1941,39 @@ func (hc *healthChecker) Watch(req *grpchealth.HealthCheckRequest, server grpche }) } -func (a *Aggregator) handleMonitoredTxResult(result ethtxmanager.MonitoredTxResult) { +func (a *Aggregator) handleMonitoredTxResult(result ethtxtypes.MonitoredTxResult, firstBatch, lastBatch uint64) { mTxResultLogger := ethtxmanager.CreateMonitoredTxResultLogger(result) - if result.Status == ethtxmanager.MonitoredTxStatusFailed { + if result.Status == ethtxtypes.MonitoredTxStatusFailed { mTxResultLogger.Fatal("failed to send batch verification, TODO: review this fatal and define what to do in this case") } - // TODO: REVIEW THIS - - /* - // monitoredIDFormat: "proof-from-%v-to-%v" - idSlice := strings.Split(result.ID, "-") - proofBatchNumberStr := idSlice[2] - proofBatchNumber, err := strconv.ParseUint(proofBatchNumberStr, encoding.Base10, 0) - - if err != nil { - mTxResultLogger.Errorf("failed to read final proof batch number from monitored tx: %v", err) - } - - proofBatchNumberFinalStr := idSlice[4] - proofBatchNumberFinal, err := strconv.ParseUint(proofBatchNumberFinalStr, encoding.Base10, 0) - - if err != nil { - mTxResultLogger.Errorf("failed to read final 
proof batch number final from monitored tx: %v", err) - } - - log := log.WithFields("txId", result.ID, "batches", fmt.Sprintf("%d-%d", proofBatchNumber, proofBatchNumberFinal)) - log.Info("Final proof verified") - - // wait for the synchronizer to catch up the verified batches - log.Debug("A final proof has been sent, waiting for the network to be synced") + // Wait for the transaction to be finalized, then we can safely delete all recursive + // proofs up to the last batch in this proof - for !a.isSynced(a.ctx, &proofBatchNumberFinal) { - log.Info("Waiting for synchronizer to sync...") - time.Sleep(a.cfg.RetryTime.Duration) - } + finaLizedBlockNumber, err := l1_check_block.L1FinalizedFetch.BlockNumber(a.ctx, a.etherman) + if err != nil { + mTxResultLogger.Errorf("failed to get finalized block number: %v", err) + } - // network is synced with the final proof, we can safely delete all recursive - // proofs up to the last synced batch - err = a.State.CleanupGeneratedProofs(a.ctx, proofBatchNumberFinal, nil) + for result.MinedAtBlockNumber.Uint64() > finaLizedBlockNumber { + select { + case <-a.ctx.Done(): + return + case <-time.After(a.cfg.RetryTime.Duration): + finaLizedBlockNumber, err = l1_check_block.L1FinalizedFetch.BlockNumber(a.ctx, a.etherman) + if err != nil { + mTxResultLogger.Errorf("failed to get finalized block number: %v", err) + } + } + } - if err != nil { - log.Errorf("Failed to store proof aggregation result: %v", err) - } - */ -} + err = a.state.DeleteGeneratedProofs(a.ctx, firstBatch, lastBatch, nil) + if err != nil { + mTxResultLogger.Errorf("failed to delete generated proofs from %d to %d: %v", firstBatch, lastBatch, err) + } -/* -func buildMonitoredTxID(batchNumber, batchNumberFinal uint64) string { - return fmt.Sprintf(monitoredIDFormat, batchNumber, batchNumberFinal) + mTxResultLogger.Debugf("deleted generated proofs from %d to %d", firstBatch, lastBatch) } -*/ func (a *Aggregator) cleanupLockedProofs() { for { diff --git 
a/aggregator/aggregator_test.go b/aggregator/aggregator_test.go new file mode 100644 index 00000000..f906ebbb --- /dev/null +++ b/aggregator/aggregator_test.go @@ -0,0 +1,1573 @@ +package aggregator + +import ( + "bytes" + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "encoding/json" + "errors" + "fmt" + "math/big" + "sync" + "sync/atomic" + "testing" + "time" + + mocks "github.com/0xPolygon/cdk/aggregator/mocks" + "github.com/0xPolygon/cdk/aggregator/prover" + "github.com/0xPolygon/cdk/config/types" + "github.com/0xPolygon/cdk/log" + "github.com/0xPolygon/cdk/state" + "github.com/0xPolygon/cdk/state/datastream" + "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" + "github.com/0xPolygonHermez/zkevm-synchronizer-l1/synchronizer" + "github.com/ethereum/go-ethereum/common" + ethTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" +) + +var ( + proofID = "proofId" + proof = "proof" + proverName = "proverName" + proverID = "proverID" +) + +type mox struct { + stateMock *mocks.StateInterfaceMock + ethTxManager *mocks.EthTxManagerClientMock + etherman *mocks.EthermanMock + proverMock *mocks.ProverInterfaceMock + aggLayerClientMock *mocks.AgglayerClientInterfaceMock +} + +func WaitUntil(t *testing.T, wg *sync.WaitGroup, timeout time.Duration) { + t.Helper() + + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + + select { + case <-done: + case <-time.After(timeout): + t.Fatalf("WaitGroup not done, test time expired after %s", timeout) + } +} + +func Test_resetCurrentBatchData(t *testing.T) { + t.Parallel() + + a := Aggregator{ + currentBatchStreamData: []byte("test"), + currentStreamBatchRaw: state.BatchRawV2{ + Blocks: []state.L2BlockRaw{ + { + BlockNumber: 1, + ChangeL2BlockHeader: state.ChangeL2BlockHeader{}, + Transactions: []state.L2TxRaw{}, + }, + }, + }, + 
currentStreamL2Block: state.L2BlockRaw{}, + } + + a.resetCurrentBatchData() + + assert.Equal(t, []byte{}, a.currentBatchStreamData) + assert.Equal(t, state.BatchRawV2{Blocks: make([]state.L2BlockRaw, 0)}, a.currentStreamBatchRaw) + assert.Equal(t, state.L2BlockRaw{}, a.currentStreamL2Block) +} + +func Test_handleReorg(t *testing.T) { + t.Parallel() + + mockL1Syncr := new(mocks.SynchronizerInterfaceMock) + mockState := new(mocks.StateInterfaceMock) + reorgData := synchronizer.ReorgExecutionResult{} + + a := &Aggregator{ + l1Syncr: mockL1Syncr, + state: mockState, + logger: log.GetDefaultLogger(), + halted: atomic.Bool{}, + ctx: context.Background(), + } + + mockL1Syncr.On("GetLastestVirtualBatchNumber", mock.Anything).Return(uint64(100), nil).Once() + mockState.On("DeleteBatchesNewerThanBatchNumber", mock.Anything, uint64(100), mock.Anything).Return(nil).Once() + + go a.handleReorg(reorgData) + time.Sleep(3 * time.Second) + + assert.True(t, a.halted.Load()) + mockState.AssertExpectations(t) + mockL1Syncr.AssertExpectations(t) +} + +func Test_handleRollbackBatches(t *testing.T) { + t.Parallel() + + mockStreamClient := new(mocks.StreamClientMock) + mockEtherman := new(mocks.EthermanMock) + mockState := new(mocks.StateInterfaceMock) + + // Test data + rollbackData := synchronizer.RollbackBatchesData{ + LastBatchNumber: 100, + } + + mockStreamClient.On("IsStarted").Return(true).Once() + mockStreamClient.On("ResetProcessEntryFunc").Return().Once() + mockStreamClient.On("SetProcessEntryFunc", mock.Anything).Return().Once() + mockStreamClient.On("ExecCommandStop").Return(nil).Once() + mockStreamClient.On("Start").Return(nil).Once() + mockStreamClient.On("ExecCommandStartBookmark", mock.Anything).Return(nil).Once() + mockEtherman.On("GetLatestVerifiedBatchNum").Return(uint64(90), nil).Once() + mockState.On("DeleteBatchesNewerThanBatchNumber", mock.Anything, rollbackData.LastBatchNumber, nil).Return(nil).Once() + mockState.On("DeleteBatchesOlderThanBatchNumber", 
mock.Anything, rollbackData.LastBatchNumber, nil).Return(nil).Once() + mockState.On("DeleteUngeneratedProofs", mock.Anything, nil).Return(nil).Once() + mockState.On("DeleteGeneratedProofs", mock.Anything, rollbackData.LastBatchNumber+1, mock.AnythingOfType("uint64"), nil).Return(nil).Once() + + a := Aggregator{ + ctx: context.Background(), + streamClient: mockStreamClient, + etherman: mockEtherman, + state: mockState, + logger: log.GetDefaultLogger(), + halted: atomic.Bool{}, + streamClientMutex: &sync.Mutex{}, + currentBatchStreamData: []byte{}, + currentStreamBatchRaw: state.BatchRawV2{}, + currentStreamL2Block: state.L2BlockRaw{}, + } + + a.halted.Store(false) + a.handleRollbackBatches(rollbackData) + + assert.False(t, a.halted.Load()) + mockStreamClient.AssertExpectations(t) + mockEtherman.AssertExpectations(t) + mockState.AssertExpectations(t) +} + +func Test_handleReceivedDataStream_BatchStart(t *testing.T) { + t.Parallel() + + mockState := new(mocks.StateInterfaceMock) + mockL1Syncr := new(mocks.SynchronizerInterfaceMock) + agg := Aggregator{ + state: mockState, + l1Syncr: mockL1Syncr, + logger: log.GetDefaultLogger(), + halted: atomic.Bool{}, + currentStreamBatch: state.Batch{}, + } + + // Prepare a FileEntry for Batch Start + batchStartData, err := proto.Marshal(&datastream.BatchStart{ + Number: 1, + ChainId: 2, + ForkId: 3, + Type: datastream.BatchType_BATCH_TYPE_REGULAR, + }) + assert.NoError(t, err) + + batchStartEntry := &datastreamer.FileEntry{ + Type: datastreamer.EntryType(datastream.EntryType_ENTRY_TYPE_BATCH_START), + Data: batchStartData, + } + + // Test the handleReceivedDataStream for Batch Start + err = agg.handleReceivedDataStream(batchStartEntry, nil, nil) + assert.NoError(t, err) + + assert.Equal(t, agg.currentStreamBatch.BatchNumber, uint64(1)) + assert.Equal(t, agg.currentStreamBatch.ChainID, uint64(2)) + assert.Equal(t, agg.currentStreamBatch.ForkID, uint64(3)) + assert.Equal(t, agg.currentStreamBatch.Type, 
datastream.BatchType_BATCH_TYPE_REGULAR) +} + +func Test_handleReceivedDataStream_BatchEnd(t *testing.T) { + t.Parallel() + + mockState := new(mocks.StateInterfaceMock) + mockL1Syncr := new(mocks.SynchronizerInterfaceMock) + a := Aggregator{ + state: mockState, + l1Syncr: mockL1Syncr, + logger: log.GetDefaultLogger(), + halted: atomic.Bool{}, + currentStreamBatch: state.Batch{ + BatchNumber: uint64(2), + Type: datastream.BatchType_BATCH_TYPE_REGULAR, + Coinbase: common.Address{}, + }, + currentStreamL2Block: state.L2BlockRaw{ + BlockNumber: uint64(10), + }, + currentStreamBatchRaw: state.BatchRawV2{ + Blocks: []state.L2BlockRaw{ + { + BlockNumber: uint64(9), + ChangeL2BlockHeader: state.ChangeL2BlockHeader{}, + Transactions: []state.L2TxRaw{}, + }, + }, + }, + cfg: Config{ + UseL1BatchData: false, + }, + } + + batchEndData, err := proto.Marshal(&datastream.BatchEnd{ + Number: 1, + LocalExitRoot: []byte{1, 2, 3}, + StateRoot: []byte{4, 5, 6}, + Debug: nil, + }) + assert.NoError(t, err) + + batchEndEntry := &datastreamer.FileEntry{ + Type: datastreamer.EntryType(datastream.EntryType_ENTRY_TYPE_BATCH_END), + Data: batchEndData, + } + + mockState.On("GetBatch", mock.Anything, a.currentStreamBatch.BatchNumber-1, nil). + Return(&state.DBBatch{ + Batch: state.Batch{ + AccInputHash: common.Hash{}, + }, + }, nil).Once() + mockState.On("GetBatch", mock.Anything, a.currentStreamBatch.BatchNumber, nil). + Return(&state.DBBatch{ + Witness: []byte("test_witness"), + }, nil).Once() + mockState.On("AddBatch", mock.Anything, mock.Anything, nil).Return(nil).Once() + mockL1Syncr.On("GetVirtualBatchByBatchNumber", mock.Anything, a.currentStreamBatch.BatchNumber). + Return(&synchronizer.VirtualBatch{BatchL2Data: []byte{1, 2, 3}}, nil).Once() + mockL1Syncr.On("GetSequenceByBatchNumber", mock.Anything, a.currentStreamBatch.BatchNumber). 
+ Return(&synchronizer.SequencedBatches{ + L1InfoRoot: common.Hash{}, + Timestamp: time.Now(), + }, nil).Once() + + err = a.handleReceivedDataStream(batchEndEntry, nil, nil) + assert.NoError(t, err) + + assert.Equal(t, a.currentBatchStreamData, []byte{}) + assert.Equal(t, a.currentStreamBatchRaw, state.BatchRawV2{Blocks: make([]state.L2BlockRaw, 0)}) + assert.Equal(t, a.currentStreamL2Block, state.L2BlockRaw{}) + + mockState.AssertExpectations(t) + mockL1Syncr.AssertExpectations(t) +} + +func Test_handleReceivedDataStream_L2Block(t *testing.T) { + t.Parallel() + + a := Aggregator{ + currentStreamL2Block: state.L2BlockRaw{ + BlockNumber: uint64(9), + }, + currentStreamBatchRaw: state.BatchRawV2{ + Blocks: []state.L2BlockRaw{}, + }, + currentStreamBatch: state.Batch{}, + } + + // Mock data for L2Block + l2Block := &datastream.L2Block{ + Number: uint64(10), + DeltaTimestamp: uint32(5), + L1InfotreeIndex: uint32(1), + Coinbase: []byte{0x01}, + GlobalExitRoot: []byte{0x02}, + } + + l2BlockData, err := proto.Marshal(l2Block) + assert.NoError(t, err) + + l2BlockEntry := &datastreamer.FileEntry{ + Type: datastreamer.EntryType(datastream.EntryType_ENTRY_TYPE_L2_BLOCK), + Data: l2BlockData, + } + + err = a.handleReceivedDataStream(l2BlockEntry, nil, nil) + assert.NoError(t, err) + + assert.Equal(t, uint64(10), a.currentStreamL2Block.BlockNumber) + assert.Equal(t, uint32(5), a.currentStreamL2Block.ChangeL2BlockHeader.DeltaTimestamp) + assert.Equal(t, uint32(1), a.currentStreamL2Block.ChangeL2BlockHeader.IndexL1InfoTree) + assert.Equal(t, 0, len(a.currentStreamL2Block.Transactions)) + assert.Equal(t, uint32(1), a.currentStreamBatch.L1InfoTreeIndex) + assert.Equal(t, common.BytesToAddress([]byte{0x01}), a.currentStreamBatch.Coinbase) + assert.Equal(t, common.BytesToHash([]byte{0x02}), a.currentStreamBatch.GlobalExitRoot) +} + +func Test_handleReceivedDataStream_Transaction(t *testing.T) { + t.Parallel() + + a := Aggregator{ + currentStreamL2Block: state.L2BlockRaw{ + 
Transactions: []state.L2TxRaw{}, + }, + logger: log.GetDefaultLogger(), + } + + tx := ethTypes.NewTransaction( + 0, + common.HexToAddress("0x01"), + big.NewInt(1000000000000000000), + uint64(21000), + big.NewInt(20000000000), + nil, + ) + + // Encode transaction into RLP format + var buf bytes.Buffer + err := tx.EncodeRLP(&buf) + require.NoError(t, err, "Failed to encode transaction") + + transaction := &datastream.Transaction{ + L2BlockNumber: uint64(10), + Index: uint64(0), + IsValid: true, + Encoded: buf.Bytes(), + EffectiveGasPricePercentage: uint32(90), + } + + transactionData, err := proto.Marshal(transaction) + assert.NoError(t, err) + + transactionEntry := &datastreamer.FileEntry{ + Type: datastreamer.EntryType(datastream.EntryType_ENTRY_TYPE_TRANSACTION), + Data: transactionData, + } + + err = a.handleReceivedDataStream(transactionEntry, nil, nil) + assert.NoError(t, err) + + assert.Len(t, a.currentStreamL2Block.Transactions, 1) + assert.Equal(t, uint8(90), a.currentStreamL2Block.Transactions[0].EfficiencyPercentage) + assert.False(t, a.currentStreamL2Block.Transactions[0].TxAlreadyEncoded) + assert.NotNil(t, a.currentStreamL2Block.Transactions[0].Tx) +} + +func Test_sendFinalProofSuccess(t *testing.T) { + require := require.New(t) + assert := assert.New(t) + batchNum := uint64(23) + batchNumFinal := uint64(42) + + recursiveProof := &state.Proof{ + Prover: &proverName, + ProverID: &proverID, + ProofID: &proofID, + BatchNumber: batchNum, + BatchNumberFinal: batchNumFinal, + } + finalProof := &prover.FinalProof{} + + testCases := []struct { + name string + setup func(m mox, a *Aggregator) + asserts func(a *Aggregator) + }{ + { + name: "Successfully settled on Agglayer", + setup: func(m mox, a *Aggregator) { + cfg := Config{ + SettlementBackend: AggLayer, + AggLayerTxTimeout: types.Duration{Duration: time.Millisecond * 1}, + } + a.cfg = cfg + + m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Run(func(args mock.Arguments) { + 
}).Return(&state.DBBatch{ + Batch: state.Batch{ + LocalExitRoot: common.Hash{}, + StateRoot: common.Hash{}, + }, + }, nil).Once() + + m.etherman.On("GetRollupId").Return(uint32(1)).Once() + testHash := common.BytesToHash([]byte("test hash")) + m.aggLayerClientMock.On("SendTx", mock.Anything).Return(testHash, nil).Once() + m.aggLayerClientMock.On("WaitTxToBeMined", testHash, mock.Anything).Return(nil).Once() + }, + asserts: func(a *Aggregator) { + assert.False(a.verifyingProof) + }, + }, + { + name: "Successfully settled on L1 (Direct)", + setup: func(m mox, a *Aggregator) { + senderAddr := common.BytesToAddress([]byte("sender address")).Hex() + toAddr := common.BytesToAddress([]byte("to address")) + data := []byte("data") + cfg := Config{ + SettlementBackend: L1, + SenderAddress: senderAddr, + GasOffset: uint64(10), + } + a.cfg = cfg + + m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Run(func(args mock.Arguments) { + }).Return(&state.DBBatch{ + Batch: state.Batch{ + LocalExitRoot: common.Hash{}, + StateRoot: common.Hash{}, + }, + }, nil).Once() + + m.etherman.On("BuildTrustedVerifyBatchesTxData", batchNum-1, batchNumFinal, mock.Anything, common.HexToAddress(senderAddr)).Return(&toAddr, data, nil).Once() + m.ethTxManager.On("Add", mock.Anything, &toAddr, big.NewInt(0), data, a.cfg.GasOffset, (*ethTypes.BlobTxSidecar)(nil)).Return(nil, nil).Once() + m.ethTxManager.On("ProcessPendingMonitoredTxs", mock.Anything, mock.Anything).Once() + }, + asserts: func(a *Aggregator) { + assert.False(a.verifyingProof) + }, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + stateMock := mocks.NewStateInterfaceMock(t) + ethTxManager := mocks.NewEthTxManagerClientMock(t) + etherman := mocks.NewEthermanMock(t) + aggLayerClient := mocks.NewAgglayerClientInterfaceMock(t) + + curve := elliptic.P256() + privateKey, err := ecdsa.GenerateKey(curve, rand.Reader) + require.NoError(err, "error generating key") + + a := Aggregator{ + 
state: stateMock, + etherman: etherman, + ethTxManager: ethTxManager, + aggLayerClient: aggLayerClient, + finalProof: make(chan finalProofMsg), + logger: log.GetDefaultLogger(), + verifyingProof: false, + stateDBMutex: &sync.Mutex{}, + timeSendFinalProofMutex: &sync.RWMutex{}, + sequencerPrivateKey: privateKey, + } + a.ctx, a.exit = context.WithCancel(context.Background()) + + m := mox{ + stateMock: stateMock, + ethTxManager: ethTxManager, + etherman: etherman, + aggLayerClientMock: aggLayerClient, + } + if tc.setup != nil { + tc.setup(m, &a) + } + // send a final proof over the channel + go func() { + finalMsg := finalProofMsg{ + proverID: proverID, + recursiveProof: recursiveProof, + finalProof: finalProof, + } + a.finalProof <- finalMsg + time.Sleep(1 * time.Second) + a.exit() + }() + + a.sendFinalProof() + if tc.asserts != nil { + tc.asserts(&a) + } + }) + } +} + +func Test_sendFinalProofError(t *testing.T) { + require := require.New(t) + assert := assert.New(t) + errTest := errors.New("test error") + batchNum := uint64(23) + batchNumFinal := uint64(42) + sender := common.BytesToAddress([]byte("SenderAddress")) + senderAddr := sender.Hex() + + recursiveProof := &state.Proof{ + Prover: &proverName, + ProverID: &proverID, + ProofID: &proofID, + BatchNumber: batchNum, + BatchNumberFinal: batchNumFinal, + } + finalProof := &prover.FinalProof{} + + testCases := []struct { + name string + setup func(mox, *Aggregator) + asserts func(*Aggregator) + }{ + { + name: "Failed to settle on Agglayer: GetBatch error", + setup: func(m mox, a *Aggregator) { + m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Run(func(args mock.Arguments) { + // test is done, stop the sendFinalProof method + fmt.Println("Stopping sendFinalProof") + a.exit() + }).Return(nil, errTest).Once() + }, + asserts: func(a *Aggregator) { + assert.False(a.verifyingProof) + }, + }, + { + name: "Failed to settle on Agglayer: SendTx error", + setup: func(m mox, a *Aggregator) { + cfg := Config{ + 
SettlementBackend: AggLayer, + } + a.cfg = cfg + + m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Run(func(args mock.Arguments) { + }).Return(&state.DBBatch{ + Batch: state.Batch{ + LocalExitRoot: common.Hash{}, + StateRoot: common.Hash{}, + }, + }, nil).Once() + + m.etherman.On("GetRollupId").Return(uint32(1)).Once() + m.aggLayerClientMock.On("SendTx", mock.Anything).Run(func(args mock.Arguments) { + // test is done, stop the sendFinalProof method + fmt.Println("Stopping sendFinalProof") + a.exit() + }).Return(nil, errTest).Once() + m.stateMock.On("UpdateGeneratedProof", mock.Anything, mock.Anything, nil).Return(nil).Once() + }, + asserts: func(a *Aggregator) { + assert.False(a.verifyingProof) + }, + }, + { + name: "Failed to settle on Agglayer: WaitTxToBeMined error", + setup: func(m mox, a *Aggregator) { + cfg := Config{ + SettlementBackend: AggLayer, + AggLayerTxTimeout: types.Duration{Duration: time.Millisecond * 1}, + } + a.cfg = cfg + + m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Run(func(args mock.Arguments) { + }).Return(&state.DBBatch{ + Batch: state.Batch{ + LocalExitRoot: common.Hash{}, + StateRoot: common.Hash{}, + }, + }, nil).Once() + + m.etherman.On("GetRollupId").Return(uint32(1)).Once() + m.aggLayerClientMock.On("SendTx", mock.Anything).Return(common.Hash{}, nil).Once() + m.aggLayerClientMock.On("WaitTxToBeMined", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + fmt.Println("Stopping sendFinalProof") + a.exit() + }).Return(errTest) + m.stateMock.On("UpdateGeneratedProof", mock.Anything, mock.Anything, nil).Return(nil).Once() + }, + asserts: func(a *Aggregator) { + assert.False(a.verifyingProof) + }, + }, + { + name: "Failed to settle on L1 (Direct): BuildTrustedVerifyBatchesTxData error", + setup: func(m mox, a *Aggregator) { + cfg := Config{ + SettlementBackend: L1, + SenderAddress: senderAddr, + } + a.cfg = cfg + + m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Run(func(args 
mock.Arguments) { + }).Return(&state.DBBatch{ + Batch: state.Batch{ + LocalExitRoot: common.Hash{}, + StateRoot: common.Hash{}, + }, + }, nil).Once() + + m.etherman.On("BuildTrustedVerifyBatchesTxData", batchNum-1, batchNumFinal, mock.Anything, sender).Run(func(args mock.Arguments) { + fmt.Println("Stopping sendFinalProof") + a.exit() + }).Return(nil, nil, errTest) + m.stateMock.On("UpdateGeneratedProof", mock.Anything, recursiveProof, nil).Return(nil).Once() + }, + asserts: func(a *Aggregator) { + assert.False(a.verifyingProof) + }, + }, + { + name: "Failed to settle on L1 (Direct): Error Adding TX to ethTxManager", + setup: func(m mox, a *Aggregator) { + cfg := Config{ + SettlementBackend: L1, + SenderAddress: senderAddr, + GasOffset: uint64(10), + } + a.cfg = cfg + + m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Run(func(args mock.Arguments) { + }).Return(&state.DBBatch{ + Batch: state.Batch{ + LocalExitRoot: common.Hash{}, + StateRoot: common.Hash{}, + }, + }, nil).Once() + + m.etherman.On("BuildTrustedVerifyBatchesTxData", batchNum-1, batchNumFinal, mock.Anything, sender).Return(nil, nil, nil).Once() + m.ethTxManager.On("Add", mock.Anything, mock.Anything, big.NewInt(0), mock.Anything, a.cfg.GasOffset, (*ethTypes.BlobTxSidecar)(nil)).Run(func(args mock.Arguments) { + fmt.Println("Stopping sendFinalProof") + a.exit() + }).Return(nil, errTest).Once() + m.stateMock.On("UpdateGeneratedProof", mock.Anything, recursiveProof, nil).Return(nil).Once() + }, + asserts: func(a *Aggregator) { + assert.False(a.verifyingProof) + }, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + stateMock := mocks.NewStateInterfaceMock(t) + ethTxManager := mocks.NewEthTxManagerClientMock(t) + etherman := mocks.NewEthermanMock(t) + aggLayerClient := mocks.NewAgglayerClientInterfaceMock(t) + + curve := elliptic.P256() + privateKey, err := ecdsa.GenerateKey(curve, rand.Reader) + require.NoError(err, "error generating key") + + a := 
Aggregator{ + state: stateMock, + etherman: etherman, + ethTxManager: ethTxManager, + aggLayerClient: aggLayerClient, + finalProof: make(chan finalProofMsg), + logger: log.GetDefaultLogger(), + verifyingProof: false, + stateDBMutex: &sync.Mutex{}, + timeSendFinalProofMutex: &sync.RWMutex{}, + sequencerPrivateKey: privateKey, + } + a.ctx, a.exit = context.WithCancel(context.Background()) + + m := mox{ + stateMock: stateMock, + ethTxManager: ethTxManager, + etherman: etherman, + aggLayerClientMock: aggLayerClient, + } + if tc.setup != nil { + tc.setup(m, &a) + } + // send a final proof over the channel + go func() { + finalMsg := finalProofMsg{ + proverID: proverID, + recursiveProof: recursiveProof, + finalProof: finalProof, + } + a.finalProof <- finalMsg + }() + + a.sendFinalProof() + if tc.asserts != nil { + tc.asserts(&a) + } + }) + } +} + +func Test_buildFinalProof(t *testing.T) { + assert := assert.New(t) + batchNum := uint64(23) + batchNumFinal := uint64(42) + recursiveProof := &state.Proof{ + ProverID: &proverID, + Proof: "test proof", + ProofID: &proofID, + BatchNumber: batchNum, + BatchNumberFinal: batchNumFinal, + } + finalProofID := "finalProofID" + + testCases := []struct { + name string + setup func(mox, *Aggregator) + asserts func(err error, fProof *prover.FinalProof) + }{ + { + name: "using real prover", + setup: func(m mox, a *Aggregator) { + finalProof := prover.FinalProof{ + Public: &prover.PublicInputsExtended{ + NewStateRoot: []byte("StateRoot"), + NewLocalExitRoot: []byte("LocalExitRoot"), + }, + } + + m.proverMock.On("Name").Return("name").Once() + m.proverMock.On("ID").Return("id").Once() + m.proverMock.On("Addr").Return("addr").Once() + m.proverMock.On("FinalProof", recursiveProof.Proof, a.cfg.SenderAddress).Return(&finalProofID, nil).Once() + m.proverMock.On("WaitFinalProof", mock.Anything, finalProofID).Return(&finalProof, nil).Once() + }, + asserts: func(err error, fProof *prover.FinalProof) { + assert.NoError(err) + 
assert.True(bytes.Equal([]byte("StateRoot"), fProof.Public.NewStateRoot), "State roots should be equal") + assert.True(bytes.Equal([]byte("LocalExitRoot"), fProof.Public.NewLocalExitRoot), "LocalExit roots should be equal") + }, + }, + { + name: "using mock prover", + setup: func(m mox, a *Aggregator) { + finalProof := prover.FinalProof{ + Public: &prover.PublicInputsExtended{ + NewStateRoot: []byte(mockedStateRoot), + NewLocalExitRoot: []byte(mockedLocalExitRoot), + }, + } + + finalDBBatch := &state.DBBatch{ + Batch: state.Batch{ + StateRoot: common.BytesToHash([]byte("mock StateRoot")), + LocalExitRoot: common.BytesToHash([]byte("mock LocalExitRoot")), + }, + } + + m.proverMock.On("Name").Return("name").Once() + m.proverMock.On("ID").Return("id").Once() + m.proverMock.On("Addr").Return("addr").Once() + m.proverMock.On("FinalProof", recursiveProof.Proof, a.cfg.SenderAddress).Return(&finalProofID, nil).Once() + m.proverMock.On("WaitFinalProof", mock.Anything, finalProofID).Return(&finalProof, nil).Once() + m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Return(finalDBBatch, nil).Once() + }, + asserts: func(err error, fProof *prover.FinalProof) { + assert.NoError(err) + expStateRoot := common.BytesToHash([]byte("mock StateRoot")) + expLocalExitRoot := common.BytesToHash([]byte("mock LocalExitRoot")) + assert.True(bytes.Equal(expStateRoot.Bytes(), fProof.Public.NewStateRoot), "State roots should be equal") + assert.True(bytes.Equal(expLocalExitRoot.Bytes(), fProof.Public.NewLocalExitRoot), "LocalExit roots should be equal") + }, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + proverMock := mocks.NewProverInterfaceMock(t) + stateMock := mocks.NewStateInterfaceMock(t) + m := mox{ + proverMock: proverMock, + stateMock: stateMock, + } + a := Aggregator{ + state: stateMock, + logger: log.GetDefaultLogger(), + cfg: Config{ + SenderAddress: common.BytesToAddress([]byte("from")).Hex(), + }, + } + + tc.setup(m, &a) 
+ fProof, err := a.buildFinalProof(context.Background(), proverMock, recursiveProof) + tc.asserts(err, fProof) + }) + } +} + +func Test_tryBuildFinalProof(t *testing.T) { + assert := assert.New(t) + errTest := errors.New("test error") + from := common.BytesToAddress([]byte("from")) + cfg := Config{ + VerifyProofInterval: types.Duration{Duration: time.Millisecond * 1}, + TxProfitabilityCheckerType: ProfitabilityAcceptAll, + SenderAddress: from.Hex(), + } + latestVerifiedBatchNum := uint64(22) + batchNum := uint64(23) + batchNumFinal := uint64(42) + finalProofID := "finalProofID" + finalProof := prover.FinalProof{ + Proof: "", + Public: &prover.PublicInputsExtended{ + NewStateRoot: []byte("newStateRoot"), + NewLocalExitRoot: []byte("newLocalExitRoot"), + }, + } + proofToVerify := state.Proof{ + ProofID: &proofID, + Proof: proof, + BatchNumber: batchNum, + BatchNumberFinal: batchNumFinal, + } + invalidProof := state.Proof{ + ProofID: &proofID, + Proof: proof, + BatchNumber: uint64(123), + BatchNumberFinal: uint64(456), + } + + proverCtx := context.WithValue(context.Background(), "owner", "prover") //nolint:staticcheck + matchProverCtxFn := func(ctx context.Context) bool { return ctx.Value("owner") == "prover" } + matchAggregatorCtxFn := func(ctx context.Context) bool { return ctx.Value("owner") == "aggregator" } + testCases := []struct { + name string + proof *state.Proof + setup func(mox, *Aggregator) + asserts func(bool, *Aggregator, error) + assertFinalMsg func(*finalProofMsg) + }{ + { + name: "can't verify proof (verifyingProof = true)", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Once() + m.proverMock.On("ID").Return(proverID).Once() + m.proverMock.On("Addr").Return("addr").Once() + a.verifyingProof = true + }, + asserts: func(result bool, a *Aggregator, err error) { + a.verifyingProof = false // reset + assert.False(result) + assert.NoError(err) + }, + }, + { + name: "can't verify proof (veryfy time not reached yet)", + 
setup: func(m mox, a *Aggregator) { + a.timeSendFinalProof = time.Now().Add(10 * time.Second) + m.proverMock.On("Name").Return(proverName).Once() + m.proverMock.On("ID").Return(proverID).Once() + m.proverMock.On("Addr").Return("addr").Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.NoError(err) + }, + }, + { + name: "nil proof, error requesting the proof triggers defer", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr").Twice() + m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() + m.stateMock.On("GetProofReadyToVerify", mock.MatchedBy(matchProverCtxFn), latestVerifiedBatchNum, nil).Return(&proofToVerify, nil).Once() + proofGeneratingTrueCall := m.stateMock.On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proofToVerify, nil).Return(nil).Once() + m.proverMock.On("FinalProof", proofToVerify.Proof, from.String()).Return(nil, errTest).Once() + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proofToVerify, nil). + Return(nil). + Once(). 
+ NotBefore(proofGeneratingTrueCall) + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "nil proof, error building the proof triggers defer", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr").Twice() + m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() + m.stateMock.On("GetProofReadyToVerify", mock.MatchedBy(matchProverCtxFn), latestVerifiedBatchNum, nil).Return(&proofToVerify, nil).Once() + proofGeneratingTrueCall := m.stateMock.On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proofToVerify, nil).Return(nil).Once() + m.proverMock.On("FinalProof", proofToVerify.Proof, from.String()).Return(&finalProofID, nil).Once() + m.proverMock.On("WaitFinalProof", mock.MatchedBy(matchProverCtxFn), finalProofID).Return(nil, errTest).Once() + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proofToVerify, nil). + Return(nil). + Once(). 
+ NotBefore(proofGeneratingTrueCall) + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "nil proof, generic error from GetProofReadyToVerify", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Once() + m.proverMock.On("ID").Return(proverID).Once() + m.proverMock.On("Addr").Return(proverID).Once() + m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() + m.stateMock.On("GetProofReadyToVerify", mock.MatchedBy(matchProverCtxFn), latestVerifiedBatchNum, nil).Return(nil, errTest).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "nil proof, ErrNotFound from GetProofReadyToVerify", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Once() + m.proverMock.On("ID").Return(proverID).Once() + m.proverMock.On("Addr").Return(proverID).Once() + m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() + m.stateMock.On("GetProofReadyToVerify", mock.MatchedBy(matchProverCtxFn), latestVerifiedBatchNum, nil).Return(nil, state.ErrNotFound).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.NoError(err) + }, + }, + { + name: "nil proof gets a proof ready to verify", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return(proverID).Twice() + m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() + m.stateMock.On("GetProofReadyToVerify", mock.MatchedBy(matchProverCtxFn), latestVerifiedBatchNum, nil).Return(&proofToVerify, nil).Once() + m.stateMock.On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proofToVerify, nil).Return(nil).Once() + m.proverMock.On("FinalProof", 
proofToVerify.Proof, from.String()).Return(&finalProofID, nil).Once() + m.proverMock.On("WaitFinalProof", mock.MatchedBy(matchProverCtxFn), finalProofID).Return(&finalProof, nil).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.True(result) + assert.NoError(err) + }, + assertFinalMsg: func(msg *finalProofMsg) { + assert.Equal(finalProof.Proof, msg.finalProof.Proof) + assert.Equal(finalProof.Public.NewStateRoot, msg.finalProof.Public.NewStateRoot) + assert.Equal(finalProof.Public.NewLocalExitRoot, msg.finalProof.Public.NewLocalExitRoot) + }, + }, + { + name: "error checking if proof is a complete sequence", + proof: &proofToVerify, + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Once() + m.proverMock.On("ID").Return(proverID).Once() + m.proverMock.On("Addr").Return(proverID).Once() + m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() + m.stateMock.On("CheckProofContainsCompleteSequences", mock.MatchedBy(matchProverCtxFn), &proofToVerify, nil).Return(false, errTest).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "invalid proof (not consecutive to latest verified batch) rejected", + proof: &invalidProof, + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Once() + m.proverMock.On("ID").Return(proverID).Once() + m.proverMock.On("Addr").Return(proverID).Once() + m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.NoError(err) + }, + }, + { + name: "invalid proof (not a complete sequence) rejected", + proof: &proofToVerify, + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Once() + m.proverMock.On("ID").Return(proverID).Once() + m.proverMock.On("Addr").Return(proverID).Once() + 
m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() + m.stateMock.On("CheckProofContainsCompleteSequences", mock.MatchedBy(matchProverCtxFn), &proofToVerify, nil).Return(false, nil).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.NoError(err) + }, + }, + { + name: "valid proof", + proof: &proofToVerify, + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return(proverID).Twice() + m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() + m.stateMock.On("CheckProofContainsCompleteSequences", mock.MatchedBy(matchProverCtxFn), &proofToVerify, nil).Return(true, nil).Once() + m.proverMock.On("FinalProof", proofToVerify.Proof, from.String()).Return(&finalProofID, nil).Once() + m.proverMock.On("WaitFinalProof", mock.MatchedBy(matchProverCtxFn), finalProofID).Return(&finalProof, nil).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.True(result) + assert.NoError(err) + }, + assertFinalMsg: func(msg *finalProofMsg) { + assert.Equal(finalProof.Proof, msg.finalProof.Proof) + assert.Equal(finalProof.Public.NewStateRoot, msg.finalProof.Public.NewStateRoot) + assert.Equal(finalProof.Public.NewLocalExitRoot, msg.finalProof.Public.NewLocalExitRoot) + }, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + stateMock := mocks.NewStateInterfaceMock(t) + ethTxManager := mocks.NewEthTxManagerClientMock(t) + etherman := mocks.NewEthermanMock(t) + proverMock := mocks.NewProverInterfaceMock(t) + + a := Aggregator{ + cfg: cfg, + state: stateMock, + etherman: etherman, + ethTxManager: ethTxManager, + logger: log.GetDefaultLogger(), + stateDBMutex: &sync.Mutex{}, + timeSendFinalProofMutex: &sync.RWMutex{}, + timeCleanupLockedProofs: cfg.CleanupLockedProofsInterval, + finalProof: make(chan 
finalProofMsg), + } + + aggregatorCtx := context.WithValue(context.Background(), "owner", "aggregator") //nolint:staticcheck + a.ctx, a.exit = context.WithCancel(aggregatorCtx) + m := mox{ + stateMock: stateMock, + ethTxManager: ethTxManager, + etherman: etherman, + proverMock: proverMock, + } + if tc.setup != nil { + tc.setup(m, &a) + } + + var wg sync.WaitGroup + if tc.assertFinalMsg != nil { + // wait for the final proof over the channel + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + msg := <-a.finalProof + tc.assertFinalMsg(&msg) + }() + } + + result, err := a.tryBuildFinalProof(proverCtx, proverMock, tc.proof) + + if tc.asserts != nil { + tc.asserts(result, &a, err) + } + + if tc.assertFinalMsg != nil { + WaitUntil(t, &wg, time.Second) + } + }) + } +} + +func Test_tryAggregateProofs(t *testing.T) { + require := require.New(t) + assert := assert.New(t) + errTest := errors.New("test error") + cfg := Config{ + VerifyProofInterval: types.Duration{Duration: time.Millisecond * 1}, + } + + recursiveProof := "recursiveProof" + proverCtx := context.WithValue(context.Background(), "owner", "prover") //nolint:staticcheck + matchProverCtxFn := func(ctx context.Context) bool { return ctx.Value("owner") == "prover" } + matchAggregatorCtxFn := func(ctx context.Context) bool { return ctx.Value("owner") == "aggregator" } + batchNum := uint64(23) + batchNumFinal := uint64(42) + proof1 := state.Proof{ + Proof: "proof1", + BatchNumber: batchNum, + } + proof2 := state.Proof{ + Proof: "proof2", + BatchNumberFinal: batchNumFinal, + } + testCases := []struct { + name string + setup func(mox, *Aggregator) + asserts func(bool, *Aggregator, error) + }{ + { + name: "getAndLockProofsToAggregate returns generic error", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr") + m.stateMock.On("GetProofsToAggregate", 
mock.MatchedBy(matchProverCtxFn), nil).Return(nil, nil, errTest).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "getAndLockProofsToAggregate returns ErrNotFound", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr") + m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(nil, nil, state.ErrNotFound).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.NoError(err) + }, + }, + { + name: "getAndLockProofsToAggregate error updating proofs", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr") + dbTx := &mocks.DbTxMock{} + dbTx.On("Rollback", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() + m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchProverCtxFn)).Return(dbTx, nil).Once() + m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(errTest). 
+ Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "AggregatedProof error", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr") + dbTx := &mocks.DbTxMock{} + lockProofsTxBegin := m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchProverCtxFn)).Return(dbTx, nil).Once() + lockProofsTxCommit := dbTx.On("Commit", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() + m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() + proof1GeneratingTrueCall := m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once() + proof2GeneratingTrueCall := m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof2, dbTx). + Run(func(args mock.Arguments) { + // Use a type assertion with a check + proofArg, ok := args[1].(*state.Proof) + if !ok { + assert.Fail("Expected argument of type *state.Proof") + } + assert.NotNil(proofArg.GeneratingSince) + }). + Return(nil). + Once() + m.proverMock.On("AggregatedProof", proof1.Proof, proof2.Proof).Return(nil, errTest).Once() + m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchAggregatorCtxFn)).Return(dbTx, nil).Once().NotBefore(lockProofsTxBegin) + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + if !ok { + assert.Fail("Expected argument of type *state.Proof") + } + assert.Nil(proofArg.GeneratingSince) + }). + Return(nil). 
+ Once(). + NotBefore(proof1GeneratingTrueCall) + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof2, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + if !ok { + assert.Fail("Expected argument of type *state.Proof") + } + assert.Nil(proofArg.GeneratingSince, "Expected GeneratingSince to be nil") + }). + Return(nil). + Once(). + NotBefore(proof2GeneratingTrueCall) + dbTx.On("Commit", mock.MatchedBy(matchAggregatorCtxFn)).Return(nil).Once().NotBefore(lockProofsTxCommit) + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "WaitRecursiveProof prover error", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr") + dbTx := &mocks.DbTxMock{} + lockProofsTxBegin := m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchProverCtxFn)).Return(dbTx, nil).Once() + lockProofsTxCommit := dbTx.On("Commit", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() + m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() + proof1GeneratingTrueCall := m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + if !ok { + assert.Fail("Expected argument of type *state.Proof") + } + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once() + proof2GeneratingTrueCall := m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof2, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). 
+ Return(nil).
+ Once()
+ m.proverMock.On("AggregatedProof", proof1.Proof, proof2.Proof).Return(&proofID, nil).Once()
+ m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return("", common.Hash{}, errTest).Once()
+ m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchAggregatorCtxFn)).Return(dbTx, nil).Once().NotBefore(lockProofsTxBegin)
+ m.stateMock.
+ On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof1, dbTx).
+ Run(func(args mock.Arguments) {
+ proofArg, ok := args[1].(*state.Proof)
+ assert.True(ok, "Expected argument of type *state.Proof")
+ assert.Nil(proofArg.GeneratingSince, "Expected GeneratingSince to be nil")
+ }).
+ Return(nil).
+ Once().
+ NotBefore(proof1GeneratingTrueCall)
+ m.stateMock.
+ On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof2, dbTx).
+ Run(func(args mock.Arguments) {
+ proofArg, ok := args[1].(*state.Proof)
+ assert.True(ok, "Expected argument of type *state.Proof")
+ assert.Nil(proofArg.GeneratingSince, "Expected GeneratingSince to be nil")
+ }).
+ Return(nil).
+ Once(). 
+ NotBefore(proof2GeneratingTrueCall) + dbTx.On("Commit", mock.MatchedBy(matchAggregatorCtxFn)).Return(nil).Once().NotBefore(lockProofsTxCommit) + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "unlockProofsToAggregate error after WaitRecursiveProof prover error", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return(proverID) + dbTx := &mocks.DbTxMock{} + lockProofsTxBegin := m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchProverCtxFn)).Return(dbTx, nil).Once() + dbTx.On("Commit", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() + m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() + proof1GeneratingTrueCall := m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once() + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof2, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once() + m.proverMock.On("AggregatedProof", proof1.Proof, proof2.Proof).Return(&proofID, nil).Once() + m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return("", common.Hash{}, errTest).Once() + m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchAggregatorCtxFn)).Return(dbTx, nil).Once().NotBefore(lockProofsTxBegin) + m.stateMock. 
+ On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.Nil(proofArg.GeneratingSince, "Expected GeneratingSince to be nil") + }). + Return(errTest). + Once(). + NotBefore(proof1GeneratingTrueCall) + dbTx.On("Rollback", mock.MatchedBy(matchAggregatorCtxFn)).Return(nil).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "rollback after DeleteGeneratedProofs error in db transaction", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr") + dbTx := &mocks.DbTxMock{} + lockProofsTxBegin := m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchProverCtxFn)).Return(dbTx, nil).Twice() + lockProofsTxCommit := dbTx.On("Commit", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() + m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() + proof1GeneratingTrueCall := m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once() + proof2GeneratingTrueCall := m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof2, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil).
+ Once() + m.proverMock.On("AggregatedProof", proof1.Proof, proof2.Proof).Return(&proofID, nil).Once() + m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return(recursiveProof, common.Hash{}, nil).Once() + m.stateMock.On("DeleteGeneratedProofs", mock.MatchedBy(matchProverCtxFn), proof1.BatchNumber, proof2.BatchNumberFinal, dbTx).Return(errTest).Once() + dbTx.On("Rollback", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() + m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchAggregatorCtxFn)).Return(dbTx, nil).Once().NotBefore(lockProofsTxBegin) + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.Nil(proofArg.GeneratingSince, "Expected GeneratingSince to be nil") + }). + Return(nil). + Once(). + NotBefore(proof1GeneratingTrueCall) + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof2, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.Nil(proofArg.GeneratingSince, "Expected GeneratingSince to be nil") + }). + Return(nil). + Once().
+ NotBefore(proof2GeneratingTrueCall) + dbTx.On("Commit", mock.MatchedBy(matchAggregatorCtxFn)).Return(nil).Once().NotBefore(lockProofsTxCommit) + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "rollback after AddGeneratedProof error in db transaction", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr") + dbTx := &mocks.DbTxMock{} + lockProofsTxBegin := m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchProverCtxFn)).Return(dbTx, nil).Twice() + lockProofsTxCommit := dbTx.On("Commit", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() + m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() + proof1GeneratingTrueCall := m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once() + proof2GeneratingTrueCall := m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof2, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). 
+ Once() + m.proverMock.On("AggregatedProof", proof1.Proof, proof2.Proof).Return(&proofID, nil).Once() + m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return(recursiveProof, common.Hash{}, nil).Once() + m.stateMock.On("DeleteGeneratedProofs", mock.MatchedBy(matchProverCtxFn), proof1.BatchNumber, proof2.BatchNumberFinal, dbTx).Return(nil).Once() + m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, dbTx).Return(errTest).Once() + dbTx.On("Rollback", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() + m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchAggregatorCtxFn)).Return(dbTx, nil).Once().NotBefore(lockProofsTxBegin) + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.Nil(proofArg.GeneratingSince, "Expected GeneratingSince to be nil") + }). + Return(nil). + Once(). + NotBefore(proof1GeneratingTrueCall) + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof2, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.Nil(proofArg.GeneratingSince, "Expected GeneratingSince to be nil") + }). + Return(nil). + Once().
+ NotBefore(proof2GeneratingTrueCall) + dbTx.On("Commit", mock.MatchedBy(matchAggregatorCtxFn)).Return(nil).Once().NotBefore(lockProofsTxCommit) + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "time to send final, state error", + setup: func(m mox, a *Aggregator) { + a.cfg.VerifyProofInterval = types.Duration{Duration: time.Nanosecond} + m.proverMock.On("Name").Return(proverName).Times(3) + m.proverMock.On("ID").Return(proverID).Times(3) + m.proverMock.On("Addr").Return("addr") + dbTx := &mocks.DbTxMock{} + m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchProverCtxFn)).Return(dbTx, nil).Twice() + dbTx.On("Commit", mock.MatchedBy(matchProverCtxFn)).Return(nil).Twice() + m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once() + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof2, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). 
+ Once() + + m.proverMock.On("AggregatedProof", proof1.Proof, proof2.Proof).Return(&proofID, nil).Once() + m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return(recursiveProof, common.Hash{}, nil).Once() + m.stateMock.On("DeleteGeneratedProofs", mock.MatchedBy(matchProverCtxFn), proof1.BatchNumber, proof2.BatchNumberFinal, dbTx).Return(nil).Once() + expectedInputProver := map[string]interface{}{ + "recursive_proof_1": proof1.Proof, + "recursive_proof_2": proof2.Proof, + } + b, err := json.Marshal(expectedInputProver) + require.NoError(err) + m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, dbTx).Run( + func(args mock.Arguments) { + proof, ok := args[1].(*state.Proof) + if !ok { + t.Fatalf("expected args[1] to be of type *state.Proof, got %T", args[1]) + } + assert.Equal(proof1.BatchNumber, proof.BatchNumber) + assert.Equal(proof2.BatchNumberFinal, proof.BatchNumberFinal) + assert.Equal(&proverName, proof.Prover) + assert.Equal(&proverID, proof.ProverID) + assert.Equal(string(b), proof.InputProver) + assert.Equal(recursiveProof, proof.Proof) + assert.InDelta(time.Now().Unix(), proof.GeneratingSince.Unix(), float64(time.Second)) + }, + ).Return(nil).Once() + + m.etherman.On("GetLatestVerifiedBatchNum").Return(uint64(42), errTest).Once() + m.stateMock.On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), mock.Anything, nil).Run( + func(args mock.Arguments) { + proof, ok := args[1].(*state.Proof) + if !ok { + t.Fatalf("expected args[1] to be of type *state.Proof, got %T", args[1]) + } + assert.Equal(proof1.BatchNumber, proof.BatchNumber) + assert.Equal(proof2.BatchNumberFinal, proof.BatchNumberFinal) + assert.Equal(&proverName, proof.Prover) + assert.Equal(&proverID, proof.ProverID) + assert.Equal(string(b), proof.InputProver) + assert.Equal(recursiveProof, proof.Proof) + assert.Nil(proof.GeneratingSince) + }, + ).Return(nil).Once() + }, + asserts: func(result bool, a *Aggregator, err 
error) { + assert.True(result) + assert.NoError(err) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + stateMock := mocks.NewStateInterfaceMock(t) + ethTxManager := mocks.NewEthTxManagerClientMock(t) + etherman := mocks.NewEthermanMock(t) + proverMock := mocks.NewProverInterfaceMock(t) + a := Aggregator{ + cfg: cfg, + state: stateMock, + etherman: etherman, + ethTxManager: ethTxManager, + logger: log.GetDefaultLogger(), + stateDBMutex: &sync.Mutex{}, + timeSendFinalProofMutex: &sync.RWMutex{}, + timeCleanupLockedProofs: cfg.CleanupLockedProofsInterval, + finalProof: make(chan finalProofMsg), + } + aggregatorCtx := context.WithValue(context.Background(), "owner", "aggregator") //nolint:staticcheck + a.ctx, a.exit = context.WithCancel(aggregatorCtx) + m := mox{ + stateMock: stateMock, + ethTxManager: ethTxManager, + etherman: etherman, + proverMock: proverMock, + } + if tc.setup != nil { + tc.setup(m, &a) + } + a.resetVerifyProofTime() + + result, err := a.tryAggregateProofs(proverCtx, proverMock) + + if tc.asserts != nil { + tc.asserts(result, &a, err) + } + }) + } +} diff --git a/aggregator/config.go b/aggregator/config.go index 4550c637..fbbc9c9b 100644 --- a/aggregator/config.go +++ b/aggregator/config.go @@ -10,7 +10,7 @@ import ( "github.com/0xPolygon/cdk/aggregator/db" "github.com/0xPolygon/cdk/config/types" "github.com/0xPolygon/cdk/log" - "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" + "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" syncronizerConfig "github.com/0xPolygonHermez/zkevm-synchronizer-l1/config" "github.com/ethereum/go-ethereum/accounts/keystore" ) @@ -152,8 +152,8 @@ type Config struct { // MaxWitnessRetrievalWorkers is the maximum number of workers that will be used to retrieve the witness MaxWitnessRetrievalWorkers int `mapstructure:"MaxWitnessRetrievalWorkers"` - // SyncModeOnlyEnabled is a flag to enable the sync mode only - // In this mode the aggregator will only sync from L1 
and will not generate or read the data stream + // SyncModeOnlyEnabled is a flag that activates sync mode exclusively. + // When enabled, the aggregator will sync data only from L1 and will not generate or read the data stream. SyncModeOnlyEnabled bool `mapstructure:"SyncModeOnlyEnabled"` } diff --git a/aggregator/interfaces.go b/aggregator/interfaces.go index b231de35..ee70d07c 100644 --- a/aggregator/interfaces.go +++ b/aggregator/interfaces.go @@ -7,14 +7,18 @@ import ( ethmanTypes "github.com/0xPolygon/cdk/aggregator/ethmantypes" "github.com/0xPolygon/cdk/aggregator/prover" "github.com/0xPolygon/cdk/state" + "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" + ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" + "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/jackc/pgx/v4" ) // Consumer interfaces required by the package. 
-type proverInterface interface { +type ProverInterface interface { Name() string ID() string Addr() string @@ -26,8 +30,8 @@ type proverInterface interface { WaitFinalProof(ctx context.Context, proofID string) (*prover.FinalProof, error) } -// etherman contains the methods required to interact with ethereum -type etherman interface { +// Etherman contains the methods required to interact with ethereum +type Etherman interface { GetRollupId() uint32 GetLatestVerifiedBatchNum() (uint64, error) BuildTrustedVerifyBatchesTxData( @@ -35,6 +39,7 @@ type etherman interface { ) (to *common.Address, data []byte, err error) GetLatestBlockHeader(ctx context.Context) (*types.Header, error) GetBatchAccInputHash(ctx context.Context, batchNumber uint64) (common.Hash, error) + HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) } // aggregatorTxProfitabilityChecker interface for different profitability @@ -43,8 +48,8 @@ type aggregatorTxProfitabilityChecker interface { IsProfitable(context.Context, *big.Int) (bool, error) } -// stateInterface gathers the methods to interact with the state. -type stateInterface interface { +// StateInterface gathers the methods to interact with the state. 
+type StateInterface interface { BeginStateTransaction(ctx context.Context) (pgx.Tx, error) CheckProofContainsCompleteSequences(ctx context.Context, proof *state.Proof, dbTx pgx.Tx) (bool, error) GetProofReadyToVerify(ctx context.Context, lastVerfiedBatchNumber uint64, dbTx pgx.Tx) (*state.Proof, error) @@ -62,3 +67,49 @@ type stateInterface interface { DeleteBatchesOlderThanBatchNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error DeleteBatchesNewerThanBatchNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error } + +// StreamClient represents the stream client behaviour +type StreamClient interface { + Start() error + ExecCommandStart(fromEntry uint64) error + ExecCommandStartBookmark(fromBookmark []byte) error + ExecCommandStop() error + ExecCommandGetHeader() (datastreamer.HeaderEntry, error) + ExecCommandGetEntry(fromEntry uint64) (datastreamer.FileEntry, error) + ExecCommandGetBookmark(fromBookmark []byte) (datastreamer.FileEntry, error) + GetFromStream() uint64 + GetTotalEntries() uint64 + SetProcessEntryFunc(f datastreamer.ProcessEntryFunc) + ResetProcessEntryFunc() + IsStarted() bool +} + +// EthTxManagerClient represents the eth tx manager interface +type EthTxManagerClient interface { + Add( + ctx context.Context, + to *common.Address, + value *big.Int, + data []byte, + gasOffset uint64, + sidecar *types.BlobTxSidecar, + ) (common.Hash, error) + AddWithGas( + ctx context.Context, + to *common.Address, + value *big.Int, + data []byte, + gasOffset uint64, + sidecar *types.BlobTxSidecar, + gas uint64, + ) (common.Hash, error) + EncodeBlobData(data []byte) (kzg4844.Blob, error) + MakeBlobSidecar(blobs []kzg4844.Blob) *types.BlobTxSidecar + ProcessPendingMonitoredTxs(ctx context.Context, resultHandler ethtxmanager.ResultHandler) + Remove(ctx context.Context, id common.Hash) error + RemoveAll(ctx context.Context) error + Result(ctx context.Context, id common.Hash) (ethtxtypes.MonitoredTxResult, error) + ResultsByStatus(ctx 
context.Context, statuses []ethtxtypes.MonitoredTxStatus) ([]ethtxtypes.MonitoredTxResult, error) + Start() + Stop() +} diff --git a/aggregator/mocks/mock_StreamClient.go b/aggregator/mocks/mock_StreamClient.go new file mode 100644 index 00000000..7962d31e --- /dev/null +++ b/aggregator/mocks/mock_StreamClient.go @@ -0,0 +1,247 @@ +// Code generated by mockery v2.39.0. DO NOT EDIT. + +package mocks + +import ( + datastreamer "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" + mock "github.com/stretchr/testify/mock" +) + +// StreamClientMock is an autogenerated mock type for the StreamClient type +type StreamClientMock struct { + mock.Mock +} + +// ExecCommandGetBookmark provides a mock function with given fields: fromBookmark +func (_m *StreamClientMock) ExecCommandGetBookmark(fromBookmark []byte) (datastreamer.FileEntry, error) { + ret := _m.Called(fromBookmark) + + if len(ret) == 0 { + panic("no return value specified for ExecCommandGetBookmark") + } + + var r0 datastreamer.FileEntry + var r1 error + if rf, ok := ret.Get(0).(func([]byte) (datastreamer.FileEntry, error)); ok { + return rf(fromBookmark) + } + if rf, ok := ret.Get(0).(func([]byte) datastreamer.FileEntry); ok { + r0 = rf(fromBookmark) + } else { + r0 = ret.Get(0).(datastreamer.FileEntry) + } + + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(fromBookmark) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ExecCommandGetEntry provides a mock function with given fields: fromEntry +func (_m *StreamClientMock) ExecCommandGetEntry(fromEntry uint64) (datastreamer.FileEntry, error) { + ret := _m.Called(fromEntry) + + if len(ret) == 0 { + panic("no return value specified for ExecCommandGetEntry") + } + + var r0 datastreamer.FileEntry + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (datastreamer.FileEntry, error)); ok { + return rf(fromEntry) + } + if rf, ok := ret.Get(0).(func(uint64) datastreamer.FileEntry); ok { + r0 = rf(fromEntry) + } else { + r0 = 
ret.Get(0).(datastreamer.FileEntry) + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(fromEntry) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ExecCommandGetHeader provides a mock function with given fields: +func (_m *StreamClientMock) ExecCommandGetHeader() (datastreamer.HeaderEntry, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ExecCommandGetHeader") + } + + var r0 datastreamer.HeaderEntry + var r1 error + if rf, ok := ret.Get(0).(func() (datastreamer.HeaderEntry, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() datastreamer.HeaderEntry); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(datastreamer.HeaderEntry) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ExecCommandStart provides a mock function with given fields: fromEntry +func (_m *StreamClientMock) ExecCommandStart(fromEntry uint64) error { + ret := _m.Called(fromEntry) + + if len(ret) == 0 { + panic("no return value specified for ExecCommandStart") + } + + var r0 error + if rf, ok := ret.Get(0).(func(uint64) error); ok { + r0 = rf(fromEntry) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ExecCommandStartBookmark provides a mock function with given fields: fromBookmark +func (_m *StreamClientMock) ExecCommandStartBookmark(fromBookmark []byte) error { + ret := _m.Called(fromBookmark) + + if len(ret) == 0 { + panic("no return value specified for ExecCommandStartBookmark") + } + + var r0 error + if rf, ok := ret.Get(0).(func([]byte) error); ok { + r0 = rf(fromBookmark) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ExecCommandStop provides a mock function with given fields: +func (_m *StreamClientMock) ExecCommandStop() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ExecCommandStop") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } 
else { + r0 = ret.Error(0) + } + + return r0 +} + +// GetFromStream provides a mock function with given fields: +func (_m *StreamClientMock) GetFromStream() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetFromStream") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// GetTotalEntries provides a mock function with given fields: +func (_m *StreamClientMock) GetTotalEntries() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetTotalEntries") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// IsStarted provides a mock function with given fields: +func (_m *StreamClientMock) IsStarted() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for IsStarted") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// ResetProcessEntryFunc provides a mock function with given fields: +func (_m *StreamClientMock) ResetProcessEntryFunc() { + _m.Called() +} + +// SetProcessEntryFunc provides a mock function with given fields: f +func (_m *StreamClientMock) SetProcessEntryFunc(f datastreamer.ProcessEntryFunc) { + _m.Called(f) +} + +// Start provides a mock function with given fields: +func (_m *StreamClientMock) Start() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewStreamClientMock creates a new instance of StreamClientMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewStreamClientMock(t interface { + mock.TestingT + Cleanup(func()) +}) *StreamClientMock { + mock := &StreamClientMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggregator/mocks/mock_agglayer_client.go b/aggregator/mocks/mock_agglayer_client.go new file mode 100644 index 00000000..2923ebe0 --- /dev/null +++ b/aggregator/mocks/mock_agglayer_client.go @@ -0,0 +1,79 @@ +// Code generated by mockery v2.39.0. DO NOT EDIT. + +package mocks + +import ( + agglayer "github.com/0xPolygon/cdk/aggregator/agglayer" + common "github.com/ethereum/go-ethereum/common" + + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// AgglayerClientInterfaceMock is an autogenerated mock type for the AgglayerClientInterface type +type AgglayerClientInterfaceMock struct { + mock.Mock +} + +// SendTx provides a mock function with given fields: signedTx +func (_m *AgglayerClientInterfaceMock) SendTx(signedTx agglayer.SignedTx) (common.Hash, error) { + ret := _m.Called(signedTx) + + if len(ret) == 0 { + panic("no return value specified for SendTx") + } + + var r0 common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(agglayer.SignedTx) (common.Hash, error)); ok { + return rf(signedTx) + } + if rf, ok := ret.Get(0).(func(agglayer.SignedTx) common.Hash); ok { + r0 = rf(signedTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + if rf, ok := ret.Get(1).(func(agglayer.SignedTx) error); ok { + r1 = rf(signedTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WaitTxToBeMined provides a mock function with given fields: hash, ctx +func (_m *AgglayerClientInterfaceMock) WaitTxToBeMined(hash common.Hash, ctx context.Context) error { + ret := _m.Called(hash, ctx) + + if len(ret) == 0 { + panic("no return value specified for WaitTxToBeMined") + } + + var r0 error + if rf, ok := ret.Get(0).(func(common.Hash, context.Context) error); ok { + r0 = rf(hash, ctx) + } else { + r0 
= ret.Error(0) + } + + return r0 +} + +// NewAgglayerClientInterfaceMock creates a new instance of AgglayerClientInterfaceMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAgglayerClientInterfaceMock(t interface { + mock.TestingT + Cleanup(func()) +}) *AgglayerClientInterfaceMock { + mock := &AgglayerClientInterfaceMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggregator/mocks/mock_dbtx.go b/aggregator/mocks/mock_dbtx.go new file mode 100644 index 00000000..f870cd57 --- /dev/null +++ b/aggregator/mocks/mock_dbtx.go @@ -0,0 +1,350 @@ +// Code generated by mockery v2.39.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + pgconn "github.com/jackc/pgconn" + mock "github.com/stretchr/testify/mock" + + pgx "github.com/jackc/pgx/v4" +) + +// DbTxMock is an autogenerated mock type for the Tx type +type DbTxMock struct { + mock.Mock +} + +// Begin provides a mock function with given fields: ctx +func (_m *DbTxMock) Begin(ctx context.Context) (pgx.Tx, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Begin") + } + + var r0 pgx.Tx + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (pgx.Tx, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) pgx.Tx); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(pgx.Tx) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BeginFunc provides a mock function with given fields: ctx, f +func (_m *DbTxMock) BeginFunc(ctx context.Context, f func(pgx.Tx) error) error { + ret := _m.Called(ctx, f) + + if len(ret) == 0 { + panic("no return value specified for BeginFunc") + } + + var r0 error + if rf, ok := 
ret.Get(0).(func(context.Context, func(pgx.Tx) error) error); ok { + r0 = rf(ctx, f) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Commit provides a mock function with given fields: ctx +func (_m *DbTxMock) Commit(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Commit") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Conn provides a mock function with given fields: +func (_m *DbTxMock) Conn() *pgx.Conn { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Conn") + } + + var r0 *pgx.Conn + if rf, ok := ret.Get(0).(func() *pgx.Conn); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*pgx.Conn) + } + } + + return r0 +} + +// CopyFrom provides a mock function with given fields: ctx, tableName, columnNames, rowSrc +func (_m *DbTxMock) CopyFrom(ctx context.Context, tableName pgx.Identifier, columnNames []string, rowSrc pgx.CopyFromSource) (int64, error) { + ret := _m.Called(ctx, tableName, columnNames, rowSrc) + + if len(ret) == 0 { + panic("no return value specified for CopyFrom") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, pgx.Identifier, []string, pgx.CopyFromSource) (int64, error)); ok { + return rf(ctx, tableName, columnNames, rowSrc) + } + if rf, ok := ret.Get(0).(func(context.Context, pgx.Identifier, []string, pgx.CopyFromSource) int64); ok { + r0 = rf(ctx, tableName, columnNames, rowSrc) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(context.Context, pgx.Identifier, []string, pgx.CopyFromSource) error); ok { + r1 = rf(ctx, tableName, columnNames, rowSrc) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Exec provides a mock function with given fields: ctx, sql, arguments +func (_m *DbTxMock) Exec(ctx context.Context, sql string, arguments 
...interface{}) (pgconn.CommandTag, error) { + var _ca []interface{} + _ca = append(_ca, ctx, sql) + _ca = append(_ca, arguments...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Exec") + } + + var r0 pgconn.CommandTag + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) (pgconn.CommandTag, error)); ok { + return rf(ctx, sql, arguments...) + } + if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) pgconn.CommandTag); ok { + r0 = rf(ctx, sql, arguments...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(pgconn.CommandTag) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, ...interface{}) error); ok { + r1 = rf(ctx, sql, arguments...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LargeObjects provides a mock function with given fields: +func (_m *DbTxMock) LargeObjects() pgx.LargeObjects { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LargeObjects") + } + + var r0 pgx.LargeObjects + if rf, ok := ret.Get(0).(func() pgx.LargeObjects); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(pgx.LargeObjects) + } + + return r0 +} + +// Prepare provides a mock function with given fields: ctx, name, sql +func (_m *DbTxMock) Prepare(ctx context.Context, name string, sql string) (*pgconn.StatementDescription, error) { + ret := _m.Called(ctx, name, sql) + + if len(ret) == 0 { + panic("no return value specified for Prepare") + } + + var r0 *pgconn.StatementDescription + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) (*pgconn.StatementDescription, error)); ok { + return rf(ctx, name, sql) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string) *pgconn.StatementDescription); ok { + r0 = rf(ctx, name, sql) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*pgconn.StatementDescription) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { 
+ r1 = rf(ctx, name, sql) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Query provides a mock function with given fields: ctx, sql, args +func (_m *DbTxMock) Query(ctx context.Context, sql string, args ...interface{}) (pgx.Rows, error) { + var _ca []interface{} + _ca = append(_ca, ctx, sql) + _ca = append(_ca, args...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Query") + } + + var r0 pgx.Rows + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) (pgx.Rows, error)); ok { + return rf(ctx, sql, args...) + } + if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) pgx.Rows); ok { + r0 = rf(ctx, sql, args...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(pgx.Rows) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, ...interface{}) error); ok { + r1 = rf(ctx, sql, args...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QueryFunc provides a mock function with given fields: ctx, sql, args, scans, f +func (_m *DbTxMock) QueryFunc(ctx context.Context, sql string, args []interface{}, scans []interface{}, f func(pgx.QueryFuncRow) error) (pgconn.CommandTag, error) { + ret := _m.Called(ctx, sql, args, scans, f) + + if len(ret) == 0 { + panic("no return value specified for QueryFunc") + } + + var r0 pgconn.CommandTag + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, []interface{}, []interface{}, func(pgx.QueryFuncRow) error) (pgconn.CommandTag, error)); ok { + return rf(ctx, sql, args, scans, f) + } + if rf, ok := ret.Get(0).(func(context.Context, string, []interface{}, []interface{}, func(pgx.QueryFuncRow) error) pgconn.CommandTag); ok { + r0 = rf(ctx, sql, args, scans, f) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(pgconn.CommandTag) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, []interface{}, []interface{}, func(pgx.QueryFuncRow) error) error); ok { + r1 = rf(ctx, sql, args, 
scans, f) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QueryRow provides a mock function with given fields: ctx, sql, args +func (_m *DbTxMock) QueryRow(ctx context.Context, sql string, args ...interface{}) pgx.Row { + var _ca []interface{} + _ca = append(_ca, ctx, sql) + _ca = append(_ca, args...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for QueryRow") + } + + var r0 pgx.Row + if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) pgx.Row); ok { + r0 = rf(ctx, sql, args...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(pgx.Row) + } + } + + return r0 +} + +// Rollback provides a mock function with given fields: ctx +func (_m *DbTxMock) Rollback(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Rollback") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SendBatch provides a mock function with given fields: ctx, b +func (_m *DbTxMock) SendBatch(ctx context.Context, b *pgx.Batch) pgx.BatchResults { + ret := _m.Called(ctx, b) + + if len(ret) == 0 { + panic("no return value specified for SendBatch") + } + + var r0 pgx.BatchResults + if rf, ok := ret.Get(0).(func(context.Context, *pgx.Batch) pgx.BatchResults); ok { + r0 = rf(ctx, b) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(pgx.BatchResults) + } + } + + return r0 +} + +// NewDbTxMock creates a new instance of DbTxMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewDbTxMock(t interface { + mock.TestingT + Cleanup(func()) +}) *DbTxMock { + mock := &DbTxMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggregator/mocks/mock_eth_tx_manager.go b/aggregator/mocks/mock_eth_tx_manager.go new file mode 100644 index 00000000..8db7a440 --- /dev/null +++ b/aggregator/mocks/mock_eth_tx_manager.go @@ -0,0 +1,258 @@ +// Code generated by mockery v2.39.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + big "math/big" + + common "github.com/ethereum/go-ethereum/common" + + ethtxmanager "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" + + kzg4844 "github.com/ethereum/go-ethereum/crypto/kzg4844" + + mock "github.com/stretchr/testify/mock" + + types "github.com/ethereum/go-ethereum/core/types" + + zkevm_ethtx_managertypes "github.com/0xPolygon/zkevm-ethtx-manager/types" +) + +// EthTxManagerClientMock is an autogenerated mock type for the EthTxManagerClient type +type EthTxManagerClientMock struct { + mock.Mock +} + +// Add provides a mock function with given fields: ctx, to, value, data, gasOffset, sidecar +func (_m *EthTxManagerClientMock) Add(ctx context.Context, to *common.Address, value *big.Int, data []byte, gasOffset uint64, sidecar *types.BlobTxSidecar) (common.Hash, error) { + ret := _m.Called(ctx, to, value, data, gasOffset, sidecar) + + if len(ret) == 0 { + panic("no return value specified for Add") + } + + var r0 common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar) (common.Hash, error)); ok { + return rf(ctx, to, value, data, gasOffset, sidecar) + } + if rf, ok := ret.Get(0).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar) common.Hash); ok { + r0 = rf(ctx, to, value, data, gasOffset, sidecar) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + if rf, ok := 
ret.Get(1).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar) error); ok { + r1 = rf(ctx, to, value, data, gasOffset, sidecar) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AddWithGas provides a mock function with given fields: ctx, to, value, data, gasOffset, sidecar, gas +func (_m *EthTxManagerClientMock) AddWithGas(ctx context.Context, to *common.Address, value *big.Int, data []byte, gasOffset uint64, sidecar *types.BlobTxSidecar, gas uint64) (common.Hash, error) { + ret := _m.Called(ctx, to, value, data, gasOffset, sidecar, gas) + + if len(ret) == 0 { + panic("no return value specified for AddWithGas") + } + + var r0 common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar, uint64) (common.Hash, error)); ok { + return rf(ctx, to, value, data, gasOffset, sidecar, gas) + } + if rf, ok := ret.Get(0).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar, uint64) common.Hash); ok { + r0 = rf(ctx, to, value, data, gasOffset, sidecar, gas) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar, uint64) error); ok { + r1 = rf(ctx, to, value, data, gasOffset, sidecar, gas) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EncodeBlobData provides a mock function with given fields: data +func (_m *EthTxManagerClientMock) EncodeBlobData(data []byte) (kzg4844.Blob, error) { + ret := _m.Called(data) + + if len(ret) == 0 { + panic("no return value specified for EncodeBlobData") + } + + var r0 kzg4844.Blob + var r1 error + if rf, ok := ret.Get(0).(func([]byte) (kzg4844.Blob, error)); ok { + return rf(data) + } + if rf, ok := ret.Get(0).(func([]byte) kzg4844.Blob); ok { + r0 = rf(data) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(kzg4844.Blob) + } + } + + 
if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(data) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MakeBlobSidecar provides a mock function with given fields: blobs +func (_m *EthTxManagerClientMock) MakeBlobSidecar(blobs []kzg4844.Blob) *types.BlobTxSidecar { + ret := _m.Called(blobs) + + if len(ret) == 0 { + panic("no return value specified for MakeBlobSidecar") + } + + var r0 *types.BlobTxSidecar + if rf, ok := ret.Get(0).(func([]kzg4844.Blob) *types.BlobTxSidecar); ok { + r0 = rf(blobs) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.BlobTxSidecar) + } + } + + return r0 +} + +// ProcessPendingMonitoredTxs provides a mock function with given fields: ctx, resultHandler +func (_m *EthTxManagerClientMock) ProcessPendingMonitoredTxs(ctx context.Context, resultHandler ethtxmanager.ResultHandler) { + _m.Called(ctx, resultHandler) +} + +// Remove provides a mock function with given fields: ctx, id +func (_m *EthTxManagerClientMock) Remove(ctx context.Context, id common.Hash) error { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for Remove") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) error); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RemoveAll provides a mock function with given fields: ctx +func (_m *EthTxManagerClientMock) RemoveAll(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for RemoveAll") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Result provides a mock function with given fields: ctx, id +func (_m *EthTxManagerClientMock) Result(ctx context.Context, id common.Hash) (zkevm_ethtx_managertypes.MonitoredTxResult, error) { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for Result") + } + + var 
r0 zkevm_ethtx_managertypes.MonitoredTxResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (zkevm_ethtx_managertypes.MonitoredTxResult, error)); ok { + return rf(ctx, id) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) zkevm_ethtx_managertypes.MonitoredTxResult); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Get(0).(zkevm_ethtx_managertypes.MonitoredTxResult) + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ResultsByStatus provides a mock function with given fields: ctx, statuses +func (_m *EthTxManagerClientMock) ResultsByStatus(ctx context.Context, statuses []zkevm_ethtx_managertypes.MonitoredTxStatus) ([]zkevm_ethtx_managertypes.MonitoredTxResult, error) { + ret := _m.Called(ctx, statuses) + + if len(ret) == 0 { + panic("no return value specified for ResultsByStatus") + } + + var r0 []zkevm_ethtx_managertypes.MonitoredTxResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []zkevm_ethtx_managertypes.MonitoredTxStatus) ([]zkevm_ethtx_managertypes.MonitoredTxResult, error)); ok { + return rf(ctx, statuses) + } + if rf, ok := ret.Get(0).(func(context.Context, []zkevm_ethtx_managertypes.MonitoredTxStatus) []zkevm_ethtx_managertypes.MonitoredTxResult); ok { + r0 = rf(ctx, statuses) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]zkevm_ethtx_managertypes.MonitoredTxResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []zkevm_ethtx_managertypes.MonitoredTxStatus) error); ok { + r1 = rf(ctx, statuses) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Start provides a mock function with given fields: +func (_m *EthTxManagerClientMock) Start() { + _m.Called() +} + +// Stop provides a mock function with given fields: +func (_m *EthTxManagerClientMock) Stop() { + _m.Called() +} + +// NewEthTxManagerClientMock creates a new instance of EthTxManagerClientMock. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEthTxManagerClientMock(t interface { + mock.TestingT + Cleanup(func()) +}) *EthTxManagerClientMock { + mock := &EthTxManagerClientMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggregator/mocks/mock_etherman.go b/aggregator/mocks/mock_etherman.go new file mode 100644 index 00000000..351acef3 --- /dev/null +++ b/aggregator/mocks/mock_etherman.go @@ -0,0 +1,210 @@ +// Code generated by mockery v2.39.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + big "math/big" + + common "github.com/ethereum/go-ethereum/common" + + ethmantypes "github.com/0xPolygon/cdk/aggregator/ethmantypes" + + mock "github.com/stretchr/testify/mock" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// EthermanMock is an autogenerated mock type for the Etherman type +type EthermanMock struct { + mock.Mock +} + +// BuildTrustedVerifyBatchesTxData provides a mock function with given fields: lastVerifiedBatch, newVerifiedBatch, inputs, beneficiary +func (_m *EthermanMock) BuildTrustedVerifyBatchesTxData(lastVerifiedBatch uint64, newVerifiedBatch uint64, inputs *ethmantypes.FinalProofInputs, beneficiary common.Address) (*common.Address, []byte, error) { + ret := _m.Called(lastVerifiedBatch, newVerifiedBatch, inputs, beneficiary) + + if len(ret) == 0 { + panic("no return value specified for BuildTrustedVerifyBatchesTxData") + } + + var r0 *common.Address + var r1 []byte + var r2 error + if rf, ok := ret.Get(0).(func(uint64, uint64, *ethmantypes.FinalProofInputs, common.Address) (*common.Address, []byte, error)); ok { + return rf(lastVerifiedBatch, newVerifiedBatch, inputs, beneficiary) + } + if rf, ok := ret.Get(0).(func(uint64, uint64, *ethmantypes.FinalProofInputs, common.Address) *common.Address); ok { + r0 = rf(lastVerifiedBatch, 
newVerifiedBatch, inputs, beneficiary) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*common.Address) + } + } + + if rf, ok := ret.Get(1).(func(uint64, uint64, *ethmantypes.FinalProofInputs, common.Address) []byte); ok { + r1 = rf(lastVerifiedBatch, newVerifiedBatch, inputs, beneficiary) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).([]byte) + } + } + + if rf, ok := ret.Get(2).(func(uint64, uint64, *ethmantypes.FinalProofInputs, common.Address) error); ok { + r2 = rf(lastVerifiedBatch, newVerifiedBatch, inputs, beneficiary) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// GetBatchAccInputHash provides a mock function with given fields: ctx, batchNumber +func (_m *EthermanMock) GetBatchAccInputHash(ctx context.Context, batchNumber uint64) (common.Hash, error) { + ret := _m.Called(ctx, batchNumber) + + if len(ret) == 0 { + panic("no return value specified for GetBatchAccInputHash") + } + + var r0 common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) (common.Hash, error)); ok { + return rf(ctx, batchNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) common.Hash); ok { + r0 = rf(ctx, batchNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, batchNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetLatestBlockHeader provides a mock function with given fields: ctx +func (_m *EthermanMock) GetLatestBlockHeader(ctx context.Context) (*types.Header, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetLatestBlockHeader") + } + + var r0 *types.Header + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*types.Header, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *types.Header); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Header) 
+ } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetLatestVerifiedBatchNum provides a mock function with given fields: +func (_m *EthermanMock) GetLatestVerifiedBatchNum() (uint64, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetLatestVerifiedBatchNum") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func() (uint64, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetRollupId provides a mock function with given fields: +func (_m *EthermanMock) GetRollupId() uint32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetRollupId") + } + + var r0 uint32 + if rf, ok := ret.Get(0).(func() uint32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint32) + } + + return r0 +} + +// HeaderByNumber provides a mock function with given fields: ctx, number +func (_m *EthermanMock) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) { + ret := _m.Called(ctx, number) + + if len(ret) == 0 { + panic("no return value specified for HeaderByNumber") + } + + var r0 *types.Header + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*types.Header, error)); ok { + return rf(ctx, number) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *types.Header); ok { + r0 = rf(ctx, number) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Header) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, number) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewEthermanMock creates a new instance of EthermanMock. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEthermanMock(t interface { + mock.TestingT + Cleanup(func()) +}) *EthermanMock { + mock := &EthermanMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggregator/mocks/mock_prover.go b/aggregator/mocks/mock_prover.go new file mode 100644 index 00000000..72bd66dc --- /dev/null +++ b/aggregator/mocks/mock_prover.go @@ -0,0 +1,271 @@ +// Code generated by mockery v2.39.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" + + prover "github.com/0xPolygon/cdk/aggregator/prover" +) + +// ProverInterfaceMock is an autogenerated mock type for the ProverInterface type +type ProverInterfaceMock struct { + mock.Mock +} + +// Addr provides a mock function with given fields: +func (_m *ProverInterfaceMock) Addr() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Addr") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// AggregatedProof provides a mock function with given fields: inputProof1, inputProof2 +func (_m *ProverInterfaceMock) AggregatedProof(inputProof1 string, inputProof2 string) (*string, error) { + ret := _m.Called(inputProof1, inputProof2) + + if len(ret) == 0 { + panic("no return value specified for AggregatedProof") + } + + var r0 *string + var r1 error + if rf, ok := ret.Get(0).(func(string, string) (*string, error)); ok { + return rf(inputProof1, inputProof2) + } + if rf, ok := ret.Get(0).(func(string, string) *string); ok { + r0 = rf(inputProof1, inputProof2) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*string) + } + } + + if rf, ok := ret.Get(1).(func(string, string) error); ok { 
+ r1 = rf(inputProof1, inputProof2) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BatchProof provides a mock function with given fields: input +func (_m *ProverInterfaceMock) BatchProof(input *prover.StatelessInputProver) (*string, error) { + ret := _m.Called(input) + + if len(ret) == 0 { + panic("no return value specified for BatchProof") + } + + var r0 *string + var r1 error + if rf, ok := ret.Get(0).(func(*prover.StatelessInputProver) (*string, error)); ok { + return rf(input) + } + if rf, ok := ret.Get(0).(func(*prover.StatelessInputProver) *string); ok { + r0 = rf(input) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*string) + } + } + + if rf, ok := ret.Get(1).(func(*prover.StatelessInputProver) error); ok { + r1 = rf(input) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FinalProof provides a mock function with given fields: inputProof, aggregatorAddr +func (_m *ProverInterfaceMock) FinalProof(inputProof string, aggregatorAddr string) (*string, error) { + ret := _m.Called(inputProof, aggregatorAddr) + + if len(ret) == 0 { + panic("no return value specified for FinalProof") + } + + var r0 *string + var r1 error + if rf, ok := ret.Get(0).(func(string, string) (*string, error)); ok { + return rf(inputProof, aggregatorAddr) + } + if rf, ok := ret.Get(0).(func(string, string) *string); ok { + r0 = rf(inputProof, aggregatorAddr) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*string) + } + } + + if rf, ok := ret.Get(1).(func(string, string) error); ok { + r1 = rf(inputProof, aggregatorAddr) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ID provides a mock function with given fields: +func (_m *ProverInterfaceMock) ID() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ID") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// IsIdle provides a mock function with given 
fields: +func (_m *ProverInterfaceMock) IsIdle() (bool, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for IsIdle") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func() (bool, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Name provides a mock function with given fields: +func (_m *ProverInterfaceMock) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// WaitFinalProof provides a mock function with given fields: ctx, proofID +func (_m *ProverInterfaceMock) WaitFinalProof(ctx context.Context, proofID string) (*prover.FinalProof, error) { + ret := _m.Called(ctx, proofID) + + if len(ret) == 0 { + panic("no return value specified for WaitFinalProof") + } + + var r0 *prover.FinalProof + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (*prover.FinalProof, error)); ok { + return rf(ctx, proofID) + } + if rf, ok := ret.Get(0).(func(context.Context, string) *prover.FinalProof); ok { + r0 = rf(ctx, proofID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*prover.FinalProof) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, proofID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WaitRecursiveProof provides a mock function with given fields: ctx, proofID +func (_m *ProverInterfaceMock) WaitRecursiveProof(ctx context.Context, proofID string) (string, common.Hash, error) { + ret := _m.Called(ctx, proofID) + + if len(ret) == 0 { + panic("no return value specified for WaitRecursiveProof") + } + + var r0 string + var r1 common.Hash + var r2 
error + if rf, ok := ret.Get(0).(func(context.Context, string) (string, common.Hash, error)); ok { + return rf(ctx, proofID) + } + if rf, ok := ret.Get(0).(func(context.Context, string) string); ok { + r0 = rf(ctx, proofID) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context, string) common.Hash); ok { + r1 = rf(ctx, proofID) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(common.Hash) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, string) error); ok { + r2 = rf(ctx, proofID) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// NewProverInterfaceMock creates a new instance of ProverInterfaceMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewProverInterfaceMock(t interface { + mock.TestingT + Cleanup(func()) +}) *ProverInterfaceMock { + mock := &ProverInterfaceMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggregator/mocks/mock_state.go b/aggregator/mocks/mock_state.go new file mode 100644 index 00000000..8879dd05 --- /dev/null +++ b/aggregator/mocks/mock_state.go @@ -0,0 +1,406 @@ +// Code generated by mockery v2.39.0. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + pgx "github.com/jackc/pgx/v4" + mock "github.com/stretchr/testify/mock" + + state "github.com/0xPolygon/cdk/state" +) + +// StateInterfaceMock is an autogenerated mock type for the StateInterface type +type StateInterfaceMock struct { + mock.Mock +} + +// AddBatch provides a mock function with given fields: ctx, dbBatch, dbTx +func (_m *StateInterfaceMock) AddBatch(ctx context.Context, dbBatch *state.DBBatch, dbTx pgx.Tx) error { + ret := _m.Called(ctx, dbBatch, dbTx) + + if len(ret) == 0 { + panic("no return value specified for AddBatch") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *state.DBBatch, pgx.Tx) error); ok { + r0 = rf(ctx, dbBatch, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AddGeneratedProof provides a mock function with given fields: ctx, proof, dbTx +func (_m *StateInterfaceMock) AddGeneratedProof(ctx context.Context, proof *state.Proof, dbTx pgx.Tx) error { + ret := _m.Called(ctx, proof, dbTx) + + if len(ret) == 0 { + panic("no return value specified for AddGeneratedProof") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *state.Proof, pgx.Tx) error); ok { + r0 = rf(ctx, proof, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AddSequence provides a mock function with given fields: ctx, sequence, dbTx +func (_m *StateInterfaceMock) AddSequence(ctx context.Context, sequence state.Sequence, dbTx pgx.Tx) error { + ret := _m.Called(ctx, sequence, dbTx) + + if len(ret) == 0 { + panic("no return value specified for AddSequence") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, state.Sequence, pgx.Tx) error); ok { + r0 = rf(ctx, sequence, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// BeginStateTransaction provides a mock function with given fields: ctx +func (_m *StateInterfaceMock) BeginStateTransaction(ctx context.Context) (pgx.Tx, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 
{ + panic("no return value specified for BeginStateTransaction") + } + + var r0 pgx.Tx + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (pgx.Tx, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) pgx.Tx); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(pgx.Tx) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CheckProofContainsCompleteSequences provides a mock function with given fields: ctx, proof, dbTx +func (_m *StateInterfaceMock) CheckProofContainsCompleteSequences(ctx context.Context, proof *state.Proof, dbTx pgx.Tx) (bool, error) { + ret := _m.Called(ctx, proof, dbTx) + + if len(ret) == 0 { + panic("no return value specified for CheckProofContainsCompleteSequences") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *state.Proof, pgx.Tx) (bool, error)); ok { + return rf(ctx, proof, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, *state.Proof, pgx.Tx) bool); ok { + r0 = rf(ctx, proof, dbTx) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, *state.Proof, pgx.Tx) error); ok { + r1 = rf(ctx, proof, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CheckProofExistsForBatch provides a mock function with given fields: ctx, batchNumber, dbTx +func (_m *StateInterfaceMock) CheckProofExistsForBatch(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (bool, error) { + ret := _m.Called(ctx, batchNumber, dbTx) + + if len(ret) == 0 { + panic("no return value specified for CheckProofExistsForBatch") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (bool, error)); ok { + return rf(ctx, batchNumber, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) bool); ok { + r0 = rf(ctx, batchNumber, dbTx) + } else { + r0 = 
ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok { + r1 = rf(ctx, batchNumber, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CleanupGeneratedProofs provides a mock function with given fields: ctx, batchNumber, dbTx +func (_m *StateInterfaceMock) CleanupGeneratedProofs(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error { + ret := _m.Called(ctx, batchNumber, dbTx) + + if len(ret) == 0 { + panic("no return value specified for CleanupGeneratedProofs") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) error); ok { + r0 = rf(ctx, batchNumber, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CleanupLockedProofs provides a mock function with given fields: ctx, duration, dbTx +func (_m *StateInterfaceMock) CleanupLockedProofs(ctx context.Context, duration string, dbTx pgx.Tx) (int64, error) { + ret := _m.Called(ctx, duration, dbTx) + + if len(ret) == 0 { + panic("no return value specified for CleanupLockedProofs") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, pgx.Tx) (int64, error)); ok { + return rf(ctx, duration, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, string, pgx.Tx) int64); ok { + r0 = rf(ctx, duration, dbTx) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, pgx.Tx) error); ok { + r1 = rf(ctx, duration, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DeleteBatchesNewerThanBatchNumber provides a mock function with given fields: ctx, batchNumber, dbTx +func (_m *StateInterfaceMock) DeleteBatchesNewerThanBatchNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error { + ret := _m.Called(ctx, batchNumber, dbTx) + + if len(ret) == 0 { + panic("no return value specified for DeleteBatchesNewerThanBatchNumber") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) error); 
ok { + r0 = rf(ctx, batchNumber, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteBatchesOlderThanBatchNumber provides a mock function with given fields: ctx, batchNumber, dbTx +func (_m *StateInterfaceMock) DeleteBatchesOlderThanBatchNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error { + ret := _m.Called(ctx, batchNumber, dbTx) + + if len(ret) == 0 { + panic("no return value specified for DeleteBatchesOlderThanBatchNumber") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) error); ok { + r0 = rf(ctx, batchNumber, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteGeneratedProofs provides a mock function with given fields: ctx, batchNumber, batchNumberFinal, dbTx +func (_m *StateInterfaceMock) DeleteGeneratedProofs(ctx context.Context, batchNumber uint64, batchNumberFinal uint64, dbTx pgx.Tx) error { + ret := _m.Called(ctx, batchNumber, batchNumberFinal, dbTx) + + if len(ret) == 0 { + panic("no return value specified for DeleteGeneratedProofs") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, pgx.Tx) error); ok { + r0 = rf(ctx, batchNumber, batchNumberFinal, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteUngeneratedProofs provides a mock function with given fields: ctx, dbTx +func (_m *StateInterfaceMock) DeleteUngeneratedProofs(ctx context.Context, dbTx pgx.Tx) error { + ret := _m.Called(ctx, dbTx) + + if len(ret) == 0 { + panic("no return value specified for DeleteUngeneratedProofs") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, pgx.Tx) error); ok { + r0 = rf(ctx, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GetBatch provides a mock function with given fields: ctx, batchNumber, dbTx +func (_m *StateInterfaceMock) GetBatch(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*state.DBBatch, error) { + ret := _m.Called(ctx, batchNumber, dbTx) + + if len(ret) == 0 { + 
panic("no return value specified for GetBatch") + } + + var r0 *state.DBBatch + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (*state.DBBatch, error)); ok { + return rf(ctx, batchNumber, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) *state.DBBatch); ok { + r0 = rf(ctx, batchNumber, dbTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*state.DBBatch) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok { + r1 = rf(ctx, batchNumber, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetProofReadyToVerify provides a mock function with given fields: ctx, lastVerfiedBatchNumber, dbTx +func (_m *StateInterfaceMock) GetProofReadyToVerify(ctx context.Context, lastVerfiedBatchNumber uint64, dbTx pgx.Tx) (*state.Proof, error) { + ret := _m.Called(ctx, lastVerfiedBatchNumber, dbTx) + + if len(ret) == 0 { + panic("no return value specified for GetProofReadyToVerify") + } + + var r0 *state.Proof + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (*state.Proof, error)); ok { + return rf(ctx, lastVerfiedBatchNumber, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) *state.Proof); ok { + r0 = rf(ctx, lastVerfiedBatchNumber, dbTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*state.Proof) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok { + r1 = rf(ctx, lastVerfiedBatchNumber, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetProofsToAggregate provides a mock function with given fields: ctx, dbTx +func (_m *StateInterfaceMock) GetProofsToAggregate(ctx context.Context, dbTx pgx.Tx) (*state.Proof, *state.Proof, error) { + ret := _m.Called(ctx, dbTx) + + if len(ret) == 0 { + panic("no return value specified for GetProofsToAggregate") + } + + var r0 *state.Proof + var r1 *state.Proof + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, 
pgx.Tx) (*state.Proof, *state.Proof, error)); ok { + return rf(ctx, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, pgx.Tx) *state.Proof); ok { + r0 = rf(ctx, dbTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*state.Proof) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, pgx.Tx) *state.Proof); ok { + r1 = rf(ctx, dbTx) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*state.Proof) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, pgx.Tx) error); ok { + r2 = rf(ctx, dbTx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// UpdateGeneratedProof provides a mock function with given fields: ctx, proof, dbTx +func (_m *StateInterfaceMock) UpdateGeneratedProof(ctx context.Context, proof *state.Proof, dbTx pgx.Tx) error { + ret := _m.Called(ctx, proof, dbTx) + + if len(ret) == 0 { + panic("no return value specified for UpdateGeneratedProof") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *state.Proof, pgx.Tx) error); ok { + r0 = rf(ctx, proof, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewStateInterfaceMock creates a new instance of StateInterfaceMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewStateInterfaceMock(t interface { + mock.TestingT + Cleanup(func()) +}) *StateInterfaceMock { + mock := &StateInterfaceMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggregator/mocks/mock_synchronizer.go b/aggregator/mocks/mock_synchronizer.go new file mode 100644 index 00000000..28811e8c --- /dev/null +++ b/aggregator/mocks/mock_synchronizer.go @@ -0,0 +1,321 @@ +// Code generated by mockery v2.39.0. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" + + synchronizer "github.com/0xPolygonHermez/zkevm-synchronizer-l1/synchronizer" +) + +// SynchronizerInterfaceMock is an autogenerated mock type for the Synchronizer type +type SynchronizerInterfaceMock struct { + mock.Mock +} + +// GetL1BlockByNumber provides a mock function with given fields: ctx, blockNumber +func (_m *SynchronizerInterfaceMock) GetL1BlockByNumber(ctx context.Context, blockNumber uint64) (*synchronizer.L1Block, error) { + ret := _m.Called(ctx, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for GetL1BlockByNumber") + } + + var r0 *synchronizer.L1Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) (*synchronizer.L1Block, error)); ok { + return rf(ctx, blockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) *synchronizer.L1Block); ok { + r0 = rf(ctx, blockNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*synchronizer.L1Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetL1InfoRootPerIndex provides a mock function with given fields: ctx, L1InfoTreeIndex +func (_m *SynchronizerInterfaceMock) GetL1InfoRootPerIndex(ctx context.Context, L1InfoTreeIndex uint32) (common.Hash, error) { + ret := _m.Called(ctx, L1InfoTreeIndex) + + if len(ret) == 0 { + panic("no return value specified for GetL1InfoRootPerIndex") + } + + var r0 common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint32) (common.Hash, error)); ok { + return rf(ctx, L1InfoTreeIndex) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32) common.Hash); ok { + r0 = rf(ctx, L1InfoTreeIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, 
uint32) error); ok { + r1 = rf(ctx, L1InfoTreeIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetL1InfoTreeLeaves provides a mock function with given fields: ctx, indexLeaves +func (_m *SynchronizerInterfaceMock) GetL1InfoTreeLeaves(ctx context.Context, indexLeaves []uint32) (map[uint32]synchronizer.L1InfoTreeLeaf, error) { + ret := _m.Called(ctx, indexLeaves) + + if len(ret) == 0 { + panic("no return value specified for GetL1InfoTreeLeaves") + } + + var r0 map[uint32]synchronizer.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []uint32) (map[uint32]synchronizer.L1InfoTreeLeaf, error)); ok { + return rf(ctx, indexLeaves) + } + if rf, ok := ret.Get(0).(func(context.Context, []uint32) map[uint32]synchronizer.L1InfoTreeLeaf); ok { + r0 = rf(ctx, indexLeaves) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[uint32]synchronizer.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []uint32) error); ok { + r1 = rf(ctx, indexLeaves) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetLastL1Block provides a mock function with given fields: ctx +func (_m *SynchronizerInterfaceMock) GetLastL1Block(ctx context.Context) (*synchronizer.L1Block, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetLastL1Block") + } + + var r0 *synchronizer.L1Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*synchronizer.L1Block, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *synchronizer.L1Block); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*synchronizer.L1Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetLastestVirtualBatchNumber provides a mock function with given fields: ctx +func (_m *SynchronizerInterfaceMock) GetLastestVirtualBatchNumber(ctx 
context.Context) (uint64, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetLastestVirtualBatchNumber") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetLeafsByL1InfoRoot provides a mock function with given fields: ctx, l1InfoRoot +func (_m *SynchronizerInterfaceMock) GetLeafsByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash) ([]synchronizer.L1InfoTreeLeaf, error) { + ret := _m.Called(ctx, l1InfoRoot) + + if len(ret) == 0 { + panic("no return value specified for GetLeafsByL1InfoRoot") + } + + var r0 []synchronizer.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) ([]synchronizer.L1InfoTreeLeaf, error)); ok { + return rf(ctx, l1InfoRoot) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) []synchronizer.L1InfoTreeLeaf); ok { + r0 = rf(ctx, l1InfoRoot) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]synchronizer.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, l1InfoRoot) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetSequenceByBatchNumber provides a mock function with given fields: ctx, batchNumber +func (_m *SynchronizerInterfaceMock) GetSequenceByBatchNumber(ctx context.Context, batchNumber uint64) (*synchronizer.SequencedBatches, error) { + ret := _m.Called(ctx, batchNumber) + + if len(ret) == 0 { + panic("no return value specified for GetSequenceByBatchNumber") + } + + var r0 *synchronizer.SequencedBatches + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) (*synchronizer.SequencedBatches, error)); ok { 
+ return rf(ctx, batchNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) *synchronizer.SequencedBatches); ok { + r0 = rf(ctx, batchNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*synchronizer.SequencedBatches) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, batchNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetVirtualBatchByBatchNumber provides a mock function with given fields: ctx, batchNumber +func (_m *SynchronizerInterfaceMock) GetVirtualBatchByBatchNumber(ctx context.Context, batchNumber uint64) (*synchronizer.VirtualBatch, error) { + ret := _m.Called(ctx, batchNumber) + + if len(ret) == 0 { + panic("no return value specified for GetVirtualBatchByBatchNumber") + } + + var r0 *synchronizer.VirtualBatch + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) (*synchronizer.VirtualBatch, error)); ok { + return rf(ctx, batchNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) *synchronizer.VirtualBatch); ok { + r0 = rf(ctx, batchNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*synchronizer.VirtualBatch) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, batchNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IsSynced provides a mock function with given fields: +func (_m *SynchronizerInterfaceMock) IsSynced() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for IsSynced") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// SetCallbackOnReorgDone provides a mock function with given fields: callback +func (_m *SynchronizerInterfaceMock) SetCallbackOnReorgDone(callback func(synchronizer.ReorgExecutionResult)) { + _m.Called(callback) +} + +// SetCallbackOnRollbackBatches provides a mock function with given fields: callback +func (_m 
*SynchronizerInterfaceMock) SetCallbackOnRollbackBatches(callback func(synchronizer.RollbackBatchesData)) { + _m.Called(callback) +} + +// Stop provides a mock function with given fields: +func (_m *SynchronizerInterfaceMock) Stop() { + _m.Called() +} + +// Sync provides a mock function with given fields: returnOnSync +func (_m *SynchronizerInterfaceMock) Sync(returnOnSync bool) error { + ret := _m.Called(returnOnSync) + + if len(ret) == 0 { + panic("no return value specified for Sync") + } + + var r0 error + if rf, ok := ret.Get(0).(func(bool) error); ok { + r0 = rf(returnOnSync) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewSynchronizerInterfaceMock creates a new instance of SynchronizerInterfaceMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewSynchronizerInterfaceMock(t interface { + mock.TestingT + Cleanup(func()) +}) *SynchronizerInterfaceMock { + mock := &SynchronizerInterfaceMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggregator/profitabilitychecker.go b/aggregator/profitabilitychecker.go index f05799eb..dc91a21e 100644 --- a/aggregator/profitabilitychecker.go +++ b/aggregator/profitabilitychecker.go @@ -18,14 +18,14 @@ const ( // TxProfitabilityCheckerBase checks pol collateral with min reward type TxProfitabilityCheckerBase struct { - State stateInterface + State StateInterface IntervalAfterWhichBatchSentAnyway time.Duration MinReward *big.Int } // NewTxProfitabilityCheckerBase init base tx profitability checker func NewTxProfitabilityCheckerBase( - state stateInterface, interval time.Duration, minReward *big.Int, + state StateInterface, interval time.Duration, minReward *big.Int, ) *TxProfitabilityCheckerBase { return &TxProfitabilityCheckerBase{ State: state, @@ -50,12 +50,12 @@ func (pc *TxProfitabilityCheckerBase) IsProfitable(ctx 
context.Context, polColla // TxProfitabilityCheckerAcceptAll validate batch anyway and don't check anything type TxProfitabilityCheckerAcceptAll struct { - State stateInterface + State StateInterface IntervalAfterWhichBatchSentAnyway time.Duration } // NewTxProfitabilityCheckerAcceptAll init tx profitability checker that accept all txs -func NewTxProfitabilityCheckerAcceptAll(state stateInterface, interval time.Duration) *TxProfitabilityCheckerAcceptAll { +func NewTxProfitabilityCheckerAcceptAll(state StateInterface, interval time.Duration) *TxProfitabilityCheckerAcceptAll { return &TxProfitabilityCheckerAcceptAll{ State: state, IntervalAfterWhichBatchSentAnyway: interval, @@ -77,7 +77,7 @@ func (pc *TxProfitabilityCheckerAcceptAll) IsProfitable(ctx context.Context, pol } // TODO: now it's impossible to check, when batch got consolidated, bcs it's not saved -// func isConsolidatedBatchAppeared(ctx context.Context, state stateInterface, +// func isConsolidatedBatchAppeared(ctx context.Context, state StateInterface, // intervalAfterWhichBatchConsolidatedAnyway time.Duration) (bool, error) { // batch, err := state.GetLastVerifiedBatch(ctx, nil) // if err != nil { diff --git a/bridgesync/bridgesync.go b/bridgesync/bridgesync.go index e79fba2e..e6a61c5e 100644 --- a/bridgesync/bridgesync.go +++ b/bridgesync/bridgesync.go @@ -160,7 +160,7 @@ func (s *BridgeSync) GetLastProcessedBlock(ctx context.Context) (uint64, error) return s.processor.GetLastProcessedBlock(ctx) } -func (s *BridgeSync) GetBridgeRootByHash(ctx context.Context, root common.Hash) (tree.Root, error) { +func (s *BridgeSync) GetBridgeRootByHash(ctx context.Context, root common.Hash) (*tree.Root, error) { return s.processor.exitTree.GetRootByHash(ctx, root) } @@ -172,10 +172,7 @@ func (s *BridgeSync) GetBridges(ctx context.Context, fromBlock, toBlock uint64) return s.processor.GetBridges(ctx, fromBlock, toBlock) } -// GetProof retrieves the Merkle proof for the given deposit count and exit root. 
-func (s *BridgeSync) GetProof( - ctx context.Context, depositCount uint32, localExitRoot common.Hash, -) ([32]common.Hash, error) { +func (s *BridgeSync) GetProof(ctx context.Context, depositCount uint32, localExitRoot common.Hash) (tree.Proof, error) { return s.processor.exitTree.GetProof(ctx, depositCount, localExitRoot) } @@ -186,3 +183,11 @@ func (p *processor) GetBlockByLER(ctx context.Context, ler common.Hash) (uint64, } return root.BlockNum, nil } + +func (s *BridgeSync) GetRootByLER(ctx context.Context, ler common.Hash) (*tree.Root, error) { + root, err := s.processor.exitTree.GetRootByHash(ctx, ler) + if err != nil { + return root, err + } + return root, nil +} diff --git a/bridgesync/claimcalldata_test.go b/bridgesync/claimcalldata_test.go index 1319835b..b8b432ae 100644 --- a/bridgesync/claimcalldata_test.go +++ b/bridgesync/claimcalldata_test.go @@ -10,6 +10,8 @@ import ( "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/test/contracts/claimmock" "github.com/0xPolygon/cdk/test/contracts/claimmockcaller" + "github.com/0xPolygon/cdk/test/contracts/claimmocktest" + tree "github.com/0xPolygon/cdk/tree/types" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -48,15 +50,17 @@ func TestClaimCalldata(t *testing.T) { // Deploy contracts bridgeAddr, _, bridgeContract, err := claimmock.DeployClaimmock(auth, client) require.NoError(t, err) - _, _, claimCaller, err := claimmockcaller.DeployClaimmockcaller(auth, client, bridgeAddr) + claimCallerAddr, _, claimCaller, err := claimmockcaller.DeployClaimmockcaller(auth, client, bridgeAddr) + require.NoError(t, err) + _, _, claimTest, err := claimmocktest.DeployClaimmocktest(auth, client, bridgeAddr, claimCallerAddr) require.NoError(t, err) proofLocal := [32][32]byte{} - proofLocalH := [32]common.Hash{} + proofLocalH := tree.Proof{} proofLocal[5] = common.HexToHash("beef") proofLocalH[5] = common.HexToHash("beef") 
proofRollup := [32][32]byte{} - proofRollupH := [32]common.Hash{} + proofRollupH := tree.Proof{} proofRollup[4] = common.HexToHash("a1fa") proofRollupH[4] = common.HexToHash("a1fa") expectedClaim := Claim{ @@ -83,6 +87,18 @@ func TestClaimCalldata(t *testing.T) { DestinationNetwork: 0, Metadata: []byte{}, } + expectedClaim3 := Claim{ + OriginNetwork: 69, + OriginAddress: common.HexToAddress("ffaaffaa"), + DestinationAddress: common.HexToAddress("2233445566"), + Amount: big.NewInt(5), + MainnetExitRoot: common.HexToHash("5ca1e"), + RollupExitRoot: common.HexToHash("dead"), + ProofLocalExitRoot: proofLocalH, + ProofRollupExitRoot: proofRollupH, + DestinationNetwork: 0, + Metadata: []byte{}, + } auth.GasLimit = 999999 // for some reason gas estimation fails :( abi, err := claimmock.ClaimmockMetaData.GetAbi() @@ -940,6 +956,937 @@ func TestClaimCalldata(t *testing.T) { expectedClaim: expectedClaim2, }) + // indirect + indirect call claim message bytes + expectedClaim.GlobalIndex = big.NewInt(426) + expectedClaim.IsMessage = true + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + tx, err = claimTest.ClaimTestInternal( + auth, + expectedClaimBytes, + false, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "indirect + indirect call to claim message bytes", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim, + }) + + reverted = [2]bool{false, false} + + // 2 indirect + indirect call claim message (same global index) + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = 
big.NewInt(427) + expectedClaim2.IsMessage = true + expectedClaim2.GlobalIndex = big.NewInt(427) + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + tx, err = claimTest.Claim2TestInternal( + auth, + expectedClaimBytes, + expectedClaimBytes2, + reverted, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "2 indirect + indirect call claim message 1 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim, + }) + testCases = append(testCases, testCase{ + description: "2 indirect + indirect call claim message 2 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[1], + expectedClaim: expectedClaim2, + }) + + reverted3 := [3]bool{false, false, false} + + // 3 ok (indirectx2, indirect, indirectx2) call claim message (same global index) + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = big.NewInt(427) + expectedClaim2.IsMessage = true + expectedClaim2.GlobalIndex = big.NewInt(427) + expectedClaim3.IsMessage = true + expectedClaim3.GlobalIndex = big.NewInt(427) + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + 
expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes3, err := abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim3.GlobalIndex, + expectedClaim3.MainnetExitRoot, + expectedClaim3.RollupExitRoot, + expectedClaim3.OriginNetwork, + expectedClaim3.OriginAddress, + expectedClaim3.DestinationNetwork, + expectedClaim3.DestinationAddress, + expectedClaim3.Amount, + expectedClaim3.Metadata, + ) + require.NoError(t, err) + tx, err = claimTest.Claim3TestInternal( + auth, + expectedClaimBytes, + expectedClaimBytes2, + expectedClaimBytes3, + reverted3, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "3 ok (indirectx2, indirect, indirectx2) call claim message 1 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim, + }) + testCases = append(testCases, testCase{ + description: "3 ok (indirectx2, indirect, indirectx2) call claim message 2 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[1], + expectedClaim: expectedClaim2, + }) + testCases = append(testCases, testCase{ + description: "3 ok (indirectx2, indirect, indirectx2) call claim message 3 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[2], + 
expectedClaim: expectedClaim3, + }) + + // 3 ok (indirectx2, indirect, indirectx2) call claim message (diff global index) + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = big.NewInt(427) + expectedClaim2.IsMessage = true + expectedClaim2.GlobalIndex = big.NewInt(428) + expectedClaim3.IsMessage = true + expectedClaim3.GlobalIndex = big.NewInt(429) + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes3, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim3.GlobalIndex, + expectedClaim3.MainnetExitRoot, + expectedClaim3.RollupExitRoot, + expectedClaim3.OriginNetwork, + expectedClaim3.OriginAddress, + expectedClaim3.DestinationNetwork, + expectedClaim3.DestinationAddress, + expectedClaim3.Amount, + expectedClaim3.Metadata, + ) + require.NoError(t, err) + tx, err = claimTest.Claim3TestInternal( + auth, + expectedClaimBytes, + expectedClaimBytes2, + expectedClaimBytes3, + reverted3, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "3 ok (indirectx2, indirect, indirectx2) call claim message 1 (diff globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + 
expectedClaim: expectedClaim, + }) + testCases = append(testCases, testCase{ + description: "3 ok (indirectx2, indirect, indirectx2) call claim message 2 (diff globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[1], + expectedClaim: expectedClaim2, + }) + testCases = append(testCases, testCase{ + description: "3 ok (indirectx2, indirect, indirectx2) call claim message 3 (diff globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[2], + expectedClaim: expectedClaim3, + }) + + reverted3 = [3]bool{true, false, false} + + // 1 ko 2 ok (indirectx2, indirect, indirectx2) call claim message (diff global index) + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = big.NewInt(427) + expectedClaim2.IsMessage = true + expectedClaim2.GlobalIndex = big.NewInt(428) + expectedClaim3.IsMessage = true + expectedClaim3.GlobalIndex = big.NewInt(429) + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes3, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim3.GlobalIndex, + expectedClaim3.MainnetExitRoot, + expectedClaim3.RollupExitRoot, + expectedClaim3.OriginNetwork, + expectedClaim3.OriginAddress, + expectedClaim3.DestinationNetwork, + expectedClaim3.DestinationAddress, + expectedClaim3.Amount, 
+ expectedClaim3.Metadata, + ) + require.NoError(t, err) + tx, err = claimTest.Claim3TestInternal( + auth, + expectedClaimBytes, + expectedClaimBytes2, + expectedClaimBytes3, + reverted3, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "1 ko 2 ok (indirectx2, indirect, indirectx2) call claim message 1 (diff globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim2, + }) + testCases = append(testCases, testCase{ + description: "1 ko 2 ok (indirectx2, indirect, indirectx2) call claim message 2 (diff globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[1], + expectedClaim: expectedClaim3, + }) + + reverted3 = [3]bool{false, true, false} + + // 1 ok 1 ko 1 ok (indirectx2, indirect, indirectx2) call claim message (diff global index) + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = big.NewInt(427) + expectedClaim2.IsMessage = true + expectedClaim2.GlobalIndex = big.NewInt(428) + expectedClaim3.IsMessage = true + expectedClaim3.GlobalIndex = big.NewInt(429) + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes3, err = abi.Pack( + "claimMessage", + 
proofLocal, + proofRollup, + expectedClaim3.GlobalIndex, + expectedClaim3.MainnetExitRoot, + expectedClaim3.RollupExitRoot, + expectedClaim3.OriginNetwork, + expectedClaim3.OriginAddress, + expectedClaim3.DestinationNetwork, + expectedClaim3.DestinationAddress, + expectedClaim3.Amount, + expectedClaim3.Metadata, + ) + require.NoError(t, err) + tx, err = claimTest.Claim3TestInternal( + auth, + expectedClaimBytes, + expectedClaimBytes2, + expectedClaimBytes3, + reverted3, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "1 ko 2 ok (indirectx2, indirect, indirectx2) call claim message 1 (diff globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim, + }) + testCases = append(testCases, testCase{ + description: "1 ko 2 ok (indirectx2, indirect, indirectx2) call claim message 2 (diff globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[1], + expectedClaim: expectedClaim3, + }) + + reverted3 = [3]bool{false, false, true} + + // 1 ok 1 ok 1 ko (indirectx2, indirect, indirectx2) call claim message (diff global index) + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = big.NewInt(427) + expectedClaim2.IsMessage = true + expectedClaim2.GlobalIndex = big.NewInt(428) + expectedClaim3.IsMessage = true + expectedClaim3.GlobalIndex = big.NewInt(429) + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, 
+ expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes3, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim3.GlobalIndex, + expectedClaim3.MainnetExitRoot, + expectedClaim3.RollupExitRoot, + expectedClaim3.OriginNetwork, + expectedClaim3.OriginAddress, + expectedClaim3.DestinationNetwork, + expectedClaim3.DestinationAddress, + expectedClaim3.Amount, + expectedClaim3.Metadata, + ) + require.NoError(t, err) + tx, err = claimTest.Claim3TestInternal( + auth, + expectedClaimBytes, + expectedClaimBytes2, + expectedClaimBytes3, + reverted3, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "1 ok 1 ok 1 ko (indirectx2, indirect, indirectx2) call claim message 1 (diff globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim, + }) + testCases = append(testCases, testCase{ + description: "1 ok 1 ok 1 ko (indirectx2, indirect, indirectx2) call claim message 2 (diff globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[1], + expectedClaim: expectedClaim2, + }) + + reverted3 = [3]bool{true, false, false} + + // 1 ko 2 ok (indirectx2, indirect, indirectx2) call claim message (same global index) + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = big.NewInt(427) + expectedClaim2.IsMessage = true + expectedClaim2.GlobalIndex = big.NewInt(427) + expectedClaim3.IsMessage = true + expectedClaim3.GlobalIndex = big.NewInt(427) + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + 
expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes3, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim3.GlobalIndex, + expectedClaim3.MainnetExitRoot, + expectedClaim3.RollupExitRoot, + expectedClaim3.OriginNetwork, + expectedClaim3.OriginAddress, + expectedClaim3.DestinationNetwork, + expectedClaim3.DestinationAddress, + expectedClaim3.Amount, + expectedClaim3.Metadata, + ) + require.NoError(t, err) + tx, err = claimTest.Claim3TestInternal( + auth, + expectedClaimBytes, + expectedClaimBytes2, + expectedClaimBytes3, + reverted3, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "1 ko 2 ok (indirectx2, indirect, indirectx2) call claim message 1 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim2, + }) + testCases = append(testCases, testCase{ + description: "1 ko 2 ok (indirectx2, indirect, indirectx2) call claim message 2 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[1], + expectedClaim: expectedClaim3, + }) + + reverted3 = [3]bool{false, true, false} + + // 1 ok 1 ko 1 ok (indirectx2, indirect, indirectx2) call claim message (same global index) + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = big.NewInt(427) + expectedClaim2.IsMessage = true + expectedClaim2.GlobalIndex = big.NewInt(427) + expectedClaim3.IsMessage = true + expectedClaim3.GlobalIndex = 
big.NewInt(427) + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes3, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim3.GlobalIndex, + expectedClaim3.MainnetExitRoot, + expectedClaim3.RollupExitRoot, + expectedClaim3.OriginNetwork, + expectedClaim3.OriginAddress, + expectedClaim3.DestinationNetwork, + expectedClaim3.DestinationAddress, + expectedClaim3.Amount, + expectedClaim3.Metadata, + ) + require.NoError(t, err) + tx, err = claimTest.Claim3TestInternal( + auth, + expectedClaimBytes, + expectedClaimBytes2, + expectedClaimBytes3, + reverted3, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "1 ok 1 ko 1 ok (indirectx2, indirect, indirectx2) call claim message 1 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim, + }) + testCases = append(testCases, testCase{ + description: "1 ok 1 ko 1 ok (indirectx2, indirect, indirectx2) call claim message 2 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[1], + expectedClaim: expectedClaim3, + }) + + reverted3 = [3]bool{false, false, true} + + // 1 ok 1 ok 1 ko (indirectx2, 
indirect, indirectx2) call claim message (same global index) + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = big.NewInt(427) + expectedClaim2.IsMessage = true + expectedClaim2.GlobalIndex = big.NewInt(427) + expectedClaim3.IsMessage = true + expectedClaim3.GlobalIndex = big.NewInt(427) + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes3, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim3.GlobalIndex, + expectedClaim3.MainnetExitRoot, + expectedClaim3.RollupExitRoot, + expectedClaim3.OriginNetwork, + expectedClaim3.OriginAddress, + expectedClaim3.DestinationNetwork, + expectedClaim3.DestinationAddress, + expectedClaim3.Amount, + expectedClaim3.Metadata, + ) + require.NoError(t, err) + tx, err = claimTest.Claim3TestInternal( + auth, + expectedClaimBytes, + expectedClaimBytes2, + expectedClaimBytes3, + reverted3, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "1 ok 1 ok 1 ko (indirectx2, indirect, indirectx2) call claim message 1 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim, + }) + testCases = 
append(testCases, testCase{ + description: "1 ok 1 ok 1 ko (indirectx2, indirect, indirectx2) call claim message 2 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[1], + expectedClaim: expectedClaim2, + }) + + reverted3 = [3]bool{true, true, false} + + // 2 ko 1 ok (indirectx2, indirect, indirectx2) call claim message (same global index) + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = big.NewInt(427) + expectedClaim2.IsMessage = true + expectedClaim2.GlobalIndex = big.NewInt(427) + expectedClaim3.IsMessage = true + expectedClaim3.GlobalIndex = big.NewInt(427) + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes3, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim3.GlobalIndex, + expectedClaim3.MainnetExitRoot, + expectedClaim3.RollupExitRoot, + expectedClaim3.OriginNetwork, + expectedClaim3.OriginAddress, + expectedClaim3.DestinationNetwork, + expectedClaim3.DestinationAddress, + expectedClaim3.Amount, + expectedClaim3.Metadata, + ) + require.NoError(t, err) + tx, err = claimTest.Claim3TestInternal( + auth, + expectedClaimBytes, + expectedClaimBytes2, + expectedClaimBytes3, + reverted3, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = 
client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "2 ko 1 ok (indirectx2, indirect, indirectx2) call claim message 1 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim3, + }) + + reverted3 = [3]bool{false, true, true} + + // 1 ok 2 ko (indirectx2, indirect, indirectx2) call claim message (same global index) + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = big.NewInt(427) + expectedClaim2.IsMessage = true + expectedClaim2.GlobalIndex = big.NewInt(427) + expectedClaim3.IsMessage = true + expectedClaim3.GlobalIndex = big.NewInt(427) + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes3, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim3.GlobalIndex, + expectedClaim3.MainnetExitRoot, + expectedClaim3.RollupExitRoot, + expectedClaim3.OriginNetwork, + expectedClaim3.OriginAddress, + expectedClaim3.DestinationNetwork, + expectedClaim3.DestinationAddress, + expectedClaim3.Amount, + expectedClaim3.Metadata, + ) + require.NoError(t, err) + tx, err = claimTest.Claim3TestInternal( + auth, + expectedClaimBytes, + expectedClaimBytes2, + expectedClaimBytes3, + reverted3, + ) + require.NoError(t, 
err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "1 ok 2 ko (indirectx2, indirect, indirectx2) call claim message 1 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim, + }) + + reverted3 = [3]bool{true, false, true} + + // 1 ko 1 ok 1 ko (indirectx2, indirect, indirectx2) call claim message (same global index) + expectedClaim.IsMessage = true + expectedClaim.GlobalIndex = big.NewInt(427) + expectedClaim2.IsMessage = true + expectedClaim2.GlobalIndex = big.NewInt(427) + expectedClaim3.IsMessage = true + expectedClaim3.GlobalIndex = big.NewInt(427) + expectedClaimBytes, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim.GlobalIndex, + expectedClaim.MainnetExitRoot, + expectedClaim.RollupExitRoot, + expectedClaim.OriginNetwork, + expectedClaim.OriginAddress, + expectedClaim.DestinationNetwork, + expectedClaim.DestinationAddress, + expectedClaim.Amount, + expectedClaim.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes2, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim2.GlobalIndex, + expectedClaim2.MainnetExitRoot, + expectedClaim2.RollupExitRoot, + expectedClaim2.OriginNetwork, + expectedClaim2.OriginAddress, + expectedClaim2.DestinationNetwork, + expectedClaim2.DestinationAddress, + expectedClaim2.Amount, + expectedClaim2.Metadata, + ) + require.NoError(t, err) + expectedClaimBytes3, err = abi.Pack( + "claimMessage", + proofLocal, + proofRollup, + expectedClaim3.GlobalIndex, + expectedClaim3.MainnetExitRoot, + expectedClaim3.RollupExitRoot, + expectedClaim3.OriginNetwork, + expectedClaim3.OriginAddress, + expectedClaim3.DestinationNetwork, + expectedClaim3.DestinationAddress, + expectedClaim3.Amount, + expectedClaim3.Metadata, + ) + require.NoError(t, err) + tx, err = claimTest.Claim3TestInternal( + auth, + expectedClaimBytes, + expectedClaimBytes2, + 
expectedClaimBytes3, + reverted3, + ) + require.NoError(t, err) + time.Sleep(1 * time.Second) + r, err = client.TransactionReceipt(ctx, tx.Hash()) + testCases = append(testCases, testCase{ + description: "1 ko 1 ok 1 ko (indirectx2, indirect, indirectx2) call claim message 1 (same globalIndex)", + bridgeAddr: bridgeAddr, + log: *r.Logs[0], + expectedClaim: expectedClaim2, + }) + for _, tc := range testCases { log.Info(tc.description) t.Run(tc.description, func(t *testing.T) { diff --git a/bridgesync/downloader.go b/bridgesync/downloader.go index b34267ce..dbea8c8f 100644 --- a/bridgesync/downloader.go +++ b/bridgesync/downloader.go @@ -9,6 +9,7 @@ import ( "github.com/0xPolygon/cdk-contracts-tooling/contracts/etrog/polygonzkevmbridge" "github.com/0xPolygon/cdk-contracts-tooling/contracts/etrog/polygonzkevmbridgev2" rpcTypes "github.com/0xPolygon/cdk-rpc/types" + "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/sync" tree "github.com/0xPolygon/cdk/tree/types" "github.com/ethereum/go-ethereum" @@ -150,8 +151,6 @@ func setClaimCalldata(client EthClienter, bridge common.Address, txHash common.H } // find the claim linked to the event using DFS - // TODO: take into account potential reverts that may be found on the path, - // and other edge cases callStack := stack.New() callStack.Push(*c) for { @@ -181,7 +180,7 @@ func setClaimCalldata(client EthClienter, bridge common.Address, txHash common.H callStack.Push(c) } } - return ErrNotFound + return db.ErrNotFound } func setClaimIfFoundOnInput(input []byte, claim *Claim) (bool, error) { diff --git a/bridgesync/e2e_test.go b/bridgesync/e2e_test.go index a19afb8d..c0a22484 100644 --- a/bridgesync/e2e_test.go +++ b/bridgesync/e2e_test.go @@ -8,60 +8,28 @@ import ( "testing" "time" - "github.com/0xPolygon/cdk-contracts-tooling/contracts/elderberry-paris/polygonzkevmbridgev2" "github.com/0xPolygon/cdk/bridgesync" "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/reorgdetector" 
"github.com/0xPolygon/cdk/test/helpers" - "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethclient/simulated" "github.com/stretchr/testify/require" ) -func newSimulatedClient(t *testing.T, auth *bind.TransactOpts) ( - client *simulated.Backend, - bridgeAddr common.Address, - bridgeContract *polygonzkevmbridgev2.Polygonzkevmbridgev2, -) { - t.Helper() - - var err error - balance, _ := big.NewInt(0).SetString("10000000000000000000000000", 10) - address := auth.From - genesisAlloc := map[common.Address]types.Account{ - address: { - Balance: balance, - }, - } - blockGasLimit := uint64(999999999999999999) - client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit)) - - bridgeAddr, _, bridgeContract, err = polygonzkevmbridgev2.DeployPolygonzkevmbridgev2(auth, client.Client()) - require.NoError(t, err) - client.Commit() - - return -} - func TestBridgeEventE2E(t *testing.T) { ctx := context.Background() dbPathSyncer := path.Join(t.TempDir(), "file::memory:?cache=shared") dbPathReorg := t.TempDir() - privateKey, err := crypto.GenerateKey() - require.NoError(t, err) - auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337)) - require.NoError(t, err) - client, bridgeAddr, bridgeSc := newSimulatedClient(t, auth) + + client, setup := helpers.SimulatedBackend(t, nil, 0) rd, err := reorgdetector.New(client.Client(), reorgdetector.Config{DBPath: dbPathReorg}) require.NoError(t, err) go rd.Start(ctx) //nolint:errcheck testClient := helpers.TestClient{ClientRenamed: client.Client()} - syncer, err := bridgesync.NewL1(ctx, dbPathSyncer, bridgeAddr, 10, etherman.LatestBlock, rd, testClient, 0, time.Millisecond*10, 0, 0) + syncer, err := bridgesync.NewL1(ctx, dbPathSyncer, setup.EBZkevmBridgeAddr, 10, etherman.LatestBlock, rd, testClient, 0, time.Millisecond*10, 0, 0) 
require.NoError(t, err) go syncer.Start(ctx) @@ -71,15 +39,15 @@ func TestBridgeEventE2E(t *testing.T) { for i := 0; i < 100; i++ { bridge := bridgesync.Bridge{ - BlockNum: uint64(2 + i), + BlockNum: uint64(4 + i), Amount: big.NewInt(0), DepositCount: uint32(i), DestinationNetwork: 3, DestinationAddress: common.HexToAddress("f00"), Metadata: []byte{}, } - tx, err := bridgeSc.BridgeAsset( - auth, + tx, err := setup.EBZkevmBridgeContract.BridgeAsset( + setup.UserAuth, bridge.DestinationNetwork, bridge.DestinationAddress, bridge.Amount, diff --git a/bridgesync/mock_l2_test.go b/bridgesync/mock_l2_test.go index a8f33ef8..ef842d18 100644 --- a/bridgesync/mock_l2_test.go +++ b/bridgesync/mock_l2_test.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.40.1. DO NOT EDIT. +// Code generated by mockery v2.45.0. DO NOT EDIT. package bridgesync @@ -12,6 +12,8 @@ import ( mock "github.com/stretchr/testify/mock" + rpc "github.com/ethereum/go-ethereum/rpc" + types "github.com/ethereum/go-ethereum/core/types" ) @@ -138,6 +140,26 @@ func (_m *L2Mock) CallContract(ctx context.Context, call ethereum.CallMsg, block return r0, r1 } +// Client provides a mock function with given fields: +func (_m *L2Mock) Client() *rpc.Client { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Client") + } + + var r0 *rpc.Client + if rf, ok := ret.Get(0).(func() *rpc.Client); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*rpc.Client) + } + } + + return r0 +} + // CodeAt provides a mock function with given fields: ctx, contract, blockNumber func (_m *L2Mock) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) { ret := _m.Called(ctx, contract, blockNumber) diff --git a/bridgesync/processor.go b/bridgesync/processor.go index 47b26595..e4ba5423 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -23,7 +23,6 @@ import ( var ( // ErrBlockNotProcessed indicates that the given block(s) have not 
been processed yet. ErrBlockNotProcessed = errors.New("given block(s) have not been processed yet") - ErrNotFound = errors.New("not found") ) // Bridge is the representation of a bridge event @@ -184,7 +183,7 @@ func (p *processor) queryBlockRange(tx db.Querier, fromBlock, toBlock uint64, ta `, table), fromBlock, toBlock) if err != nil { if errors.Is(err, sql.ErrNoRows) { - return nil, ErrNotFound + return nil, db.ErrNotFound } return nil, err } diff --git a/claimsponsor/claimsponsor.go b/claimsponsor/claimsponsor.go index fbcdca73..c9df6561 100644 --- a/claimsponsor/claimsponsor.go +++ b/claimsponsor/claimsponsor.go @@ -9,8 +9,10 @@ import ( "time" dbCommon "github.com/0xPolygon/cdk/common" + "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sync" + tree "github.com/0xPolygon/cdk/tree/types" "github.com/ethereum/go-ethereum/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/iter" @@ -31,14 +33,13 @@ const ( var ( ErrInvalidClaim = errors.New("invalid claim") - ErrNotFound = errors.New("not found") ) // Claim representation of a claim event type Claim struct { LeafType uint8 - ProofLocalExitRoot [32]common.Hash - ProofRollupExitRoot [32]common.Hash + ProofLocalExitRoot tree.Proof + ProofRollupExitRoot tree.Proof GlobalIndex *big.Int MainnetExitRoot common.Hash RollupExitRoot common.Hash @@ -131,7 +132,7 @@ func (c *ClaimSponsor) Start(ctx context.Context) { if err2 != nil { err = err2 tx.Rollback() - if errors.Is(err, ErrNotFound) { + if errors.Is(err, db.ErrNotFound) { c.logger.Debugf("queue is empty") err = nil time.Sleep(c.waitOnEmptyQueue) @@ -242,7 +243,7 @@ func (c *ClaimSponsor) AddClaimToQueue(ctx context.Context, claim *Claim) error } _, err = getClaim(tx, claim.GlobalIndex) - if !errors.Is(err, ErrNotFound) { + if !errors.Is(err, db.ErrNotFound) { if err != nil { tx.Rollback() @@ -264,7 +265,7 @@ func (c *ClaimSponsor) AddClaimToQueue(ctx context.Context, claim *Claim) error var 
queuePosition uint64 lastQueuePosition, _, err := getLastQueueIndex(tx) switch { - case errors.Is(err, ErrNotFound): + case errors.Is(err, db.ErrNotFound): queuePosition = 0 case err != nil: @@ -307,7 +308,7 @@ func (c *ClaimSponsor) getClaimByQueueIndex(ctx context.Context, queueIndex uint return nil, err } if globalIndexBytes == nil { - return nil, ErrNotFound + return nil, db.ErrNotFound } return getClaim(tx, new(big.Int).SetBytes(globalIndexBytes)) @@ -345,7 +346,7 @@ func getIndex(iter iter.KV) (uint64, *big.Int, error) { return 0, nil, err } if k == nil { - return 0, nil, ErrNotFound + return 0, nil, db.ErrNotFound } globalIndex := new(big.Int).SetBytes(v) @@ -368,7 +369,7 @@ func getClaim(tx kv.Tx, globalIndex *big.Int) (*Claim, error) { return nil, err } if claimBytes == nil { - return nil, ErrNotFound + return nil, db.ErrNotFound } claim := &Claim{} err = json.Unmarshal(claimBytes, claim) diff --git a/claimsponsor/e2e_test.go b/claimsponsor/e2e_test.go index 8a037a58..b4fce499 100644 --- a/claimsponsor/e2e_test.go +++ b/claimsponsor/e2e_test.go @@ -13,6 +13,7 @@ import ( "github.com/0xPolygon/cdk/claimsponsor" "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/log" + "github.com/0xPolygon/cdk/test/aggoraclehelpers" "github.com/0xPolygon/cdk/test/helpers" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" @@ -22,7 +23,7 @@ import ( func TestE2EL1toEVML2(t *testing.T) { // start other needed components ctx := context.Background() - env := helpers.SetupAggoracleWithEVMChain(t) + env := aggoraclehelpers.SetupAggoracleWithEVMChain(t) dbPathBridgeSyncL1 := path.Join(t.TempDir(), "file::memory:?cache=shared") testClient := helpers.TestClient{ClientRenamed: env.L1Client.Client()} bridgeSyncL1, err := bridgesync.NewL1(ctx, dbPathBridgeSyncL1, env.BridgeL1Addr, 10, etherman.LatestBlock, env.ReorgDetector, testClient, 0, time.Millisecond*10, 0, 0) diff --git a/claimsponsor/evmclaimsponsor.go 
b/claimsponsor/evmclaimsponsor.go index 540f3203..12d0c4ca 100644 --- a/claimsponsor/evmclaimsponsor.go +++ b/claimsponsor/evmclaimsponsor.go @@ -9,7 +9,8 @@ import ( "github.com/0xPolygon/cdk-contracts-tooling/contracts/etrog/polygonzkevmbridgev2" configTypes "github.com/0xPolygon/cdk/config/types" "github.com/0xPolygon/cdk/log" - "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" + "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" + ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -33,10 +34,9 @@ type EthClienter interface { type EthTxManager interface { Remove(ctx context.Context, id common.Hash) error - ResultsByStatus(ctx context.Context, statuses []ethtxmanager.MonitoredTxStatus, - ) ([]ethtxmanager.MonitoredTxResult, error) - Result(ctx context.Context, id common.Hash) (ethtxmanager.MonitoredTxResult, error) - Add(ctx context.Context, to *common.Address, forcedNonce *uint64, value *big.Int, data []byte, + ResultsByStatus(ctx context.Context, statuses []ethtxtypes.MonitoredTxStatus) ([]ethtxtypes.MonitoredTxResult, error) + Result(ctx context.Context, id common.Hash) (ethtxtypes.MonitoredTxResult, error) + Add(ctx context.Context, to *common.Address, value *big.Int, data []byte, gasOffset uint64, sidecar *types.BlobTxSidecar) (common.Hash, error) } @@ -152,7 +152,7 @@ func (c *EVMClaimSponsor) sendClaim(ctx context.Context, claim *Claim) (string, if err != nil { return "", err } - id, err := c.ethTxManager.Add(ctx, &c.bridgeAddr, nil, big.NewInt(0), data, c.gasOffest, nil) + id, err := c.ethTxManager.Add(ctx, &c.bridgeAddr, big.NewInt(0), data, c.gasOffest, nil) if err != nil { return "", err } @@ -166,14 +166,14 @@ func (c *EVMClaimSponsor) claimStatus(ctx context.Context, id string) (ClaimStat return "", err } switch res.Status { - case ethtxmanager.MonitoredTxStatusCreated, - 
ethtxmanager.MonitoredTxStatusSent: + case ethtxtypes.MonitoredTxStatusCreated, + ethtxtypes.MonitoredTxStatusSent: return WIPStatus, nil - case ethtxmanager.MonitoredTxStatusFailed: + case ethtxtypes.MonitoredTxStatusFailed: return FailedClaimStatus, nil - case ethtxmanager.MonitoredTxStatusMined, - ethtxmanager.MonitoredTxStatusSafe, - ethtxmanager.MonitoredTxStatusFinalized: + case ethtxtypes.MonitoredTxStatusMined, + ethtxtypes.MonitoredTxStatusSafe, + ethtxtypes.MonitoredTxStatusFinalized: return SuccessClaimStatus, nil default: return "", fmt.Errorf("unexpected tx status: %v", res.Status) diff --git a/cmd/run.go b/cmd/run.go index 773c5e24..b113c06e 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -25,7 +25,6 @@ import ( "github.com/0xPolygon/cdk/etherman" ethermanconfig "github.com/0xPolygon/cdk/etherman/config" "github.com/0xPolygon/cdk/etherman/contracts" - "github.com/0xPolygon/cdk/l1bridge2infoindexsync" "github.com/0xPolygon/cdk/l1infotreesync" "github.com/0xPolygon/cdk/lastgersync" "github.com/0xPolygon/cdk/log" @@ -36,10 +35,10 @@ import ( "github.com/0xPolygon/cdk/state" "github.com/0xPolygon/cdk/state/pgstatestorage" "github.com/0xPolygon/cdk/translator" - ethtxman "github.com/0xPolygonHermez/zkevm-ethtx-manager/etherman" - "github.com/0xPolygonHermez/zkevm-ethtx-manager/etherman/etherscan" - "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" - ethtxlog "github.com/0xPolygonHermez/zkevm-ethtx-manager/log" + ethtxman "github.com/0xPolygon/zkevm-ethtx-manager/etherman" + "github.com/0xPolygon/zkevm-ethtx-manager/etherman/etherscan" + "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" + ethtxlog "github.com/0xPolygon/zkevm-ethtx-manager/log" "github.com/ethereum/go-ethereum/ethclient" "github.com/jackc/pgx/v4/pgxpool" "github.com/urfave/cli/v2" @@ -81,10 +80,6 @@ func start(cliCtx *cli.Context) error { claimSponsor := runClaimSponsorIfNeeded(cliCtx.Context, components, l2Client, c.ClaimSponsor) l1BridgeSync := 
runBridgeSyncL1IfNeeded(cliCtx.Context, components, c.BridgeL1Sync, reorgDetectorL1, l1Client) l2BridgeSync := runBridgeSyncL2IfNeeded(cliCtx.Context, components, c.BridgeL2Sync, reorgDetectorL2, l2Client) - l1Bridge2InfoIndexSync := runL1Bridge2InfoIndexSyncIfNeeded( - cliCtx.Context, components, c.L1Bridge2InfoIndexSync, - l1BridgeSync, l1InfoTreeSync, l1Client, - ) lastGERSync := runLastGERSyncIfNeeded( cliCtx.Context, components, c.LastGERSync, reorgDetectorL2, l2Client, l1InfoTreeSync, ) @@ -115,7 +110,6 @@ func start(cliCtx *cli.Context) error { c.Common.NetworkID, claimSponsor, l1InfoTreeSync, - l1Bridge2InfoIndexSync, lastGERSync, l1BridgeSync, l2BridgeSync, @@ -185,6 +179,12 @@ func createSequenceSender( l1InfoTreeSync *l1infotreesync.L1InfoTreeSync, ) *sequencesender.SequenceSender { logger := log.WithFields("module", cdkcommon.SEQUENCE_SENDER) + + // Check config + if cfg.SequenceSender.RPCURL == "" { + logger.Fatal("Required field RPCURL is empty in sequence sender config") + } + ethman, err := etherman.NewClient(ethermanconfig.Config{ EthermanConfig: ethtxman.Config{ URL: cfg.SequenceSender.EthTxManager.Etherman.URL, @@ -206,9 +206,9 @@ func createSequenceSender( logger.Fatal(err) } cfg.SequenceSender.SenderAddress = auth.From - blockFialityType := etherman.BlockNumberFinality(cfg.SequenceSender.BlockFinality) + blockFinalityType := etherman.BlockNumberFinality(cfg.SequenceSender.BlockFinality) - blockFinality, err := blockFialityType.ToBlockNum() + blockFinality, err := blockFinalityType.ToBlockNum() if err != nil { logger.Fatalf("Failed to create block finality. 
Err: %w, ", err) } @@ -498,6 +498,7 @@ func runL1InfoTreeSyncerIfNeeded( cfg.L1InfoTreeSync.InitialBlock, cfg.L1InfoTreeSync.RetryAfterErrorPeriod.Duration, cfg.L1InfoTreeSync.MaxRetryAttemptsAfterError, + l1infotreesync.FlagNone, ) if err != nil { log.Fatal(err) @@ -517,7 +518,7 @@ func runL1ClientIfNeeded(components []string, urlRPCL1 string) *ethclient.Client log.Debugf("dialing L1 client at: %s", urlRPCL1) l1CLient, err := ethclient.Dial(urlRPCL1) if err != nil { - log.Fatal(err) + log.Fatalf("failed to create client for L1 using URL: %s. Err:%v", urlRPCL1, err) } return l1CLient @@ -623,34 +624,6 @@ func runClaimSponsorIfNeeded( return cs } -func runL1Bridge2InfoIndexSyncIfNeeded( - ctx context.Context, - components []string, - cfg l1bridge2infoindexsync.Config, - l1BridgeSync *bridgesync.BridgeSync, - l1InfoTreeSync *l1infotreesync.L1InfoTreeSync, - l1Client *ethclient.Client, -) *l1bridge2infoindexsync.L1Bridge2InfoIndexSync { - if !isNeeded([]string{cdkcommon.RPC}, components) { - return nil - } - l1Bridge2InfoIndexSync, err := l1bridge2infoindexsync.New( - cfg.DBPath, - l1BridgeSync, - l1InfoTreeSync, - l1Client, - cfg.RetryAfterErrorPeriod.Duration, - cfg.MaxRetryAttemptsAfterError, - cfg.WaitForSyncersPeriod.Duration, - ) - if err != nil { - log.Fatalf("error creating l1Bridge2InfoIndexSync: %s", err) - } - go l1Bridge2InfoIndexSync.Start(ctx) - - return l1Bridge2InfoIndexSync -} - func runLastGERSyncIfNeeded( ctx context.Context, components []string, @@ -751,7 +724,6 @@ func createRPC( cdkNetworkID uint32, sponsor *claimsponsor.ClaimSponsor, l1InfoTree *l1infotreesync.L1InfoTreeSync, - l1Bridge2Index *l1bridge2infoindexsync.L1Bridge2InfoIndexSync, injectedGERs *lastgersync.LastGERSync, bridgeL1 *bridgesync.BridgeSync, bridgeL2 *bridgesync.BridgeSync, @@ -767,7 +739,6 @@ func createRPC( cdkNetworkID, sponsor, l1InfoTree, - l1Bridge2Index, injectedGERs, bridgeL1, bridgeL2, diff --git a/config/config.go b/config/config.go index cb899df8..720a11e4 100644 
--- a/config/config.go +++ b/config/config.go @@ -3,7 +3,6 @@ package config import ( "bytes" "errors" - "fmt" "path/filepath" "strings" @@ -14,13 +13,12 @@ import ( "github.com/0xPolygon/cdk/claimsponsor" "github.com/0xPolygon/cdk/common" ethermanconfig "github.com/0xPolygon/cdk/etherman/config" - "github.com/0xPolygon/cdk/l1bridge2infoindexsync" "github.com/0xPolygon/cdk/l1infotreesync" "github.com/0xPolygon/cdk/lastgersync" "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/reorgdetector" "github.com/0xPolygon/cdk/sequencesender" - "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" + "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" "github.com/mitchellh/mapstructure" "github.com/spf13/viper" "github.com/urfave/cli/v2" @@ -52,6 +50,33 @@ const ( FlagOutputFile = "output" // FlagMaxAmount is the flag to avoid to use the flag FlagAmount FlagMaxAmount = "max-amount" + + deprecatedFieldSyncDB = "Aggregator.Synchronizer.DB is deprecated. Use Aggregator.Synchronizer.SQLDB instead." + + deprecatedFieldPersistenceFilename = "EthTxManager.PersistenceFilename is deprecated." + + " Use EthTxManager.StoragePath instead." +) + +type ForbiddenField struct { + FieldName string + Reason string +} + +var ( + forbiddenFieldsOnConfig = []ForbiddenField{ + { + FieldName: "aggregator.synchronizer.db.", + Reason: deprecatedFieldSyncDB, + }, + { + FieldName: "sequencesender.ethtxmanager.persistencefilename", + Reason: deprecatedFieldPersistenceFilename, + }, + { + FieldName: "aggregator.ethtxmanager.persistencefilename", + Reason: deprecatedFieldPersistenceFilename, + }, + } ) /* @@ -96,10 +121,6 @@ type Config struct { // ClaimSponsor is the config for the claim sponsor ClaimSponsor claimsponsor.EVMClaimSponsorConfig - // L1Bridge2InfoIndexSync is the config for the synchronizers that maintains the relation of - // bridge from L1 --> L1 Info tree index. 
Needed for the bridge service (RPC) - L1Bridge2InfoIndexSync l1bridge2infoindexsync.Config - // BridgeL1Sync is the configuration for the synchronizer of the bridge of the L1 BridgeL1Sync bridgesync.Config @@ -128,15 +149,18 @@ func Default() (*Config, error) { return &cfg, nil } +func Load(ctx *cli.Context) (*Config, error) { + configFilePath := ctx.String(FlagCfg) + return LoadFile(configFilePath) +} // Load loads the configuration -func Load(ctx *cli.Context) (*Config, error) { +func LoadFile(configFilePath string) (*Config, error) { cfg, err := Default() if err != nil { return nil, err } - - configFilePath := ctx.String(FlagCfg) + expectedKeys := viper.AllKeys() if configFilePath != "" { dirName, fileName := filepath.Split(configFilePath) @@ -160,7 +184,6 @@ func Load(ctx *cli.Context) (*Config, error) { log.Error("config file not found") } else { log.Errorf("error reading config file: ", err) - return nil, err } } @@ -179,8 +202,45 @@ func Load(ctx *cli.Context) (*Config, error) { if err != nil { return nil, err } + if expectedKeys != nil { + configKeys := viper.AllKeys() + unexpectedFields := getUnexpectedFields(configKeys, expectedKeys) + for _, field := range unexpectedFields { + forbbidenInfo := getForbiddenField(field) + if forbbidenInfo != nil { + log.Warnf("forbidden field %s in config file: %s", field, forbbidenInfo.Reason) + } else { + log.Debugf("field %s in config file doesnt have a default value", field) + } + } + } + return cfg, nil +} + +func getForbiddenField(fieldName string) *ForbiddenField { + for _, forbiddenField := range forbiddenFieldsOnConfig { + if forbiddenField.FieldName == fieldName || strings.HasPrefix(fieldName, forbiddenField.FieldName) { + return &forbiddenField + } + } + return nil +} - fmt.Println("cfg", cfg.NetworkConfig.L1Config) +func getUnexpectedFields(keysOnFile, expectedConfigKeys []string) []string { + wrongFields := make([]string, 0) + for _, key := range keysOnFile { + if !contains(expectedConfigKeys, key) { + 
wrongFields = append(wrongFields, key) + } + } + return wrongFields +} - return cfg, nil +func contains(keys []string, key string) bool { + for _, k := range keys { + if k == key { + return true + } + } + return false } diff --git a/config/config_test.go b/config/config_test.go new file mode 100644 index 00000000..a0844d96 --- /dev/null +++ b/config/config_test.go @@ -0,0 +1,66 @@ +package config + +import ( + "os" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestLoadDeafaultConfig(t *testing.T) { + tmpFile, err := os.CreateTemp("", "ut_config") + require.NoError(t, err) + defer os.Remove(tmpFile.Name()) + _, err = tmpFile.Write([]byte(DefaultValues)) + require.NoError(t, err) + cfg, err := LoadFile(tmpFile.Name()) + require.NoError(t, err) + require.NotNil(t, cfg) +} + +const configWithUnexpectedFields = ` +[UnknownField] +Field = "value" +` + +func TestLoadConfigWithUnexpectedFields(t *testing.T) { + tmpFile, err := os.CreateTemp("", "ut_config") + require.NoError(t, err) + defer os.Remove(tmpFile.Name()) + _, err = tmpFile.Write([]byte(configWithUnexpectedFields)) + require.NoError(t, err) + cfg, err := LoadFile(tmpFile.Name()) + require.NoError(t, err) + require.NotNil(t, cfg) +} + +func TestLoadConfigWithForbiddenFields(t *testing.T) { + cases := []struct { + name string + input string + }{ + { + name: "[Aggregator.Synchronizer] DB", + input: `[aggregator.synchronizer.db] + name = "value"`, + }, + { + name: "[SequenceSender.EthTxManager] PersistenceFilename", + input: `[SequenceSender.EthTxManager] + PersistenceFilename = "foo.json"`, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + tmpFile, err := os.CreateTemp("", "ut_config") + require.NoError(t, err) + defer os.Remove(tmpFile.Name()) + _, err = tmpFile.Write([]byte(c.input)) + require.NoError(t, err) + cfg, err := LoadFile(tmpFile.Name()) + require.NoError(t, err) + require.NotNil(t, cfg) + }) + } +} diff --git a/config/default.go b/config/default.go index 
ce76abc4..74eec57d 100644 --- a/config/default.go +++ b/config/default.go @@ -5,6 +5,18 @@ const DefaultValues = ` ForkUpgradeBatchNumber = 0 ForkUpgradeNewForkId = 0 +[Etherman] + URL="http://localhost:8545" + ForkIDChunkSize=100 + [Etherman.EthermanConfig] + URL="http://localhost:8545" + MultiGasProvider=false + L1ChainID=1337 + HTTPHeaders=[] + [Etherman.EthermanConfig.Etherscan] + ApiKey="" + Url="https://api.etherscan.io/api?module=gastracker&action=gasoracle&apikey=" + [Common] NetworkID = 1 IsValidiumMode = false @@ -28,8 +40,8 @@ WaitPeriodPurgeTxFile = "15m" MaxPendingTx = 1 MaxBatchesForL1 = 300 BlockFinality = "FinalizedBlock" - [SequenceSender.StreamClient] - Server = "127.0.0.1:6900" +RPCURL = "localhost:8123" +GetBatchWaitInterval = "10s" [SequenceSender.EthTxManager] FrequencyToMonitorTxs = "1s" WaitTxToBeMined = "2m" @@ -41,7 +53,7 @@ BlockFinality = "FinalizedBlock" ForcedGas = 0 GasPriceMarginFactor = 1 MaxGasPriceLimit = 0 - PersistenceFilename = "ethtxmanager.json" + StoragePath = "ethtxmanager.db" ReadPendingL1Txs = false SafeStatusL1NumberOfBlocks = 0 FinalizedStatusL1NumberOfBlocks = 0 @@ -97,7 +109,7 @@ SequencerPrivateKey = {} ForcedGas = 0 GasPriceMarginFactor = 1 MaxGasPriceLimit = 0 - PersistenceFilename = "" + StoragePath = "" ReadPendingL1Txs = false SafeStatusL1NumberOfBlocks = 0 FinalizedStatusL1NumberOfBlocks = 0 @@ -106,14 +118,13 @@ SequencerPrivateKey = {} L1ChainID = 11155111 HTTPHeaders = [] [Aggregator.Synchronizer] - [Aggregator.Synchronizer.DB] - Name = "sync_db" - User = "sync_user" - Password = "sync_password" - Host = "cdk-l1-sync-db" - Port = "5432" - EnableLog = false - MaxConns = 10 + [Aggregator.Synchronizer.Log] + Environment = "development" # "production" or "development" + Level = "info" + Outputs = ["stderr"] + [Aggregator.Synchronizer.SQLDB] + DriverName = "sqlite3" + DataSourceName = "file:/tmp/aggregator_sync_db.sqlite" [Aggregator.Synchronizer.Synchronizer] SyncInterval = "10s" SyncChunkSize = 1000 @@ -122,9 
+133,19 @@ SequencerPrivateKey = {} BlockFinality = "finalized" OverrideStorageCheck = false [Aggregator.Synchronizer.Etherman] + L1URL = "http://localhost:8545" + ForkIDChunkSize = 100 + L1ChainID = 0 [Aggregator.Synchronizer.Etherman.Validium] Enabled = false - + TrustedSequencerURL = "" + RetryOnDACErrorInterval = "1m" + DataSourcePriority = ["trusted", "external"] + [Aggregator.Synchronizer.Etherman.Validium.Translator] + FullMatchRules = [] + [Aggregator.Synchronizer.Etherman.Validium.RateLimit] + NumRequests = 900 + Interval = "1s" [ReorgDetectorL1] DBPath = "/tmp/reorgdetectorl1" @@ -132,7 +153,7 @@ DBPath = "/tmp/reorgdetectorl1" DBPath = "/tmp/reorgdetectorl2" [L1InfoTreeSync] -DBPath = "/tmp/L1InfoTreeSync" +DBPath = "/tmp/L1InfoTreeSync.sqlite" GlobalExitRootAddr="0x8464135c8F25Da09e49BC8782676a84730C318bC" SyncBlockChunkSize=10 BlockFinality="LatestBlock" @@ -163,7 +184,7 @@ WaitPeriodNextGER="100ms" ForcedGas = 0 GasPriceMarginFactor = 1 MaxGasPriceLimit = 0 - PersistenceFilename = "/tmp/ethtxmanager-sequencesender.json" + StoragePath = "/tmp/ethtxmanager-sequencesender.db" ReadPendingL1Txs = false SafeStatusL1NumberOfBlocks = 5 FinalizedStatusL1NumberOfBlocks = 10 @@ -202,7 +223,7 @@ GasOffset = 0 ForcedGas = 0 GasPriceMarginFactor = 1 MaxGasPriceLimit = 0 - PersistenceFilename = "/tmp/ethtxmanager-claimsopnsor.json" + StoragePath = "/tmp/ethtxmanager-claimsponsor.db" ReadPendingL1Txs = false SafeStatusL1NumberOfBlocks = 5 FinalizedStatusL1NumberOfBlocks = 10 @@ -212,12 +233,6 @@ GasOffset = 0 L1ChainID = 1337 HTTPHeaders = [] -[L1Bridge2InfoIndexSync] -DBPath = "/tmp/l1bridge2infoindexsync" -RetryAfterErrorPeriod = "1s" -MaxRetryAttemptsAfterError = -1 -WaitForSyncersPeriod = "3s" - [BridgeL1Sync] DBPath = "/tmp/bridgel1sync" BlockFinality = "LatestBlock" @@ -247,4 +262,12 @@ RetryAfterErrorPeriod = "1s" MaxRetryAttemptsAfterError = -1 WaitForNewBlocksPeriod = "1s" DownloadBufferSize = 100 + +[NetworkConfig.L1] +L1ChainID = 0 +PolAddr = 
"0x0000000000000000000000000000000000000000" +ZkEVMAddr = "0x0000000000000000000000000000000000000000" +RollupManagerAddr = "0x0000000000000000000000000000000000000000" +GlobalExitRootManagerAddr = "0x0000000000000000000000000000000000000000" + ` diff --git a/config/example-config.toml b/config/example-config.toml deleted file mode 100644 index e7207861..00000000 --- a/config/example-config.toml +++ /dev/null @@ -1,122 +0,0 @@ -ForkUpgradeBatchNumber = 0 -ForkUpgradeNewForkId = 0 - -[Log] -Environment = "development" # "production" or "development" -Level = "info" -Outputs = ["stderr"] - -[SequenceSender] -IsValidiumMode = false -WaitPeriodSendSequence = "15s" -LastBatchVirtualizationTimeMaxWaitPeriod = "10s" -L1BlockTimestampMargin = "30s" -MaxTxSizeForL1 = 131072 -L2Coinbase = "0xfa3b44587990f97ba8b6ba7e230a5f0e95d14b3d" -PrivateKey = {Path = "./test/sequencer.keystore", Password = "testonly"} -SequencesTxFileName = "sequencesender.json" -GasOffset = 80000 -WaitPeriodPurgeTxFile = "15m" -MaxPendingTx = 1 - [SequenceSender.StreamClient] - Server = "127.0.0.1:6900" - [SequenceSender.EthTxManager] - FrequencyToMonitorTxs = "1s" - WaitTxToBeMined = "2m" - GetReceiptMaxTime = "250ms" - GetReceiptWaitInterval = "1s" - PrivateKeys = [ - {Path = "./test/sequencer.keystore", Password = "testonly"}, - ] - ForcedGas = 0 - GasPriceMarginFactor = 1 - MaxGasPriceLimit = 0 - PersistenceFilename = "ethtxmanager.json" - ReadPendingL1Txs = false - SafeStatusL1NumberOfBlocks = 0 - FinalizedStatusL1NumberOfBlocks = 0 - [SequenceSender.EthTxManager.Etherman] - URL = "http://127.0.0.1:32771" - MultiGasProvider = false - L1ChainID = 1337 -[Aggregator] -Host = "0.0.0.0" -Port = 50081 -RetryTime = "5s" -VerifyProofInterval = "10s" -TxProfitabilityCheckerType = "acceptall" -TxProfitabilityMinReward = "1.1" -ProofStatePollingInterval = "5s" -SenderAddress = "" -CleanupLockedProofsInterval = "2m" -GeneratingProofCleanupThreshold = "10m" -ForkId = 9 -GasOffset = 0 -WitnessURL = 
"localhost:8123" -UseL1BatchData = true -UseFullWitness = false -SettlementBackend = "l1" -AggLayerTxTimeout = "5m" -AggLayerURL = "" -SequencerPrivateKey = {} - [Aggregator.DB] - Name = "aggregator_db" - User = "aggregator_user" - Password = "master_password" - Host = "localhost" - Port = "32780" - EnableLog = false - MaxConns = 200 - [Aggregator.Log] - Environment = "development" # "production" or "development" - Level = "info" - Outputs = ["stderr"] - [Aggregator.StreamClient] - Server = "localhost:6900" - [Aggregator.EthTxManager] - FrequencyToMonitorTxs = "1s" - WaitTxToBeMined = "2m" - GetReceiptMaxTime = "250ms" - GetReceiptWaitInterval = "1s" - PrivateKeys = [ - {Path = "/pk/aggregator.keystore", Password = "testonly"}, - ] - ForcedGas = 0 - GasPriceMarginFactor = 1 - MaxGasPriceLimit = 0 - PersistenceFilename = "" - ReadPendingL1Txs = false - SafeStatusL1NumberOfBlocks = 0 - FinalizedStatusL1NumberOfBlocks = 0 - [Aggregator.EthTxManager.Etherman] - URL = "" - L1ChainID = 11155111 - HTTPHeaders = [] - [Aggregator.Synchronizer] - [Aggregator.Synchronizer.DB] - Name = "sync_db" - User = "sync_user" - Password = "sync_password" - Host = "cdk-l1-sync-db" - Port = "5432" - EnableLog = false - MaxConns = 10 - [Aggregator.Synchronizer.Synchronizer] - SyncInterval = "10s" - SyncChunkSize = 1000 - GenesisBlockNumber = 5511080 - SyncUpToBlock = "finalized" - BlockFinality = "finalized" - OverrideStorageCheck = false - [Aggregator.Synchronizer.Etherman] - [Aggregator.Synchronizer.Etherman.Validium] - Enabled = false - -[RPC] - -[NetworkConfig.L1] -ChainID = 11155111 -PolAddr = "0xEdE9cf798E0fE25D35469493f43E88FeA4a5da0E" -ZkEVMAddr = "0x1Fe038B54aeBf558638CA51C91bC8cCa06609e91" -RollupManagerAddr = "0x2F50ef6b8e8Ee4E579B17619A92dE3E2ffbD8AD2" -GlobalExitRootManagerAddr = "0x1f7ad7caA53e35b4f0D138dC5CBF91aC108a2674" diff --git a/crates/cdk-config/src/aggregator.rs b/crates/cdk-config/src/aggregator.rs new file mode 100644 index 00000000..2e059a2f --- /dev/null +++ 
b/crates/cdk-config/src/aggregator.rs @@ -0,0 +1,149 @@ +use ethers::types::Address; +use serde::Deserialize; +use url::Url; + +/// The StreamClient configuration. +#[derive(Deserialize, Debug, Clone)] +pub struct StreamClient { + #[serde(rename = "Server", default)] + pub server: String, +} + +impl Default for StreamClient { + fn default() -> Self { + Self { + server: "localhost:9092".to_string(), + } + } +} + +#[derive(Deserialize, Debug, Clone)] +pub struct EthTxManager { + #[serde(rename = "Etherman")] + pub etherman: Etherman, +} + +impl Default for EthTxManager { + fn default() -> Self { + Self { + etherman: Etherman::default(), + } + } +} + +#[derive(Deserialize, Debug, Clone)] +pub struct Etherman { + #[serde(rename = "URL", default)] + pub url: String, +} + +impl Default for Etherman { + fn default() -> Self { + Self { + url: "http://localhost:8545".to_string(), + } + } +} + +/// The Aggregator configuration. +#[derive(Deserialize, Debug, Clone)] +pub struct Aggregator { + #[serde(rename = "ChainID", default)] + pub chain_id: String, + #[serde(rename = "Host", default)] + pub host: String, + #[serde(rename = "Port", default)] + pub port: String, + #[serde(rename = "RetryTime", default)] + pub retry_time: String, + #[serde(rename = "VerifyProofInterval", default)] + pub verify_proof_interval: String, + #[serde(rename = "ProofStatePollingInterval", default)] + pub proof_state_polling_interval: String, + #[serde(rename = "TxProfitabilityCheckerType", default)] + pub tx_profitability_checker_type: String, + #[serde(rename = "TxProfitabilityMinReward", default)] + pub tx_profitability_min_reward: String, + #[serde(rename = "IntervalAfterWhichBatchConsolidateAnyway", default)] + pub interval_after_which_batch_consolidate_anyway: String, + #[serde(rename = "ForkId", default)] + pub fork_id: u64, + #[serde(rename = "CleanupLockedProofsInterval", default)] + pub cleanup_locked_proofs_interval: String, + #[serde(rename = "GeneratingProofCleanupThreshold", default)] 
+ pub generating_proof_cleanup_threshold: String, + #[serde(rename = "GasOffset", default)] + pub gas_offset: u64, + #[serde(rename = "WitnessURL", default = "default_url")] + pub witness_url: Url, + #[serde(rename = "SenderAddress", default = "default_address")] + pub sender_address: Address, + #[serde(rename = "SettlementBackend", default)] + pub settlement_backend: String, + #[serde(rename = "AggLayerTxTimeout", default)] + pub agg_layer_tx_timeout: String, + #[serde(rename = "AggLayerURL", default = "default_url")] + pub agg_layer_url: Url, + #[serde(rename = "UseL1BatchData", default)] + pub use_l1_batch_data: bool, + #[serde(rename = "UseFullWitness", default)] + pub use_full_witness: bool, + #[serde(rename = "MaxWitnessRetrievalWorkers", default)] + pub max_witness_retrieval_workers: u32, + #[serde(rename = "SyncModeOnlyEnabled", default)] + pub sync_mode_only_enabled: bool, + + #[serde(rename = "StreamClient", default)] + pub stream_client: StreamClient, + + #[serde(rename = "EthTxManager", default)] + pub eth_tx_manager: EthTxManager, +} + +fn default_url() -> Url { + Url::parse("http://localhost:8546").unwrap() +} + +fn default_address() -> Address { + "0x0000000000000000000000000000000000000000" + .parse() + .unwrap() +} + +impl Default for Aggregator { + fn default() -> Self { + // Values are coming from https://github.com/0xPolygon/agglayer/blob/main/config/default.go#L11 + Self { + chain_id: "1".to_string(), + host: "localhost".to_string(), + port: "8545".to_string(), + retry_time: "10s".to_string(), + verify_proof_interval: "1m".to_string(), + proof_state_polling_interval: "10s".to_string(), + tx_profitability_checker_type: "default".to_string(), + tx_profitability_min_reward: "0.1".to_string(), + interval_after_which_batch_consolidate_anyway: "5m".to_string(), + fork_id: 0, + cleanup_locked_proofs_interval: "1h".to_string(), + generating_proof_cleanup_threshold: "10m".to_string(), + gas_offset: 0, + witness_url: default_url(), + sender_address: 
default_address(), + settlement_backend: "default".to_string(), + agg_layer_tx_timeout: "30s".to_string(), + agg_layer_url: Url::parse("http://localhost:8547").unwrap(), + use_l1_batch_data: true, + use_full_witness: false, + max_witness_retrieval_workers: 4, + sync_mode_only_enabled: false, + stream_client: StreamClient { + server: "localhost:9092".to_string(), + }, + eth_tx_manager: EthTxManager { + etherman: Etherman { + url: "http://localhost:9093".to_string(), + }, + }, + } + } +} diff --git a/crates/cdk-config/src/l1.rs b/crates/cdk-config/src/l1.rs new file mode 100644 index 00000000..4c339b98 --- /dev/null +++ b/crates/cdk-config/src/l1.rs @@ -0,0 +1,38 @@ +use ethers::types::Address; +use serde::Deserialize; + +/// The L1 configuration. +#[derive(Deserialize, Debug, Clone)] +pub struct L1 { + #[serde(rename = "L1ChainID", alias = "ChainID", default)] + pub l1_chain_id: String, + #[serde(rename = "PolAddr", default)] + pub pol_addr: Address, + #[serde(rename = "ZkEVMAddr", default)] + pub zk_evm_addr: Address, + #[serde(rename = "RollupManagerAddr", default)] + pub rollup_manager_addr: Address, + #[serde(rename = "GlobalExitRootManagerAddr", default)] + pub global_exit_root_manager_addr: Address, +} + +impl Default for L1 { + fn default() -> Self { + // Values are coming from https://github.com/0xPolygon/agglayer/blob/main/config/default.go#L11 + Self { + l1_chain_id: "1337".to_string(), + pol_addr: "0x5b06837A43bdC3dD9F114558DAf4B26ed49842Ed" + .parse() + .unwrap(), + zk_evm_addr: "0x2F50ef6b8e8Ee4E579B17619A92dE3E2ffbD8AD2" + .parse() + .unwrap(), + rollup_manager_addr: "0x1Fe038B54aeBf558638CA51C91bC8cCa06609e91" + .parse() + .unwrap(), + global_exit_root_manager_addr: "0x1f7ad7caA53e35b4f0D138dC5CBF91aC108a2674" + .parse() + .unwrap(), + } + } +} diff --git a/crates/cdk-config/src/layer1.rs b/crates/cdk-config/src/layer1.rs deleted file mode 100644 index a5bd19d0..00000000 --- a/crates/cdk-config/src/layer1.rs +++ /dev/null @@ -1,28 +0,0 @@ -use 
ethers::types::Address; -use serde::Deserialize; -use url::Url; - -/// The L1 configuration. -#[derive(Deserialize, Debug, Clone)] -pub struct Layer1 { - #[serde(rename = "ChainID")] - pub chain_id: u64, - #[serde(rename = "NodeURL")] - pub node_url: Url, - #[serde(rename = "RollupManagerContract")] - pub rollup_manager_contract: Address, -} - -#[cfg(any(test, feature = "testutils"))] -impl Default for Layer1 { - fn default() -> Self { - // Values are coming from https://github.com/0xPolygon/agglayer/blob/main/config/default.go#L11 - Self { - chain_id: 1337, - node_url: "http://zkevm-mock-l1-network:8545".parse().unwrap(), - rollup_manager_contract: "0xB7f8BC63BbcaD18155201308C8f3540b07f84F5e" - .parse() - .unwrap(), - } - } -} diff --git a/crates/cdk-config/src/lib.rs b/crates/cdk-config/src/lib.rs index 13298073..25478bb7 100644 --- a/crates/cdk-config/src/lib.rs +++ b/crates/cdk-config/src/lib.rs @@ -6,23 +6,33 @@ use serde::Deserialize; pub(crate) const DEFAULT_IP: std::net::Ipv4Addr = std::net::Ipv4Addr::new(0, 0, 0, 0); -pub(crate) mod layer1; +pub(crate) mod aggregator; +pub(crate) mod l1; pub mod log; +pub(crate) mod network_config; +pub(crate) mod sequence_sender; pub(crate) mod telemetry; -pub use layer1::Layer1; pub use log::Log; +use sequence_sender::SequenceSender; /// The Agglayer configuration. #[derive(Deserialize, Debug)] #[cfg_attr(any(test, feature = "testutils"), derive(Default))] pub struct Config { - /// A map of Zkevm node RPC endpoints for each rollup. - /// /// The log configuration. 
- #[serde(rename = "Log")] + #[serde(rename = "Log", default)] pub log: Log, #[serde(rename = "ForkUpgradeBatchNumber")] pub fork_upgrade_batch_number: Option, + + #[serde(rename = "NetworkConfig", default)] + pub network_config: network_config::NetworkConfig, + + #[serde(rename = "Aggregator", default)] + pub aggregator: aggregator::Aggregator, + + #[serde(rename = "SequenceSender", default)] + pub sequence_sender: SequenceSender, } diff --git a/crates/cdk-config/src/network_config.rs b/crates/cdk-config/src/network_config.rs new file mode 100644 index 00000000..3f49b786 --- /dev/null +++ b/crates/cdk-config/src/network_config.rs @@ -0,0 +1,15 @@ +use crate::l1::L1; +use serde::Deserialize; + +/// The L1 configuration. +#[derive(Deserialize, Debug, Clone)] +pub struct NetworkConfig { + #[serde(rename = "L1", default)] + pub l1: L1, +} + +impl Default for NetworkConfig { + fn default() -> Self { + Self { l1: L1::default() } + } +} diff --git a/crates/cdk-config/src/sequence_sender.rs b/crates/cdk-config/src/sequence_sender.rs new file mode 100644 index 00000000..c4e83cc5 --- /dev/null +++ b/crates/cdk-config/src/sequence_sender.rs @@ -0,0 +1,50 @@ +use serde::Deserialize; + +/// The SequenceSender configuration. 
+#[derive(Deserialize, Debug, Clone)] +pub struct SequenceSender { + #[serde(rename = "WaitPeriodSendSequence", default)] + pub wait_period_send_sequence: String, + #[serde(rename = "LastBatchVirtualizationTimeMaxWaitPeriod", default)] + pub last_batch_virtualization_time_max_wait_period: String, + #[serde(rename = "MaxTxSizeForL1", default)] + pub max_tx_size_for_l1: u32, + #[serde(rename = "L2Coinbase", default)] + pub l2_coinbase: String, + #[serde(rename = "SequencesTxFileName", default)] + pub sequences_tx_file_name: String, + #[serde(rename = "GasOffset", default)] + pub gas_offset: u64, + #[serde(rename = "WaitPeriodPurgeTxFile", default)] + pub wait_period_purge_tx_file: String, + #[serde(rename = "MaxPendingTx", default)] + pub max_pending_tx: u32, + #[serde(rename = "MaxBatchesForL1", default)] + pub max_batches_for_l1: u32, + #[serde(rename = "BlockFinality", default)] + pub block_finality: String, + #[serde(rename = "RPCURL", default)] + pub rpc_url: String, + #[serde(rename = "GetBatchWaitInterval", default)] + pub get_batch_wait_interval: String, +} + +// Default trait implementation +impl Default for SequenceSender { + fn default() -> Self { + Self { + wait_period_send_sequence: "1s".to_string(), + last_batch_virtualization_time_max_wait_period: "1s".to_string(), + max_tx_size_for_l1: 1000, + l2_coinbase: "0x".to_string(), + sequences_tx_file_name: "sequences_tx.json".to_string(), + gas_offset: 0, + wait_period_purge_tx_file: "1s".to_string(), + max_pending_tx: 1000, + max_batches_for_l1: 100, + block_finality: "1s".to_string(), + rpc_url: "http://localhost:8545".to_string(), + get_batch_wait_interval: "1s".to_string(), + } + } +} diff --git a/crates/cdk-config/src/telemetry.rs b/crates/cdk-config/src/telemetry.rs index 407145f2..728611ce 100644 --- a/crates/cdk-config/src/telemetry.rs +++ b/crates/cdk-config/src/telemetry.rs @@ -1,11 +1,10 @@ -use std::net::SocketAddr; - -use serde::Deserialize; - use super::DEFAULT_IP; +use serde::Deserialize; +use 
std::net::SocketAddr; #[derive(Deserialize, Debug, Clone, Copy)] #[serde(rename_all = "PascalCase")] +#[allow(dead_code)] pub struct TelemetryConfig { #[serde(rename = "PrometheusAddr", default = "default_metrics_api_addr")] pub addr: SocketAddr, diff --git a/crates/cdk/Cargo.toml b/crates/cdk/Cargo.toml index 913fc492..0c1f8274 100644 --- a/crates/cdk/Cargo.toml +++ b/crates/cdk/Cargo.toml @@ -13,6 +13,16 @@ execute = "0.2.13" toml = "0.8.14" tracing.workspace = true tracing-subscriber = { workspace = true, features = ["env-filter", "json"] } +url = { workspace = true, features = ["serde"] } +colored = "2.0" cdk-config = { path = "../cdk-config" } +serde.workspace = true +serde_json.workspace = true +tempfile = "3.12.0" +alloy-rpc-client = "0.4.2" +alloy-transport-http = "0.4.2" +tokio = "1.40.0" +reqwest = "0.12.8" +alloy-json-rpc = "0.4.2" diff --git a/crates/cdk/build.rs b/crates/cdk/build.rs index 1a01704a..59fffda7 100644 --- a/crates/cdk/build.rs +++ b/crates/cdk/build.rs @@ -23,7 +23,7 @@ fn main() { // Call the make command let output = Command::new("make") - .arg("build") // Create a new make command + .arg("build-go") // Create a new make command .current_dir(build_path) // Set the current directory for the command .output() // Execute the command and capture the output .expect("Failed to execute make command"); diff --git a/crates/cdk/src/allocs_render.rs b/crates/cdk/src/allocs_render.rs new file mode 100644 index 00000000..3b881149 --- /dev/null +++ b/crates/cdk/src/allocs_render.rs @@ -0,0 +1,99 @@ +use anyhow::{Context, Result}; +use serde::{Deserialize, Serialize}; +use serde_json::{self, Value}; +use std::collections::HashMap; +use std::fs::File; +use std::io::Read; +use std::path::Path; + +#[derive(Serialize, Deserialize, Debug, Clone)] +struct Input { + #[serde(rename = "contractName", skip_serializing_if = "Option::is_none")] + contract_name: Option, + #[serde(rename = "accountName", skip_serializing_if = "Option::is_none")] + account_name: 
Option, + balance: String, + nonce: String, + address: String, + #[serde(skip_serializing_if = "Option::is_none")] + bytecode: Option, + #[serde(skip_serializing_if = "Option::is_none")] + storage: Option>, +} + +#[derive(Serialize, Deserialize, Debug)] +pub struct Wrapper { + pub root: String, + #[serde(rename = "L1Config")] + pub l1_config: L1Config, + genesis: Vec, + #[serde(rename = "rollupCreationBlockNumber")] + pub rollup_creation_block_number: u64, +} + +#[derive(Serialize, Deserialize, Debug)] +pub struct L1Config { + #[serde(rename = "chainId")] + pub chain_id: u64, + #[serde(rename = "polygonZkEVMGlobalExitRootAddress")] + pub zkevm_global_exit_root_address: String, + #[serde(rename = "polygonRollupManagerAddress")] + pub rollup_manager_address: String, + #[serde(rename = "polTokenAddress")] + pub pol_token_address: String, + #[serde(rename = "polygonZkEVMAddress")] + pub zkevm_address: String, +} + +#[derive(Serialize, Deserialize, Debug)] +struct Output { + #[serde(rename = "contractName", skip_serializing_if = "Option::is_none")] + contract_name: Option, + #[serde(rename = "accountName", skip_serializing_if = "Option::is_none")] + account_name: Option, + balance: Option, + nonce: Option, + code: Option, + storage: Option, +} + +pub struct Rendered { + pub output: String, + pub wrapper: Wrapper, +} + +pub fn render_allocs(genesis_file_path: &str) -> Result { + let path = Path::new(genesis_file_path); + let display = path.display(); + + let mut file = File::open(&path).with_context(|| format!("couldn't open {}", display))?; + + let mut data = String::new(); + file.read_to_string(&mut data) + .with_context(|| format!("couldn't read {}", display))?; + + let wrapper: Wrapper = serde_json::from_str(&data) + .with_context(|| format!("couldn't parse JSON from {}", display))?; + + let mut outputs: HashMap = HashMap::new(); + + for input in wrapper.genesis.clone() { + let output = Output { + contract_name: input.contract_name, + account_name: 
input.account_name, + balance: Some(input.balance), + nonce: Some(input.nonce), + code: input.bytecode, + storage: input.storage.map(|s| serde_json::to_value(s).unwrap()), + }; + outputs.insert(input.address, output); + } + + // outputs.sort_by(|a, b| a.contract_name.cmp(&b.contract_name)); + + Ok(Rendered { + output: serde_json::to_string_pretty(&outputs) + .with_context(|| "couldn't serialize outputs to JSON")?, + wrapper, + }) +} diff --git a/crates/cdk/src/cli.rs b/crates/cdk/src/cli.rs index 1bf29d2c..d108543d 100644 --- a/crates/cdk/src/cli.rs +++ b/crates/cdk/src/cli.rs @@ -5,35 +5,54 @@ use clap::{Parser, Subcommand, ValueHint}; /// Command line interface. #[derive(Parser)] +#[command(author, version, about, long_about = None)] pub(crate) struct Cli { - /// The path to the configuration file. - #[arg( - long, - short, - value_hint = ValueHint::FilePath, - global = true, - default_value = "config/example-config.toml", - env = "CDK_CONFIG_PATH" - )] - pub(crate) config: PathBuf, - - /// The path to a chain specification file. - #[arg( - long, - short = 'g', - value_hint = ValueHint::FilePath, - global = true, - default_value = "config/genesis.json", - env = "CDK_GENESIS_PATH" - )] - pub(crate) chain: PathBuf, - #[command(subcommand)] pub(crate) cmd: Commands, } #[derive(Subcommand)] pub(crate) enum Commands { - Node, - Erigon, + /// Run the cdk-node with the provided configuration + Node { + /// The path to the configuration file + #[arg( + long, + short = 'C', + value_hint = ValueHint::FilePath, + env = "CDK_CONFIG_PATH" + )] + config: PathBuf, + + /// Components to run. 
+ #[arg( + long, + short, + value_hint = ValueHint::CommandString, + env = "CDK_COMPONENTS", + )] + components: Option, + }, + /// Run cdk-erigon node with the provided default configuration + Erigon { + /// The path to the configuration file + #[arg( + long, + short = 'C', + value_hint = ValueHint::FilePath, + env = "CDK_CONFIG_PATH" + )] + config: PathBuf, + + /// The path to a chain specification file. + #[arg( + long, + short = 'g', + value_hint = ValueHint::FilePath, + env = "CDK_GENESIS_PATH" + )] + chain: PathBuf, + }, + /// Output the corresponding versions of the components + Versions, } diff --git a/crates/cdk/src/config_render.rs b/crates/cdk/src/config_render.rs new file mode 100644 index 00000000..2c230c52 --- /dev/null +++ b/crates/cdk/src/config_render.rs @@ -0,0 +1,130 @@ +use crate::allocs_render::Rendered; +use anyhow::Error; +use cdk_config::Config; +use std::fs; +use std::path::PathBuf; +use tempfile::{tempdir, TempDir}; + +pub fn render(config: &Config, genesis_file: PathBuf, timestamp: u64) -> Result { + // Create a temporary directory + let tmp_dir = tempdir()?; + let chain_id = config.aggregator.chain_id.clone(); + let res = crate::allocs_render::render_allocs(genesis_file.to_str().unwrap())?; + // Write the three files to disk + fs::write( + tmp_dir + .path() + .join(format!("dynamic-{}-allocs.json", chain_id.clone())), + res.output.clone(), + )?; + fs::write( + tmp_dir + .path() + .join(format!("dynamic-{}-chainspec.json", chain_id.clone())), + render_chainspec(chain_id.clone()), + )?; + fs::write( + tmp_dir + .path() + .join(format!("dynamic-{}-conf.json", chain_id.clone())), + render_conf(res.wrapper.root.clone(), timestamp), + )?; + + let contents = render_yaml(config, res); + fs::write( + tmp_dir + .path() + .join(format!("dynamic-{}.yaml", chain_id.clone())), + contents, + )?; + + Ok(tmp_dir) +} + +fn render_chainspec(chain_id: String) -> String { + format!( + r#" +{{ + "ChainName": "dynamic-{chain_id}", + "chainId": {chain_id}, + 
"consensus": "ethash", + "homesteadBlock": 0, + "daoForkBlock": 0, + "eip150Block": 0, + "eip155Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "muirGlacierBlock": 0, + "berlinBlock": 0, + "londonBlock": 9999999999999999999999999999999999999999999999999, + "arrowGlacierBlock": 9999999999999999999999999999999999999999999999999, + "grayGlacierBlock": 9999999999999999999999999999999999999999999999999, + "terminalTotalDifficulty": 58750000000000000000000, + "terminalTotalDifficultyPassed": false, + "shanghaiTime": 9999999999999999999999999999999999999999999999999, + "cancunTime": 9999999999999999999999999999999999999999999999999, + "pragueTime": 9999999999999999999999999999999999999999999999999, + "ethash": {{}} +}} + "# + ) +} + +fn render_conf(root: String, timestamp: u64) -> String { + format!( + r#" +{{ + "root": {:?}, + "timestamp": {:?}, + "gasLimit": 0, + "difficulty": 0 +}} + "#, + root, timestamp + ) +} + +// render_config renders the configuration file for the Erigon node. 
+fn render_yaml(config: &Config, res: Rendered) -> String { + format!( + r#" +chain: dynamic-{chain_id} +zkevm.l2-chain-id: {chain_id} +zkevm.l2-sequencer-rpc-url: {l2_sequencer_rpc_url} +zkevm.l2-datastreamer-url: {datastreamer_host} +zkevm.l1-chain-id: {l1_chain_id} +zkevm.l1-rpc-url: {l1_rpc_url} +zkevm.address-sequencer: {sequencer_address} +zkevm.address-zkevm: {zkevm_address} +zkevm.address-rollup: {rollup_address} +zkevm.address-ger-manager: {ger_manager_address} +zkevm.l1-matic-contract-address: {pol_token_address} +zkevm.l1-first-block: {l1_first_block} +datadir: ./data/dynamic-{chain_id} + +externalcl: true +http: true +private.api.addr: "localhost:9092" +zkevm.rpc-ratelimit: 250 +zkevm.datastream-version: 3 +http.api: [eth, debug,net,trace,web3,erigon,zkevm] +http.addr: "0.0.0.0" +http.vhosts: any +http.corsdomain: any +ws: true +"#, + chain_id = config.aggregator.chain_id.clone(), + l2_sequencer_rpc_url = config.aggregator.witness_url.to_string(), + datastreamer_host = config.aggregator.stream_client.server, + l1_rpc_url = config.aggregator.eth_tx_manager.etherman.url, + l1_chain_id = config.network_config.l1.l1_chain_id, + sequencer_address = config.sequence_sender.l2_coinbase, + zkevm_address = res.wrapper.l1_config.zkevm_address, + rollup_address = res.wrapper.l1_config.rollup_manager_address, + ger_manager_address = res.wrapper.l1_config.zkevm_global_exit_root_address, + pol_token_address = res.wrapper.l1_config.pol_token_address, + l1_first_block = res.wrapper.rollup_creation_block_number + ) +} diff --git a/crates/cdk/src/helpers.rs b/crates/cdk/src/helpers.rs new file mode 100644 index 00000000..4fac948b --- /dev/null +++ b/crates/cdk/src/helpers.rs @@ -0,0 +1,13 @@ +use std::env; + +const CDK_CLIENT_BIN: &str = "cdk-node"; + +pub(crate) fn get_bin_path() -> String { + // This is to find the binary when running in development mode + // otherwise it will use system path + let mut bin_path = 
env::var("CARGO_MANIFEST_DIR").unwrap_or(CDK_CLIENT_BIN.into()); + if bin_path != CDK_CLIENT_BIN { + bin_path = format!("{}/../../target/{}", bin_path, CDK_CLIENT_BIN); + } + bin_path +} diff --git a/crates/cdk/src/main.rs b/crates/cdk/src/main.rs index 99a8a752..61e66eab 100644 --- a/crates/cdk/src/main.rs +++ b/crates/cdk/src/main.rs @@ -1,33 +1,63 @@ //! Command line interface. +use alloy_rpc_client::ClientBuilder; +use alloy_rpc_client::ReqwestClient; use cdk_config::Config; use clap::Parser; use cli::Cli; +use colored::*; use execute::Execute; -use std::env; use std::path::PathBuf; use std::process::Command; -use std::sync::Arc; +use url::Url; +pub mod allocs_render; mod cli; +mod config_render; +mod helpers; mod logging; +mod versions; -const CDK_CLIENT_PATH: &str = "cdk-node"; -const CDK_ERIGON_PATH: &str = "cdk-erigon"; +const CDK_ERIGON_BIN: &str = "cdk-erigon"; -fn main() -> anyhow::Result<()> { +#[tokio::main] +async fn main() -> anyhow::Result<()> { dotenvy::dotenv().ok(); let cli = Cli::parse(); + println!( + "{}", + r#"🐼 + _____ _ _____ _____ _ __ + | __ \ | | / ____| __ \| |/ / + | |__) |__ | |_ _ __ _ ___ _ __ | | | | | | ' / + | ___/ _ \| | | | |/ _` |/ _ \| '_ \ | | | | | | < + | | | (_) | | |_| | (_| | (_) | | | | | |____| |__| | . \ + |_| \___/|_|\__, |\__, |\___/|_| |_| \_____|_____/|_|\_\ + __/ | __/ | + |___/ |___/ +"# + .purple() + ); + match cli.cmd { - cli::Commands::Node {} => node(cli.config)?, - cli::Commands::Erigon {} => erigon(cli.config)?, - // _ => forward()?, + cli::Commands::Node { config, components } => node(config, components)?, + cli::Commands::Erigon { config, chain } => erigon(config, chain).await?, + cli::Commands::Versions {} => versions::versions(), } Ok(()) } +// read_config reads the configuration file and returns the configuration. 
+fn read_config(config_path: PathBuf) -> anyhow::Result { + let config = std::fs::read_to_string(config_path) + .map_err(|e| anyhow::anyhow!("Failed to read configuration file: {}", e))?; + let config: Config = toml::from_str(&config)?; + + Ok(config) +} + /// This is the main node entrypoint. /// /// This function starts everything needed to run an Agglayer node. @@ -36,29 +66,30 @@ fn main() -> anyhow::Result<()> { /// /// This function returns on fatal error or after graceful shutdown has /// completed. -pub fn node(config_path: PathBuf) -> anyhow::Result<()> { - // Load the configuration file - let config_read = std::fs::read_to_string(config_path.clone()); - let toml_str = match config_read { - Ok(toml) => toml, - Err(e) => { - eprintln!( - "Failed to read configuration file, from path: {}", - config_path.to_str().unwrap() - ); - return Err(e.into()); - } - }; - let config: Arc = Arc::new(toml::from_str(&toml_str)?); +pub fn node(config_path: PathBuf, components: Option) -> anyhow::Result<()> { + // Read the config + let config = read_config(config_path.clone())?; - let mut bin_path = env::var("CARGO_MANIFEST_DIR").unwrap_or(CDK_CLIENT_PATH.into()); - if bin_path != CDK_CLIENT_PATH { - bin_path = format!("{}/../../{}", bin_path, CDK_CLIENT_PATH); - } + // Initialize the logger + logging::tracing(&config.log); + + // This is to find the binary when running in development mode + // otherwise it will use system path + let bin_path = helpers::get_bin_path(); + + let components_param = match components { + Some(components) => format!("-components={}", components), + None => "".to_string(), + }; // Run the node passing the config file path as argument let mut command = Command::new(bin_path.clone()); - command.args(&["run", "-cfg", config_path.canonicalize()?.to_str().unwrap()]); + command.args(&[ + "run", + "-cfg", + config_path.canonicalize()?.to_str().unwrap(), + components_param.as_str(), + ]); let output_result = command.execute_output(); let output = match 
output_result { @@ -82,38 +113,46 @@ pub fn node(config_path: PathBuf) -> anyhow::Result<()> { eprintln!("Interrupted!"); } - // Initialize the logger - logging::tracing(&config.log); - Ok(()) } /// This is the main erigon entrypoint. /// This function starts everything needed to run an Erigon node. -pub fn erigon(config_path: PathBuf) -> anyhow::Result<()> { - // Load the configuration file - let _config: Arc = Arc::new(toml::from_str(&std::fs::read_to_string( - config_path.clone(), - )?)?); - - let mut bin_path = env::var("CARGO_MANIFEST_DIR").unwrap_or(CDK_ERIGON_PATH.into()); - if bin_path != CDK_ERIGON_PATH { - bin_path = format!("{}/../../{}", bin_path, CDK_ERIGON_PATH); - } - - let mut command = Command::new(bin_path); - - // TODO: 1. Prepare erigon config files or flags +pub async fn erigon(config_path: PathBuf, genesis_file: PathBuf) -> anyhow::Result<()> { + // Read the config + let config = read_config(config_path.clone())?; - command.args(&["--config", config_path.to_str().unwrap()]); + // Initialize the logger + logging::tracing(&config.log); - let output = command.execute_output().unwrap(); + // Render configuration files + let chain_id = config.aggregator.chain_id.clone(); + let rpc_url = Url::parse(&config.sequence_sender.rpc_url).unwrap(); + let timestamp = get_timestamp(rpc_url).await.unwrap(); + let erigon_config_path = config_render::render(&config, genesis_file, timestamp)?; + + println!("Starting erigon with config: {:?}", erigon_config_path); + + // Run cdk-erigon in system path + let output = Command::new(CDK_ERIGON_BIN) + .args(&[ + "--config", + erigon_config_path + .path() + .join(format!("dynamic-{}.yaml", chain_id)) + .to_str() + .unwrap(), + ]) + .execute_output() + .unwrap(); if let Some(exit_code) = output.status.code() { - if exit_code == 0 { - println!("Ok."); - } else { - eprintln!("Failed."); + if exit_code != 0 { + eprintln!( + "Failed. 
Leaving configuration files in: {:?}", + erigon_config_path + ); + std::process::exit(1); } } else { eprintln!("Interrupted!"); @@ -121,3 +160,25 @@ pub fn erigon(config_path: PathBuf) -> anyhow::Result<()> { Ok(()) } + +/// Call the rpc server to retrieve the first batch timestamp +async fn get_timestamp(url: Url) -> Result { + // Instantiate a new client over a transport. + let client: ReqwestClient = ClientBuilder::default().http(url); + + // Prepare a request to the server. + let request = client.request("zkevm_getBatchByNumber", vec!["0"]); + + // Poll the request to completion. + let batch_json: Batch = request.await.unwrap(); + + // Parse the timestamp hex string into u64. + let ts = u64::from_str_radix(batch_json.timestamp.trim_start_matches("0x"), 16)?; + + Ok(ts) +} + +#[derive(serde::Deserialize, Debug, Clone)] +struct Batch { + timestamp: String, +} diff --git a/crates/cdk/src/versions.rs b/crates/cdk/src/versions.rs new file mode 100644 index 00000000..77581452 --- /dev/null +++ b/crates/cdk/src/versions.rs @@ -0,0 +1,47 @@ +use colored::*; +use execute::Execute; +use std::io; +use std::process::{Command, Output}; + +fn version() -> Result { + let bin_path = crate::helpers::get_bin_path(); + + // Run the node passing the config file path as argument + let mut command = Command::new(bin_path.clone()); + command.args(&["version"]); + + command.execute_output() +} + +pub(crate) fn versions() { + // Get the version of the cdk-node binary. 
+ let output = version().unwrap(); + let version = String::from_utf8(output.stdout).unwrap(); + + println!("{}", format!("{}", version.trim()).green()); + + let versions = vec![ + ( + "zkEVM Contracts", + "https://github.com/0xPolygonHermez/zkevm-contracts/releases/tag/v8.0.0-rc.4-fork.12", + ), + ("zkEVM Prover", "v8.0.0-RC12"), + ("CDK Erigon", "hermeznetwork/cdk-erigon:0948e33"), + ( + "zkEVM Pool Manager", + "hermeznetwork/zkevm-pool-manager:v0.1.1", + ), + ( + "CDK Data Availability Node", + "0xpolygon/cdk-data-availability:0.0.10", + ), + ]; + + // Multi-line string to print the versions with colors. + let formatted_versions: Vec = versions + .iter() + .map(|(key, value)| format!("{}: {}", key.green(), value.blue())) + .collect(); + + println!("{}", formatted_versions.join("\n")); +} diff --git a/dataavailability/datacommittee/datacommittee_test.go b/dataavailability/datacommittee/datacommittee_test.go index fcacef3c..7e2a8d3e 100644 --- a/dataavailability/datacommittee/datacommittee_test.go +++ b/dataavailability/datacommittee/datacommittee_test.go @@ -9,9 +9,9 @@ import ( "github.com/0xPolygon/cdk-contracts-tooling/contracts/banana/polygondatacommittee" "github.com/0xPolygon/cdk/log" erc1967proxy "github.com/0xPolygon/cdk/test/contracts/erc1967proxy" + "github.com/0xPolygon/cdk/test/helpers" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethclient/simulated" "github.com/stretchr/testify/assert" @@ -20,7 +20,7 @@ import ( func TestUpdateDataCommitteeEvent(t *testing.T) { // Set up testing environment - dac, ethBackend, auth, da := newTestingEnv(t) + dac, ethBackend, da, auth := newSimulatedDacman(t) // Update the committee requiredAmountOfSignatures := big.NewInt(2) @@ -63,82 +63,39 @@ func init() { }) } -// This function prepare the blockchain, the wallet with funds and deploy the smc -func 
newTestingEnv(t *testing.T) ( - dac *Backend, - ethBackend *simulated.Backend, - auth *bind.TransactOpts, - da *polygondatacommittee.Polygondatacommittee, -) { - t.Helper() - privateKey, err := crypto.GenerateKey() - if err != nil { - log.Fatal(err) - } - auth, err = bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337)) - if err != nil { - log.Fatal(err) - } - dac, ethBackend, da, err = newSimulatedDacman(t, auth) - if err != nil { - log.Fatal(err) - } - - return dac, ethBackend, auth, da -} - // NewSimulatedEtherman creates an etherman that uses a simulated blockchain. It's important to notice that the ChainID of the auth // must be 1337. The address that holds the auth will have an initial balance of 10 ETH -func newSimulatedDacman(t *testing.T, auth *bind.TransactOpts) ( - dacman *Backend, - ethBackend *simulated.Backend, - da *polygondatacommittee.Polygondatacommittee, - err error, +func newSimulatedDacman(t *testing.T) ( + *Backend, + *simulated.Backend, + *polygondatacommittee.Polygondatacommittee, + *bind.TransactOpts, ) { t.Helper() - if auth == nil { - // read only client - return &Backend{}, nil, nil, nil - } - // 10000000 ETH in wei - balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) - address := auth.From - genesisAlloc := map[common.Address]types.Account{ - address: { - Balance: balance, - }, - } - blockGasLimit := uint64(999999999999999999) - client := simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit)) + + ethBackend, setup := helpers.SimulatedBackend(t, nil, 0) // DAC Setup - addr, _, _, err := smcparis.DeployPolygondatacommittee(auth, client.Client()) - if err != nil { - return &Backend{}, nil, nil, err - } - client.Commit() - proxyAddr, err := deployDACProxy(auth, client.Client(), addr) - if err != nil { - return &Backend{}, nil, nil, err - } + addr, _, _, err := smcparis.DeployPolygondatacommittee(setup.UserAuth, ethBackend.Client()) + require.NoError(t, err) + ethBackend.Commit() - 
client.Commit() - da, err = polygondatacommittee.NewPolygondatacommittee(proxyAddr, client.Client()) - if err != nil { - return &Backend{}, nil, nil, err - } + proxyAddr, err := deployDACProxy(setup.UserAuth, ethBackend.Client(), addr) + require.NoError(t, err) + ethBackend.Commit() - _, err = da.SetupCommittee(auth, big.NewInt(0), []string{}, []byte{}) - if err != nil { - return &Backend{}, nil, nil, err - } - client.Commit() + da, err := polygondatacommittee.NewPolygondatacommittee(proxyAddr, ethBackend.Client()) + require.NoError(t, err) + + _, err = da.SetupCommittee(setup.UserAuth, big.NewInt(0), []string{}, []byte{}) + require.NoError(t, err) + ethBackend.Commit() c := &Backend{ dataCommitteeContract: da, } - return c, client, da, nil + return c, ethBackend, da, setup.UserAuth } func deployDACProxy(auth *bind.TransactOpts, client bind.ContractBackend, dacImpl common.Address) (common.Address, error) { diff --git a/db/meddler.go b/db/meddler.go index 90071916..8dd17fe8 100644 --- a/db/meddler.go +++ b/db/meddler.go @@ -13,12 +13,13 @@ import ( "github.com/russross/meddler" ) -// initMeddler registers tags to be used to read/write from SQL DBs using meddler -func initMeddler() { +// init registers tags to be used to read/write from SQL DBs using meddler +func init() { meddler.Default = meddler.SQLite meddler.Register("bigint", BigIntMeddler{}) meddler.Register("merkleproof", MerkleProofMeddler{}) meddler.Register("hash", HashMeddler{}) + meddler.Register("address", AddressMeddler{}) } func SQLiteErr(err error) (*sqlite.Error, bool) { @@ -37,7 +38,7 @@ func SliceToSlicePtrs(slice interface{}) interface{} { v := reflect.ValueOf(slice) vLen := v.Len() typ := v.Type().Elem() - res := reflect.MakeSlice(reflect.SliceOf(reflect.PtrTo(typ)), vLen, vLen) + res := reflect.MakeSlice(reflect.SliceOf(reflect.PointerTo(typ)), vLen, vLen) for i := 0; i < vLen; i++ { res.Index(i).Set(v.Index(i).Addr()) } @@ -56,7 +57,7 @@ func SlicePtrsToSlice(slice interface{}) interface{} { 
return res.Interface() } -// BigIntMeddler encodes or decodes the field value to or from JSON +// BigIntMeddler encodes or decodes the field value to or from string type BigIntMeddler struct{} // PreRead is called before a Scan operation for fields that have the BigIntMeddler @@ -96,16 +97,16 @@ func (b BigIntMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}, er return field.String(), nil } -// MerkleProofMeddler encodes or decodes the field value to or from JSON +// MerkleProofMeddler encodes or decodes the field value to or from string type MerkleProofMeddler struct{} -// PreRead is called before a Scan operation for fields that have the ProofMeddler +// PreRead is called before a Scan operation for fields that have the MerkleProofMeddler func (b MerkleProofMeddler) PreRead(fieldAddr interface{}) (scanTarget interface{}, err error) { // give a pointer to a byte buffer to grab the raw data return new(string), nil } -// PostRead is called after a Scan operation for fields that have the ProofMeddler +// PostRead is called after a Scan operation for fields that have the MerkleProofMeddler func (b MerkleProofMeddler) PostRead(fieldPtr, scanTarget interface{}) error { ptr, ok := scanTarget.(*string) if !ok { @@ -128,7 +129,7 @@ func (b MerkleProofMeddler) PostRead(fieldPtr, scanTarget interface{}) error { return nil } -// PreWrite is called before an Insert or Update operation for fields that have the ProofMeddler +// PreWrite is called before an Insert or Update operation for fields that have the MerkleProofMeddler func (b MerkleProofMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}, err error) { field, ok := fieldPtr.(tree.Proof) if !ok { @@ -142,16 +143,16 @@ func (b MerkleProofMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{ return s, nil } -// HashMeddler encodes or decodes the field value to or from JSON +// HashMeddler encodes or decodes the field value to or from string type HashMeddler struct{} -// PreRead is called 
before a Scan operation for fields that have the ProofMeddler +// PreRead is called before a Scan operation for fields that have the HashMeddler func (b HashMeddler) PreRead(fieldAddr interface{}) (scanTarget interface{}, err error) { // give a pointer to a byte buffer to grab the raw data return new(string), nil } -// PostRead is called after a Scan operation for fields that have the ProofMeddler +// PostRead is called after a Scan operation for fields that have the HashMeddler func (b HashMeddler) PostRead(fieldPtr, scanTarget interface{}) error { ptr, ok := scanTarget.(*string) if !ok { @@ -168,7 +169,7 @@ func (b HashMeddler) PostRead(fieldPtr, scanTarget interface{}) error { return nil } -// PreWrite is called before an Insert or Update operation for fields that have the ProofMeddler +// PreWrite is called before an Insert or Update operation for fields that have the HashMeddler func (b HashMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}, err error) { field, ok := fieldPtr.(common.Hash) if !ok { @@ -176,3 +177,38 @@ func (b HashMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}, err } return field.Hex(), nil } + +// AddressMeddler encodes or decodes the field value to or from string +type AddressMeddler struct{} + +// PreRead is called before a Scan operation for fields that have the AddressMeddler +func (b AddressMeddler) PreRead(fieldAddr interface{}) (scanTarget interface{}, err error) { + // give a pointer to a byte buffer to grab the raw data + return new(string), nil +} + +// PostRead is called after a Scan operation for fields that have the AddressMeddler +func (b AddressMeddler) PostRead(fieldPtr, scanTarget interface{}) error { + ptr, ok := scanTarget.(*string) + if !ok { + return errors.New("scanTarget is not *string") + } + if ptr == nil { + return errors.New("AddressMeddler.PostRead: nil pointer") + } + field, ok := fieldPtr.(*common.Address) + if !ok { + return errors.New("fieldPtr is not common.Address") + } + *field = 
common.HexToAddress(*ptr) + return nil +} + +// PreWrite is called before an Insert or Update operation for fields that have the AddressMeddler +func (b AddressMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}, err error) { + field, ok := fieldPtr.(common.Address) + if !ok { + return nil, errors.New("fieldPtr is not common.Address") + } + return field.Hex(), nil +} diff --git a/db/sqlite.go b/db/sqlite.go index e30e9e26..df0c1d28 100644 --- a/db/sqlite.go +++ b/db/sqlite.go @@ -2,6 +2,7 @@ package db import ( "database/sql" + "errors" _ "github.com/mattn/go-sqlite3" ) @@ -10,9 +11,12 @@ const ( UniqueConstrain = 1555 ) +var ( + ErrNotFound = errors.New("not found") +) + // NewSQLiteDB creates a new SQLite DB func NewSQLiteDB(dbPath string) (*sql.DB, error) { - initMeddler() db, err := sql.Open("sqlite3", dbPath) if err != nil { return nil, err @@ -25,3 +29,10 @@ func NewSQLiteDB(dbPath string) (*sql.DB, error) { `) return db, err } + +func ReturnErrNotFound(err error) error { + if errors.Is(err, sql.ErrNoRows) { + return ErrNotFound + } + return err +} diff --git a/etherman/config/config.go b/etherman/config/config.go index c9208ee4..fcf7cd79 100644 --- a/etherman/config/config.go +++ b/etherman/config/config.go @@ -1,7 +1,7 @@ package config import ( - "github.com/0xPolygonHermez/zkevm-ethtx-manager/etherman" + "github.com/0xPolygon/zkevm-ethtx-manager/etherman" "github.com/ethereum/go-ethereum/common" ) diff --git a/go.mod b/go.mod index a2ca38f4..cd587fb2 100644 --- a/go.mod +++ b/go.mod @@ -3,91 +3,91 @@ module github.com/0xPolygon/cdk go 1.22.4 require ( - github.com/0xPolygon/cdk-contracts-tooling v0.0.0-20240819092536-5a65d4761b2f - github.com/0xPolygon/cdk-data-availability v0.0.9 - github.com/0xPolygon/cdk-rpc v0.0.0-20240905074455-431d3c271fe8 + github.com/0xPolygon/cdk-contracts-tooling v0.0.0-20240826154954-f6182d2b17a2 + github.com/0xPolygon/cdk-data-availability v0.0.10 + github.com/0xPolygon/cdk-rpc v0.0.0-20241004114257-6c3cb6eebfb6 + 
github.com/0xPolygon/zkevm-ethtx-manager v0.2.0 github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6 - github.com/0xPolygonHermez/zkevm-ethtx-manager v0.1.10-0.20240716105056-c051c96d0234 - github.com/0xPolygonHermez/zkevm-synchronizer-l1 v0.7.0 - github.com/ethereum/go-ethereum v1.14.5 + github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.4 + github.com/ethereum/go-ethereum v1.14.8 github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 github.com/hermeznetwork/tracerr v0.3.2 - github.com/iden3/go-iden3-crypto v0.0.16 + github.com/iden3/go-iden3-crypto v0.0.17 github.com/invopop/jsonschema v0.12.0 github.com/jackc/pgconn v1.14.3 github.com/jackc/pgx/v4 v4.18.3 github.com/ledgerwatch/erigon-lib v1.0.0 github.com/mattn/go-sqlite3 v1.14.23 github.com/mitchellh/mapstructure v1.5.0 - github.com/rubenv/sql-migrate v1.6.1 + github.com/rubenv/sql-migrate v1.7.0 github.com/russross/meddler v1.0.1 github.com/spf13/viper v1.19.0 github.com/stretchr/testify v1.9.0 - github.com/urfave/cli/v2 v2.27.2 + github.com/urfave/cli/v2 v2.27.4 go.opentelemetry.io/otel v1.24.0 go.opentelemetry.io/otel/metric v1.24.0 go.uber.org/zap v1.27.0 - golang.org/x/crypto v0.24.0 - golang.org/x/net v0.26.0 - golang.org/x/sync v0.7.0 + golang.org/x/crypto v0.27.0 + golang.org/x/net v0.29.0 + golang.org/x/sync v0.8.0 google.golang.org/grpc v1.64.0 google.golang.org/protobuf v1.34.2 modernc.org/sqlite v1.32.0 ) require ( - github.com/DataDog/zstd v1.5.2 // indirect + github.com/DataDog/zstd v1.5.6 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/StackExchange/wmi v1.2.1 // indirect github.com/VictoriaMetrics/fastcache v1.12.2 // indirect github.com/VictoriaMetrics/metrics v1.23.1 // indirect github.com/bahlo/generic-list-go v0.2.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bits-and-blooms/bitset v1.10.0 // indirect - github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect + github.com/bits-and-blooms/bitset v1.14.2 // indirect + 
github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect github.com/buger/jsonparser v1.1.1 // indirect github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/cockroachdb/errors v1.11.1 // indirect + github.com/cockroachdb/errors v1.11.3 // indirect + github.com/cockroachdb/fifo v0.0.0-20240816210425-c5d0cb0b6fc0 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect - github.com/cockroachdb/pebble v1.1.0 // indirect + github.com/cockroachdb/pebble v1.1.2 // indirect github.com/cockroachdb/redact v1.1.5 // indirect github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/consensys/bavard v0.1.13 // indirect - github.com/consensys/gnark-crypto v0.12.1 // indirect + github.com/consensys/gnark-crypto v0.13.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect - github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c // indirect - github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect + github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a // indirect + github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/deckarep/golang-set/v2 v2.6.0 // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect github.com/didip/tollbooth/v6 v6.1.2 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/erigontech/mdbx-go v0.27.14 // indirect - github.com/ethereum/c-kzg-4844 v1.0.0 // indirect + github.com/ethereum/c-kzg-4844 v1.0.3 // indirect github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0 // indirect - github.com/fjl/memsize v0.0.2 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect - github.com/getsentry/sentry-go 
v0.18.0 // indirect + github.com/getsentry/sentry-go v0.28.1 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-pkgz/expirable-cache v0.0.3 // indirect github.com/go-stack/stack v1.8.1 // indirect - github.com/gofrs/flock v0.8.1 // indirect + github.com/gofrs/flock v0.12.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/uuid v1.6.0 // indirect - github.com/gorilla/websocket v1.5.1 // indirect + github.com/gorilla/websocket v1.5.3 // indirect github.com/hashicorp/go-bexpr v0.1.10 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/hashicorp/hcl v1.0.1-0.20180906183839-65a6292f0157 // indirect github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 // indirect github.com/holiman/bloomfilter/v2 v2.0.3 // indirect - github.com/holiman/uint256 v1.2.4 // indirect + github.com/holiman/uint256 v1.3.1 // indirect github.com/huin/goupnp v1.3.0 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect github.com/jackc/pgio v1.0.0 // indirect @@ -98,7 +98,7 @@ require ( github.com/jackc/puddle v1.3.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jmoiron/sqlx v1.2.0 // indirect - github.com/klauspost/compress v1.17.2 // indirect + github.com/klauspost/compress v1.17.9 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/ledgerwatch/log/v3 v3.9.0 // indirect @@ -107,10 +107,11 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-runewidth v0.0.13 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect github.com/miguelmota/go-solidity-sha3 v0.1.1 // 
indirect github.com/mitchellh/pointerstructure v1.2.0 // indirect github.com/mmcloughlin/addchain v0.4.0 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/ncruces/go-strftime v0.1.9 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/onsi/gomega v1.27.10 // indirect @@ -118,13 +119,13 @@ require ( github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_golang v1.20.4 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.48.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect + github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/rivo/uniseg v0.2.0 // indirect - github.com/rogpeppe/go-internal v1.11.0 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/rs/cors v1.7.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect @@ -145,12 +146,12 @@ require ( github.com/valyala/fastrand v1.1.0 // indirect github.com/valyala/histogram v1.2.0 // indirect github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect - github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect + github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect go.uber.org/multierr v1.10.0 // indirect golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect - golang.org/x/sys v0.22.0 // indirect - golang.org/x/text v0.16.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.18.0 // indirect golang.org/x/time v0.5.0 // indirect 
google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index 818a9b5d..3de7966e 100644 --- a/go.sum +++ b/go.sum @@ -1,18 +1,18 @@ -github.com/0xPolygon/cdk-contracts-tooling v0.0.0-20240819092536-5a65d4761b2f h1:i9oCNDG4N7ha3fNkEKbito/HF3o4gjnW6//cpTwnp8E= -github.com/0xPolygon/cdk-contracts-tooling v0.0.0-20240819092536-5a65d4761b2f/go.mod h1:mFlcEjsm2YBBsu8atHJ3zyVnwM+Z/fMXpVmIJge+WVU= -github.com/0xPolygon/cdk-data-availability v0.0.9 h1:KkP+hJH9nY5fljpSNtW2pfP5YQCJOsSRzfnl0yT78rI= -github.com/0xPolygon/cdk-data-availability v0.0.9/go.mod h1:5A+CU4FGeyG8zTDJc0khMzRxPzFzmpRydeEWmLztgh4= -github.com/0xPolygon/cdk-rpc v0.0.0-20240905074455-431d3c271fe8 h1:Jri+ydl8PudddGacnVLatrCuAG9e1Ut8W4J0GoawssU= -github.com/0xPolygon/cdk-rpc v0.0.0-20240905074455-431d3c271fe8/go.mod h1:2scWqMMufrQXu7TikDgQ3BsyaKoX8qP26D6E262vSOg= +github.com/0xPolygon/cdk-contracts-tooling v0.0.0-20240826154954-f6182d2b17a2 h1:N5qvWG4amhUt6d1F4Kf8AdJZs4z7/xZfE3v/Im2afNM= +github.com/0xPolygon/cdk-contracts-tooling v0.0.0-20240826154954-f6182d2b17a2/go.mod h1:mFlcEjsm2YBBsu8atHJ3zyVnwM+Z/fMXpVmIJge+WVU= +github.com/0xPolygon/cdk-data-availability v0.0.10 h1:pVcke2I7GuPH7JeRLKokEOHffPMwEHmJd9yDXHqh9B4= +github.com/0xPolygon/cdk-data-availability v0.0.10/go.mod h1:nn5RmnkzOiugAxizSbaYnA+em79YLLLoR25i0UlKc5Q= +github.com/0xPolygon/cdk-rpc v0.0.0-20241004114257-6c3cb6eebfb6 h1:FXL/rcO7/GtZ3kRFw+C7J6vmGnl8gcazg+Gh/NVmnas= +github.com/0xPolygon/cdk-rpc v0.0.0-20241004114257-6c3cb6eebfb6/go.mod h1:2scWqMMufrQXu7TikDgQ3BsyaKoX8qP26D6E262vSOg= +github.com/0xPolygon/zkevm-ethtx-manager v0.2.0 h1:QWE6nKBBHkMEiza723hJk0+oZbLSdQZTX4I48jWw15I= +github.com/0xPolygon/zkevm-ethtx-manager v0.2.0/go.mod h1:lqQmzSo2OXEZItD0R4Cd+lqKFxphXEWgqHefVcGDZZc= github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6 h1:BSO1uu6dmLQ5kKb3uyDvsUxbnIoyumKvlwr0OtpTYMo= github.com/0xPolygonHermez/zkevm-data-streamer v0.2.6/go.mod 
h1:RC6ouyNsUtJrv5aGPcM6Dm5xhXN209tRSzcsJsaOtZI= -github.com/0xPolygonHermez/zkevm-ethtx-manager v0.1.10-0.20240716105056-c051c96d0234 h1:QElCysO7f2xaknY/RDjxcs7IVmcgORfsCX2g+YD0Ko4= -github.com/0xPolygonHermez/zkevm-ethtx-manager v0.1.10-0.20240716105056-c051c96d0234/go.mod h1:zBZWxwOHKlw+ghd9roQLgIkDZWA7e7qO3EsfQQT/+oQ= -github.com/0xPolygonHermez/zkevm-synchronizer-l1 v0.7.0 h1:h/B5AzWSZTxb1HouulXeE9nbHD1d4/nc67ZQc0khAQA= -github.com/0xPolygonHermez/zkevm-synchronizer-l1 v0.7.0/go.mod h1:+tQwkDf+5AL3dgL6G1t0qmwct0NJDlGlzqycOM5jn5g= +github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.4 h1:+ZbyEpaBZu88jWtov/7iBWvwgBMu5cxlvAFDxsPrnGQ= +github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.4/go.mod h1:X4Su/M/+hSISqdl9yomKlRsbTyuZHsRohporyHsP8gg= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= -github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/DataDog/zstd v1.5.6 h1:LbEglqepa/ipmmQJUDnSsfvA8e8IStVcGaFWDuxvGOY= +github.com/DataDog/zstd v1.5.6/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= @@ -31,10 +31,10 @@ github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPn github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bits-and-blooms/bitset v1.10.0 h1:ePXTeiPEazB5+opbv5fr8umg2R/1NlzgDsyepwsSr88= -github.com/bits-and-blooms/bitset v1.10.0/go.mod 
h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= -github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= -github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= +github.com/bits-and-blooms/bitset v1.14.2 h1:YXVoyPndbdvcEVcseEovVfp0qjJp7S+i5+xgp/Nfbdc= +github.com/bits-and-blooms/bitset v1.14.2/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ= +github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= @@ -53,28 +53,30 @@ github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= -github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8= -github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw= +github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= +github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= +github.com/cockroachdb/fifo v0.0.0-20240816210425-c5d0cb0b6fc0 h1:pU88SPhIFid6/k0egdR5V6eALQYq2qbSmukrkgIh/0A= +github.com/cockroachdb/fifo v0.0.0-20240816210425-c5d0cb0b6fc0/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b 
h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/pebble v1.1.0 h1:pcFh8CdCIt2kmEpK0OIatq67Ln9uGDYY3d5XnE0LJG4= -github.com/cockroachdb/pebble v1.1.0/go.mod h1:sEHm5NOXxyiAoKWhoFxT8xMgd/f3RA6qUqQ1BXKrh2E= +github.com/cockroachdb/pebble v1.1.2 h1:CUh2IPtR4swHlEj48Rhfzw6l/d0qA31fItcIszQVIsA= +github.com/cockroachdb/pebble v1.1.2/go.mod h1:4exszw1r40423ZsmkG/09AFEG83I0uDgfujJdbL6kYU= github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= -github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M= -github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= +github.com/consensys/gnark-crypto v0.13.0 h1:VPULb/v6bbYELAPTDFINEVaMTTybV5GLxDdcjnS+4oc= +github.com/consensys/gnark-crypto v0.13.0/go.mod h1:wKqwsieaKPThcFkHe0d0zMsbHEUWFmZcG7KBCse210o= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c 
h1:uQYC5Z1mdLRPrZhHjHxufI8+2UG/i25QG92j0Er9p6I= -github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs= -github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI= -github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc= +github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a h1:W8mUrRp6NOVl3J+MYp5kPMoUZPp7aOYHtaua31lwRHg= +github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a/go.mod h1:sTwzHBvIzm2RfVCGNEBZgRyjwK40bVoun3ZnGOCafNM= +github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4= +github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -83,24 +85,22 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM= github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= -github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= -github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= +github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= +github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod 
h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/didip/tollbooth/v6 v6.1.2 h1:Kdqxmqw9YTv0uKajBUiWQg+GURL/k4vy9gmLCL01PjQ= github.com/didip/tollbooth/v6 v6.1.2/go.mod h1:xjcse6CTHCLuOkzsWrEgdy9WPJFv+p/x6v+MyfP+O9s= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/erigontech/mdbx-go v0.27.14 h1:IVVeQVCAjZRpAR8bThlP2ISxrOwdV35NZdGwAgotaRw= github.com/erigontech/mdbx-go v0.27.14/go.mod h1:FAMxbOgqOnRDx51j8HjuJZIgznbDwjX7LItd+/UWyA4= -github.com/ethereum/c-kzg-4844 v1.0.0 h1:0X1LBXxaEtYD9xsyj9B9ctQEZIpnvVDeoBx8aHEwTNA= -github.com/ethereum/c-kzg-4844 v1.0.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= -github.com/ethereum/go-ethereum v1.14.5 h1:szuFzO1MhJmweXjoM5nSAeDvjNUH3vIQoMzzQnfvjpw= -github.com/ethereum/go-ethereum v1.14.5/go.mod h1:VEDGGhSxY7IEjn98hJRFXl/uFvpRgbIIf2PpXiyGGgc= +github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs= +github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= +github.com/ethereum/go-ethereum v1.14.8 h1:NgOWvXS+lauK+zFukEvi85UmmsS/OkV0N23UZ1VTIig= +github.com/ethereum/go-ethereum v1.14.8/go.mod h1:TJhyuDq0JDppAkFXgqjwpdlQApywnu/m10kFPxh8vvs= github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0 h1:KrE8I4reeVvf7C1tm8elRjj4BdscTYzz/WAbYyf/JI4= github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0/go.mod h1:D9AJLVXSyZQXJQVk8oh1EwjISE+sJTn2duYIZC0dy3w= -github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA= -github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/frankban/quicktest v1.14.6 
h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -110,8 +110,8 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8xV8uYKlyuj8XIruxlh9WjVjdh1gIicAS7ays= github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= -github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0= -github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ= +github.com/getsentry/sentry-go v0.28.1 h1:zzaSm/vHmGllRM6Tpx1492r0YDzauArdBfkJRtY6P5k= +github.com/getsentry/sentry-go v0.28.1/go.mod h1:1fQZ+7l7eeJ3wYi82q5Hg8GqAPgefRq+FP/QhafYVgg= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= @@ -135,8 +135,8 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= -github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= +github.com/gofrs/flock v0.12.1/go.mod 
h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= @@ -172,8 +172,8 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4 github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= -github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE= @@ -188,14 +188,14 @@ github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 h1:X4egAf/gcS1zATw6w github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= -github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU= -github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= +github.com/holiman/uint256 v1.3.1 h1:JfTzmih28bittyHM8z360dCjIA9dbPIBlcTI6lmctQs= 
+github.com/holiman/uint256 v1.3.1/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/iden3/go-iden3-crypto v0.0.16 h1:zN867xiz6HgErXVIV/6WyteGcOukE9gybYTorBMEdsk= -github.com/iden3/go-iden3-crypto v0.0.16/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= +github.com/iden3/go-iden3-crypto v0.0.17 h1:NdkceRLJo/pI4UpcjVah4lN/a3yzxRUGXqxbWcYh9mY= +github.com/iden3/go-iden3-crypto v0.0.17/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= github.com/invopop/jsonschema v0.12.0 h1:6ovsNSuvn9wEQVOyc72aycBMVQFKz7cPdMJn10CvzRI= github.com/invopop/jsonschema v0.12.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= @@ -254,8 +254,8 @@ github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhB github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= -github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= 
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -299,8 +299,8 @@ github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= -github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.14.7/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.23 h1:gbShiuAP1W5j9UOksQ06aiiqPMxYecovVGwmTxWtuw0= @@ -315,11 +315,14 @@ github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8oh github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= 
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= +github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -349,29 +352,29 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= +github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= -github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= 
+github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= -github.com/rubenv/sql-migrate v1.6.1 h1:bo6/sjsan9HaXAsNxYP/jCEDUGibHp8JmOBw7NTGRos= -github.com/rubenv/sql-migrate v1.6.1/go.mod h1:tPzespupJS0jacLfhbwto/UjSX+8h2FdWB7ar+QlHa0= +github.com/rubenv/sql-migrate v1.7.0 h1:HtQq1xyTN2ISmQDggnh0c9U3JlP8apWh8YO2jzlXpTI= 
+github.com/rubenv/sql-migrate v1.7.0/go.mod h1:S4wtDEG1CKn+0ShpTtzWhFpHHI5PvCUtiGI+C+Z2THE= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/meddler v1.0.1 h1:JLR7Z4M4iGm1nr7DIURBq18UW8cTrm+qArUFgOhELo8= @@ -431,16 +434,16 @@ github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+F github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= -github.com/urfave/cli/v2 v2.27.2 h1:6e0H+AkS+zDckwPCUrZkKX38mRaau4nL2uipkJpbkcI= -github.com/urfave/cli/v2 v2.27.2/go.mod h1:g0+79LmHHATl7DAcHO99smiR/T7uGLw84w8Y42x+4eM= +github.com/urfave/cli/v2 v2.27.4 h1:o1owoI+02Eb+K107p27wEX9Bb8eqIoZCfLXloLUSWJ8= +github.com/urfave/cli/v2 v2.27.4/go.mod h1:m4QzxcD2qpra4z7WhzEGn74WZLViBnMpb1ToCAKdGRQ= github.com/valyala/fastrand v1.1.0 h1:f+5HkLW4rsgzdNoleUOB69hyT9IlD2ZQh9GyDMfb5G8= github.com/valyala/fastrand v1.1.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ= github.com/valyala/histogram v1.2.0 h1:wyYGAZZt3CpwUiIb9AU/Zbllg1llXyrtApRS815OLoQ= github.com/valyala/histogram v1.2.0/go.mod h1:Hb4kBwb4UxsaNbbbh+RRz8ZR6pdodR57tzWUS3BUzXY= github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= -github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 h1:+qGGcbkzsfDQNPPe9UDgpxAWQrhbbBXOYJFQDq/dtJw= -github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913/go.mod h1:4aEEwZQutDLsQv2Deui4iYQ6DWTxR14g6m8Wv88+Xqk= +github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4= +github.com/xrash/smetrics 
v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= @@ -476,8 +479,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -499,15 +502,15 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= 
+golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -539,8 +542,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term 
v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -550,8 +553,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= diff --git a/l1bridge2infoindexsync/config.go b/l1bridge2infoindexsync/config.go deleted file mode 100644 index ef37f738..00000000 --- a/l1bridge2infoindexsync/config.go +++ /dev/null @@ -1,15 +0,0 @@ -package l1bridge2infoindexsync - -import "github.com/0xPolygon/cdk/config/types" - -type Config struct { - // DBPath path of the DB - DBPath string `mapstructure:"DBPath"` - // RetryAfterErrorPeriod is the time that will be waited when an unexpected error happens before retry - RetryAfterErrorPeriod types.Duration `mapstructure:"RetryAfterErrorPeriod"` - // MaxRetryAttemptsAfterError is the maximum number of consecutive attempts that will happen before panicing. 
- // Any number smaller than zero will be considered as unlimited retries - MaxRetryAttemptsAfterError int `mapstructure:"MaxRetryAttemptsAfterError"` - // WaitForSyncersPeriod time that will be waited when the synchronizer has reached the latest state - WaitForSyncersPeriod types.Duration `mapstructure:"WaitForSyncersPeriod"` -} diff --git a/l1bridge2infoindexsync/downloader.go b/l1bridge2infoindexsync/downloader.go deleted file mode 100644 index f4db8422..00000000 --- a/l1bridge2infoindexsync/downloader.go +++ /dev/null @@ -1,70 +0,0 @@ -package l1bridge2infoindexsync - -import ( - "context" - "math/big" - - "github.com/0xPolygon/cdk/bridgesync" - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/tree/types" - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/rpc" -) - -type downloader struct { - l1Bridge *bridgesync.BridgeSync - l1Info *l1infotreesync.L1InfoTreeSync - l1Client ethereum.ChainReader -} - -func newDownloader( - l1Bridge *bridgesync.BridgeSync, - l1Info *l1infotreesync.L1InfoTreeSync, - l1Client ethereum.ChainReader, -) *downloader { - return &downloader{ - l1Bridge: l1Bridge, - l1Info: l1Info, - l1Client: l1Client, - } -} - -func (d *downloader) getLastFinalizedL1Block(ctx context.Context) (uint64, error) { - b, err := d.l1Client.BlockByNumber(ctx, big.NewInt(int64(rpc.FinalizedBlockNumber))) - if err != nil { - return 0, err - } - - return b.NumberU64(), nil -} - -func (d *downloader) getLastProcessedBlockBridge(ctx context.Context) (uint64, error) { - return d.l1Bridge.GetLastProcessedBlock(ctx) -} - -func (d *downloader) getLastProcessedBlockL1InfoTree(ctx context.Context) (uint64, error) { - return d.l1Info.GetLastProcessedBlock(ctx) -} - -func (d *downloader) getLastL1InfoIndexUntilBlock(ctx context.Context, blockNum uint64) (uint32, error) { - info, err := d.l1Info.GetLatestInfoUntilBlock(ctx, blockNum) - if err != nil { - return 0, err - } - - return 
info.L1InfoTreeIndex, nil -} - -func (d *downloader) getMainnetExitRootAtL1InfoTreeIndex(ctx context.Context, index uint32) (common.Hash, error) { - leaf, err := d.l1Info.GetInfoByIndex(ctx, index) - if err != nil { - return common.Hash{}, err - } - - return leaf.MainnetExitRoot, nil -} - -func (d *downloader) getBridgeIndex(ctx context.Context, mainnetExitRoot common.Hash) (types.Root, error) { - return d.l1Bridge.GetBridgeRootByHash(ctx, mainnetExitRoot) -} diff --git a/l1bridge2infoindexsync/driver.go b/l1bridge2infoindexsync/driver.go deleted file mode 100644 index 921a0c41..00000000 --- a/l1bridge2infoindexsync/driver.go +++ /dev/null @@ -1,221 +0,0 @@ -package l1bridge2infoindexsync - -import ( - "context" - "errors" - "time" - - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/sync" -) - -type driver struct { - downloader *downloader - processor *processor - rh *sync.RetryHandler - waitForSyncersPeriod time.Duration -} - -func newDriver( - downloader *downloader, - processor *processor, - rh *sync.RetryHandler, - waitForSyncersPeriod time.Duration, -) *driver { - return &driver{ - downloader: downloader, - processor: processor, - rh: rh, - waitForSyncersPeriod: waitForSyncersPeriod, - } -} - -func (d *driver) sync(ctx context.Context) { - var ( - attempts int - lpbProcessor uint64 - lastProcessedL1InfoIndex uint32 - err error - ) - for { - lpbProcessor, lastProcessedL1InfoIndex, err = d.processor.GetLastProcessedBlockAndL1InfoTreeIndex(ctx) - if err != nil { - attempts++ - log.Errorf("error getting last processed block and index: %v", err) - d.rh.Handle("GetLastProcessedBlockAndL1InfoTreeIndex", attempts) - - continue - } - - break - } - for { - attempts = 0 - var ( - syncUntilBlock uint64 - shouldWait bool - ) - for { - syncUntilBlock, shouldWait, err = d.getTargetSynchronizationBlock(ctx, lpbProcessor) - if err != nil { - attempts++ - log.Errorf("error getting target sync block: %v", err) - 
d.rh.Handle("getTargetSynchronizationBlock", attempts) - - continue - } - - break - } - if shouldWait { - log.Debugf("waiting for syncers to catch up") - time.Sleep(d.waitForSyncersPeriod) - - continue - } - - attempts = 0 - var lastL1InfoTreeIndex uint32 - found := false - for { - lastL1InfoTreeIndex, err = d.downloader.getLastL1InfoIndexUntilBlock(ctx, syncUntilBlock) - if err != nil { - if errors.Is(err, l1infotreesync.ErrNotFound) || errors.Is(err, l1infotreesync.ErrBlockNotProcessed) { - log.Debugf("l1 info tree index not ready, querying until block %d: %s", syncUntilBlock, err) - - break - } - attempts++ - log.Errorf("error getting last l1 info tree index: %v", err) - d.rh.Handle("getLastL1InfoIndexUntilBlock", attempts) - - continue - } - found = true - - break - } - if !found { - time.Sleep(d.waitForSyncersPeriod) - - continue - } - - relations := []bridge2L1InfoRelation{} - var init uint32 - if lastProcessedL1InfoIndex > 0 { - init = lastProcessedL1InfoIndex + 1 - } - if init <= lastL1InfoTreeIndex { - log.Debugf("getting relations from index %d to %d", init, lastL1InfoTreeIndex) - } - for i := init; i <= lastL1InfoTreeIndex; i++ { - attempts = 0 - for { - relation, err := d.getRelation(ctx, i) - if err != nil { - attempts++ - log.Errorf("error getting relation: %v", err) - d.rh.Handle("getRelation", attempts) - - continue - } - relations = append(relations, relation) - - break - } - } - - attempts = 0 - log.Debugf("processing until block %d: %+v", syncUntilBlock, relations) - for { - if err := d.processor.processUntilBlock(ctx, syncUntilBlock, relations); err != nil { - attempts++ - log.Errorf("error processing block: %v", err) - d.rh.Handle("processUntilBlock", attempts) - - continue - } - - break - } - - lpbProcessor = syncUntilBlock - if len(relations) > 0 { - lastProcessedL1InfoIndex = relations[len(relations)-1].l1InfoTreeIndex - log.Debugf("last processed index %d", lastProcessedL1InfoIndex) - } - } -} - -func (d *driver) 
getTargetSynchronizationBlock( - ctx context.Context, lpbProcessor uint64, -) (syncUntilBlock uint64, shouldWait bool, err error) { - // NOTE: if this had configurable finality, it would be needed to deal with reorgs - lastFinalised, err := d.downloader.getLastFinalizedL1Block(ctx) - if err != nil { - return - } - checkProcessedBlockFn := func(blockToCheck, lastProcessed uint64, blockType string) bool { - if blockToCheck >= lastProcessed { - log.Debugf( - "should wait because the last processed block (%d) is greater or equal than the %s (%d)", - blockToCheck, blockType, lastProcessed) - shouldWait = true - - return true - } - - return false - } - if checkProcessedBlockFn(lpbProcessor, lastFinalised, "last finalised") { - return - } - lpbInfo, err := d.downloader.getLastProcessedBlockL1InfoTree(ctx) - if err != nil { - return - } - if checkProcessedBlockFn(lpbProcessor, lastFinalised, "last block from L1 Info tree sync") { - return - } - lpbBridge, err := d.downloader.getLastProcessedBlockBridge(ctx) - if err != nil { - return - } - if checkProcessedBlockFn(lpbProcessor, lastFinalised, "last block from l1 bridge sync") { - return - } - - // Bridge, L1Info and L1 ahead of procesor. 
Pick the smallest block num as target - if lastFinalised <= lpbInfo { - log.Debugf("target sync block is the last finalised block (%d)", lastFinalised) - syncUntilBlock = lastFinalised - } else { - log.Debugf("target sync block is the last processed block from L1 info tree (%d)", lpbInfo) - syncUntilBlock = lpbInfo - } - if lpbBridge < syncUntilBlock { - log.Debugf("target sync block is the last processed block from bridge (%d)", lpbBridge) - syncUntilBlock = lpbBridge - } - - return -} - -func (d *driver) getRelation(ctx context.Context, l1InfoIndex uint32) (bridge2L1InfoRelation, error) { - mer, err := d.downloader.getMainnetExitRootAtL1InfoTreeIndex(ctx, l1InfoIndex) - if err != nil { - return bridge2L1InfoRelation{}, err - } - - bridgeRoot, err := d.downloader.getBridgeIndex(ctx, mer) - if err != nil { - return bridge2L1InfoRelation{}, err - } - - return bridge2L1InfoRelation{ - bridgeIndex: bridgeRoot.Index, - l1InfoTreeIndex: l1InfoIndex, - }, nil -} diff --git a/l1bridge2infoindexsync/e2e_test.go b/l1bridge2infoindexsync/e2e_test.go deleted file mode 100644 index e134c1ab..00000000 --- a/l1bridge2infoindexsync/e2e_test.go +++ /dev/null @@ -1,232 +0,0 @@ -package l1bridge2infoindexsync_test - -import ( - "context" - "errors" - "fmt" - "math/big" - "path" - "strconv" - "testing" - "time" - - "github.com/0xPolygon/cdk-contracts-tooling/contracts/elderberry-paris/polygonzkevmbridgev2" - "github.com/0xPolygon/cdk-contracts-tooling/contracts/elderberry-paris/polygonzkevmglobalexitrootv2" - "github.com/0xPolygon/cdk/bridgesync" - cdktypes "github.com/0xPolygon/cdk/config/types" - "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/l1bridge2infoindexsync" - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/reorgdetector" - "github.com/0xPolygon/cdk/test/contracts/transparentupgradableproxy" - "github.com/0xPolygon/cdk/test/helpers" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - 
"github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethclient/simulated" - "github.com/ethereum/go-ethereum/rpc" - "github.com/stretchr/testify/require" -) - -func newSimulatedClient(authDeployer, authCaller *bind.TransactOpts) ( - client *simulated.Backend, - gerAddr common.Address, - bridgeAddr common.Address, - gerContract *polygonzkevmglobalexitrootv2.Polygonzkevmglobalexitrootv2, - bridgeContract *polygonzkevmbridgev2.Polygonzkevmbridgev2, - err error, -) { - ctx := context.Background() - balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) - genesisAlloc := map[common.Address]types.Account{ - authDeployer.From: { - Balance: balance, - }, - authCaller.From: { - Balance: balance, - }, - } - blockGasLimit := uint64(999999999999999999) - client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit)) - - bridgeImplementationAddr, _, _, err := polygonzkevmbridgev2.DeployPolygonzkevmbridgev2(authDeployer, client.Client()) - if err != nil { - return nil, common.Address{}, common.Address{}, nil, nil, err - } - client.Commit() - - nonce, err := client.Client().PendingNonceAt(ctx, authDeployer.From) - if err != nil { - return nil, common.Address{}, common.Address{}, nil, nil, err - } - precalculatedAddr := crypto.CreateAddress(authDeployer.From, nonce+1) - bridgeABI, err := polygonzkevmbridgev2.Polygonzkevmbridgev2MetaData.GetAbi() - if err != nil { - return nil, common.Address{}, common.Address{}, nil, nil, err - } - if bridgeABI == nil { - return nil, common.Address{}, common.Address{}, nil, nil, errors.New("GetABI returned nil") - } - dataCallProxy, err := bridgeABI.Pack("initialize", - uint32(0), // networkIDMainnet - common.Address{}, // gasTokenAddressMainnet" - uint32(0), // gasTokenNetworkMainnet - precalculatedAddr, - common.Address{}, - []byte{}, // gasTokenMetadata - ) - if err != nil { - return nil, common.Address{}, common.Address{}, nil, nil, 
err - } - bridgeAddr, _, _, err = transparentupgradableproxy.DeployTransparentupgradableproxy( - authDeployer, - client.Client(), - bridgeImplementationAddr, - authDeployer.From, - dataCallProxy, - ) - if err != nil { - return nil, common.Address{}, common.Address{}, nil, nil, err - } - client.Commit() - bridgeContract, err = polygonzkevmbridgev2.NewPolygonzkevmbridgev2(bridgeAddr, client.Client()) - if err != nil { - return nil, common.Address{}, common.Address{}, nil, nil, err - } - checkGERAddr, err := bridgeContract.GlobalExitRootManager(&bind.CallOpts{}) - if err != nil { - return nil, common.Address{}, common.Address{}, nil, nil, err - } - if precalculatedAddr != checkGERAddr { - err = errors.New("error deploying bridge") - return - } - - gerAddr, _, gerContract, err = polygonzkevmglobalexitrootv2.DeployPolygonzkevmglobalexitrootv2( - authDeployer, client.Client(), authCaller.From, bridgeAddr, - ) - if err != nil { - return nil, common.Address{}, common.Address{}, nil, nil, err - } - client.Commit() - - if precalculatedAddr != gerAddr { - return nil, common.Address{}, common.Address{}, nil, nil, errors.New("error calculating addr") - } - - return client, gerAddr, bridgeAddr, gerContract, bridgeContract, nil -} - -func TestE2E(t *testing.T) { - ctx := context.Background() - dbPathBridgeSync := path.Join(t.TempDir(), "file::memory:?cache=shared") - dbPathL1Sync := path.Join(t.TempDir(), "file::memory:?cache=shared") - dbPathReorg := t.TempDir() - dbPathL12InfoSync := t.TempDir() - - privateKey, err := crypto.GenerateKey() - require.NoError(t, err) - authDeployer, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337)) - require.NoError(t, err) - privateKey, err = crypto.GenerateKey() - require.NoError(t, err) - auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337)) - require.NoError(t, err) - require.NotEqual(t, authDeployer.From, auth.From) - client, gerAddr, bridgeAddr, gerSc, bridgeSc, err := 
newSimulatedClient(authDeployer, auth) - require.NoError(t, err) - rd, err := reorgdetector.New(client.Client(), reorgdetector.Config{DBPath: dbPathReorg, CheckReorgsInterval: cdktypes.NewDuration(time.Second)}) - require.NoError(t, err) - require.NoError(t, rd.Start(ctx)) - - testClient := helpers.TestClient{ClientRenamed: client.Client()} - bridgeSync, err := bridgesync.NewL1(ctx, dbPathBridgeSync, bridgeAddr, 10, etherman.LatestBlock, rd, testClient, 0, time.Millisecond*10, 0, 0) - require.NoError(t, err) - go bridgeSync.Start(ctx) - - l1Sync, err := l1infotreesync.New( - ctx, - dbPathL1Sync, - gerAddr, - common.Address{}, - 10, - etherman.SafeBlock, - rd, - client.Client(), - time.Millisecond, - 0, - time.Millisecond, - 3, - ) - require.NoError(t, err) - go l1Sync.Start(ctx) - - bridge2InfoSync, err := l1bridge2infoindexsync.New(dbPathL12InfoSync, bridgeSync, l1Sync, client.Client(), 0, 0, time.Millisecond) - require.NoError(t, err) - go bridge2InfoSync.Start(ctx) - - // Send bridge txs - expectedIndex := -1 - for i := 0; i < 10; i++ { - bridge := bridgesync.Bridge{ - Amount: big.NewInt(0), - DestinationNetwork: 3, - DestinationAddress: common.HexToAddress("f00"), - } - _, err := bridgeSc.BridgeAsset( - auth, - bridge.DestinationNetwork, - bridge.DestinationAddress, - bridge.Amount, - bridge.OriginAddress, - true, nil, - ) - require.NoError(t, err) - expectedIndex++ - client.Commit() - - // Wait for block to be finalised - updateAtBlock, err := client.Client().BlockNumber(ctx) - require.NoError(t, err) - for { - lastFinalisedBlock, err := client.Client().BlockByNumber(ctx, big.NewInt(int64(rpc.FinalizedBlockNumber))) - require.NoError(t, err) - if lastFinalisedBlock.NumberU64() >= updateAtBlock { - break - } - client.Commit() - time.Sleep(time.Microsecond) - } - - // Wait for syncer to catch up - syncerUpToDate := false - var errMsg string - lb, err := client.Client().BlockByNumber(ctx, big.NewInt(int64(rpc.FinalizedBlockNumber))) - require.NoError(t, err) - 
for i := 0; i < 10; i++ { - lpb, err := bridge2InfoSync.GetLastProcessedBlock(ctx) - require.NoError(t, err) - if lpb == lb.NumberU64() { - syncerUpToDate = true - - break - } - time.Sleep(time.Millisecond * 100) - errMsg = fmt.Sprintf("last block from client: %d, last block from syncer: %d", lb.NumberU64(), lpb) - } - require.True(t, syncerUpToDate, errMsg) - - actualIndex, err := bridge2InfoSync.GetL1InfoTreeIndexByDepositCount(ctx, uint32(i)) - require.NoError(t, err) - require.Equal(t, uint32(expectedIndex), actualIndex) - - if i%2 == 1 { - // Update L1 info tree without a bridge on L1 - _, err = gerSc.UpdateExitRoot(auth, common.HexToHash(strconv.Itoa(i))) - require.NoError(t, err) - expectedIndex++ - client.Commit() - } - } -} diff --git a/l1bridge2infoindexsync/l1bridge2infoindexsync.go b/l1bridge2infoindexsync/l1bridge2infoindexsync.go deleted file mode 100644 index c24bebba..00000000 --- a/l1bridge2infoindexsync/l1bridge2infoindexsync.go +++ /dev/null @@ -1,62 +0,0 @@ -package l1bridge2infoindexsync - -import ( - "context" - "time" - - "github.com/0xPolygon/cdk/bridgesync" - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/sync" - "github.com/ethereum/go-ethereum" -) - -type L1Bridge2InfoIndexSync struct { - processor *processor - driver *driver -} - -func New( - dbPath string, - l1Bridge *bridgesync.BridgeSync, - l1Info *l1infotreesync.L1InfoTreeSync, - l1Client ethereum.ChainReader, - retryAfterErrorPeriod time.Duration, - maxRetryAttemptsAfterError int, - waitForSyncersPeriod time.Duration, -) (*L1Bridge2InfoIndexSync, error) { - dwn := newDownloader(l1Bridge, l1Info, l1Client) - - prc, err := newProcessor(dbPath) - if err != nil { - return nil, err - } - - rh := &sync.RetryHandler{ - RetryAfterErrorPeriod: retryAfterErrorPeriod, - MaxRetryAttemptsAfterError: maxRetryAttemptsAfterError, - } - drv := newDriver(dwn, prc, rh, waitForSyncersPeriod) - - return &L1Bridge2InfoIndexSync{ - driver: drv, - processor: prc, - }, nil -} - -func 
(s *L1Bridge2InfoIndexSync) Start(ctx context.Context) { - s.driver.sync(ctx) -} - -// GetLastProcessedBlock retrieves the last processed block number by the processor. -func (s *L1Bridge2InfoIndexSync) GetLastProcessedBlock(ctx context.Context) (uint64, error) { - lpb, _, err := s.processor.GetLastProcessedBlockAndL1InfoTreeIndex(ctx) - - return lpb, err -} - -// GetL1InfoTreeIndexByDepositCount retrieves the L1 Info Tree index for a given deposit count. -func (s *L1Bridge2InfoIndexSync) GetL1InfoTreeIndexByDepositCount( - ctx context.Context, depositCount uint32, -) (uint32, error) { - return s.processor.getL1InfoTreeIndexByBridgeIndex(ctx, depositCount) -} diff --git a/l1bridge2infoindexsync/processor.go b/l1bridge2infoindexsync/processor.go deleted file mode 100644 index bfe9f3a6..00000000 --- a/l1bridge2infoindexsync/processor.go +++ /dev/null @@ -1,206 +0,0 @@ -package l1bridge2infoindexsync - -import ( - "context" - "errors" - "fmt" - - "github.com/0xPolygon/cdk/common" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/mdbx" -) - -const ( - lastProcessedTable = "l1bridge2infoindexsync-lastProcessed" - relationTable = "l1bridge2infoindexsync-relation" -) - -var ( - lastProcessedKey = []byte("lp") - ErrNotFound = errors.New("not found") -) - -type processor struct { - db kv.RwDB -} - -type bridge2L1InfoRelation struct { - bridgeIndex uint32 - l1InfoTreeIndex uint32 -} - -type lastProcessed struct { - block uint64 - index uint32 -} - -func (lp *lastProcessed) MarshalBinary() ([]byte, error) { - return append(common.Uint64ToBytes(lp.block), common.Uint32ToBytes(lp.index)...), nil -} - -func (lp *lastProcessed) UnmarshalBinary(data []byte) error { - const expectedDataLength = 12 - if len(data) != expectedDataLength { - return fmt.Errorf("expected len %d, actual len %d", expectedDataLength, len(data)) - } - lp.block = common.BytesToUint64(data[:8]) - lp.index = common.BytesToUint32(data[8:]) - - return nil -} - -func 
newProcessor(dbPath string) (*processor, error) { - tableCfgFunc := func(defaultBuckets kv.TableCfg) kv.TableCfg { - return kv.TableCfg{ - lastProcessedTable: {}, - relationTable: {}, - } - } - db, err := mdbx.NewMDBX(nil). - Path(dbPath). - WithTableCfg(tableCfgFunc). - Open() - if err != nil { - return nil, err - } - - return &processor{ - db: db, - }, nil -} - -// GetLastProcessedBlockAndL1InfoTreeIndex returns the last processed block oby the processor, including blocks -// that don't have events -func (p *processor) GetLastProcessedBlockAndL1InfoTreeIndex(ctx context.Context) (uint64, uint32, error) { - tx, err := p.db.BeginRo(ctx) - if err != nil { - return 0, 0, err - } - defer tx.Rollback() - - return p.getLastProcessedBlockAndL1InfoTreeIndexWithTx(tx) -} - -func (p *processor) getLastProcessedBlockAndL1InfoTreeIndexWithTx(tx kv.Tx) (uint64, uint32, error) { - if lastProcessedBytes, err := tx.GetOne(lastProcessedTable, lastProcessedKey); err != nil { - return 0, 0, err - } else if lastProcessedBytes == nil { - return 0, 0, nil - } else { - lp := &lastProcessed{} - if err := lp.UnmarshalBinary(lastProcessedBytes); err != nil { - return 0, 0, err - } - - return lp.block, lp.index, nil - } -} - -func (p *processor) updateLastProcessedBlockAndL1InfoTreeIndex( - ctx context.Context, blockNum uint64, index uint32, -) error { - tx, err := p.db.BeginRw(ctx) - if err != nil { - return err - } - if err := p.updateLastProcessedBlockAndL1InfoTreeIndexWithTx(tx, blockNum, index); err != nil { - tx.Rollback() - - return err - } - - return tx.Commit() -} - -func (p *processor) updateLastProcessedBlockAndL1InfoTreeIndexWithTx(tx kv.RwTx, blockNum uint64, index uint32) error { - lp := &lastProcessed{ - block: blockNum, - index: index, - } - value, err := lp.MarshalBinary() - if err != nil { - return err - } - - return tx.Put(lastProcessedTable, lastProcessedKey, value) -} - -func (p *processor) processUntilBlock( - ctx context.Context, lastProcessedBlock uint64, relations 
[]bridge2L1InfoRelation, -) error { - tx, err := p.db.BeginRw(ctx) - if err != nil { - return err - } - - if len(relations) == 0 { - _, lastIndex, err := p.getLastProcessedBlockAndL1InfoTreeIndexWithTx(tx) - if err != nil { - tx.Rollback() - - return err - } - if err := p.updateLastProcessedBlockAndL1InfoTreeIndexWithTx( - tx, - lastProcessedBlock, - lastIndex, - ); err != nil { - tx.Rollback() - - return err - } - - return tx.Commit() - } - - for _, relation := range relations { - if _, err := p.getL1InfoTreeIndexByBridgeIndexWithTx(tx, relation.bridgeIndex); !errors.Is(err, ErrNotFound) { - // Note that indexes could be repeated as the L1 Info tree update can be produced by a rollup and not mainnet. - // Hence if the index already exist, do not update as it's better to have the lowest index possible for the relation - continue - } - if err := tx.Put( - relationTable, - common.Uint32ToBytes(relation.bridgeIndex), - common.Uint32ToBytes(relation.l1InfoTreeIndex), - ); err != nil { - tx.Rollback() - - return err - } - } - - if err := p.updateLastProcessedBlockAndL1InfoTreeIndexWithTx( - tx, - lastProcessedBlock, - relations[len(relations)-1].l1InfoTreeIndex, - ); err != nil { - tx.Rollback() - - return err - } - - return tx.Commit() -} - -func (p *processor) getL1InfoTreeIndexByBridgeIndex(ctx context.Context, depositCount uint32) (uint32, error) { - tx, err := p.db.BeginRo(ctx) - if err != nil { - return 0, err - } - defer tx.Rollback() - - return p.getL1InfoTreeIndexByBridgeIndexWithTx(tx, depositCount) -} - -func (p *processor) getL1InfoTreeIndexByBridgeIndexWithTx(tx kv.Tx, depositCount uint32) (uint32, error) { - indexBytes, err := tx.GetOne(relationTable, common.Uint32ToBytes(depositCount)) - if err != nil { - return 0, err - } - if indexBytes == nil { - return 0, ErrNotFound - } - - return common.BytesToUint32(indexBytes), nil -} diff --git a/l1bridge2infoindexsync/processor_test.go b/l1bridge2infoindexsync/processor_test.go deleted file mode 100644 index 
9305dd9b..00000000 --- a/l1bridge2infoindexsync/processor_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package l1bridge2infoindexsync - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestDuplicatedKey(t *testing.T) { - dbPath := t.TempDir() - p, err := newProcessor(dbPath) - require.NoError(t, err) - ctx := context.Background() - err = p.processUntilBlock(ctx, 5, []bridge2L1InfoRelation{{bridgeIndex: 2, l1InfoTreeIndex: 2}}) - require.NoError(t, err) - err = p.processUntilBlock(ctx, 7, []bridge2L1InfoRelation{{bridgeIndex: 2, l1InfoTreeIndex: 3}}) - require.NoError(t, err) - l1InfoTreeIndex, err := p.getL1InfoTreeIndexByBridgeIndex(ctx, 2) - require.NoError(t, err) - require.Equal(t, uint32(2), l1InfoTreeIndex) -} diff --git a/l1infotreesync/downloader.go b/l1infotreesync/downloader.go index 2051f7b5..ed3c7efb 100644 --- a/l1infotreesync/downloader.go +++ b/l1infotreesync/downloader.go @@ -33,15 +33,68 @@ type EthClienter interface { bind.ContractBackend } -func buildAppender(client EthClienter, globalExitRoot, rollupManager common.Address) (sync.LogAppenderMap, error) { - ger, err := polygonzkevmglobalexitrootv2.NewPolygonzkevmglobalexitrootv2(globalExitRoot, client) +func checkSMCIsRollupManager(rollupManagerAddr common.Address, + rollupManagerContract *polygonrollupmanager.Polygonrollupmanager) error { + bridgeAddr, err := rollupManagerContract.BridgeAddress(nil) if err != nil { - return nil, err + return fmt.Errorf("fail sanity check RollupManager(%s) Contract. Err: %w", rollupManagerAddr.String(), err) + } + log.Infof("sanity check rollupManager(%s) OK. bridgeAddr: %s", rollupManagerAddr.String(), bridgeAddr.String()) + return nil +} + +func checkSMCIsGlobalExitRoot(globalExitRootAddr common.Address, + gerContract *polygonzkevmglobalexitrootv2.Polygonzkevmglobalexitrootv2) error { + depositCount, err := gerContract.DepositCount(nil) + if err != nil { + return fmt.Errorf("fail sanity check GlobalExitRoot(%s) Contract. 
Err: %w", globalExitRootAddr.String(), err) } - rm, err := polygonrollupmanager.NewPolygonrollupmanager(rollupManager, client) + log.Infof("sanity check GlobalExitRoot(%s) OK. DepositCount: %v", globalExitRootAddr.String(), depositCount) + return nil +} + +func sanityCheckContracts(globalExitRoot, rollupManager common.Address, + gerContract *polygonzkevmglobalexitrootv2.Polygonzkevmglobalexitrootv2, + rollupManagerContract *polygonrollupmanager.Polygonrollupmanager) error { + errGER := checkSMCIsGlobalExitRoot(globalExitRoot, gerContract) + errRollup := checkSMCIsRollupManager(rollupManager, rollupManagerContract) + if errGER != nil || errRollup != nil { + err := fmt.Errorf("sanityCheckContracts: fails sanity check contracts. ErrGER: %w, ErrRollup: %w", errGER, errRollup) + log.Error(err) + return err + } + return nil +} + +func createContracts(client EthClienter, globalExitRoot, rollupManager common.Address) ( + *polygonzkevmglobalexitrootv2.Polygonzkevmglobalexitrootv2, + *polygonrollupmanager.Polygonrollupmanager, + error) { + gerContract, err := polygonzkevmglobalexitrootv2.NewPolygonzkevmglobalexitrootv2(globalExitRoot, client) if err != nil { + return nil, nil, err + } + + rollupManagerContract, err := polygonrollupmanager.NewPolygonrollupmanager(rollupManager, client) + if err != nil { + return nil, nil, err + } + return gerContract, rollupManagerContract, nil +} + +func buildAppender(client EthClienter, globalExitRoot, + rollupManager common.Address, flags CreationFlags) (sync.LogAppenderMap, error) { + ger, rm, err := createContracts(client, globalExitRoot, rollupManager) + if err != nil { + err := fmt.Errorf("buildAppender: fails contracts creation. Err:%w", err) + log.Error(err) return nil, err } + err = sanityCheckContracts(globalExitRoot, rollupManager, ger, rm) + if err != nil && flags&FlagAllowWrongContractsAddrs == 0 { + return nil, fmt.Errorf("buildAppender: fails sanity check contracts. 
Err:%w", err) + } + appender := make(sync.LogAppenderMap) appender[initL1InfoRootMapSignature] = func(b *sync.EVMBlock, l types.Log) error { init, err := ger.ParseInitL1InfoRootMap(l) @@ -86,10 +139,12 @@ func buildAppender(client EthClienter, globalExitRoot, rollupManager common.Addr l, err, ) } - log.Infof("updateL1InfoTreeSignatureV2: expected root: %s", common.Bytes2Hex(l1InfoTreeUpdate.CurrentL1InfoRoot[:])) + log.Infof("updateL1InfoTreeSignatureV2: expected root: %s", + common.BytesToHash(l1InfoTreeUpdate.CurrentL1InfoRoot[:])) return nil } + // This event is coming from RollupManager appender[verifyBatchesSignature] = func(b *sync.EVMBlock, l types.Log) error { verifyBatches, err := rm.ParseVerifyBatches(l) if err != nil { diff --git a/l1infotreesync/downloader_test.go b/l1infotreesync/downloader_test.go new file mode 100644 index 00000000..6007a3d6 --- /dev/null +++ b/l1infotreesync/downloader_test.go @@ -0,0 +1,55 @@ +package l1infotreesync + +import ( + "fmt" + "math/big" + "strings" + "testing" + + "github.com/0xPolygon/cdk-contracts-tooling/contracts/banana/polygonzkevmglobalexitrootv2" + mocks_l1infotreesync "github.com/0xPolygon/cdk/l1infotreesync/mocks" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestBuildAppenderErrorOnBadContractAddr(t *testing.T) { + l1Client := mocks_l1infotreesync.NewEthClienter(t) + globalExitRoot := common.HexToAddress("0x1") + rollupManager := common.HexToAddress("0x2") + l1Client.EXPECT().CallContract(mock.Anything, mock.Anything, mock.Anything).Return(nil, fmt.Errorf("test-error")) + flags := FlagNone + _, err := buildAppender(l1Client, globalExitRoot, rollupManager, flags) + require.Error(t, err) +} + +func TestBuildAppenderBypassBadContractAddr(t *testing.T) { + l1Client := mocks_l1infotreesync.NewEthClienter(t) + globalExitRoot := common.HexToAddress("0x1") + rollupManager := 
common.HexToAddress("0x2") + l1Client.EXPECT().CallContract(mock.Anything, mock.Anything, mock.Anything).Return(nil, fmt.Errorf("test-error")) + flags := FlagAllowWrongContractsAddrs + _, err := buildAppender(l1Client, globalExitRoot, rollupManager, flags) + require.NoError(t, err) +} + +func TestBuildAppenderVerifiedContractAddr(t *testing.T) { + l1Client := mocks_l1infotreesync.NewEthClienter(t) + globalExitRoot := common.HexToAddress("0x1") + rollupManager := common.HexToAddress("0x2") + + smcAbi, err := abi.JSON(strings.NewReader(polygonzkevmglobalexitrootv2.Polygonzkevmglobalexitrootv2ABI)) + require.NoError(t, err) + bigInt := big.NewInt(1) + returnGER, err := smcAbi.Methods["depositCount"].Outputs.Pack(bigInt) + require.NoError(t, err) + l1Client.EXPECT().CallContract(mock.Anything, mock.Anything, mock.Anything).Return(returnGER, nil).Once() + v := common.HexToAddress("0x1234") + returnRM, err := smcAbi.Methods["bridgeAddress"].Outputs.Pack(v) + require.NoError(t, err) + l1Client.EXPECT().CallContract(mock.Anything, mock.Anything, mock.Anything).Return(returnRM, nil).Once() + flags := FlagNone + _, err = buildAppender(l1Client, globalExitRoot, rollupManager, flags) + require.NoError(t, err) +} diff --git a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go index 90f7f091..70986cbf 100644 --- a/l1infotreesync/e2e_test.go +++ b/l1infotreesync/e2e_test.go @@ -2,7 +2,6 @@ package l1infotreesync_test import ( "context" - "errors" "fmt" "math/big" "path" @@ -16,6 +15,7 @@ import ( "github.com/0xPolygon/cdk/l1infotreesync" "github.com/0xPolygon/cdk/reorgdetector" "github.com/0xPolygon/cdk/test/contracts/verifybatchesmock" + "github.com/0xPolygon/cdk/test/helpers" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -25,63 +25,49 @@ import ( "github.com/stretchr/testify/require" ) -func newSimulatedClient(auth *bind.TransactOpts) ( - client *simulated.Backend, - gerAddr 
common.Address, - verifyAddr common.Address, - gerContract *polygonzkevmglobalexitrootv2.Polygonzkevmglobalexitrootv2, - verifyContract *verifybatchesmock.Verifybatchesmock, - err error, +func newSimulatedClient(t *testing.T) ( + *simulated.Backend, + *bind.TransactOpts, + common.Address, + common.Address, + *polygonzkevmglobalexitrootv2.Polygonzkevmglobalexitrootv2, + *verifybatchesmock.Verifybatchesmock, ) { + t.Helper() + ctx := context.Background() - balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) - address := auth.From - genesisAlloc := map[common.Address]types.Account{ - address: { - Balance: balance, - }, - } - blockGasLimit := uint64(999999999999999999) - client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit)) + client, setup := helpers.SimulatedBackend(t, nil, 0) - nonce, err := client.Client().PendingNonceAt(ctx, auth.From) - if err != nil { - return - } - precalculatedAddr := crypto.CreateAddress(auth.From, nonce+1) - verifyAddr, _, verifyContract, err = verifybatchesmock.DeployVerifybatchesmock(auth, client.Client(), precalculatedAddr) - if err != nil { - return - } + nonce, err := client.Client().PendingNonceAt(ctx, setup.UserAuth.From) + require.NoError(t, err) + + precalculatedAddr := crypto.CreateAddress(setup.UserAuth.From, nonce+1) + verifyAddr, _, verifyContract, err := verifybatchesmock.DeployVerifybatchesmock(setup.UserAuth, client.Client(), precalculatedAddr) + require.NoError(t, err) client.Commit() - gerAddr, _, gerContract, err = polygonzkevmglobalexitrootv2.DeployPolygonzkevmglobalexitrootv2(auth, client.Client(), verifyAddr, auth.From) - if err != nil { - return - } + gerAddr, _, gerContract, err := polygonzkevmglobalexitrootv2.DeployPolygonzkevmglobalexitrootv2(setup.UserAuth, client.Client(), verifyAddr, setup.UserAuth.From) + require.NoError(t, err) client.Commit() - if precalculatedAddr != gerAddr { - err = errors.New("error calculating addr") - } + require.Equal(t, 
precalculatedAddr, gerAddr) - return + return client, setup.UserAuth, gerAddr, verifyAddr, gerContract, verifyContract } func TestE2E(t *testing.T) { - ctx := context.Background() + ctx, cancelCtx := context.WithCancel(context.Background()) dbPath := path.Join(t.TempDir(), "file::memory:?cache=shared") - privateKey, err := crypto.GenerateKey() - require.NoError(t, err) - auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337)) - require.NoError(t, err) + rdm := l1infotreesync.NewReorgDetectorMock(t) rdm.On("Subscribe", mock.Anything).Return(&reorgdetector.Subscription{}, nil) rdm.On("AddBlockToTrack", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) - client, gerAddr, verifyAddr, gerSc, verifySC, err := newSimulatedClient(auth) - require.NoError(t, err) - syncer, err := l1infotreesync.New(ctx, dbPath, gerAddr, verifyAddr, 10, etherman.LatestBlock, rdm, client.Client(), time.Millisecond, 0, 100*time.Millisecond, 3) + + client, auth, gerAddr, verifyAddr, gerSc, verifySC := newSimulatedClient(t) + syncer, err := l1infotreesync.New(ctx, dbPath, gerAddr, verifyAddr, 10, etherman.LatestBlock, rdm, client.Client(), time.Millisecond, 0, 100*time.Millisecond, 3, + l1infotreesync.FlagAllowWrongContractsAddrs) require.NoError(t, err) + go syncer.Start(ctx) // Update GER 3 times @@ -112,6 +98,11 @@ func TestE2E(t *testing.T) { require.Equal(t, common.Hash(expectedRoot), actualRoot.Hash) } + // Restart syncer + cancelCtx() + ctx = context.Background() + go syncer.Start(ctx) + // Update 3 rollups (verify batches event) 3 times for rollupID := uint32(1); rollupID < 3; rollupID++ { for i := 0; i < 3; i++ { @@ -119,96 +110,219 @@ func TestE2E(t *testing.T) { tx, err := verifySC.VerifyBatches(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, i%2 != 0) require.NoError(t, err) client.Commit() - // Let the processor catch up - time.Sleep(time.Millisecond * 100) receipt, err := client.Client().TransactionReceipt(ctx, tx.Hash()) 
require.NoError(t, err) require.Equal(t, receipt.Status, types.ReceiptStatusSuccessful) require.True(t, len(receipt.Logs) == 1+i%2+i%2) + // Let the processor catch + processorUpdated := false + for i := 0; i < 30; i++ { + lpb, err := syncer.GetLastProcessedBlock(ctx) + require.NoError(t, err) + if receipt.BlockNumber.Uint64() == lpb { + processorUpdated = true + break + } + time.Sleep(time.Millisecond * 10) + } + require.True(t, processorUpdated) + + // Assert rollup exit root expectedRollupExitRoot, err := verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) require.NoError(t, err) actualRollupExitRoot, err := syncer.GetLastRollupExitRoot(ctx) require.NoError(t, err) require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash, fmt.Sprintf("rollupID: %d, i: %d", rollupID, i)) + + // Assert verify batches + expectedVerify := l1infotreesync.VerifyBatches{ + BlockNumber: receipt.BlockNumber.Uint64(), + BlockPosition: uint64(i%2 + i%2), + RollupID: rollupID, + ExitRoot: newLocalExitRoot, + Aggregator: auth.From, + RollupExitRoot: expectedRollupExitRoot, + } + actualVerify, err := syncer.GetLastVerifiedBatches(rollupID) + require.NoError(t, err) + require.Equal(t, expectedVerify, *actualVerify) } } } -func TestStressAndReorgs(t *testing.T) { - const ( - totalIterations = 200 // Have tested with much larger number (+10k) - enableReorgs = false // test fails when set to true - reorgEveryXIterations = 53 - maxReorgDepth = 5 - maxEventsPerBlock = 7 - maxRollups = 31 - ) - +func TestWithReorgs(t *testing.T) { ctx := context.Background() dbPathSyncer := path.Join(t.TempDir(), "file::memory:?cache=shared") dbPathReorg := t.TempDir() - privateKey, err := crypto.GenerateKey() + + client, auth, gerAddr, verifyAddr, gerSc, verifySC := newSimulatedClient(t) + + rd, err := reorgdetector.New(client.Client(), reorgdetector.Config{DBPath: dbPathReorg, CheckReorgsInterval: cdktypes.NewDuration(time.Millisecond * 30)}) require.NoError(t, err) - auth, err := 
bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337)) + require.NoError(t, rd.Start(ctx)) + + syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerAddr, verifyAddr, 10, etherman.LatestBlock, rd, client.Client(), time.Millisecond, 0, time.Second, 25, + l1infotreesync.FlagAllowWrongContractsAddrs) + require.NoError(t, err) + go syncer.Start(ctx) + + // Commit block + header, err := client.Client().HeaderByHash(ctx, client.Commit()) // Block 3 require.NoError(t, err) - client, gerAddr, verifyAddr, gerSc, verifySC, err := newSimulatedClient(auth) + reorgFrom := header.Hash() + fmt.Println("start from header:", header.Number) + + updateL1InfoTreeAndRollupExitTree := func(i int, rollupID uint32) { + // Update L1 Info Tree + _, err := gerSc.UpdateExitRoot(auth, common.HexToHash(strconv.Itoa(i))) + require.NoError(t, err) + + // Update L1 Info Tree + Rollup Exit Tree + newLocalExitRoot := common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(1)) + _, err = verifySC.VerifyBatches(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, true) + require.NoError(t, err) + + // Update Rollup Exit Tree + newLocalExitRoot = common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(2)) + _, err = verifySC.VerifyBatches(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, false) + require.NoError(t, err) + } + + // create some events and update the trees + updateL1InfoTreeAndRollupExitTree(1, 1) + + // Block 4 + commitBlocks(t, client, 1, time.Second*5) + + // Make sure syncer is up to date + waitForSyncerToCatchUp(ctx, t, syncer, client) + + // Assert rollup exit root + expectedRollupExitRoot, err := verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + actualRollupExitRoot, err := syncer.GetLastRollupExitRoot(ctx) + require.NoError(t, err) + t.Log("exit roots", common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) + require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) + + // Assert L1 Info tree root 
+ expectedL1InfoRoot, err := gerSc.GetRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + expectedGER, err := gerSc.GetLastGlobalExitRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + actualL1InfoRoot, err := syncer.GetLastL1InfoTreeRoot(ctx) + require.NoError(t, err) + info, err := syncer.GetInfoByIndex(ctx, actualL1InfoRoot.Index) + require.NoError(t, err) + + require.Equal(t, common.Hash(expectedL1InfoRoot), actualL1InfoRoot.Hash) + require.Equal(t, common.Hash(expectedGER), info.GlobalExitRoot, fmt.Sprintf("%+v", info)) + + // Forking from block 3 + err = client.Fork(reorgFrom) + require.NoError(t, err) + + // Block 4, 5, 6 after the fork + commitBlocks(t, client, 3, time.Millisecond*500) + + // Make sure syncer is up to date + waitForSyncerToCatchUp(ctx, t, syncer, client) + + // Assert rollup exit root after the fork - should be zero since there are no events in the block after the fork + expectedRollupExitRoot, err = verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + actualRollupExitRoot, err = syncer.GetLastRollupExitRoot(ctx) + require.ErrorContains(t, err, "not found") // rollup exit tree reorged, it does not have any exits in it + t.Log("exit roots", common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) + require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) + + // Forking from block 3 again + err = client.Fork(reorgFrom) require.NoError(t, err) + time.Sleep(time.Millisecond * 500) + + // create some events and update the trees + updateL1InfoTreeAndRollupExitTree(2, 1) + + // Block 4, 5, 6, 7 after the fork + commitBlocks(t, client, 4, time.Millisecond*100) + + // Make sure syncer is up to date + waitForSyncerToCatchUp(ctx, t, syncer, client) + + // Assert rollup exit root after the fork - should be zero since there are no events in the block after the fork + expectedRollupExitRoot, err = verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) + 
require.NoError(t, err) + actualRollupExitRoot, err = syncer.GetLastRollupExitRoot(ctx) + require.NoError(t, err) + t.Log("exit roots", common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) + require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) +} + +func TestStressAndReorgs(t *testing.T) { + const ( + totalIterations = 3 + blocksInIteration = 140 + reorgEveryXIterations = 70 + reorgSizeInBlocks = 2 + maxRollupID = 31 + extraBlocksToMine = 10 + ) + + ctx := context.Background() + dbPathSyncer := path.Join(t.TempDir(), "file:TestStressAndReorgs:memory:?cache=shared") + dbPathReorg := t.TempDir() + + client, auth, gerAddr, verifyAddr, gerSc, verifySC := newSimulatedClient(t) + rd, err := reorgdetector.New(client.Client(), reorgdetector.Config{DBPath: dbPathReorg, CheckReorgsInterval: cdktypes.NewDuration(time.Millisecond * 100)}) require.NoError(t, err) require.NoError(t, rd.Start(ctx)) - syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerAddr, verifyAddr, 10, etherman.LatestBlock, rd, client.Client(), time.Millisecond, 0, 100*time.Millisecond, 3) + + syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerAddr, verifyAddr, 10, etherman.LatestBlock, rd, client.Client(), time.Millisecond, 0, time.Second, 100, + l1infotreesync.FlagAllowWrongContractsAddrs) require.NoError(t, err) go syncer.Start(ctx) - for i := 0; i < totalIterations; i++ { - for j := 0; j < i%maxEventsPerBlock; j++ { - switch j % 3 { - case 0: // Update L1 Info Tree - _, err := gerSc.UpdateExitRoot(auth, common.HexToHash(strconv.Itoa(i))) - require.NoError(t, err) - case 1: // Update L1 Info Tree + Rollup Exit Tree - newLocalExitRoot := common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(j)) - _, err := verifySC.VerifyBatches(auth, 1+uint32(i%maxRollups), 0, newLocalExitRoot, common.Hash{}, true) - require.NoError(t, err) - case 2: // Update Rollup Exit Tree - newLocalExitRoot := common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(j)) - _, err := 
verifySC.VerifyBatches(auth, 1+uint32(i%maxRollups), 0, newLocalExitRoot, common.Hash{}, false) + updateL1InfoTreeAndRollupExitTree := func(i, j int, rollupID uint32) { + // Update L1 Info Tree + _, err := gerSc.UpdateExitRoot(auth, common.HexToHash(strconv.Itoa(i))) + require.NoError(t, err) + + // Update L1 Info Tree + Rollup Exit Tree + newLocalExitRoot := common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(j)) + _, err = verifySC.VerifyBatches(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, true) + require.NoError(t, err) + + // Update Rollup Exit Tree + newLocalExitRoot = common.HexToHash(strconv.Itoa(i) + "fffa" + strconv.Itoa(j)) + _, err = verifySC.VerifyBatches(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, false) + require.NoError(t, err) + } + + for i := 1; i <= totalIterations; i++ { + for j := 1; j <= blocksInIteration; j++ { + commitBlocks(t, client, 1, time.Millisecond*10) + + if j%reorgEveryXIterations == 0 { + currentBlockNum, err := client.Client().BlockNumber(ctx) require.NoError(t, err) - } - } - client.Commit() - time.Sleep(time.Microsecond * 30) // Sleep just enough for goroutine to switch - if enableReorgs && i%reorgEveryXIterations == 0 { - reorgDepth := i%maxReorgDepth + 1 - currentBlockNum, err := client.Client().BlockNumber(ctx) - require.NoError(t, err) - targetReorgBlockNum := currentBlockNum - uint64(reorgDepth) - if targetReorgBlockNum < currentBlockNum { // we are dealing with uints... 
- reorgBlock, err := client.Client().BlockByNumber(ctx, big.NewInt(int64(targetReorgBlockNum))) + + block, err := client.Client().BlockByNumber(ctx, big.NewInt(int64(currentBlockNum-reorgSizeInBlocks))) require.NoError(t, err) - err = client.Fork(reorgBlock.Hash()) + reorgFrom := block.Hash() + err = client.Fork(reorgFrom) require.NoError(t, err) + } else { + updateL1InfoTreeAndRollupExitTree(i, j, uint32(j%maxRollupID)+1) } } } - syncerUpToDate := false - var errMsg string - lb, err := client.Client().BlockNumber(ctx) - require.NoError(t, err) - for i := 0; i < 50; i++ { - lpb, err := syncer.GetLastProcessedBlock(ctx) - require.NoError(t, err) - if lpb == lb { - syncerUpToDate = true + commitBlocks(t, client, 1, time.Millisecond*10) - break - } - time.Sleep(time.Millisecond * 100) - errMsg = fmt.Sprintf("last block from client: %d, last block from syncer: %d", lb, lpb) - } - require.True(t, syncerUpToDate, errMsg) + waitForSyncerToCatchUp(ctx, t, syncer, client) // Assert rollup exit root expectedRollupExitRoot, err := verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) @@ -227,6 +341,39 @@ func TestStressAndReorgs(t *testing.T) { info, err := syncer.GetInfoByIndex(ctx, lastRoot.Index) require.NoError(t, err, fmt.Sprintf("index: %d", lastRoot.Index)) - require.Equal(t, common.Hash(expectedL1InfoRoot), lastRoot.Hash) + t.Logf("expectedL1InfoRoot: %s", common.Hash(expectedL1InfoRoot).String()) require.Equal(t, common.Hash(expectedGER), info.GlobalExitRoot, fmt.Sprintf("%+v", info)) + require.Equal(t, common.Hash(expectedL1InfoRoot), lastRoot.Hash) +} + +func waitForSyncerToCatchUp(ctx context.Context, t *testing.T, syncer *l1infotreesync.L1InfoTreeSync, client *simulated.Backend) { + t.Helper() + + syncerUpToDate := false + var errMsg string + + for i := 0; i < 200; i++ { + lpb, err := syncer.GetLastProcessedBlock(ctx) + require.NoError(t, err) + lb, err := client.Client().BlockNumber(ctx) + require.NoError(t, err) + if lpb == lb { + syncerUpToDate = true + 
break + } + time.Sleep(time.Second / 2) + errMsg = fmt.Sprintf("last block from client: %d, last block from syncer: %d", lb, lpb) + } + + require.True(t, syncerUpToDate, errMsg) +} + +// commitBlocks commits the specified number of blocks with the given client and waits for the specified duration after each block +func commitBlocks(t *testing.T, client *simulated.Backend, numBlocks int, waitDuration time.Duration) { + t.Helper() + + for i := 0; i < numBlocks; i++ { + client.Commit() + time.Sleep(waitDuration) + } } diff --git a/l1infotreesync/l1infotreesync.go b/l1infotreesync/l1infotreesync.go index 546a8ead..a7e50128 100644 --- a/l1infotreesync/l1infotreesync.go +++ b/l1infotreesync/l1infotreesync.go @@ -5,6 +5,7 @@ import ( "errors" "time" + "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/sync" "github.com/0xPolygon/cdk/tree" @@ -12,9 +13,18 @@ import ( "github.com/ethereum/go-ethereum/common" ) +type CreationFlags uint64 + const ( reorgDetectorID = "l1infotreesync" downloadBufferSize = 1000 + // CreationFlags defitinion + FlagNone CreationFlags = 0 + FlagAllowWrongContractsAddrs CreationFlags = 1 << iota // Allow to set wrong contracts addresses +) + +var ( + ErrNotFound = errors.New("l1infotreesync: not found") ) type L1InfoTreeSync struct { @@ -36,6 +46,7 @@ func New( initialBlock uint64, retryAfterErrorPeriod time.Duration, maxRetryAttemptsAfterError int, + flags CreationFlags, ) (*L1InfoTreeSync, error) { processor, err := newProcessor(dbPath) if err != nil { @@ -59,7 +70,7 @@ func New( MaxRetryAttemptsAfterError: maxRetryAttemptsAfterError, } - appender, err := buildAppender(l1Client, globalExitRoot, rollupManager) + appender, err := buildAppender(l1Client, globalExitRoot, rollupManager, flags) if err != nil { return nil, err } @@ -111,10 +122,21 @@ func (s *L1InfoTreeSync) GetRollupExitTreeMerkleProof( return s.processor.rollupExitTree.GetProof(ctx, networkID-1, root) } +func translateError(err error) error { + 
if errors.Is(err, db.ErrNotFound) { + return ErrNotFound + } + return err +} + // GetLatestInfoUntilBlock returns the most recent L1InfoTreeLeaf that occurred before or at blockNum. // If the blockNum has not been processed yet the error ErrBlockNotProcessed will be returned +// It can returns next errors: +// - ErrBlockNotProcessed, +// - ErrNotFound func (s *L1InfoTreeSync) GetLatestInfoUntilBlock(ctx context.Context, blockNum uint64) (*L1InfoTreeLeaf, error) { - return s.processor.GetLatestInfoUntilBlock(ctx, blockNum) + leaf, err := s.processor.GetLatestInfoUntilBlock(ctx, blockNum) + return leaf, translateError(err) } // GetInfoByIndex returns the value of a leaf (not the hash) of the L1 info tree @@ -129,12 +151,12 @@ func (s *L1InfoTreeSync) GetL1InfoTreeRootByIndex(ctx context.Context, index uin // GetLastRollupExitRoot return the last rollup exit root processed func (s *L1InfoTreeSync) GetLastRollupExitRoot(ctx context.Context) (types.Root, error) { - return s.processor.rollupExitTree.GetLastRoot(ctx) + return s.processor.rollupExitTree.GetLastRoot(nil) } // GetLastL1InfoTreeRoot return the last root and index processed from the L1 Info tree func (s *L1InfoTreeSync) GetLastL1InfoTreeRoot(ctx context.Context) (types.Root, error) { - return s.processor.l1InfoTree.GetLastRoot(ctx) + return s.processor.l1InfoTree.GetLastRoot(nil) } // GetLastProcessedBlock return the last processed block @@ -149,5 +171,49 @@ func (s *L1InfoTreeSync) GetLocalExitRoot( return common.Hash{}, errors.New("network 0 is not a rollup, and it's not part of the rollup exit tree") } - return s.processor.rollupExitTree.GetLeaf(ctx, networkID-1, rollupExitRoot) + return s.processor.rollupExitTree.GetLeaf(nil, networkID-1, rollupExitRoot) +} + +func (s *L1InfoTreeSync) GetLastVerifiedBatches(rollupID uint32) (*VerifyBatches, error) { + return s.processor.GetLastVerifiedBatches(rollupID) +} + +func (s *L1InfoTreeSync) GetFirstVerifiedBatches(rollupID uint32) (*VerifyBatches, error) { + 
return s.processor.GetFirstVerifiedBatches(rollupID) +} + +func (s *L1InfoTreeSync) GetFirstVerifiedBatchesAfterBlock(rollupID uint32, blockNum uint64) (*VerifyBatches, error) { + return s.processor.GetFirstVerifiedBatchesAfterBlock(rollupID, blockNum) +} + +func (s *L1InfoTreeSync) GetFirstL1InfoWithRollupExitRoot(rollupExitRoot common.Hash) (*L1InfoTreeLeaf, error) { + return s.processor.GetFirstL1InfoWithRollupExitRoot(rollupExitRoot) +} + +func (s *L1InfoTreeSync) GetLastInfo() (*L1InfoTreeLeaf, error) { + return s.processor.GetLastInfo() +} + +func (s *L1InfoTreeSync) GetFirstInfo() (*L1InfoTreeLeaf, error) { + return s.processor.GetFirstInfo() +} + +func (s *L1InfoTreeSync) GetFirstInfoAfterBlock(blockNum uint64) (*L1InfoTreeLeaf, error) { + return s.processor.GetFirstInfoAfterBlock(blockNum) +} + +func (s *L1InfoTreeSync) GetInfoByGlobalExitRoot(ger common.Hash) (*L1InfoTreeLeaf, error) { + return s.processor.GetInfoByGlobalExitRoot(ger) +} + +// GetL1InfoTreeMerkleProofFromIndexToRoot creates a merkle proof for the L1 Info tree +func (s *L1InfoTreeSync) GetL1InfoTreeMerkleProofFromIndexToRoot( + ctx context.Context, index uint32, root common.Hash, +) (types.Proof, error) { + return s.processor.l1InfoTree.GetProof(ctx, index, root) +} + +// GetInitL1InfoRootMap returns the initial L1 info root map, nil if no root map has been set +func (s *L1InfoTreeSync) GetInitL1InfoRootMap(ctx context.Context) (*L1InfoTreeInitial, error) { + return s.processor.GetInitL1InfoRootMap(nil) } diff --git a/l1infotreesync/migrations/l1infotreesync0001.sql b/l1infotreesync/migrations/l1infotreesync0001.sql index 39a45dd4..7a689281 100644 --- a/l1infotreesync/migrations/l1infotreesync0001.sql +++ b/l1infotreesync/migrations/l1infotreesync0001.sql @@ -16,7 +16,19 @@ CREATE TABLE l1info_leaf ( timestamp INTEGER NOT NULL, mainnet_exit_root VARCHAR NOT NULL, rollup_exit_root VARCHAR NOT NULL, - global_exit_root VARCHAR NOT NULL, + global_exit_root VARCHAR NOT NULL UNIQUE, hash VARCHAR 
NOT NULL, PRIMARY KEY (block_num, block_pos) ); + +CREATE TABLE verify_batches ( + block_num INTEGER NOT NULL REFERENCES block(num) ON DELETE CASCADE, + block_pos INTEGER NOT NULL, + rollup_id INTEGER NOT NULL, + batch_num INTEGER NOT NULL, + state_root VARCHAR NOT NULL, + exit_root VARCHAR NOT NULL, + aggregator VARCHAR NOT NULL, + rollup_exit_root VARCHAR NOT NULL, + PRIMARY KEY (block_num, block_pos) +); diff --git a/l1infotreesync/migrations/l1infotreesync0002.sql b/l1infotreesync/migrations/l1infotreesync0002.sql new file mode 100644 index 00000000..d1f09481 --- /dev/null +++ b/l1infotreesync/migrations/l1infotreesync0002.sql @@ -0,0 +1,14 @@ +-- +migrate Down +DROP TABLE IF EXISTS l1info_initial; + +-- +migrate Up + +CREATE TABLE l1info_initial ( + -- single_row_id prevent to have more than 1 row in this table + single_row_id INTEGER check(single_row_id=1) NOT NULL DEFAULT 1, + block_num INTEGER NOT NULL REFERENCES block(num) ON DELETE CASCADE, + leaf_count INTEGER NOT NULL, + l1_info_root VARCHAR NOT NULL, + PRIMARY KEY (single_row_id) +); + diff --git a/l1infotreesync/migrations/migrations.go b/l1infotreesync/migrations/migrations.go index 768dde37..47fac070 100644 --- a/l1infotreesync/migrations/migrations.go +++ b/l1infotreesync/migrations/migrations.go @@ -16,12 +16,19 @@ const ( //go:embed l1infotreesync0001.sql var mig001 string +//go:embed l1infotreesync0002.sql +var mig002 string + func RunMigrations(dbPath string) error { migrations := []types.Migration{ { ID: "l1infotreesync0001", SQL: mig001, }, + { + ID: "l1infotreesync0002", + SQL: mig002, + }, } for _, tm := range treeMigrations.Migrations { migrations = append(migrations, types.Migration{ diff --git a/l1infotreesync/mock_reorgdetector_test.go b/l1infotreesync/mock_reorgdetector_test.go index 8255443e..18ac7bc8 100644 --- a/l1infotreesync/mock_reorgdetector_test.go +++ b/l1infotreesync/mock_reorgdetector_test.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.40.1. DO NOT EDIT. 
+// Code generated by mockery v2.39.0. DO NOT EDIT. package l1infotreesync diff --git a/l1infotreesync/mocks/eth_clienter.go b/l1infotreesync/mocks/eth_clienter.go new file mode 100644 index 00000000..270c40d9 --- /dev/null +++ b/l1infotreesync/mocks/eth_clienter.go @@ -0,0 +1,1086 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks_l1infotreesync + +import ( + context "context" + big "math/big" + + common "github.com/ethereum/go-ethereum/common" + + ethereum "github.com/ethereum/go-ethereum" + + mock "github.com/stretchr/testify/mock" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// EthClienter is an autogenerated mock type for the EthClienter type +type EthClienter struct { + mock.Mock +} + +type EthClienter_Expecter struct { + mock *mock.Mock +} + +func (_m *EthClienter) EXPECT() *EthClienter_Expecter { + return &EthClienter_Expecter{mock: &_m.Mock} +} + +// BlockByHash provides a mock function with given fields: ctx, hash +func (_m *EthClienter) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { + ret := _m.Called(ctx, hash) + + if len(ret) == 0 { + panic("no return value specified for BlockByHash") + } + + var r0 *types.Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Block, error)); ok { + return rf(ctx, hash) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Block); ok { + r0 = rf(ctx, hash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, hash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_BlockByHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockByHash' +type EthClienter_BlockByHash_Call struct { + *mock.Call +} + +// BlockByHash is a helper method to define mock.On call +// - ctx context.Context +// - hash common.Hash +func (_e 
*EthClienter_Expecter) BlockByHash(ctx interface{}, hash interface{}) *EthClienter_BlockByHash_Call { + return &EthClienter_BlockByHash_Call{Call: _e.mock.On("BlockByHash", ctx, hash)} +} + +func (_c *EthClienter_BlockByHash_Call) Run(run func(ctx context.Context, hash common.Hash)) *EthClienter_BlockByHash_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash)) + }) + return _c +} + +func (_c *EthClienter_BlockByHash_Call) Return(_a0 *types.Block, _a1 error) *EthClienter_BlockByHash_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_BlockByHash_Call) RunAndReturn(run func(context.Context, common.Hash) (*types.Block, error)) *EthClienter_BlockByHash_Call { + _c.Call.Return(run) + return _c +} + +// BlockByNumber provides a mock function with given fields: ctx, number +func (_m *EthClienter) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { + ret := _m.Called(ctx, number) + + if len(ret) == 0 { + panic("no return value specified for BlockByNumber") + } + + var r0 *types.Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*types.Block, error)); ok { + return rf(ctx, number) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *types.Block); ok { + r0 = rf(ctx, number) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, number) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_BlockByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockByNumber' +type EthClienter_BlockByNumber_Call struct { + *mock.Call +} + +// BlockByNumber is a helper method to define mock.On call +// - ctx context.Context +// - number *big.Int +func (_e *EthClienter_Expecter) BlockByNumber(ctx interface{}, number interface{}) *EthClienter_BlockByNumber_Call { + return 
&EthClienter_BlockByNumber_Call{Call: _e.mock.On("BlockByNumber", ctx, number)} +} + +func (_c *EthClienter_BlockByNumber_Call) Run(run func(ctx context.Context, number *big.Int)) *EthClienter_BlockByNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*big.Int)) + }) + return _c +} + +func (_c *EthClienter_BlockByNumber_Call) Return(_a0 *types.Block, _a1 error) *EthClienter_BlockByNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_BlockByNumber_Call) RunAndReturn(run func(context.Context, *big.Int) (*types.Block, error)) *EthClienter_BlockByNumber_Call { + _c.Call.Return(run) + return _c +} + +// BlockNumber provides a mock function with given fields: ctx +func (_m *EthClienter) BlockNumber(ctx context.Context) (uint64, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for BlockNumber") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_BlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockNumber' +type EthClienter_BlockNumber_Call struct { + *mock.Call +} + +// BlockNumber is a helper method to define mock.On call +// - ctx context.Context +func (_e *EthClienter_Expecter) BlockNumber(ctx interface{}) *EthClienter_BlockNumber_Call { + return &EthClienter_BlockNumber_Call{Call: _e.mock.On("BlockNumber", ctx)} +} + +func (_c *EthClienter_BlockNumber_Call) Run(run func(ctx context.Context)) *EthClienter_BlockNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c 
*EthClienter_BlockNumber_Call) Return(_a0 uint64, _a1 error) *EthClienter_BlockNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_BlockNumber_Call) RunAndReturn(run func(context.Context) (uint64, error)) *EthClienter_BlockNumber_Call { + _c.Call.Return(run) + return _c +} + +// CallContract provides a mock function with given fields: ctx, call, blockNumber +func (_m *EthClienter) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { + ret := _m.Called(ctx, call, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for CallContract") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg, *big.Int) ([]byte, error)); ok { + return rf(ctx, call, blockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg, *big.Int) []byte); ok { + r0 = rf(ctx, call, blockNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ethereum.CallMsg, *big.Int) error); ok { + r1 = rf(ctx, call, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_CallContract_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CallContract' +type EthClienter_CallContract_Call struct { + *mock.Call +} + +// CallContract is a helper method to define mock.On call +// - ctx context.Context +// - call ethereum.CallMsg +// - blockNumber *big.Int +func (_e *EthClienter_Expecter) CallContract(ctx interface{}, call interface{}, blockNumber interface{}) *EthClienter_CallContract_Call { + return &EthClienter_CallContract_Call{Call: _e.mock.On("CallContract", ctx, call, blockNumber)} +} + +func (_c *EthClienter_CallContract_Call) Run(run func(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int)) *EthClienter_CallContract_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), 
args[1].(ethereum.CallMsg), args[2].(*big.Int)) + }) + return _c +} + +func (_c *EthClienter_CallContract_Call) Return(_a0 []byte, _a1 error) *EthClienter_CallContract_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_CallContract_Call) RunAndReturn(run func(context.Context, ethereum.CallMsg, *big.Int) ([]byte, error)) *EthClienter_CallContract_Call { + _c.Call.Return(run) + return _c +} + +// CodeAt provides a mock function with given fields: ctx, contract, blockNumber +func (_m *EthClienter) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) { + ret := _m.Called(ctx, contract, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for CodeAt") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) ([]byte, error)); ok { + return rf(ctx, contract, blockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) []byte); ok { + r0 = rf(ctx, contract, blockNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address, *big.Int) error); ok { + r1 = rf(ctx, contract, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_CodeAt_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CodeAt' +type EthClienter_CodeAt_Call struct { + *mock.Call +} + +// CodeAt is a helper method to define mock.On call +// - ctx context.Context +// - contract common.Address +// - blockNumber *big.Int +func (_e *EthClienter_Expecter) CodeAt(ctx interface{}, contract interface{}, blockNumber interface{}) *EthClienter_CodeAt_Call { + return &EthClienter_CodeAt_Call{Call: _e.mock.On("CodeAt", ctx, contract, blockNumber)} +} + +func (_c *EthClienter_CodeAt_Call) Run(run func(ctx context.Context, contract common.Address, blockNumber *big.Int)) *EthClienter_CodeAt_Call { + 
_c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Address), args[2].(*big.Int)) + }) + return _c +} + +func (_c *EthClienter_CodeAt_Call) Return(_a0 []byte, _a1 error) *EthClienter_CodeAt_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_CodeAt_Call) RunAndReturn(run func(context.Context, common.Address, *big.Int) ([]byte, error)) *EthClienter_CodeAt_Call { + _c.Call.Return(run) + return _c +} + +// EstimateGas provides a mock function with given fields: ctx, call +func (_m *EthClienter) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) { + ret := _m.Called(ctx, call) + + if len(ret) == 0 { + panic("no return value specified for EstimateGas") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg) (uint64, error)); ok { + return rf(ctx, call) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg) uint64); ok { + r0 = rf(ctx, call) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, ethereum.CallMsg) error); ok { + r1 = rf(ctx, call) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_EstimateGas_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EstimateGas' +type EthClienter_EstimateGas_Call struct { + *mock.Call +} + +// EstimateGas is a helper method to define mock.On call +// - ctx context.Context +// - call ethereum.CallMsg +func (_e *EthClienter_Expecter) EstimateGas(ctx interface{}, call interface{}) *EthClienter_EstimateGas_Call { + return &EthClienter_EstimateGas_Call{Call: _e.mock.On("EstimateGas", ctx, call)} +} + +func (_c *EthClienter_EstimateGas_Call) Run(run func(ctx context.Context, call ethereum.CallMsg)) *EthClienter_EstimateGas_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(ethereum.CallMsg)) + }) + return _c +} + +func (_c *EthClienter_EstimateGas_Call) 
Return(_a0 uint64, _a1 error) *EthClienter_EstimateGas_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_EstimateGas_Call) RunAndReturn(run func(context.Context, ethereum.CallMsg) (uint64, error)) *EthClienter_EstimateGas_Call { + _c.Call.Return(run) + return _c +} + +// FilterLogs provides a mock function with given fields: ctx, q +func (_m *EthClienter) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) { + ret := _m.Called(ctx, q) + + if len(ret) == 0 { + panic("no return value specified for FilterLogs") + } + + var r0 []types.Log + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery) ([]types.Log, error)); ok { + return rf(ctx, q) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery) []types.Log); ok { + r0 = rf(ctx, q) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]types.Log) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ethereum.FilterQuery) error); ok { + r1 = rf(ctx, q) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_FilterLogs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FilterLogs' +type EthClienter_FilterLogs_Call struct { + *mock.Call +} + +// FilterLogs is a helper method to define mock.On call +// - ctx context.Context +// - q ethereum.FilterQuery +func (_e *EthClienter_Expecter) FilterLogs(ctx interface{}, q interface{}) *EthClienter_FilterLogs_Call { + return &EthClienter_FilterLogs_Call{Call: _e.mock.On("FilterLogs", ctx, q)} +} + +func (_c *EthClienter_FilterLogs_Call) Run(run func(ctx context.Context, q ethereum.FilterQuery)) *EthClienter_FilterLogs_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(ethereum.FilterQuery)) + }) + return _c +} + +func (_c *EthClienter_FilterLogs_Call) Return(_a0 []types.Log, _a1 error) *EthClienter_FilterLogs_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c 
*EthClienter_FilterLogs_Call) RunAndReturn(run func(context.Context, ethereum.FilterQuery) ([]types.Log, error)) *EthClienter_FilterLogs_Call { + _c.Call.Return(run) + return _c +} + +// HeaderByHash provides a mock function with given fields: ctx, hash +func (_m *EthClienter) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { + ret := _m.Called(ctx, hash) + + if len(ret) == 0 { + panic("no return value specified for HeaderByHash") + } + + var r0 *types.Header + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Header, error)); ok { + return rf(ctx, hash) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Header); ok { + r0 = rf(ctx, hash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Header) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, hash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_HeaderByHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeaderByHash' +type EthClienter_HeaderByHash_Call struct { + *mock.Call +} + +// HeaderByHash is a helper method to define mock.On call +// - ctx context.Context +// - hash common.Hash +func (_e *EthClienter_Expecter) HeaderByHash(ctx interface{}, hash interface{}) *EthClienter_HeaderByHash_Call { + return &EthClienter_HeaderByHash_Call{Call: _e.mock.On("HeaderByHash", ctx, hash)} +} + +func (_c *EthClienter_HeaderByHash_Call) Run(run func(ctx context.Context, hash common.Hash)) *EthClienter_HeaderByHash_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash)) + }) + return _c +} + +func (_c *EthClienter_HeaderByHash_Call) Return(_a0 *types.Header, _a1 error) *EthClienter_HeaderByHash_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_HeaderByHash_Call) RunAndReturn(run func(context.Context, common.Hash) (*types.Header, error)) 
*EthClienter_HeaderByHash_Call { + _c.Call.Return(run) + return _c +} + +// HeaderByNumber provides a mock function with given fields: ctx, number +func (_m *EthClienter) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) { + ret := _m.Called(ctx, number) + + if len(ret) == 0 { + panic("no return value specified for HeaderByNumber") + } + + var r0 *types.Header + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*types.Header, error)); ok { + return rf(ctx, number) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *types.Header); ok { + r0 = rf(ctx, number) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Header) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, number) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_HeaderByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeaderByNumber' +type EthClienter_HeaderByNumber_Call struct { + *mock.Call +} + +// HeaderByNumber is a helper method to define mock.On call +// - ctx context.Context +// - number *big.Int +func (_e *EthClienter_Expecter) HeaderByNumber(ctx interface{}, number interface{}) *EthClienter_HeaderByNumber_Call { + return &EthClienter_HeaderByNumber_Call{Call: _e.mock.On("HeaderByNumber", ctx, number)} +} + +func (_c *EthClienter_HeaderByNumber_Call) Run(run func(ctx context.Context, number *big.Int)) *EthClienter_HeaderByNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*big.Int)) + }) + return _c +} + +func (_c *EthClienter_HeaderByNumber_Call) Return(_a0 *types.Header, _a1 error) *EthClienter_HeaderByNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_HeaderByNumber_Call) RunAndReturn(run func(context.Context, *big.Int) (*types.Header, error)) *EthClienter_HeaderByNumber_Call { + _c.Call.Return(run) + return _c +} + +// 
PendingCodeAt provides a mock function with given fields: ctx, account +func (_m *EthClienter) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) { + ret := _m.Called(ctx, account) + + if len(ret) == 0 { + panic("no return value specified for PendingCodeAt") + } + + var r0 []byte + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address) ([]byte, error)); ok { + return rf(ctx, account) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address) []byte); ok { + r0 = rf(ctx, account) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address) error); ok { + r1 = rf(ctx, account) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_PendingCodeAt_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PendingCodeAt' +type EthClienter_PendingCodeAt_Call struct { + *mock.Call +} + +// PendingCodeAt is a helper method to define mock.On call +// - ctx context.Context +// - account common.Address +func (_e *EthClienter_Expecter) PendingCodeAt(ctx interface{}, account interface{}) *EthClienter_PendingCodeAt_Call { + return &EthClienter_PendingCodeAt_Call{Call: _e.mock.On("PendingCodeAt", ctx, account)} +} + +func (_c *EthClienter_PendingCodeAt_Call) Run(run func(ctx context.Context, account common.Address)) *EthClienter_PendingCodeAt_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Address)) + }) + return _c +} + +func (_c *EthClienter_PendingCodeAt_Call) Return(_a0 []byte, _a1 error) *EthClienter_PendingCodeAt_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_PendingCodeAt_Call) RunAndReturn(run func(context.Context, common.Address) ([]byte, error)) *EthClienter_PendingCodeAt_Call { + _c.Call.Return(run) + return _c +} + +// PendingNonceAt provides a mock function with given fields: ctx, account +func (_m 
*EthClienter) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) { + ret := _m.Called(ctx, account) + + if len(ret) == 0 { + panic("no return value specified for PendingNonceAt") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address) (uint64, error)); ok { + return rf(ctx, account) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address) uint64); ok { + r0 = rf(ctx, account) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address) error); ok { + r1 = rf(ctx, account) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_PendingNonceAt_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PendingNonceAt' +type EthClienter_PendingNonceAt_Call struct { + *mock.Call +} + +// PendingNonceAt is a helper method to define mock.On call +// - ctx context.Context +// - account common.Address +func (_e *EthClienter_Expecter) PendingNonceAt(ctx interface{}, account interface{}) *EthClienter_PendingNonceAt_Call { + return &EthClienter_PendingNonceAt_Call{Call: _e.mock.On("PendingNonceAt", ctx, account)} +} + +func (_c *EthClienter_PendingNonceAt_Call) Run(run func(ctx context.Context, account common.Address)) *EthClienter_PendingNonceAt_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Address)) + }) + return _c +} + +func (_c *EthClienter_PendingNonceAt_Call) Return(_a0 uint64, _a1 error) *EthClienter_PendingNonceAt_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_PendingNonceAt_Call) RunAndReturn(run func(context.Context, common.Address) (uint64, error)) *EthClienter_PendingNonceAt_Call { + _c.Call.Return(run) + return _c +} + +// SendTransaction provides a mock function with given fields: ctx, tx +func (_m *EthClienter) SendTransaction(ctx context.Context, tx *types.Transaction) error { + ret := _m.Called(ctx, tx) 
+ + if len(ret) == 0 { + panic("no return value specified for SendTransaction") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *types.Transaction) error); ok { + r0 = rf(ctx, tx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// EthClienter_SendTransaction_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SendTransaction' +type EthClienter_SendTransaction_Call struct { + *mock.Call +} + +// SendTransaction is a helper method to define mock.On call +// - ctx context.Context +// - tx *types.Transaction +func (_e *EthClienter_Expecter) SendTransaction(ctx interface{}, tx interface{}) *EthClienter_SendTransaction_Call { + return &EthClienter_SendTransaction_Call{Call: _e.mock.On("SendTransaction", ctx, tx)} +} + +func (_c *EthClienter_SendTransaction_Call) Run(run func(ctx context.Context, tx *types.Transaction)) *EthClienter_SendTransaction_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*types.Transaction)) + }) + return _c +} + +func (_c *EthClienter_SendTransaction_Call) Return(_a0 error) *EthClienter_SendTransaction_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *EthClienter_SendTransaction_Call) RunAndReturn(run func(context.Context, *types.Transaction) error) *EthClienter_SendTransaction_Call { + _c.Call.Return(run) + return _c +} + +// SubscribeFilterLogs provides a mock function with given fields: ctx, q, ch +func (_m *EthClienter) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) { + ret := _m.Called(ctx, q, ch) + + if len(ret) == 0 { + panic("no return value specified for SubscribeFilterLogs") + } + + var r0 ethereum.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery, chan<- types.Log) (ethereum.Subscription, error)); ok { + return rf(ctx, q, ch) + } + if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery, 
chan<- types.Log) ethereum.Subscription); ok { + r0 = rf(ctx, q, ch) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(ethereum.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ethereum.FilterQuery, chan<- types.Log) error); ok { + r1 = rf(ctx, q, ch) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_SubscribeFilterLogs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeFilterLogs' +type EthClienter_SubscribeFilterLogs_Call struct { + *mock.Call +} + +// SubscribeFilterLogs is a helper method to define mock.On call +// - ctx context.Context +// - q ethereum.FilterQuery +// - ch chan<- types.Log +func (_e *EthClienter_Expecter) SubscribeFilterLogs(ctx interface{}, q interface{}, ch interface{}) *EthClienter_SubscribeFilterLogs_Call { + return &EthClienter_SubscribeFilterLogs_Call{Call: _e.mock.On("SubscribeFilterLogs", ctx, q, ch)} +} + +func (_c *EthClienter_SubscribeFilterLogs_Call) Run(run func(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log)) *EthClienter_SubscribeFilterLogs_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(ethereum.FilterQuery), args[2].(chan<- types.Log)) + }) + return _c +} + +func (_c *EthClienter_SubscribeFilterLogs_Call) Return(_a0 ethereum.Subscription, _a1 error) *EthClienter_SubscribeFilterLogs_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_SubscribeFilterLogs_Call) RunAndReturn(run func(context.Context, ethereum.FilterQuery, chan<- types.Log) (ethereum.Subscription, error)) *EthClienter_SubscribeFilterLogs_Call { + _c.Call.Return(run) + return _c +} + +// SubscribeNewHead provides a mock function with given fields: ctx, ch +func (_m *EthClienter) SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (ethereum.Subscription, error) { + ret := _m.Called(ctx, ch) + + if len(ret) == 0 { + panic("no return value specified for SubscribeNewHead") + } 
+ + var r0 ethereum.Subscription + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, chan<- *types.Header) (ethereum.Subscription, error)); ok { + return rf(ctx, ch) + } + if rf, ok := ret.Get(0).(func(context.Context, chan<- *types.Header) ethereum.Subscription); ok { + r0 = rf(ctx, ch) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(ethereum.Subscription) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, chan<- *types.Header) error); ok { + r1 = rf(ctx, ch) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_SubscribeNewHead_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeNewHead' +type EthClienter_SubscribeNewHead_Call struct { + *mock.Call +} + +// SubscribeNewHead is a helper method to define mock.On call +// - ctx context.Context +// - ch chan<- *types.Header +func (_e *EthClienter_Expecter) SubscribeNewHead(ctx interface{}, ch interface{}) *EthClienter_SubscribeNewHead_Call { + return &EthClienter_SubscribeNewHead_Call{Call: _e.mock.On("SubscribeNewHead", ctx, ch)} +} + +func (_c *EthClienter_SubscribeNewHead_Call) Run(run func(ctx context.Context, ch chan<- *types.Header)) *EthClienter_SubscribeNewHead_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(chan<- *types.Header)) + }) + return _c +} + +func (_c *EthClienter_SubscribeNewHead_Call) Return(_a0 ethereum.Subscription, _a1 error) *EthClienter_SubscribeNewHead_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_SubscribeNewHead_Call) RunAndReturn(run func(context.Context, chan<- *types.Header) (ethereum.Subscription, error)) *EthClienter_SubscribeNewHead_Call { + _c.Call.Return(run) + return _c +} + +// SuggestGasPrice provides a mock function with given fields: ctx +func (_m *EthClienter) SuggestGasPrice(ctx context.Context) (*big.Int, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for 
SuggestGasPrice") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_SuggestGasPrice_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SuggestGasPrice' +type EthClienter_SuggestGasPrice_Call struct { + *mock.Call +} + +// SuggestGasPrice is a helper method to define mock.On call +// - ctx context.Context +func (_e *EthClienter_Expecter) SuggestGasPrice(ctx interface{}) *EthClienter_SuggestGasPrice_Call { + return &EthClienter_SuggestGasPrice_Call{Call: _e.mock.On("SuggestGasPrice", ctx)} +} + +func (_c *EthClienter_SuggestGasPrice_Call) Run(run func(ctx context.Context)) *EthClienter_SuggestGasPrice_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *EthClienter_SuggestGasPrice_Call) Return(_a0 *big.Int, _a1 error) *EthClienter_SuggestGasPrice_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_SuggestGasPrice_Call) RunAndReturn(run func(context.Context) (*big.Int, error)) *EthClienter_SuggestGasPrice_Call { + _c.Call.Return(run) + return _c +} + +// SuggestGasTipCap provides a mock function with given fields: ctx +func (_m *EthClienter) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for SuggestGasTipCap") + } + + var r0 *big.Int + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(*big.Int) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_SuggestGasTipCap_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SuggestGasTipCap' +type EthClienter_SuggestGasTipCap_Call struct { + *mock.Call +} + +// SuggestGasTipCap is a helper method to define mock.On call +// - ctx context.Context +func (_e *EthClienter_Expecter) SuggestGasTipCap(ctx interface{}) *EthClienter_SuggestGasTipCap_Call { + return &EthClienter_SuggestGasTipCap_Call{Call: _e.mock.On("SuggestGasTipCap", ctx)} +} + +func (_c *EthClienter_SuggestGasTipCap_Call) Run(run func(ctx context.Context)) *EthClienter_SuggestGasTipCap_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *EthClienter_SuggestGasTipCap_Call) Return(_a0 *big.Int, _a1 error) *EthClienter_SuggestGasTipCap_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_SuggestGasTipCap_Call) RunAndReturn(run func(context.Context) (*big.Int, error)) *EthClienter_SuggestGasTipCap_Call { + _c.Call.Return(run) + return _c +} + +// TransactionCount provides a mock function with given fields: ctx, blockHash +func (_m *EthClienter) TransactionCount(ctx context.Context, blockHash common.Hash) (uint, error) { + ret := _m.Called(ctx, blockHash) + + if len(ret) == 0 { + panic("no return value specified for TransactionCount") + } + + var r0 uint + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (uint, error)); ok { + return rf(ctx, blockHash) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) uint); ok { + r0 = rf(ctx, blockHash) + } else { + r0 = ret.Get(0).(uint) + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, blockHash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// 
EthClienter_TransactionCount_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TransactionCount' +type EthClienter_TransactionCount_Call struct { + *mock.Call +} + +// TransactionCount is a helper method to define mock.On call +// - ctx context.Context +// - blockHash common.Hash +func (_e *EthClienter_Expecter) TransactionCount(ctx interface{}, blockHash interface{}) *EthClienter_TransactionCount_Call { + return &EthClienter_TransactionCount_Call{Call: _e.mock.On("TransactionCount", ctx, blockHash)} +} + +func (_c *EthClienter_TransactionCount_Call) Run(run func(ctx context.Context, blockHash common.Hash)) *EthClienter_TransactionCount_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash)) + }) + return _c +} + +func (_c *EthClienter_TransactionCount_Call) Return(_a0 uint, _a1 error) *EthClienter_TransactionCount_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_TransactionCount_Call) RunAndReturn(run func(context.Context, common.Hash) (uint, error)) *EthClienter_TransactionCount_Call { + _c.Call.Return(run) + return _c +} + +// TransactionInBlock provides a mock function with given fields: ctx, blockHash, index +func (_m *EthClienter) TransactionInBlock(ctx context.Context, blockHash common.Hash, index uint) (*types.Transaction, error) { + ret := _m.Called(ctx, blockHash, index) + + if len(ret) == 0 { + panic("no return value specified for TransactionInBlock") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, uint) (*types.Transaction, error)); ok { + return rf(ctx, blockHash, index) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash, uint) *types.Transaction); ok { + r0 = rf(ctx, blockHash, index) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash, uint) error); ok { + r1 = rf(ctx, 
blockHash, index) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthClienter_TransactionInBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TransactionInBlock' +type EthClienter_TransactionInBlock_Call struct { + *mock.Call +} + +// TransactionInBlock is a helper method to define mock.On call +// - ctx context.Context +// - blockHash common.Hash +// - index uint +func (_e *EthClienter_Expecter) TransactionInBlock(ctx interface{}, blockHash interface{}, index interface{}) *EthClienter_TransactionInBlock_Call { + return &EthClienter_TransactionInBlock_Call{Call: _e.mock.On("TransactionInBlock", ctx, blockHash, index)} +} + +func (_c *EthClienter_TransactionInBlock_Call) Run(run func(ctx context.Context, blockHash common.Hash, index uint)) *EthClienter_TransactionInBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash), args[2].(uint)) + }) + return _c +} + +func (_c *EthClienter_TransactionInBlock_Call) Return(_a0 *types.Transaction, _a1 error) *EthClienter_TransactionInBlock_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthClienter_TransactionInBlock_Call) RunAndReturn(run func(context.Context, common.Hash, uint) (*types.Transaction, error)) *EthClienter_TransactionInBlock_Call { + _c.Call.Return(run) + return _c +} + +// NewEthClienter creates a new instance of EthClienter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewEthClienter(t interface { + mock.TestingT + Cleanup(func()) +}) *EthClienter { + mock := &EthClienter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/l1infotreesync/processor.go b/l1infotreesync/processor.go index c76d7aac..e7115a60 100644 --- a/l1infotreesync/processor.go +++ b/l1infotreesync/processor.go @@ -13,7 +13,7 @@ import ( "github.com/0xPolygon/cdk/sync" "github.com/0xPolygon/cdk/tree" treeTypes "github.com/0xPolygon/cdk/tree/types" - ethCommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common" "github.com/iden3/go-iden3-crypto/keccak256" "github.com/russross/meddler" "golang.org/x/crypto/sha3" @@ -21,7 +21,6 @@ import ( var ( ErrBlockNotProcessed = errors.New("given block(s) have not been processed yet") - ErrNotFound = errors.New("not found") ErrNoBlock0 = errors.New("blockNum must be greater than 0") ) @@ -34,25 +33,40 @@ type processor struct { // UpdateL1InfoTree representation of the UpdateL1InfoTree event type UpdateL1InfoTree struct { BlockPosition uint64 - MainnetExitRoot ethCommon.Hash - RollupExitRoot ethCommon.Hash - ParentHash ethCommon.Hash + MainnetExitRoot common.Hash + RollupExitRoot common.Hash + ParentHash common.Hash Timestamp uint64 } // VerifyBatches representation of the VerifyBatches and VerifyBatchesTrustedAggregator events type VerifyBatches struct { - BlockPosition uint64 - RollupID uint32 - NumBatch uint64 - StateRoot ethCommon.Hash - ExitRoot ethCommon.Hash - Aggregator ethCommon.Address + BlockNumber uint64 `meddler:"block_num"` + BlockPosition uint64 `meddler:"block_pos"` + RollupID uint32 `meddler:"rollup_id"` + NumBatch uint64 `meddler:"batch_num"` + StateRoot common.Hash `meddler:"state_root,hash"` + ExitRoot common.Hash `meddler:"exit_root,hash"` + Aggregator common.Address `meddler:"aggregator,address"` + + // Not provided by downloader + RollupExitRoot common.Hash `meddler:"rollup_exit_root,hash"` +} + +func (v 
*VerifyBatches) String() string { + return fmt.Sprintf("BlockNumber: %d, BlockPosition: %d, RollupID: %d, NumBatch: %d, StateRoot: %s, "+ + "ExitRoot: %s, Aggregator: %s, RollupExitRoot: %s", + v.BlockNumber, v.BlockPosition, v.RollupID, v.NumBatch, v.StateRoot.String(), + v.ExitRoot.String(), v.Aggregator.String(), v.RollupExitRoot.String()) } type InitL1InfoRootMap struct { LeafCount uint32 - CurrentL1InfoRoot ethCommon.Hash + CurrentL1InfoRoot common.Hash +} + +func (i *InitL1InfoRootMap) String() string { + return fmt.Sprintf("LeafCount: %d, CurrentL1InfoRoot: %s", i.LeafCount, i.CurrentL1InfoRoot.String()) } type Event struct { @@ -63,20 +77,38 @@ type Event struct { // L1InfoTreeLeaf representation of a leaf of the L1 Info tree type L1InfoTreeLeaf struct { - BlockNumber uint64 `meddler:"block_num"` - BlockPosition uint64 `meddler:"block_pos"` - L1InfoTreeIndex uint32 `meddler:"position"` - PreviousBlockHash ethCommon.Hash `meddler:"previous_block_hash,hash"` - Timestamp uint64 `meddler:"timestamp"` - MainnetExitRoot ethCommon.Hash `meddler:"mainnet_exit_root,hash"` - RollupExitRoot ethCommon.Hash `meddler:"rollup_exit_root,hash"` - GlobalExitRoot ethCommon.Hash `meddler:"global_exit_root,hash"` - Hash ethCommon.Hash `meddler:"hash,hash"` + BlockNumber uint64 `meddler:"block_num"` + BlockPosition uint64 `meddler:"block_pos"` + L1InfoTreeIndex uint32 `meddler:"position"` + PreviousBlockHash common.Hash `meddler:"previous_block_hash,hash"` + Timestamp uint64 `meddler:"timestamp"` + MainnetExitRoot common.Hash `meddler:"mainnet_exit_root,hash"` + RollupExitRoot common.Hash `meddler:"rollup_exit_root,hash"` + GlobalExitRoot common.Hash `meddler:"global_exit_root,hash"` + Hash common.Hash `meddler:"hash,hash"` +} + +func (l *L1InfoTreeLeaf) String() string { + return fmt.Sprintf("BlockNumber: %d, BlockPosition: %d, L1InfoTreeIndex: %d, PreviousBlockHash: %s, "+ + "Timestamp: %d, MainnetExitRoot: %s, RollupExitRoot: %s, GlobalExitRoot: %s, Hash: %s", + 
l.BlockNumber, l.BlockPosition, l.L1InfoTreeIndex, l.PreviousBlockHash.String(), + l.Timestamp, l.MainnetExitRoot.String(), l.RollupExitRoot.String(), l.GlobalExitRoot.String(), l.Hash.String()) +} + +// L1InfoTreeInitial representation of the initial info of the L1 Info tree for this rollup +type L1InfoTreeInitial struct { + BlockNumber uint64 `meddler:"block_num"` + LeafCount uint32 `meddler:"leaf_count"` + L1InfoRoot common.Hash `meddler:"l1_info_root,hash"` +} + +func (l *L1InfoTreeInitial) String() string { + return fmt.Sprintf("BlockNumber: %d, LeafCount: %d, L1InfoRoot: %s", l.BlockNumber, l.LeafCount, l.L1InfoRoot.String()) } // Hash as expected by the tree -func (l *L1InfoTreeLeaf) hash() ethCommon.Hash { - var res [32]byte +func (l *L1InfoTreeLeaf) hash() common.Hash { + var res [treeTypes.DefaultHeight]byte t := make([]byte, 8) //nolint:mnd binary.BigEndian.PutUint64(t, l.Timestamp) copy(res[:], keccak256.Hash(l.globalExitRoot().Bytes(), l.PreviousBlockHash.Bytes(), t)) @@ -84,8 +116,8 @@ func (l *L1InfoTreeLeaf) hash() ethCommon.Hash { } // GlobalExitRoot returns the GER -func (l *L1InfoTreeLeaf) globalExitRoot() ethCommon.Hash { - var gerBytes [32]byte +func (l *L1InfoTreeLeaf) globalExitRoot() common.Hash { + var gerBytes [treeTypes.DefaultHeight]byte hasher := sha3.NewLegacyKeccak256() hasher.Write(l.MainnetExitRoot[:]) hasher.Write(l.RollupExitRoot[:]) @@ -153,7 +185,7 @@ func (p *processor) GetLatestInfoUntilBlock(ctx context.Context, blockNum uint64 ) if err != nil { if errors.Is(err, sql.ErrNoRows) { - return nil, ErrNotFound + return nil, db.ErrNotFound } return nil, err } @@ -216,16 +248,12 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { return err } - if err := tx.Commit(); err != nil { - return err - } - - return nil + return tx.Commit() } // ProcessBlock process the events of the block to build the rollup exit tree and the l1 info tree // and updates the last processed block (can be called without events 
for that purpose) -func (p *processor) ProcessBlock(ctx context.Context, b sync.Block) error { +func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { tx, err := db.NewTx(ctx, p.db) if err != nil { return err @@ -238,8 +266,8 @@ func (p *processor) ProcessBlock(ctx context.Context, b sync.Block) error { } }() - if _, err := tx.Exec(`INSERT INTO block (num) VALUES ($1)`, b.Num); err != nil { - return fmt.Errorf("err: %w", err) + if _, err := tx.Exec(`INSERT INTO block (num) VALUES ($1)`, block.Num); err != nil { + return fmt.Errorf("insert Block. err: %w", err) } var initialL1InfoIndex uint32 @@ -247,16 +275,16 @@ func (p *processor) ProcessBlock(ctx context.Context, b sync.Block) error { lastIndex, err := p.getLastIndex(tx) switch { - case errors.Is(err, ErrNotFound): + case errors.Is(err, db.ErrNotFound): initialL1InfoIndex = 0 err = nil case err != nil: - return fmt.Errorf("err: %w", err) + return fmt.Errorf("getLastIndex err: %w", err) default: initialL1InfoIndex = lastIndex + 1 } - for _, e := range b.Events { + for _, e := range block.Events { event, ok := e.(Event) if !ok { return errors.New("failed to convert from sync.Block.Event into Event") @@ -264,7 +292,7 @@ func (p *processor) ProcessBlock(ctx context.Context, b sync.Block) error { if event.UpdateL1InfoTree != nil { index := initialL1InfoIndex + l1InfoLeavesAdded info := &L1InfoTreeLeaf{ - BlockNumber: b.Num, + BlockNumber: block.Num, BlockPosition: event.UpdateL1InfoTree.BlockPosition, L1InfoTreeIndex: index, PreviousBlockHash: event.UpdateL1InfoTree.ParentHash, @@ -274,41 +302,45 @@ func (p *processor) ProcessBlock(ctx context.Context, b sync.Block) error { } info.GlobalExitRoot = info.globalExitRoot() info.Hash = info.hash() - err = meddler.Insert(tx, "l1info_leaf", info) - if err != nil { - return fmt.Errorf("err: %w", err) + if err = meddler.Insert(tx, "l1info_leaf", info); err != nil { + return fmt.Errorf("insert l1info_leaf %s. 
err: %w", info.String(), err) } + err = p.l1InfoTree.AddLeaf(tx, info.BlockNumber, info.BlockPosition, treeTypes.Leaf{ Index: info.L1InfoTreeIndex, Hash: info.Hash, }) if err != nil { - return fmt.Errorf("err: %w", err) + return fmt.Errorf("AddLeaf(%s). err: %w", info.String(), err) } + log.Infof("inserted L1InfoTreeLeaf %s", info.String()) l1InfoLeavesAdded++ } - if event.VerifyBatches != nil { - err = p.rollupExitTree.UpsertLeaf(tx, b.Num, event.VerifyBatches.BlockPosition, treeTypes.Leaf{ - Index: event.VerifyBatches.RollupID - 1, - Hash: event.VerifyBatches.ExitRoot, - }) + log.Debugf("handle VerifyBatches event %s", event.VerifyBatches.String()) + err = p.processVerifyBatches(tx, block.Num, event.VerifyBatches) if err != nil { - return fmt.Errorf("err: %w", err) + err = fmt.Errorf("processVerifyBatches. err: %w", err) + log.Errorf("error processing VerifyBatches: %v", err) + return err } } if event.InitL1InfoRootMap != nil { - // TODO: indicate that l1 Info tree indexes before the one on this - // event are not safe to use - log.Debugf("TODO: handle InitL1InfoRootMap event") + log.Debugf("handle InitL1InfoRootMap event %s", event.InitL1InfoRootMap.String()) + err = processEventInitL1InfoRootMap(tx, block.Num, event.InitL1InfoRootMap) + if err != nil { + err = fmt.Errorf("initL1InfoRootMap. 
Err: %w", err) + log.Errorf("error processing InitL1InfoRootMap: %v", err) + return err + } } } if err := tx.Commit(); err != nil { return fmt.Errorf("err: %w", err) } - log.Infof("block %d processed with %d events", b.Num, len(b.Events)) + log.Infof("block %d processed with %d events", block.Num, len(block.Events)) return nil } @@ -317,7 +349,66 @@ func (p *processor) getLastIndex(tx db.Querier) (uint32, error) { row := tx.QueryRow("SELECT position FROM l1info_leaf ORDER BY block_num DESC, block_pos DESC LIMIT 1;") err := row.Scan(&lastProcessedIndex) if errors.Is(err, sql.ErrNoRows) { - return 0, ErrNotFound + return 0, db.ErrNotFound } return lastProcessedIndex, err } + +func (p *processor) GetFirstL1InfoWithRollupExitRoot(rollupExitRoot common.Hash) (*L1InfoTreeLeaf, error) { + info := &L1InfoTreeLeaf{} + err := meddler.QueryRow(p.db, info, ` + SELECT * FROM l1info_leaf + WHERE rollup_exit_root = $1 + ORDER BY block_num ASC, block_pos ASC + LIMIT 1; + `, rollupExitRoot.Hex()) + return info, db.ReturnErrNotFound(err) +} + +func (p *processor) GetLastInfo() (*L1InfoTreeLeaf, error) { + info := &L1InfoTreeLeaf{} + err := meddler.QueryRow(p.db, info, ` + SELECT * FROM l1info_leaf + ORDER BY block_num DESC, block_pos DESC + LIMIT 1; + `) + return info, db.ReturnErrNotFound(err) +} + +func (p *processor) GetFirstInfo() (*L1InfoTreeLeaf, error) { + info := &L1InfoTreeLeaf{} + err := meddler.QueryRow(p.db, info, ` + SELECT * FROM l1info_leaf + ORDER BY block_num ASC, block_pos ASC + LIMIT 1; + `) + return info, db.ReturnErrNotFound(err) +} + +func (p *processor) GetFirstInfoAfterBlock(blockNum uint64) (*L1InfoTreeLeaf, error) { + info := &L1InfoTreeLeaf{} + err := meddler.QueryRow(p.db, info, ` + SELECT * FROM l1info_leaf + WHERE block_num >= $1 + ORDER BY block_num ASC, block_pos ASC + LIMIT 1; + `, blockNum) + return info, db.ReturnErrNotFound(err) +} + +func (p *processor) GetInfoByGlobalExitRoot(ger common.Hash) (*L1InfoTreeLeaf, error) { + info := 
&L1InfoTreeLeaf{} + err := meddler.QueryRow(p.db, info, ` + SELECT * FROM l1info_leaf + WHERE global_exit_root = $1 + LIMIT 1; + `, ger.Hex()) + return info, db.ReturnErrNotFound(err) +} + +func (p *processor) getDBQuerier(tx db.Txer) db.Querier { + if tx != nil { + return tx + } + return p.db +} diff --git a/l1infotreesync/processor_initl1inforootmap.go b/l1infotreesync/processor_initl1inforootmap.go new file mode 100644 index 00000000..92732cd9 --- /dev/null +++ b/l1infotreesync/processor_initl1inforootmap.go @@ -0,0 +1,37 @@ +package l1infotreesync + +import ( + "database/sql" + "errors" + "fmt" + + "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/log" + "github.com/russross/meddler" +) + +func processEventInitL1InfoRootMap(tx db.Txer, blockNumber uint64, event *InitL1InfoRootMap) error { + if event == nil { + return nil + } + info := &L1InfoTreeInitial{ + BlockNumber: blockNumber, + LeafCount: event.LeafCount, + L1InfoRoot: event.CurrentL1InfoRoot, + } + log.Infof("insert InitL1InfoRootMap %s ", info.String()) + if err := meddler.Insert(tx, "l1info_initial", info); err != nil { + return fmt.Errorf("err: %w", err) + } + return nil +} + +// GetInitL1InfoRootMap returns the initial L1 info root map, nil if no root map has been set +func (p *processor) GetInitL1InfoRootMap(tx db.Txer) (*L1InfoTreeInitial, error) { + info := &L1InfoTreeInitial{} + err := meddler.QueryRow(p.getDBQuerier(tx), info, `SELECT block_num, leaf_count,l1_info_root FROM l1info_initial`) + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + return info, err +} diff --git a/l1infotreesync/processor_initl1inforootmap_test.go b/l1infotreesync/processor_initl1inforootmap_test.go new file mode 100644 index 00000000..753d7a25 --- /dev/null +++ b/l1infotreesync/processor_initl1inforootmap_test.go @@ -0,0 +1,67 @@ +package l1infotreesync + +import ( + "context" + "testing" + + "github.com/0xPolygon/cdk/sync" + "github.com/ethereum/go-ethereum/common" + 
"github.com/stretchr/testify/require" +) + +func TestInitL1InfoRootMap(t *testing.T) { + dbPath := "file:TestInitL1InfoRootMap?mode=memory&cache=shared" + sut, err := newProcessor(dbPath) + require.NoError(t, err) + ctx := context.TODO() + event := InitL1InfoRootMap{ + LeafCount: 1, + CurrentL1InfoRoot: common.HexToHash("beef"), + } + block := sync.Block{ + Num: 1, + Events: []interface{}{ + Event{InitL1InfoRootMap: &event}, + }, + } + + err = sut.ProcessBlock(ctx, block) + require.NoError(t, err) + + info, err := sut.GetInitL1InfoRootMap(nil) + require.NoError(t, err) + require.NotNil(t, info) + require.Equal(t, event.LeafCount, info.LeafCount) + require.Equal(t, event.CurrentL1InfoRoot, info.L1InfoRoot) + require.Equal(t, block.Num, info.BlockNumber) +} + +func TestInitL1InfoRootMapDontAllow2Rows(t *testing.T) { + dbPath := "file:TestInitL1InfoRootMapDontAllow2Rows?mode=memory&cache=shared" + sut, err := newProcessor(dbPath) + require.NoError(t, err) + ctx := context.TODO() + block := sync.Block{ + Num: 1, + Events: []interface{}{ + Event{InitL1InfoRootMap: &InitL1InfoRootMap{ + LeafCount: 1, + CurrentL1InfoRoot: common.HexToHash("beef"), + }}, + }, + } + err = sut.ProcessBlock(ctx, block) + require.NoError(t, err) + block.Num = 2 + err = sut.ProcessBlock(ctx, block) + require.Error(t, err, "should not allow to insert a second row") +} + +func TestGetInitL1InfoRootMap(t *testing.T) { + dbPath := "file:TestGetInitL1InfoRootMap?mode=memory&cache=shared" + sut, err := newProcessor(dbPath) + require.NoError(t, err) + info, err := sut.GetInitL1InfoRootMap(nil) + require.NoError(t, err, "should return no error if no row is present, because it returns data=nil") + require.Nil(t, info, "should return nil if no row is present") +} diff --git a/l1infotreesync/processor_test.go b/l1infotreesync/processor_test.go index 01550f31..52a81ce8 100644 --- a/l1infotreesync/processor_test.go +++ b/l1infotreesync/processor_test.go @@ -1,3 +1,269 @@ package l1infotreesync -// TODO: add 
unit test +import ( + "testing" + + "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/sync" + "github.com/0xPolygon/cdk/tree/types" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + "golang.org/x/net/context" +) + +func TestGetInfo(t *testing.T) { + dbPath := "file:TestGetInfo?mode=memory&cache=shared" + p, err := newProcessor(dbPath) + require.NoError(t, err) + ctx := context.Background() + + // Test ErrNotFound returned correctly on all methods + _, err = p.GetFirstL1InfoWithRollupExitRoot(common.Hash{}) + require.Equal(t, db.ErrNotFound, err) + _, err = p.GetLastInfo() + require.Equal(t, db.ErrNotFound, err) + _, err = p.GetFirstInfo() + require.Equal(t, db.ErrNotFound, err) + _, err = p.GetFirstInfoAfterBlock(0) + require.Equal(t, db.ErrNotFound, err) + _, err = p.GetInfoByGlobalExitRoot(common.Hash{}) + require.Equal(t, db.ErrNotFound, err) + + // First insert + info1 := &UpdateL1InfoTree{ + MainnetExitRoot: common.HexToHash("beef"), + RollupExitRoot: common.HexToHash("5ca1e"), + ParentHash: common.HexToHash("1010101"), + Timestamp: 420, + } + expected1 := L1InfoTreeLeaf{ + BlockNumber: 1, + L1InfoTreeIndex: 0, + PreviousBlockHash: info1.ParentHash, + Timestamp: info1.Timestamp, + MainnetExitRoot: info1.MainnetExitRoot, + RollupExitRoot: info1.RollupExitRoot, + } + expected1.GlobalExitRoot = expected1.globalExitRoot() + expected1.Hash = expected1.hash() + err = p.ProcessBlock(ctx, sync.Block{ + Num: 1, + Events: []interface{}{ + Event{UpdateL1InfoTree: info1}, + }, + }) + require.NoError(t, err) + actual, err := p.GetFirstL1InfoWithRollupExitRoot(info1.RollupExitRoot) + require.NoError(t, err) + require.Equal(t, expected1, *actual) + actual, err = p.GetLastInfo() + require.NoError(t, err) + require.Equal(t, expected1, *actual) + actual, err = p.GetFirstInfo() + require.NoError(t, err) + require.Equal(t, expected1, *actual) + actual, err = p.GetFirstInfoAfterBlock(0) + require.NoError(t, err) + require.Equal(t, 
expected1, *actual) + actual, err = p.GetInfoByGlobalExitRoot(expected1.GlobalExitRoot) + require.NoError(t, err) + require.Equal(t, expected1, *actual) + + // Second insert + info2 := &UpdateL1InfoTree{ + MainnetExitRoot: common.HexToHash("b055"), + RollupExitRoot: common.HexToHash("5ca1e"), + ParentHash: common.HexToHash("1010101"), + Timestamp: 420, + } + expected2 := L1InfoTreeLeaf{ + BlockNumber: 2, + L1InfoTreeIndex: 1, + PreviousBlockHash: info2.ParentHash, + Timestamp: info2.Timestamp, + MainnetExitRoot: info2.MainnetExitRoot, + RollupExitRoot: info2.RollupExitRoot, + } + expected2.GlobalExitRoot = expected2.globalExitRoot() + expected2.Hash = expected2.hash() + err = p.ProcessBlock(ctx, sync.Block{ + Num: 2, + Events: []interface{}{ + Event{UpdateL1InfoTree: info2}, + }, + }) + require.NoError(t, err) + actual, err = p.GetFirstL1InfoWithRollupExitRoot(info2.RollupExitRoot) + require.NoError(t, err) + require.Equal(t, expected1, *actual) + actual, err = p.GetLastInfo() + require.NoError(t, err) + require.Equal(t, expected2, *actual) + actual, err = p.GetFirstInfo() + require.NoError(t, err) + require.Equal(t, expected1, *actual) + actual, err = p.GetFirstInfoAfterBlock(2) + require.NoError(t, err) + require.Equal(t, expected2, *actual) + actual, err = p.GetInfoByGlobalExitRoot(expected2.GlobalExitRoot) + require.NoError(t, err) + require.Equal(t, expected2, *actual) +} + +func TestGetLatestInfoUntilBlockIfNotFoundReturnsErrNotFound(t *testing.T) { + dbPath := "file:TestGetLatestInfoUntilBlock?mode=memory&cache=shared" + sut, err := newProcessor(dbPath) + require.NoError(t, err) + ctx := context.Background() + // Fake block 1 + _, err = sut.db.Exec(`INSERT INTO block (num) VALUES ($1)`, 1) + require.NoError(t, err) + + _, err = sut.GetLatestInfoUntilBlock(ctx, 1) + require.Equal(t, db.ErrNotFound, err) +} + +func Test_processor_GetL1InfoTreeMerkleProof(t *testing.T) { + t.Parallel() + + testTable := []struct { + name string + getProcessor func(t *testing.T) 
*processor + idx uint32 + expectedRoot types.Root + expectedErr error + }{ + { + name: "empty tree", + getProcessor: func(t *testing.T) *processor { + t.Helper() + + p, err := newProcessor("file:Test_processor_GetL1InfoTreeMerkleProof_1?mode=memory&cache=shared") + require.NoError(t, err) + + return p + }, + idx: 0, + expectedErr: db.ErrNotFound, + }, + { + name: "single leaf tree", + getProcessor: func(t *testing.T) *processor { + t.Helper() + + p, err := newProcessor("file:Test_processor_GetL1InfoTreeMerkleProof_2?mode=memory&cache=shared") + require.NoError(t, err) + + info := &UpdateL1InfoTree{ + MainnetExitRoot: common.HexToHash("beef"), + RollupExitRoot: common.HexToHash("5ca1e"), + ParentHash: common.HexToHash("1010101"), + Timestamp: 420, + } + err = p.ProcessBlock(context.Background(), sync.Block{ + Num: 1, + Events: []interface{}{ + Event{UpdateL1InfoTree: info}, + }, + }) + require.NoError(t, err) + + return p + }, + idx: 0, + expectedRoot: types.Root{ + Hash: common.HexToHash("beef"), + Index: 0, + BlockNum: 1, + BlockPosition: 0, + }, + }, + } + + for _, tt := range testTable { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + p := tt.getProcessor(t) + proof, root, err := p.GetL1InfoTreeMerkleProof(context.Background(), tt.idx) + if tt.expectedErr != nil { + require.Equal(t, tt.expectedErr, err) + } else { + require.NoError(t, err) + require.NotEmpty(t, proof) + require.NotEmpty(t, root.Hash) + require.Equal(t, tt.expectedRoot.Index, root.Index) + require.Equal(t, tt.expectedRoot.BlockNum, root.BlockNum) + require.Equal(t, tt.expectedRoot.BlockPosition, root.BlockPosition) + } + }) + } +} + +func Test_processor_Reorg(t *testing.T) { + t.Parallel() + + testTable := []struct { + name string + getProcessor func(t *testing.T) *processor + reorgBlock uint64 + expectedErr error + }{ + { + name: "empty tree", + getProcessor: func(t *testing.T) *processor { + t.Helper() + + p, err := 
newProcessor("file:Test_processor_Reorg_1?mode=memory&cache=shared") + require.NoError(t, err) + return p + }, + reorgBlock: 0, + expectedErr: nil, + }, + { + name: "single leaf tree", + getProcessor: func(t *testing.T) *processor { + t.Helper() + + p, err := newProcessor("file:Test_processor_Reorg_2?mode=memory&cache=shared") + require.NoError(t, err) + + info := &UpdateL1InfoTree{ + MainnetExitRoot: common.HexToHash("beef"), + RollupExitRoot: common.HexToHash("5ca1e"), + ParentHash: common.HexToHash("1010101"), + Timestamp: 420, + } + err = p.ProcessBlock(context.Background(), sync.Block{ + Num: 1, + Events: []interface{}{ + Event{UpdateL1InfoTree: info}, + }, + }) + require.NoError(t, err) + + return p + }, + reorgBlock: 1, + }, + } + + for _, tt := range testTable { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + p := tt.getProcessor(t) + err := p.Reorg(context.Background(), tt.reorgBlock) + if tt.expectedErr != nil { + require.Equal(t, tt.expectedErr, err) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/l1infotreesync/processor_verifybatches.go b/l1infotreesync/processor_verifybatches.go new file mode 100644 index 00000000..9d1d0efb --- /dev/null +++ b/l1infotreesync/processor_verifybatches.go @@ -0,0 +1,104 @@ +package l1infotreesync + +import ( + "errors" + "fmt" + + "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/log" + treeTypes "github.com/0xPolygon/cdk/tree/types" + "github.com/ethereum/go-ethereum/common" + "github.com/russross/meddler" +) + +func (p *processor) processVerifyBatches(tx db.Txer, blockNumber uint64, event *VerifyBatches) error { + if event == nil { + return fmt.Errorf("processVerifyBatches: event is nil") + } + if tx == nil { + return fmt.Errorf("processVerifyBatches: tx is nil, is mandatory to pass a tx") + } + log.Debugf("VerifyBatches: rollupExitTree.UpsertLeaf (blockNumber=%d, event=%s)", blockNumber, event.String()) + // If ExitRoot is zero if the leaf doesnt exists doesnt change the 
root of tree. + // if leaf already exists doesn't make sense to 'empty' the leaf, so we keep previous value + if event.ExitRoot == (common.Hash{}) { + log.Infof("skipping VerifyBatches event with empty ExitRoot (blockNumber=%d, event=%s)", blockNumber, event.String()) + return nil + } + isNewLeaf, err := p.isNewValueForRollupExitTree(tx, event) + if err != nil { + return fmt.Errorf("isNewValueForrollupExitTree. err: %w", err) + } + if !isNewLeaf { + log.Infof("skipping VerifyBatches event with same ExitRoot (blockNumber=%d, event=%s)", blockNumber, event.String()) + return nil + } + log.Infof("UpsertLeaf VerifyBatches event (blockNumber=%d, event=%s)", blockNumber, event.String()) + newRoot, err := p.rollupExitTree.UpsertLeaf(tx, blockNumber, event.BlockPosition, treeTypes.Leaf{ + Index: event.RollupID - 1, + Hash: event.ExitRoot, + }) + if err != nil { + return fmt.Errorf("error rollupExitTree.UpsertLeaf. err: %w", err) + } + verifyBatches := event + verifyBatches.BlockNumber = blockNumber + verifyBatches.RollupExitRoot = newRoot + if err = meddler.Insert(tx, "verify_batches", verifyBatches); err != nil { + return fmt.Errorf("error inserting verify_batches. err: %w", err) + } + return nil +} + +func (p *processor) isNewValueForRollupExitTree(tx db.Querier, event *VerifyBatches) (bool, error) { + currentRoot, err := p.rollupExitTree.GetLastRoot(tx) + if err != nil && errors.Is(err, db.ErrNotFound) { + // The tree is empty, so is a new value for sure + return true, nil + } + if err != nil { + return false, fmt.Errorf("error rollupExitTree.GetLastRoot. err: %w", err) + } + leaf, err := p.rollupExitTree.GetLeaf(tx, event.RollupID-1, currentRoot.Hash) + if err != nil && errors.Is(err, db.ErrNotFound) { + // The leaf doesn't exist, so is a new value + return true, nil + } + if err != nil { + return false, fmt.Errorf("error rollupExitTree.GetLeaf. 
err: %w", err) + } + return leaf != event.ExitRoot, nil +} + +func (p *processor) GetLastVerifiedBatches(rollupID uint32) (*VerifyBatches, error) { + verified := &VerifyBatches{} + err := meddler.QueryRow(p.db, verified, ` + SELECT * FROM verify_batches + WHERE rollup_id = $1 + ORDER BY block_num DESC, block_pos DESC + LIMIT 1; + `, rollupID) + return verified, db.ReturnErrNotFound(err) +} + +func (p *processor) GetFirstVerifiedBatches(rollupID uint32) (*VerifyBatches, error) { + verified := &VerifyBatches{} + err := meddler.QueryRow(p.db, verified, ` + SELECT * FROM verify_batches + WHERE rollup_id = $1 + ORDER BY block_num ASC, block_pos ASC + LIMIT 1; + `, rollupID) + return verified, db.ReturnErrNotFound(err) +} + +func (p *processor) GetFirstVerifiedBatchesAfterBlock(rollupID uint32, blockNum uint64) (*VerifyBatches, error) { + verified := &VerifyBatches{} + err := meddler.QueryRow(p.db, verified, ` + SELECT * FROM verify_batches + WHERE rollup_id = $1 AND block_num >= $2 + ORDER BY block_num ASC, block_pos ASC + LIMIT 1; + `, rollupID, blockNum) + return verified, db.ReturnErrNotFound(err) +} diff --git a/l1infotreesync/processor_verifybatches_test.go b/l1infotreesync/processor_verifybatches_test.go new file mode 100644 index 00000000..d943b541 --- /dev/null +++ b/l1infotreesync/processor_verifybatches_test.go @@ -0,0 +1,127 @@ +package l1infotreesync + +import ( + "context" + "testing" + + "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/sync" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestProcessVerifyBatchesNil(t *testing.T) { + dbPath := "file:TestProcessVerifyBatchesNil?mode=memory&cache=shared" + sut, err := newProcessor(dbPath) + require.NoError(t, err) + err = sut.processVerifyBatches(nil, 1, nil) + require.Error(t, err) +} + +func TestProcessVerifyBatchesOK(t *testing.T) { + dbPath := "file:TestProcessVerifyBatchesOK?mode=memory&cache=shared" + sut, err := newProcessor(dbPath) + 
require.NoError(t, err) + event := VerifyBatches{ + BlockPosition: 1, + RollupID: 1, + NumBatch: 1, + StateRoot: common.HexToHash("5ca1e"), + ExitRoot: common.HexToHash("b455"), + Aggregator: common.HexToAddress("beef"), + RollupExitRoot: common.HexToHash("b455"), + } + ctx := context.TODO() + tx, err := db.NewTx(ctx, sut.db) + require.NoError(t, err) + _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, 1) + require.NoError(t, err) + err = sut.processVerifyBatches(tx, 1, &event) + require.NoError(t, err) +} + +func TestProcessVerifyBatchesSkip0000(t *testing.T) { + dbPath := "file:TestProcessVerifyBatchesSkip0000?mode=memory&cache=shared" + sut, err := newProcessor(dbPath) + require.NoError(t, err) + event := VerifyBatches{ + BlockPosition: 1, + RollupID: 1, + NumBatch: 1, + StateRoot: common.HexToHash("5ca1e"), + ExitRoot: common.Hash{}, + Aggregator: common.HexToAddress("beef"), + RollupExitRoot: common.HexToHash("b455"), + } + ctx := context.TODO() + tx, err := db.NewTx(ctx, sut.db) + require.NoError(t, err) + err = sut.processVerifyBatches(tx, 1, &event) + require.NoError(t, err) +} + +func TestGetVerifiedBatches(t *testing.T) { + dbPath := "file:TestGetVerifiedBatches?mode=memory&cache=shared" + p, err := newProcessor(dbPath) + require.NoError(t, err) + ctx := context.Background() + + // Test ErrNotFound returned correctly on all methods + _, err = p.GetLastVerifiedBatches(0) + require.Equal(t, db.ErrNotFound, err) + _, err = p.GetFirstVerifiedBatches(0) + require.Equal(t, db.ErrNotFound, err) + _, err = p.GetFirstVerifiedBatchesAfterBlock(0, 0) + require.Equal(t, db.ErrNotFound, err) + + // First insert + expected1 := &VerifyBatches{ + RollupID: 420, + NumBatch: 69, + StateRoot: common.HexToHash("5ca1e"), + ExitRoot: common.HexToHash("b455"), + Aggregator: common.HexToAddress("beef"), + } + err = p.ProcessBlock(ctx, sync.Block{ + Num: 1, + Events: []interface{}{ + Event{VerifyBatches: expected1}, + }, + }) + require.NoError(t, err) + _, err = 
p.GetLastVerifiedBatches(0) + require.Equal(t, db.ErrNotFound, err) + actual, err := p.GetLastVerifiedBatches(420) + require.NoError(t, err) + require.Equal(t, expected1, actual) + actual, err = p.GetFirstVerifiedBatches(420) + require.NoError(t, err) + require.Equal(t, expected1, actual) + + // Second insert + expected2 := &VerifyBatches{ + RollupID: 420, + NumBatch: 690, + StateRoot: common.HexToHash("5ca1e3"), + ExitRoot: common.HexToHash("ba55"), + Aggregator: common.HexToAddress("beef3"), + } + err = p.ProcessBlock(ctx, sync.Block{ + Num: 2, + Events: []interface{}{ + Event{VerifyBatches: expected2}, + }, + }) + require.NoError(t, err) + _, err = p.GetLastVerifiedBatches(0) + require.Equal(t, db.ErrNotFound, err) + actual, err = p.GetLastVerifiedBatches(420) + require.NoError(t, err) + require.Equal(t, expected2, actual) + actual, err = p.GetFirstVerifiedBatches(420) + require.NoError(t, err) + require.Equal(t, expected1, actual) + actual, err = p.GetFirstVerifiedBatchesAfterBlock(420, 2) + require.NoError(t, err) + require.Equal(t, expected2, actual) +} diff --git a/lastgersync/e2e_test.go b/lastgersync/e2e_test.go index 979d55a2..e4d5e407 100644 --- a/lastgersync/e2e_test.go +++ b/lastgersync/e2e_test.go @@ -9,7 +9,7 @@ import ( "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/lastgersync" - "github.com/0xPolygon/cdk/test/helpers" + "github.com/0xPolygon/cdk/test/aggoraclehelpers" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" @@ -17,7 +17,7 @@ import ( func TestE2E(t *testing.T) { ctx := context.Background() - env := helpers.SetupAggoracleWithEVMChain(t) + env := aggoraclehelpers.SetupAggoracleWithEVMChain(t) dbPathSyncer := t.TempDir() syncer, err := lastgersync.New( ctx, diff --git a/lastgersync/evmdownloader.go b/lastgersync/evmdownloader.go index 91e05c7a..e76bb578 100644 --- a/lastgersync/evmdownloader.go +++ b/lastgersync/evmdownloader.go @@ -8,10 +8,10 @@ 
import ( "time" "github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/pessimisticglobalexitroot" + "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/l1infotreesync" "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sync" - "github.com/0xPolygon/cdk/tree" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" @@ -67,7 +67,7 @@ func (d *downloader) Download(ctx context.Context, fromBlock uint64, downloadedC ) for { lastIndex, err = d.processor.getLastIndex(ctx) - if errors.Is(err, ErrNotFound) { + if errors.Is(err, db.ErrNotFound) { lastIndex = 0 } else if err != nil { log.Errorf("error getting last indes: %v", err) @@ -105,7 +105,11 @@ func (d *downloader) Download(ctx context.Context, fromBlock uint64, downloadedC break } - blockHeader := d.GetBlockHeader(ctx, lastBlock) + blockHeader, isCanceled := d.GetBlockHeader(ctx, lastBlock) + if isCanceled { + return + } + block := &sync.EVMBlock{ EVMBlockHeader: sync.EVMBlockHeader{ Num: blockHeader.Num, @@ -129,7 +133,7 @@ func (d *downloader) Download(ctx context.Context, fromBlock uint64, downloadedC func (d *downloader) getGERsFromIndex(ctx context.Context, fromL1InfoTreeIndex uint32) ([]Event, error) { lastRoot, err := d.l1InfoTreesync.GetLastL1InfoTreeRoot(ctx) - if errors.Is(err, tree.ErrNotFound) { + if errors.Is(err, db.ErrNotFound) { return nil, nil } if err != nil { diff --git a/lastgersync/processor.go b/lastgersync/processor.go index 628ea04a..45104f09 100644 --- a/lastgersync/processor.go +++ b/lastgersync/processor.go @@ -7,6 +7,7 @@ import ( "math" "github.com/0xPolygon/cdk/common" + "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sync" ethCommon "github.com/ethereum/go-ethereum/common" @@ -22,7 +23,6 @@ const ( var ( lastProcessedKey = []byte("lp") - ErrNotFound = errors.New("not found") ) type Event struct { @@ -111,7 +111,7 @@ func (p *processor) getLastIndexWithTx(tx kv.Tx) 
(uint32, error) { return 0, err } if k == nil { - return 0, ErrNotFound + return 0, db.ErrNotFound } return common.BytesToUint32(k), nil @@ -142,7 +142,7 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { if lenEvents > 0 { li, err := p.getLastIndexWithTx(tx) switch { - case errors.Is(err, ErrNotFound): + case errors.Is(err, db.ErrNotFound): lastIndex = -1 case err != nil: @@ -286,7 +286,7 @@ func (p *processor) GetFirstGERAfterL1InfoTreeIndex( return 0, ethCommon.Hash{}, err } if l1InfoIndexBytes == nil { - return 0, ethCommon.Hash{}, ErrNotFound + return 0, ethCommon.Hash{}, db.ErrNotFound } return common.BytesToUint32(l1InfoIndexBytes), ethCommon.BytesToHash(ger), nil diff --git a/packaging/deb/cdk/DEBIAN/postinst b/packaging/deb/cdk/DEBIAN/postinst new file mode 100755 index 00000000..e5765a67 --- /dev/null +++ b/packaging/deb/cdk/DEBIAN/postinst @@ -0,0 +1,12 @@ +#!/bin/bash +# This is a postinstallation script so the service can be configured and started when requested +# +adduser --disabled-password --disabled-login --shell /usr/sbin/nologin --quiet --system --no-create-home --home /nonexistent cdk +if [ -d "/opt/cdk" ] +then + echo "Directory /opt/cdk exists." 
+else + mkdir -p /opt/cdk + chown -R cdk /opt/cdk +fi +systemctl daemon-reload diff --git a/packaging/deb/cdk/DEBIAN/postrm b/packaging/deb/cdk/DEBIAN/postrm new file mode 100755 index 00000000..a2ea87a6 --- /dev/null +++ b/packaging/deb/cdk/DEBIAN/postrm @@ -0,0 +1,8 @@ +#!/bin/bash +# +############### +# Remove cdk-node installs +############## +#rm -rf /lib/systemd/system/cdk-node.service +deluser cdk +#systemctl daemon-reload diff --git a/packaging/systemd/cdk.service.example b/packaging/systemd/cdk.service.example new file mode 100644 index 00000000..d427e775 --- /dev/null +++ b/packaging/systemd/cdk.service.example @@ -0,0 +1,16 @@ +[Unit] + Description=cdk + StartLimitIntervalSec=500 + StartLimitBurst=5 + +[Service] + Restart=on-failure + RestartSec=5s + ExecStart=/usr/bin/cdk --config $config --chain $chain node + Type=simple + KillSignal=SIGINT + User=cdk + TimeoutStopSec=120 + +[Install] + WantedBy=multi-user.target diff --git a/reorgdetector/mock_eth_client.go b/reorgdetector/mock_eth_client.go index a76c62f9..0c561ab3 100644 --- a/reorgdetector/mock_eth_client.go +++ b/reorgdetector/mock_eth_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.40.1. DO NOT EDIT. +// Code generated by mockery v2.39.0. DO NOT EDIT. 
package reorgdetector diff --git a/reorgdetector/reorgdetector.go b/reorgdetector/reorgdetector.go index 7a995bac..496a844c 100644 --- a/reorgdetector/reorgdetector.go +++ b/reorgdetector/reorgdetector.go @@ -120,12 +120,20 @@ func (rd *ReorgDetector) detectReorgInTrackedList(ctx context.Context) error { errGroup errgroup.Group ) - rd.trackedBlocksLock.Lock() - defer rd.trackedBlocksLock.Unlock() + subscriberIDs := rd.getSubscriberIDs() - for id, hdrs := range rd.trackedBlocks { + for _, id := range subscriberIDs { id := id - hdrs := hdrs + + // This is done like this because of a possible deadlock + // between AddBlocksToTrack and detectReorgInTrackedList + rd.trackedBlocksLock.RLock() + hdrs, ok := rd.trackedBlocks[id] + rd.trackedBlocksLock.RUnlock() + + if !ok { + continue + } errGroup.Go(func() error { headers := hdrs.getSorted() @@ -136,7 +144,7 @@ func (rd *ReorgDetector) detectReorgInTrackedList(ctx context.Context) error { if !ok || currentHeader == nil { if currentHeader, err = rd.client.HeaderByNumber(ctx, new(big.Int).SetUint64(hdr.Num)); err != nil { headersCacheLock.Unlock() - return fmt.Errorf("failed to get the header: %w", err) + return fmt.Errorf("failed to get the header %d: %w", hdr.Num, err) } headersCache[hdr.Num] = currentHeader } diff --git a/reorgdetector/reorgdetector_db.go b/reorgdetector/reorgdetector_db.go index 3174cbc0..79bd6cd4 100644 --- a/reorgdetector/reorgdetector_db.go +++ b/reorgdetector/reorgdetector_db.go @@ -53,6 +53,10 @@ func (rd *ReorgDetector) getTrackedBlocks(ctx context.Context) (map[string]*head // saveTrackedBlock saves the tracked block for a subscriber in db and in memory func (rd *ReorgDetector) saveTrackedBlock(ctx context.Context, id string, b header) error { + rd.trackedBlocksLock.Lock() + + // this has to go after the lock, because of a possible deadlock + // between AddBlocksToTrack and detectReorgInTrackedList tx, err := rd.db.BeginRw(ctx) if err != nil { return err @@ -60,7 +64,6 @@ func (rd *ReorgDetector) 
saveTrackedBlock(ctx context.Context, id string, b head defer tx.Rollback() - rd.trackedBlocksLock.Lock() hdrs, ok := rd.trackedBlocks[id] if !ok || hdrs.isEmpty() { hdrs = newHeadersList(b) diff --git a/reorgdetector/reorgdetector_sub.go b/reorgdetector/reorgdetector_sub.go index 675a81c5..c5002a2b 100644 --- a/reorgdetector/reorgdetector_sub.go +++ b/reorgdetector/reorgdetector_sub.go @@ -34,9 +34,24 @@ func (rd *ReorgDetector) Subscribe(id string) (*Subscription, error) { func (rd *ReorgDetector) notifySubscriber(id string, startingBlock header) { // Notify subscriber about this particular reorg rd.subscriptionsLock.RLock() - if sub, ok := rd.subscriptions[id]; ok { + sub, ok := rd.subscriptions[id] + rd.subscriptionsLock.RUnlock() + + if ok { sub.ReorgedBlock <- startingBlock.Num <-sub.ReorgProcessed } - rd.subscriptionsLock.RUnlock() +} + +// getSubscriberIDs returns a list of subscriber IDs +func (rd *ReorgDetector) getSubscriberIDs() []string { + rd.subscriptionsLock.RLock() + defer rd.subscriptionsLock.RUnlock() + + ids := make([]string, 0, len(rd.subscriptions)) + for id := range rd.subscriptions { + ids = append(ids, id) + } + + return ids } diff --git a/reorgdetector/reorgdetector_test.go b/reorgdetector/reorgdetector_test.go index 7efe0892..c99bb484 100644 --- a/reorgdetector/reorgdetector_test.go +++ b/reorgdetector/reorgdetector_test.go @@ -2,47 +2,21 @@ package reorgdetector import ( "context" - big "math/big" "testing" "time" cdktypes "github.com/0xPolygon/cdk/config/types" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethclient/simulated" + "github.com/0xPolygon/cdk/test/helpers" "github.com/stretchr/testify/require" ) -func newSimulatedL1(t *testing.T, auth *bind.TransactOpts) *simulated.Backend { - t.Helper() - - balance, _ := 
new(big.Int).SetString("10000000000000000000000000", 10) - - blockGasLimit := uint64(999999999999999999) - client := simulated.NewBackend(map[common.Address]types.Account{ - auth.From: { - Balance: balance, - }, - }, simulated.WithBlockGasLimit(blockGasLimit)) - client.Commit() - - return client -} - func Test_ReorgDetector(t *testing.T) { const subID = "test" ctx := context.Background() // Simulated L1 - privateKeyL1, err := crypto.GenerateKey() - require.NoError(t, err) - authL1, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(1337)) - require.NoError(t, err) - clientL1 := newSimulatedL1(t, authL1) - require.NoError(t, err) + clientL1, _ := helpers.SimulatedBackend(t, nil, 0) // Create test DB dir testDir := t.TempDir() @@ -92,6 +66,6 @@ func Test_ReorgDetector(t *testing.T) { headersList, ok := reorgDetector.trackedBlocks[subID] reorgDetector.trackedBlocksLock.Unlock() require.True(t, ok) - require.Equal(t, 1, headersList.len()) // Only block 2 left - require.Equal(t, remainingHeader.Hash(), headersList.get(2).Hash) + require.Equal(t, 1, headersList.len()) // Only block 3 left + require.Equal(t, remainingHeader.Hash(), headersList.get(4).Hash) } diff --git a/reorgdetector/types_test.go b/reorgdetector/types_test.go index 9e20e363..42f7f61c 100644 --- a/reorgdetector/types_test.go +++ b/reorgdetector/types_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" ) func TestBlockMap(t *testing.T) { @@ -50,8 +51,12 @@ func TestBlockMap(t *testing.T) { t.Parallel() copiedBm := bm.copy() - if !reflect.DeepEqual(bm, copiedBm) { - t.Errorf("add() returned incorrect result, expected: %v, got: %v", bm, copiedBm) + for i, header := range bm.headers { + copiedHeader, exists := copiedBm.headers[i] + require.True(t, exists) + if !reflect.DeepEqual(header, copiedHeader) { + t.Errorf("copy() returned incorrect result, expected: %v, got: %v", header, copiedHeader) + } } }) diff --git 
a/rpc/bridge.go b/rpc/bridge.go index 23c67409..c769158e 100644 --- a/rpc/bridge.go +++ b/rpc/bridge.go @@ -2,18 +2,16 @@ package rpc import ( "context" + "errors" "fmt" "math/big" "time" "github.com/0xPolygon/cdk-rpc/rpc" - "github.com/0xPolygon/cdk/bridgesync" "github.com/0xPolygon/cdk/claimsponsor" - "github.com/0xPolygon/cdk/l1bridge2infoindexsync" - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/lastgersync" "github.com/0xPolygon/cdk/log" - "github.com/ethereum/go-ethereum/common" + "github.com/0xPolygon/cdk/rpc/types" + tree "github.com/0xPolygon/cdk/tree/types" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/metric" ) @@ -23,22 +21,26 @@ const ( BRIDGE = "bridge" meterName = "github.com/0xPolygon/cdk/rpc" - zeroHex = "0x0" + zeroHex = "0x0" + binnarySearchDivider = 2 +) + +var ( + ErrNotOnL1Info = errors.New("this bridge has not been included on the L1 Info Tree yet") ) // BridgeEndpoints contains implementations for the "bridge" RPC endpoints type BridgeEndpoints struct { - logger *log.Logger - meter metric.Meter - readTimeout time.Duration - writeTimeout time.Duration - networkID uint32 - sponsor *claimsponsor.ClaimSponsor - l1InfoTree *l1infotreesync.L1InfoTreeSync - l1Bridge2Index *l1bridge2infoindexsync.L1Bridge2InfoIndexSync - injectedGERs *lastgersync.LastGERSync - bridgeL1 *bridgesync.BridgeSync - bridgeL2 *bridgesync.BridgeSync + logger *log.Logger + meter metric.Meter + readTimeout time.Duration + writeTimeout time.Duration + networkID uint32 + sponsor ClaimSponsorer + l1InfoTree L1InfoTreer + injectedGERs LastGERer + bridgeL1 Bridger + bridgeL2 Bridger } // NewBridgeEndpoints returns InteropEndpoints @@ -47,26 +49,24 @@ func NewBridgeEndpoints( writeTimeout time.Duration, readTimeout time.Duration, networkID uint32, - sponsor *claimsponsor.ClaimSponsor, - l1InfoTree *l1infotreesync.L1InfoTreeSync, - l1Bridge2Index *l1bridge2infoindexsync.L1Bridge2InfoIndexSync, - injectedGERs *lastgersync.LastGERSync, - bridgeL1 
*bridgesync.BridgeSync, - bridgeL2 *bridgesync.BridgeSync, + sponsor ClaimSponsorer, + l1InfoTree L1InfoTreer, + injectedGERs LastGERer, + bridgeL1 Bridger, + bridgeL2 Bridger, ) *BridgeEndpoints { meter := otel.Meter(meterName) return &BridgeEndpoints{ - logger: logger, - meter: meter, - readTimeout: readTimeout, - writeTimeout: writeTimeout, - networkID: networkID, - sponsor: sponsor, - l1InfoTree: l1InfoTree, - l1Bridge2Index: l1Bridge2Index, - injectedGERs: injectedGERs, - bridgeL1: bridgeL1, - bridgeL2: bridgeL2, + logger: logger, + meter: meter, + readTimeout: readTimeout, + writeTimeout: writeTimeout, + networkID: networkID, + sponsor: sponsor, + l1InfoTree: l1InfoTree, + injectedGERs: injectedGERs, + bridgeL1: bridgeL1, + bridgeL2: bridgeL2, } } @@ -84,21 +84,26 @@ func (b *BridgeEndpoints) L1InfoTreeIndexForBridge(networkID uint32, depositCoun c.Add(ctx, 1) if networkID == 0 { - l1InfoTreeIndex, err := b.l1Bridge2Index.GetL1InfoTreeIndexByDepositCount(ctx, depositCount) + l1InfoTreeIndex, err := b.getFirstL1InfoTreeIndexForL1Bridge(ctx, depositCount) // TODO: special treatment of the error when not found, // as it's expected that it will take some time for the L1 Info tree to be updated if err != nil { - return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get l1InfoTreeIndex, error: %s", err)) + return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf( + "failed to get l1InfoTreeIndex for networkID %d and deposit count %d, error: %s", networkID, depositCount, err), + ) } return l1InfoTreeIndex, nil } if networkID == b.networkID { + l1InfoTreeIndex, err := b.getFirstL1InfoTreeIndexForL2Bridge(ctx, depositCount) // TODO: special treatment of the error when not found, // as it's expected that it will take some time for the L1 Info tree to be updated - return zeroHex, rpc.NewRPCError( - rpc.DefaultErrorCode, - "TODO: batchsync / certificatesync missing implementation", - ) + if err != nil { + return zeroHex, 
rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf( + "failed to get l1InfoTreeIndex for networkID %d and deposit count %d, error: %s", networkID, depositCount, err), + ) + } + return l1InfoTreeIndex, nil } return zeroHex, rpc.NewRPCError( rpc.DefaultErrorCode, @@ -143,12 +148,6 @@ func (b *BridgeEndpoints) InjectedInfoAfterIndex(networkID uint32, l1InfoTreeInd ) } -type ClaimProof struct { - ProofLocalExitRoot [32]common.Hash - ProofRollupExitRoot [32]common.Hash - L1InfoTreeLeaf l1infotreesync.L1InfoTreeLeaf -} - // ClaimProof returns the proofs needed to claim a bridge. NetworkID and depositCount refere to the bridge origin // while globalExitRoot should be already injected on the destination network. // This call needs to be done to a client of the same network were the bridge tx was sent @@ -172,7 +171,7 @@ func (b *BridgeEndpoints) ClaimProof( if err != nil { return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get rollup exit proof, error: %s", err)) } - var proofLocalExitRoot [32]common.Hash + var proofLocalExitRoot tree.Proof switch { case networkID == 0: proofLocalExitRoot, err = b.bridgeL1.GetProof(ctx, depositCount, info.MainnetExitRoot) @@ -202,8 +201,7 @@ func (b *BridgeEndpoints) ClaimProof( fmt.Sprintf("this client does not support network %d", networkID), ) } - - return ClaimProof{ + return types.ClaimProof{ ProofLocalExitRoot: proofLocalExitRoot, ProofRollupExitRoot: proofRollupExitRoot, L1InfoTreeLeaf: *info, @@ -258,3 +256,111 @@ func (b *BridgeEndpoints) GetSponsoredClaimStatus(globalIndex *big.Int) (interfa } return claim.Status, nil } + +func (b *BridgeEndpoints) getFirstL1InfoTreeIndexForL1Bridge(ctx context.Context, depositCount uint32) (uint32, error) { + lastInfo, err := b.l1InfoTree.GetLastInfo() + if err != nil { + return 0, err + } + + root, err := b.bridgeL1.GetRootByLER(ctx, lastInfo.MainnetExitRoot) + if err != nil { + return 0, err + } + if root.Index < depositCount { + return 0, ErrNotOnL1Info + } + + 
firstInfo, err := b.l1InfoTree.GetFirstInfo() + if err != nil { + return 0, err + } + + // Binary search between the first and last blcoks where L1 info tree was updated. + // Find the smallest l1 info tree index that is greater than depositCount and matches with + // a MER that is included on the l1 info tree + bestResult := lastInfo + lowerLimit := firstInfo.BlockNumber + upperLimit := lastInfo.BlockNumber + for lowerLimit <= upperLimit { + targetBlock := lowerLimit + ((upperLimit - lowerLimit) / binnarySearchDivider) + targetInfo, err := b.l1InfoTree.GetFirstInfoAfterBlock(targetBlock) + if err != nil { + return 0, err + } + root, err := b.bridgeL1.GetRootByLER(ctx, targetInfo.MainnetExitRoot) + if err != nil { + return 0, err + } + //nolint:gocritic // switch statement doesn't make sense here, I couldn't break + if root.Index < depositCount { + lowerLimit = targetBlock + 1 + } else if root.Index == depositCount { + bestResult = targetInfo + break + } else { + bestResult = targetInfo + upperLimit = targetBlock - 1 + } + } + + return bestResult.L1InfoTreeIndex, nil +} + +func (b *BridgeEndpoints) getFirstL1InfoTreeIndexForL2Bridge(ctx context.Context, depositCount uint32) (uint32, error) { + // NOTE: this code assumes that all the rollup exit roots + // (produced by the smart contract call verifyBatches / verifyBatchesTrustedAggregator) + // are included in the L1 info tree. As per the current implementation (smart contracts) of the protocol + // this is true. 
This could change in the future + lastVerified, err := b.l1InfoTree.GetLastVerifiedBatches(b.networkID - 1) + if err != nil { + return 0, err + } + + root, err := b.bridgeL2.GetRootByLER(ctx, lastVerified.ExitRoot) + if err != nil { + return 0, err + } + if root.Index < depositCount { + return 0, ErrNotOnL1Info + } + + firstVerified, err := b.l1InfoTree.GetFirstVerifiedBatches(b.networkID - 1) + if err != nil { + return 0, err + } + + // Binary search between the first and last blcoks where batches were verified. + // Find the smallest deposit count that is greater than depositCount and matches with + // a LER that is verified + bestResult := lastVerified + lowerLimit := firstVerified.BlockNumber + upperLimit := lastVerified.BlockNumber + for lowerLimit <= upperLimit { + targetBlock := lowerLimit + ((upperLimit - lowerLimit) / binnarySearchDivider) + targetVerified, err := b.l1InfoTree.GetFirstVerifiedBatchesAfterBlock(b.networkID-1, targetBlock) + if err != nil { + return 0, err + } + root, err = b.bridgeL2.GetRootByLER(ctx, targetVerified.ExitRoot) + if err != nil { + return 0, err + } + //nolint:gocritic // switch statement doesn't make sense here, I couldn't break + if root.Index < depositCount { + lowerLimit = targetBlock + 1 + } else if root.Index == depositCount { + bestResult = targetVerified + break + } else { + bestResult = targetVerified + upperLimit = targetBlock - 1 + } + } + + info, err := b.l1InfoTree.GetFirstL1InfoWithRollupExitRoot(bestResult.RollupExitRoot) + if err != nil { + return 0, err + } + return info.L1InfoTreeIndex, nil +} diff --git a/rpc/bridge_interfaces.go b/rpc/bridge_interfaces.go new file mode 100644 index 00000000..84292e22 --- /dev/null +++ b/rpc/bridge_interfaces.go @@ -0,0 +1,40 @@ +package rpc + +import ( + "context" + "math/big" + + "github.com/0xPolygon/cdk/claimsponsor" + "github.com/0xPolygon/cdk/l1infotreesync" + tree "github.com/0xPolygon/cdk/tree/types" + "github.com/ethereum/go-ethereum/common" +) + +type Bridger 
interface { + GetProof(ctx context.Context, depositCount uint32, localExitRoot common.Hash) (tree.Proof, error) + GetRootByLER(ctx context.Context, ler common.Hash) (*tree.Root, error) +} + +type LastGERer interface { + GetFirstGERAfterL1InfoTreeIndex( + ctx context.Context, atOrAfterL1InfoTreeIndex uint32, + ) (injectedL1InfoTreeIndex uint32, ger common.Hash, err error) +} + +type L1InfoTreer interface { + GetInfoByIndex(ctx context.Context, index uint32) (*l1infotreesync.L1InfoTreeLeaf, error) + GetRollupExitTreeMerkleProof(ctx context.Context, networkID uint32, root common.Hash) (tree.Proof, error) + GetLocalExitRoot(ctx context.Context, networkID uint32, rollupExitRoot common.Hash) (common.Hash, error) + GetLastInfo() (*l1infotreesync.L1InfoTreeLeaf, error) + GetFirstInfo() (*l1infotreesync.L1InfoTreeLeaf, error) + GetFirstInfoAfterBlock(blockNum uint64) (*l1infotreesync.L1InfoTreeLeaf, error) + GetLastVerifiedBatches(rollupID uint32) (*l1infotreesync.VerifyBatches, error) + GetFirstVerifiedBatches(rollupID uint32) (*l1infotreesync.VerifyBatches, error) + GetFirstVerifiedBatchesAfterBlock(rollupID uint32, blockNum uint64) (*l1infotreesync.VerifyBatches, error) + GetFirstL1InfoWithRollupExitRoot(rollupExitRoot common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error) +} + +type ClaimSponsorer interface { + AddClaimToQueue(ctx context.Context, claim *claimsponsor.Claim) error + GetClaim(ctx context.Context, globalIndex *big.Int) (*claimsponsor.Claim, error) +} diff --git a/rpc/bridge_test.go b/rpc/bridge_test.go new file mode 100644 index 00000000..9d461a50 --- /dev/null +++ b/rpc/bridge_test.go @@ -0,0 +1,443 @@ +package rpc + +import ( + "context" + "errors" + "testing" + + cdkCommon "github.com/0xPolygon/cdk/common" + "github.com/0xPolygon/cdk/l1infotreesync" + "github.com/0xPolygon/cdk/log" + mocks "github.com/0xPolygon/cdk/rpc/mocks" + tree "github.com/0xPolygon/cdk/tree/types" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/mock" + 
"github.com/stretchr/testify/require" +) + +func TestGetFirstL1InfoTreeIndexForL1Bridge(t *testing.T) { + type testCase struct { + description string + setupMocks func() + depositCount uint32 + expectedIndex uint32 + expectedErr error + } + ctx := context.Background() + b := newBridgeWithMocks(t) + fooErr := errors.New("foo") + firstL1Info := &l1infotreesync.L1InfoTreeLeaf{ + BlockNumber: 10, + MainnetExitRoot: common.HexToHash("alfa"), + } + lastL1Info := &l1infotreesync.L1InfoTreeLeaf{ + BlockNumber: 1000, + MainnetExitRoot: common.HexToHash("alfa"), + } + mockHappyPath := func() { + // to make this work, assume that block number == l1 info tree index == deposit count + b.l1InfoTree.On("GetLastInfo"). + Return(lastL1Info, nil). + Once() + b.l1InfoTree.On("GetFirstInfo"). + Return(firstL1Info, nil). + Once() + infoAfterBlock := &l1infotreesync.L1InfoTreeLeaf{} + b.l1InfoTree.On("GetFirstInfoAfterBlock", mock.Anything). + Run(func(args mock.Arguments) { + blockNum, ok := args.Get(0).(uint64) + require.True(t, ok) + infoAfterBlock.L1InfoTreeIndex = uint32(blockNum) + infoAfterBlock.BlockNumber = blockNum + infoAfterBlock.MainnetExitRoot = common.BytesToHash(cdkCommon.Uint32ToBytes(uint32(blockNum))) + }). + Return(infoAfterBlock, nil) + rootByLER := &tree.Root{} + b.bridgeL1.On("GetRootByLER", ctx, mock.Anything). + Run(func(args mock.Arguments) { + ler, ok := args.Get(1).(common.Hash) + require.True(t, ok) + index := cdkCommon.BytesToUint32(ler.Bytes()[28:]) // hash is 32 bytes, uint32 is just 4 + if ler == common.HexToHash("alfa") { + index = uint32(lastL1Info.BlockNumber) + } + rootByLER.Index = index + }). + Return(rootByLER, nil) + } + testCases := []testCase{ + { + description: "error on GetLastInfo", + setupMocks: func() { + b.l1InfoTree.On("GetLastInfo"). + Return(nil, fooErr). 
+ Once() + }, + depositCount: 11, + expectedIndex: 0, + expectedErr: fooErr, + }, + { + description: "error on first GetRootByLER", + setupMocks: func() { + b.l1InfoTree.On("GetLastInfo"). + Return(lastL1Info, nil). + Once() + b.bridgeL1.On("GetRootByLER", ctx, lastL1Info.MainnetExitRoot). + Return(&tree.Root{}, fooErr). + Once() + }, + depositCount: 11, + expectedIndex: 0, + expectedErr: fooErr, + }, + { + description: "not included yet", + setupMocks: func() { + b.l1InfoTree.On("GetLastInfo"). + Return(lastL1Info, nil). + Once() + b.bridgeL1.On("GetRootByLER", ctx, lastL1Info.MainnetExitRoot). + Return(&tree.Root{Index: 10}, nil). + Once() + }, + depositCount: 11, + expectedIndex: 0, + expectedErr: ErrNotOnL1Info, + }, + { + description: "error on GetFirstInfo", + setupMocks: func() { + b.l1InfoTree.On("GetLastInfo"). + Return(lastL1Info, nil). + Once() + b.bridgeL1.On("GetRootByLER", ctx, lastL1Info.MainnetExitRoot). + Return(&tree.Root{Index: 13}, nil). + Once() + b.l1InfoTree.On("GetFirstInfo"). + Return(nil, fooErr). + Once() + }, + depositCount: 11, + expectedIndex: 0, + expectedErr: fooErr, + }, + { + description: "error on GetFirstInfoAfterBlock", + setupMocks: func() { + b.l1InfoTree.On("GetLastInfo"). + Return(lastL1Info, nil). + Once() + b.bridgeL1.On("GetRootByLER", ctx, lastL1Info.MainnetExitRoot). + Return(&tree.Root{Index: 13}, nil). + Once() + b.l1InfoTree.On("GetFirstInfo"). + Return(firstL1Info, nil). + Once() + b.l1InfoTree.On("GetFirstInfoAfterBlock", mock.Anything). + Return(nil, fooErr). + Once() + }, + depositCount: 11, + expectedIndex: 0, + expectedErr: fooErr, + }, + { + description: "error on GetRootByLER (inside binnary search)", + setupMocks: func() { + b.l1InfoTree.On("GetLastInfo"). + Return(lastL1Info, nil). + Once() + b.bridgeL1.On("GetRootByLER", ctx, lastL1Info.MainnetExitRoot). + Return(&tree.Root{Index: 13}, nil). + Once() + b.l1InfoTree.On("GetFirstInfo"). + Return(firstL1Info, nil). 
+ Once() + b.l1InfoTree.On("GetFirstInfoAfterBlock", mock.Anything). + Return(firstL1Info, nil). + Once() + b.bridgeL1.On("GetRootByLER", ctx, mock.Anything). + Return(&tree.Root{}, fooErr). + Once() + }, + depositCount: 11, + expectedIndex: 0, + expectedErr: fooErr, + }, + { + description: "happy path 1", + setupMocks: mockHappyPath, + depositCount: 10, + expectedIndex: 10, + expectedErr: nil, + }, + { + description: "happy path 2", + setupMocks: mockHappyPath, + depositCount: 11, + expectedIndex: 11, + expectedErr: nil, + }, + { + description: "happy path 3", + setupMocks: mockHappyPath, + depositCount: 333, + expectedIndex: 333, + expectedErr: nil, + }, + { + description: "happy path 4", + setupMocks: mockHappyPath, + depositCount: 420, + expectedIndex: 420, + expectedErr: nil, + }, + { + description: "happy path 5", + setupMocks: mockHappyPath, + depositCount: 69, + expectedIndex: 69, + expectedErr: nil, + }, + } + + for _, tc := range testCases { + log.Debugf("running test case: %s", tc.description) + tc.setupMocks() + actualIndex, err := b.bridge.getFirstL1InfoTreeIndexForL1Bridge(ctx, tc.depositCount) + require.Equal(t, tc.expectedErr, err) + require.Equal(t, tc.expectedIndex, actualIndex) + } +} + +func TestGetFirstL1InfoTreeIndexForL2Bridge(t *testing.T) { + type testCase struct { + description string + setupMocks func() + depositCount uint32 + expectedIndex uint32 + expectedErr error + } + ctx := context.Background() + b := newBridgeWithMocks(t) + fooErr := errors.New("foo") + firstVerified := &l1infotreesync.VerifyBatches{ + BlockNumber: 10, + ExitRoot: common.HexToHash("alfa"), + } + lastVerified := &l1infotreesync.VerifyBatches{ + BlockNumber: 1000, + ExitRoot: common.HexToHash("alfa"), + } + mockHappyPath := func() { + // to make this work, assume that block number == l1 info tree index == deposit count + b.l1InfoTree.On("GetLastVerifiedBatches", uint32(1)). + Return(lastVerified, nil). + Once() + b.l1InfoTree.On("GetFirstVerifiedBatches", uint32(1)). 
+ Return(firstVerified, nil). + Once() + verifiedAfterBlock := &l1infotreesync.VerifyBatches{} + b.l1InfoTree.On("GetFirstVerifiedBatchesAfterBlock", uint32(1), mock.Anything). + Run(func(args mock.Arguments) { + blockNum, ok := args.Get(1).(uint64) + require.True(t, ok) + verifiedAfterBlock.BlockNumber = blockNum + verifiedAfterBlock.ExitRoot = common.BytesToHash(cdkCommon.Uint32ToBytes(uint32(blockNum))) + verifiedAfterBlock.RollupExitRoot = common.BytesToHash(cdkCommon.Uint32ToBytes(uint32(blockNum))) + }). + Return(verifiedAfterBlock, nil) + rootByLER := &tree.Root{} + b.bridgeL2.On("GetRootByLER", ctx, mock.Anything). + Run(func(args mock.Arguments) { + ler, ok := args.Get(1).(common.Hash) + require.True(t, ok) + index := cdkCommon.BytesToUint32(ler.Bytes()[28:]) // hash is 32 bytes, uint32 is just 4 + if ler == common.HexToHash("alfa") { + index = uint32(lastVerified.BlockNumber) + } + rootByLER.Index = index + }). + Return(rootByLER, nil) + info := &l1infotreesync.L1InfoTreeLeaf{} + b.l1InfoTree.On("GetFirstL1InfoWithRollupExitRoot", mock.Anything). + Run(func(args mock.Arguments) { + exitRoot, ok := args.Get(0).(common.Hash) + require.True(t, ok) + index := cdkCommon.BytesToUint32(exitRoot.Bytes()[28:]) // hash is 32 bytes, uint32 is just 4 + info.L1InfoTreeIndex = index + }). + Return(info, nil). + Once() + } + testCases := []testCase{ + { + description: "error on GetLastVerified", + setupMocks: func() { + b.l1InfoTree.On("GetLastVerifiedBatches", uint32(1)). + Return(nil, fooErr). + Once() + }, + depositCount: 11, + expectedIndex: 0, + expectedErr: fooErr, + }, + { + description: "error on first GetRootByLER", + setupMocks: func() { + b.l1InfoTree.On("GetLastVerifiedBatches", uint32(1)). + Return(lastVerified, nil). + Once() + b.bridgeL2.On("GetRootByLER", ctx, lastVerified.ExitRoot). + Return(&tree.Root{}, fooErr). 
+ Once() + }, + depositCount: 11, + expectedIndex: 0, + expectedErr: fooErr, + }, + { + description: "not included yet", + setupMocks: func() { + b.l1InfoTree.On("GetLastVerifiedBatches", uint32(1)). + Return(lastVerified, nil). + Once() + b.bridgeL2.On("GetRootByLER", ctx, lastVerified.ExitRoot). + Return(&tree.Root{Index: 10}, nil). + Once() + }, + depositCount: 11, + expectedIndex: 0, + expectedErr: ErrNotOnL1Info, + }, + { + description: "error on GetFirstVerified", + setupMocks: func() { + b.l1InfoTree.On("GetLastVerifiedBatches", uint32(1)). + Return(lastVerified, nil). + Once() + b.bridgeL2.On("GetRootByLER", ctx, lastVerified.ExitRoot). + Return(&tree.Root{Index: 13}, nil). + Once() + b.l1InfoTree.On("GetFirstVerifiedBatches", uint32(1)). + Return(nil, fooErr). + Once() + }, + depositCount: 11, + expectedIndex: 0, + expectedErr: fooErr, + }, + { + description: "error on GetFirstVerifiedBatchesAfterBlock", + setupMocks: func() { + b.l1InfoTree.On("GetLastVerifiedBatches", uint32(1)). + Return(lastVerified, nil). + Once() + b.bridgeL2.On("GetRootByLER", ctx, lastVerified.ExitRoot). + Return(&tree.Root{Index: 13}, nil). + Once() + b.l1InfoTree.On("GetFirstVerifiedBatches", uint32(1)). + Return(firstVerified, nil). + Once() + b.l1InfoTree.On("GetFirstVerifiedBatchesAfterBlock", uint32(1), mock.Anything). + Return(nil, fooErr). + Once() + }, + depositCount: 11, + expectedIndex: 0, + expectedErr: fooErr, + }, + { + description: "error on GetRootByLER (inside binnary search)", + setupMocks: func() { + b.l1InfoTree.On("GetLastVerifiedBatches", uint32(1)). + Return(lastVerified, nil). + Once() + b.bridgeL2.On("GetRootByLER", ctx, lastVerified.ExitRoot). + Return(&tree.Root{Index: 13}, nil). + Once() + b.l1InfoTree.On("GetFirstVerifiedBatches", uint32(1)). + Return(firstVerified, nil). + Once() + b.l1InfoTree.On("GetFirstVerifiedBatchesAfterBlock", uint32(1), mock.Anything). + Return(firstVerified, nil). + Once() + b.bridgeL2.On("GetRootByLER", ctx, mock.Anything). 
+ Return(&tree.Root{}, fooErr). + Once() + }, + depositCount: 11, + expectedIndex: 0, + expectedErr: fooErr, + }, + { + description: "happy path 1", + setupMocks: mockHappyPath, + depositCount: 10, + expectedIndex: 10, + expectedErr: nil, + }, + { + description: "happy path 2", + setupMocks: mockHappyPath, + depositCount: 11, + expectedIndex: 11, + expectedErr: nil, + }, + { + description: "happy path 3", + setupMocks: mockHappyPath, + depositCount: 333, + expectedIndex: 333, + expectedErr: nil, + }, + { + description: "happy path 4", + setupMocks: mockHappyPath, + depositCount: 420, + expectedIndex: 420, + expectedErr: nil, + }, + { + description: "happy path 5", + setupMocks: mockHappyPath, + depositCount: 69, + expectedIndex: 69, + expectedErr: nil, + }, + } + + for _, tc := range testCases { + log.Debugf("running test case: %s", tc.description) + tc.setupMocks() + actualIndex, err := b.bridge.getFirstL1InfoTreeIndexForL2Bridge(ctx, tc.depositCount) + require.Equal(t, tc.expectedErr, err) + require.Equal(t, tc.expectedIndex, actualIndex) + } +} + +type bridgeWithMocks struct { + bridge *BridgeEndpoints + sponsor *mocks.ClaimSponsorer + l1InfoTree *mocks.L1InfoTreer + injectedGERs *mocks.LastGERer + bridgeL1 *mocks.Bridger + bridgeL2 *mocks.Bridger +} + +func newBridgeWithMocks(t *testing.T) bridgeWithMocks { + t.Helper() + b := bridgeWithMocks{ + sponsor: mocks.NewClaimSponsorer(t), + l1InfoTree: mocks.NewL1InfoTreer(t), + injectedGERs: mocks.NewLastGERer(t), + bridgeL1: mocks.NewBridger(t), + bridgeL2: mocks.NewBridger(t), + } + logger := log.WithFields("module", "bridgerpc") + b.bridge = NewBridgeEndpoints( + logger, 0, 0, 2, b.sponsor, b.l1InfoTree, b.injectedGERs, b.bridgeL1, b.bridgeL2, + ) + return b +} diff --git a/rpc/bridge_client.go b/rpc/client/bridge.go similarity index 95% rename from rpc/bridge_client.go rename to rpc/client/bridge.go index 04d57700..f67907f2 100644 --- a/rpc/bridge_client.go +++ b/rpc/client/bridge.go @@ -8,12 +8,13 @@ import ( 
"github.com/0xPolygon/cdk-rpc/rpc" "github.com/0xPolygon/cdk/claimsponsor" "github.com/0xPolygon/cdk/l1infotreesync" + "github.com/0xPolygon/cdk/rpc/types" ) type BridgeClientInterface interface { L1InfoTreeIndexForBridge(networkID uint32, depositCount uint32) (uint32, error) InjectedInfoAfterIndex(networkID uint32, l1InfoTreeIndex uint32) (*l1infotreesync.L1InfoTreeLeaf, error) - ClaimProof(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32) (*ClaimProof, error) + ClaimProof(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32) (*types.ClaimProof, error) SponsorClaim(claim claimsponsor.Claim) error GetSponsoredClaimStatus(globalIndex *big.Int) (claimsponsor.ClaimStatus, error) } @@ -53,7 +54,7 @@ func (c *Client) InjectedInfoAfterIndex( // ClaimProof returns the proofs needed to claim a bridge. NetworkID and depositCount refere to the bridge origin // while globalExitRoot should be already injected on the destination network. // This call needs to be done to a client of the same network were the bridge tx was sent -func (c *Client) ClaimProof(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32) (*ClaimProof, error) { +func (c *Client) ClaimProof(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32) (*types.ClaimProof, error) { response, err := rpc.JSONRPCCall(c.url, "bridge_claimProof", networkID, depositCount, l1InfoTreeIndex) if err != nil { return nil, err @@ -61,7 +62,7 @@ func (c *Client) ClaimProof(networkID uint32, depositCount uint32, l1InfoTreeInd if response.Error != nil { return nil, fmt.Errorf("%v %v", response.Error.Code, response.Error.Message) } - var result ClaimProof + var result types.ClaimProof return &result, json.Unmarshal(response.Result, &result) } diff --git a/rpc/client.go b/rpc/client/client.go similarity index 100% rename from rpc/client.go rename to rpc/client/client.go diff --git a/rpc/mocks/bridge_client_interface.go b/rpc/mocks/bridge_client_interface.go new file mode 100644 index 
00000000..4c5200e4 --- /dev/null +++ b/rpc/mocks/bridge_client_interface.go @@ -0,0 +1,319 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + big "math/big" + + claimsponsor "github.com/0xPolygon/cdk/claimsponsor" + l1infotreesync "github.com/0xPolygon/cdk/l1infotreesync" + + mock "github.com/stretchr/testify/mock" + + types "github.com/0xPolygon/cdk/rpc/types" +) + +// BridgeClientInterface is an autogenerated mock type for the BridgeClientInterface type +type BridgeClientInterface struct { + mock.Mock +} + +type BridgeClientInterface_Expecter struct { + mock *mock.Mock +} + +func (_m *BridgeClientInterface) EXPECT() *BridgeClientInterface_Expecter { + return &BridgeClientInterface_Expecter{mock: &_m.Mock} +} + +// ClaimProof provides a mock function with given fields: networkID, depositCount, l1InfoTreeIndex +func (_m *BridgeClientInterface) ClaimProof(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32) (*types.ClaimProof, error) { + ret := _m.Called(networkID, depositCount, l1InfoTreeIndex) + + if len(ret) == 0 { + panic("no return value specified for ClaimProof") + } + + var r0 *types.ClaimProof + var r1 error + if rf, ok := ret.Get(0).(func(uint32, uint32, uint32) (*types.ClaimProof, error)); ok { + return rf(networkID, depositCount, l1InfoTreeIndex) + } + if rf, ok := ret.Get(0).(func(uint32, uint32, uint32) *types.ClaimProof); ok { + r0 = rf(networkID, depositCount, l1InfoTreeIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ClaimProof) + } + } + + if rf, ok := ret.Get(1).(func(uint32, uint32, uint32) error); ok { + r1 = rf(networkID, depositCount, l1InfoTreeIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BridgeClientInterface_ClaimProof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ClaimProof' +type BridgeClientInterface_ClaimProof_Call struct { + *mock.Call +} + +// ClaimProof is a helper method to define mock.On call +// - networkID 
uint32 +// - depositCount uint32 +// - l1InfoTreeIndex uint32 +func (_e *BridgeClientInterface_Expecter) ClaimProof(networkID interface{}, depositCount interface{}, l1InfoTreeIndex interface{}) *BridgeClientInterface_ClaimProof_Call { + return &BridgeClientInterface_ClaimProof_Call{Call: _e.mock.On("ClaimProof", networkID, depositCount, l1InfoTreeIndex)} +} + +func (_c *BridgeClientInterface_ClaimProof_Call) Run(run func(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32)) *BridgeClientInterface_ClaimProof_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint32), args[1].(uint32), args[2].(uint32)) + }) + return _c +} + +func (_c *BridgeClientInterface_ClaimProof_Call) Return(_a0 *types.ClaimProof, _a1 error) *BridgeClientInterface_ClaimProof_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *BridgeClientInterface_ClaimProof_Call) RunAndReturn(run func(uint32, uint32, uint32) (*types.ClaimProof, error)) *BridgeClientInterface_ClaimProof_Call { + _c.Call.Return(run) + return _c +} + +// GetSponsoredClaimStatus provides a mock function with given fields: globalIndex +func (_m *BridgeClientInterface) GetSponsoredClaimStatus(globalIndex *big.Int) (claimsponsor.ClaimStatus, error) { + ret := _m.Called(globalIndex) + + if len(ret) == 0 { + panic("no return value specified for GetSponsoredClaimStatus") + } + + var r0 claimsponsor.ClaimStatus + var r1 error + if rf, ok := ret.Get(0).(func(*big.Int) (claimsponsor.ClaimStatus, error)); ok { + return rf(globalIndex) + } + if rf, ok := ret.Get(0).(func(*big.Int) claimsponsor.ClaimStatus); ok { + r0 = rf(globalIndex) + } else { + r0 = ret.Get(0).(claimsponsor.ClaimStatus) + } + + if rf, ok := ret.Get(1).(func(*big.Int) error); ok { + r1 = rf(globalIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BridgeClientInterface_GetSponsoredClaimStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSponsoredClaimStatus' +type 
BridgeClientInterface_GetSponsoredClaimStatus_Call struct { + *mock.Call +} + +// GetSponsoredClaimStatus is a helper method to define mock.On call +// - globalIndex *big.Int +func (_e *BridgeClientInterface_Expecter) GetSponsoredClaimStatus(globalIndex interface{}) *BridgeClientInterface_GetSponsoredClaimStatus_Call { + return &BridgeClientInterface_GetSponsoredClaimStatus_Call{Call: _e.mock.On("GetSponsoredClaimStatus", globalIndex)} +} + +func (_c *BridgeClientInterface_GetSponsoredClaimStatus_Call) Run(run func(globalIndex *big.Int)) *BridgeClientInterface_GetSponsoredClaimStatus_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*big.Int)) + }) + return _c +} + +func (_c *BridgeClientInterface_GetSponsoredClaimStatus_Call) Return(_a0 claimsponsor.ClaimStatus, _a1 error) *BridgeClientInterface_GetSponsoredClaimStatus_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *BridgeClientInterface_GetSponsoredClaimStatus_Call) RunAndReturn(run func(*big.Int) (claimsponsor.ClaimStatus, error)) *BridgeClientInterface_GetSponsoredClaimStatus_Call { + _c.Call.Return(run) + return _c +} + +// InjectedInfoAfterIndex provides a mock function with given fields: networkID, l1InfoTreeIndex +func (_m *BridgeClientInterface) InjectedInfoAfterIndex(networkID uint32, l1InfoTreeIndex uint32) (*l1infotreesync.L1InfoTreeLeaf, error) { + ret := _m.Called(networkID, l1InfoTreeIndex) + + if len(ret) == 0 { + panic("no return value specified for InjectedInfoAfterIndex") + } + + var r0 *l1infotreesync.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func(uint32, uint32) (*l1infotreesync.L1InfoTreeLeaf, error)); ok { + return rf(networkID, l1InfoTreeIndex) + } + if rf, ok := ret.Get(0).(func(uint32, uint32) *l1infotreesync.L1InfoTreeLeaf); ok { + r0 = rf(networkID, l1InfoTreeIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func(uint32, uint32) error); ok { + r1 = rf(networkID, 
l1InfoTreeIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BridgeClientInterface_InjectedInfoAfterIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InjectedInfoAfterIndex' +type BridgeClientInterface_InjectedInfoAfterIndex_Call struct { + *mock.Call +} + +// InjectedInfoAfterIndex is a helper method to define mock.On call +// - networkID uint32 +// - l1InfoTreeIndex uint32 +func (_e *BridgeClientInterface_Expecter) InjectedInfoAfterIndex(networkID interface{}, l1InfoTreeIndex interface{}) *BridgeClientInterface_InjectedInfoAfterIndex_Call { + return &BridgeClientInterface_InjectedInfoAfterIndex_Call{Call: _e.mock.On("InjectedInfoAfterIndex", networkID, l1InfoTreeIndex)} +} + +func (_c *BridgeClientInterface_InjectedInfoAfterIndex_Call) Run(run func(networkID uint32, l1InfoTreeIndex uint32)) *BridgeClientInterface_InjectedInfoAfterIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint32), args[1].(uint32)) + }) + return _c +} + +func (_c *BridgeClientInterface_InjectedInfoAfterIndex_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *BridgeClientInterface_InjectedInfoAfterIndex_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *BridgeClientInterface_InjectedInfoAfterIndex_Call) RunAndReturn(run func(uint32, uint32) (*l1infotreesync.L1InfoTreeLeaf, error)) *BridgeClientInterface_InjectedInfoAfterIndex_Call { + _c.Call.Return(run) + return _c +} + +// L1InfoTreeIndexForBridge provides a mock function with given fields: networkID, depositCount +func (_m *BridgeClientInterface) L1InfoTreeIndexForBridge(networkID uint32, depositCount uint32) (uint32, error) { + ret := _m.Called(networkID, depositCount) + + if len(ret) == 0 { + panic("no return value specified for L1InfoTreeIndexForBridge") + } + + var r0 uint32 + var r1 error + if rf, ok := ret.Get(0).(func(uint32, uint32) (uint32, error)); ok { + return rf(networkID, depositCount) + } + if rf, ok := 
ret.Get(0).(func(uint32, uint32) uint32); ok { + r0 = rf(networkID, depositCount) + } else { + r0 = ret.Get(0).(uint32) + } + + if rf, ok := ret.Get(1).(func(uint32, uint32) error); ok { + r1 = rf(networkID, depositCount) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BridgeClientInterface_L1InfoTreeIndexForBridge_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'L1InfoTreeIndexForBridge' +type BridgeClientInterface_L1InfoTreeIndexForBridge_Call struct { + *mock.Call +} + +// L1InfoTreeIndexForBridge is a helper method to define mock.On call +// - networkID uint32 +// - depositCount uint32 +func (_e *BridgeClientInterface_Expecter) L1InfoTreeIndexForBridge(networkID interface{}, depositCount interface{}) *BridgeClientInterface_L1InfoTreeIndexForBridge_Call { + return &BridgeClientInterface_L1InfoTreeIndexForBridge_Call{Call: _e.mock.On("L1InfoTreeIndexForBridge", networkID, depositCount)} +} + +func (_c *BridgeClientInterface_L1InfoTreeIndexForBridge_Call) Run(run func(networkID uint32, depositCount uint32)) *BridgeClientInterface_L1InfoTreeIndexForBridge_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint32), args[1].(uint32)) + }) + return _c +} + +func (_c *BridgeClientInterface_L1InfoTreeIndexForBridge_Call) Return(_a0 uint32, _a1 error) *BridgeClientInterface_L1InfoTreeIndexForBridge_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *BridgeClientInterface_L1InfoTreeIndexForBridge_Call) RunAndReturn(run func(uint32, uint32) (uint32, error)) *BridgeClientInterface_L1InfoTreeIndexForBridge_Call { + _c.Call.Return(run) + return _c +} + +// SponsorClaim provides a mock function with given fields: claim +func (_m *BridgeClientInterface) SponsorClaim(claim claimsponsor.Claim) error { + ret := _m.Called(claim) + + if len(ret) == 0 { + panic("no return value specified for SponsorClaim") + } + + var r0 error + if rf, ok := ret.Get(0).(func(claimsponsor.Claim) error); ok { + r0 = 
rf(claim) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// BridgeClientInterface_SponsorClaim_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SponsorClaim' +type BridgeClientInterface_SponsorClaim_Call struct { + *mock.Call +} + +// SponsorClaim is a helper method to define mock.On call +// - claim claimsponsor.Claim +func (_e *BridgeClientInterface_Expecter) SponsorClaim(claim interface{}) *BridgeClientInterface_SponsorClaim_Call { + return &BridgeClientInterface_SponsorClaim_Call{Call: _e.mock.On("SponsorClaim", claim)} +} + +func (_c *BridgeClientInterface_SponsorClaim_Call) Run(run func(claim claimsponsor.Claim)) *BridgeClientInterface_SponsorClaim_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(claimsponsor.Claim)) + }) + return _c +} + +func (_c *BridgeClientInterface_SponsorClaim_Call) Return(_a0 error) *BridgeClientInterface_SponsorClaim_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *BridgeClientInterface_SponsorClaim_Call) RunAndReturn(run func(claimsponsor.Claim) error) *BridgeClientInterface_SponsorClaim_Call { + _c.Call.Return(run) + return _c +} + +// NewBridgeClientInterface creates a new instance of BridgeClientInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBridgeClientInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *BridgeClientInterface { + mock := &BridgeClientInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/mocks/bridger.go b/rpc/mocks/bridger.go new file mode 100644 index 00000000..d0344c29 --- /dev/null +++ b/rpc/mocks/bridger.go @@ -0,0 +1,159 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" + + types "github.com/0xPolygon/cdk/tree/types" +) + +// Bridger is an autogenerated mock type for the Bridger type +type Bridger struct { + mock.Mock +} + +type Bridger_Expecter struct { + mock *mock.Mock +} + +func (_m *Bridger) EXPECT() *Bridger_Expecter { + return &Bridger_Expecter{mock: &_m.Mock} +} + +// GetProof provides a mock function with given fields: ctx, depositCount, localExitRoot +func (_m *Bridger) GetProof(ctx context.Context, depositCount uint32, localExitRoot common.Hash) (types.Proof, error) { + ret := _m.Called(ctx, depositCount, localExitRoot) + + if len(ret) == 0 { + panic("no return value specified for GetProof") + } + + var r0 types.Proof + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) (types.Proof, error)); ok { + return rf(ctx, depositCount, localExitRoot) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) types.Proof); ok { + r0 = rf(ctx, depositCount, localExitRoot) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.Proof) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32, common.Hash) error); ok { + r1 = rf(ctx, depositCount, localExitRoot) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Bridger_GetProof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetProof' +type Bridger_GetProof_Call struct { + *mock.Call +} + +// GetProof is a helper method to define mock.On call +// - ctx context.Context +// - depositCount uint32 +// - localExitRoot common.Hash +func (_e *Bridger_Expecter) GetProof(ctx interface{}, depositCount interface{}, localExitRoot interface{}) *Bridger_GetProof_Call { + return &Bridger_GetProof_Call{Call: _e.mock.On("GetProof", ctx, depositCount, localExitRoot)} +} + +func (_c *Bridger_GetProof_Call) Run(run func(ctx context.Context, 
depositCount uint32, localExitRoot common.Hash)) *Bridger_GetProof_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32), args[2].(common.Hash)) + }) + return _c +} + +func (_c *Bridger_GetProof_Call) Return(_a0 types.Proof, _a1 error) *Bridger_GetProof_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Bridger_GetProof_Call) RunAndReturn(run func(context.Context, uint32, common.Hash) (types.Proof, error)) *Bridger_GetProof_Call { + _c.Call.Return(run) + return _c +} + +// GetRootByLER provides a mock function with given fields: ctx, ler +func (_m *Bridger) GetRootByLER(ctx context.Context, ler common.Hash) (*types.Root, error) { + ret := _m.Called(ctx, ler) + + if len(ret) == 0 { + panic("no return value specified for GetRootByLER") + } + + var r0 *types.Root + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Root, error)); ok { + return rf(ctx, ler) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Root); ok { + r0 = rf(ctx, ler) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Root) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, ler) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Bridger_GetRootByLER_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRootByLER' +type Bridger_GetRootByLER_Call struct { + *mock.Call +} + +// GetRootByLER is a helper method to define mock.On call +// - ctx context.Context +// - ler common.Hash +func (_e *Bridger_Expecter) GetRootByLER(ctx interface{}, ler interface{}) *Bridger_GetRootByLER_Call { + return &Bridger_GetRootByLER_Call{Call: _e.mock.On("GetRootByLER", ctx, ler)} +} + +func (_c *Bridger_GetRootByLER_Call) Run(run func(ctx context.Context, ler common.Hash)) *Bridger_GetRootByLER_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash)) + }) 
+ return _c +} + +func (_c *Bridger_GetRootByLER_Call) Return(_a0 *types.Root, _a1 error) *Bridger_GetRootByLER_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Bridger_GetRootByLER_Call) RunAndReturn(run func(context.Context, common.Hash) (*types.Root, error)) *Bridger_GetRootByLER_Call { + _c.Call.Return(run) + return _c +} + +// NewBridger creates a new instance of Bridger. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewBridger(t interface { + mock.TestingT + Cleanup(func()) +}) *Bridger { + mock := &Bridger{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/mocks/claim_sponsorer.go b/rpc/mocks/claim_sponsorer.go new file mode 100644 index 00000000..59530955 --- /dev/null +++ b/rpc/mocks/claim_sponsorer.go @@ -0,0 +1,145 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + big "math/big" + + claimsponsor "github.com/0xPolygon/cdk/claimsponsor" + + mock "github.com/stretchr/testify/mock" +) + +// ClaimSponsorer is an autogenerated mock type for the ClaimSponsorer type +type ClaimSponsorer struct { + mock.Mock +} + +type ClaimSponsorer_Expecter struct { + mock *mock.Mock +} + +func (_m *ClaimSponsorer) EXPECT() *ClaimSponsorer_Expecter { + return &ClaimSponsorer_Expecter{mock: &_m.Mock} +} + +// AddClaimToQueue provides a mock function with given fields: ctx, claim +func (_m *ClaimSponsorer) AddClaimToQueue(ctx context.Context, claim *claimsponsor.Claim) error { + ret := _m.Called(ctx, claim) + + if len(ret) == 0 { + panic("no return value specified for AddClaimToQueue") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *claimsponsor.Claim) error); ok { + r0 = rf(ctx, claim) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ClaimSponsorer_AddClaimToQueue_Call is a *mock.Call that shadows Run/Return 
methods with type explicit version for method 'AddClaimToQueue' +type ClaimSponsorer_AddClaimToQueue_Call struct { + *mock.Call +} + +// AddClaimToQueue is a helper method to define mock.On call +// - ctx context.Context +// - claim *claimsponsor.Claim +func (_e *ClaimSponsorer_Expecter) AddClaimToQueue(ctx interface{}, claim interface{}) *ClaimSponsorer_AddClaimToQueue_Call { + return &ClaimSponsorer_AddClaimToQueue_Call{Call: _e.mock.On("AddClaimToQueue", ctx, claim)} +} + +func (_c *ClaimSponsorer_AddClaimToQueue_Call) Run(run func(ctx context.Context, claim *claimsponsor.Claim)) *ClaimSponsorer_AddClaimToQueue_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*claimsponsor.Claim)) + }) + return _c +} + +func (_c *ClaimSponsorer_AddClaimToQueue_Call) Return(_a0 error) *ClaimSponsorer_AddClaimToQueue_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ClaimSponsorer_AddClaimToQueue_Call) RunAndReturn(run func(context.Context, *claimsponsor.Claim) error) *ClaimSponsorer_AddClaimToQueue_Call { + _c.Call.Return(run) + return _c +} + +// GetClaim provides a mock function with given fields: ctx, globalIndex +func (_m *ClaimSponsorer) GetClaim(ctx context.Context, globalIndex *big.Int) (*claimsponsor.Claim, error) { + ret := _m.Called(ctx, globalIndex) + + if len(ret) == 0 { + panic("no return value specified for GetClaim") + } + + var r0 *claimsponsor.Claim + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*claimsponsor.Claim, error)); ok { + return rf(ctx, globalIndex) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *claimsponsor.Claim); ok { + r0 = rf(ctx, globalIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*claimsponsor.Claim) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, globalIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClaimSponsorer_GetClaim_Call is a *mock.Call that shadows Run/Return 
methods with type explicit version for method 'GetClaim' +type ClaimSponsorer_GetClaim_Call struct { + *mock.Call +} + +// GetClaim is a helper method to define mock.On call +// - ctx context.Context +// - globalIndex *big.Int +func (_e *ClaimSponsorer_Expecter) GetClaim(ctx interface{}, globalIndex interface{}) *ClaimSponsorer_GetClaim_Call { + return &ClaimSponsorer_GetClaim_Call{Call: _e.mock.On("GetClaim", ctx, globalIndex)} +} + +func (_c *ClaimSponsorer_GetClaim_Call) Run(run func(ctx context.Context, globalIndex *big.Int)) *ClaimSponsorer_GetClaim_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*big.Int)) + }) + return _c +} + +func (_c *ClaimSponsorer_GetClaim_Call) Return(_a0 *claimsponsor.Claim, _a1 error) *ClaimSponsorer_GetClaim_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClaimSponsorer_GetClaim_Call) RunAndReturn(run func(context.Context, *big.Int) (*claimsponsor.Claim, error)) *ClaimSponsorer_GetClaim_Call { + _c.Call.Return(run) + return _c +} + +// NewClaimSponsorer creates a new instance of ClaimSponsorer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewClaimSponsorer(t interface { + mock.TestingT + Cleanup(func()) +}) *ClaimSponsorer { + mock := &ClaimSponsorer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/mocks/client_factory_interface.go b/rpc/mocks/client_factory_interface.go new file mode 100644 index 00000000..aca7aed0 --- /dev/null +++ b/rpc/mocks/client_factory_interface.go @@ -0,0 +1,83 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + rpc "github.com/0xPolygon/cdk/rpc/client" + mock "github.com/stretchr/testify/mock" +) + +// ClientFactoryInterface is an autogenerated mock type for the ClientFactoryInterface type +type ClientFactoryInterface struct { + mock.Mock +} + +type ClientFactoryInterface_Expecter struct { + mock *mock.Mock +} + +func (_m *ClientFactoryInterface) EXPECT() *ClientFactoryInterface_Expecter { + return &ClientFactoryInterface_Expecter{mock: &_m.Mock} +} + +// NewClient provides a mock function with given fields: url +func (_m *ClientFactoryInterface) NewClient(url string) rpc.ClientInterface { + ret := _m.Called(url) + + if len(ret) == 0 { + panic("no return value specified for NewClient") + } + + var r0 rpc.ClientInterface + if rf, ok := ret.Get(0).(func(string) rpc.ClientInterface); ok { + r0 = rf(url) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(rpc.ClientInterface) + } + } + + return r0 +} + +// ClientFactoryInterface_NewClient_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewClient' +type ClientFactoryInterface_NewClient_Call struct { + *mock.Call +} + +// NewClient is a helper method to define mock.On call +// - url string +func (_e *ClientFactoryInterface_Expecter) NewClient(url interface{}) *ClientFactoryInterface_NewClient_Call { + return &ClientFactoryInterface_NewClient_Call{Call: _e.mock.On("NewClient", url)} +} + +func (_c *ClientFactoryInterface_NewClient_Call) Run(run func(url string)) *ClientFactoryInterface_NewClient_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *ClientFactoryInterface_NewClient_Call) Return(_a0 rpc.ClientInterface) *ClientFactoryInterface_NewClient_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ClientFactoryInterface_NewClient_Call) RunAndReturn(run func(string) rpc.ClientInterface) *ClientFactoryInterface_NewClient_Call { + _c.Call.Return(run) + return _c +} + +// 
NewClientFactoryInterface creates a new instance of ClientFactoryInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewClientFactoryInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *ClientFactoryInterface { + mock := &ClientFactoryInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/mocks/client_interface.go b/rpc/mocks/client_interface.go new file mode 100644 index 00000000..28b87775 --- /dev/null +++ b/rpc/mocks/client_interface.go @@ -0,0 +1,319 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + big "math/big" + + claimsponsor "github.com/0xPolygon/cdk/claimsponsor" + l1infotreesync "github.com/0xPolygon/cdk/l1infotreesync" + + mock "github.com/stretchr/testify/mock" + + types "github.com/0xPolygon/cdk/rpc/types" +) + +// ClientInterface is an autogenerated mock type for the ClientInterface type +type ClientInterface struct { + mock.Mock +} + +type ClientInterface_Expecter struct { + mock *mock.Mock +} + +func (_m *ClientInterface) EXPECT() *ClientInterface_Expecter { + return &ClientInterface_Expecter{mock: &_m.Mock} +} + +// ClaimProof provides a mock function with given fields: networkID, depositCount, l1InfoTreeIndex +func (_m *ClientInterface) ClaimProof(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32) (*types.ClaimProof, error) { + ret := _m.Called(networkID, depositCount, l1InfoTreeIndex) + + if len(ret) == 0 { + panic("no return value specified for ClaimProof") + } + + var r0 *types.ClaimProof + var r1 error + if rf, ok := ret.Get(0).(func(uint32, uint32, uint32) (*types.ClaimProof, error)); ok { + return rf(networkID, depositCount, l1InfoTreeIndex) + } + if rf, ok := ret.Get(0).(func(uint32, uint32, uint32) *types.ClaimProof); ok { + r0 = rf(networkID, depositCount, l1InfoTreeIndex) + } else { + if 
ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ClaimProof) + } + } + + if rf, ok := ret.Get(1).(func(uint32, uint32, uint32) error); ok { + r1 = rf(networkID, depositCount, l1InfoTreeIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClientInterface_ClaimProof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ClaimProof' +type ClientInterface_ClaimProof_Call struct { + *mock.Call +} + +// ClaimProof is a helper method to define mock.On call +// - networkID uint32 +// - depositCount uint32 +// - l1InfoTreeIndex uint32 +func (_e *ClientInterface_Expecter) ClaimProof(networkID interface{}, depositCount interface{}, l1InfoTreeIndex interface{}) *ClientInterface_ClaimProof_Call { + return &ClientInterface_ClaimProof_Call{Call: _e.mock.On("ClaimProof", networkID, depositCount, l1InfoTreeIndex)} +} + +func (_c *ClientInterface_ClaimProof_Call) Run(run func(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32)) *ClientInterface_ClaimProof_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint32), args[1].(uint32), args[2].(uint32)) + }) + return _c +} + +func (_c *ClientInterface_ClaimProof_Call) Return(_a0 *types.ClaimProof, _a1 error) *ClientInterface_ClaimProof_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClientInterface_ClaimProof_Call) RunAndReturn(run func(uint32, uint32, uint32) (*types.ClaimProof, error)) *ClientInterface_ClaimProof_Call { + _c.Call.Return(run) + return _c +} + +// GetSponsoredClaimStatus provides a mock function with given fields: globalIndex +func (_m *ClientInterface) GetSponsoredClaimStatus(globalIndex *big.Int) (claimsponsor.ClaimStatus, error) { + ret := _m.Called(globalIndex) + + if len(ret) == 0 { + panic("no return value specified for GetSponsoredClaimStatus") + } + + var r0 claimsponsor.ClaimStatus + var r1 error + if rf, ok := ret.Get(0).(func(*big.Int) (claimsponsor.ClaimStatus, error)); ok { + return rf(globalIndex) + } + if rf, ok := 
ret.Get(0).(func(*big.Int) claimsponsor.ClaimStatus); ok { + r0 = rf(globalIndex) + } else { + r0 = ret.Get(0).(claimsponsor.ClaimStatus) + } + + if rf, ok := ret.Get(1).(func(*big.Int) error); ok { + r1 = rf(globalIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClientInterface_GetSponsoredClaimStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSponsoredClaimStatus' +type ClientInterface_GetSponsoredClaimStatus_Call struct { + *mock.Call +} + +// GetSponsoredClaimStatus is a helper method to define mock.On call +// - globalIndex *big.Int +func (_e *ClientInterface_Expecter) GetSponsoredClaimStatus(globalIndex interface{}) *ClientInterface_GetSponsoredClaimStatus_Call { + return &ClientInterface_GetSponsoredClaimStatus_Call{Call: _e.mock.On("GetSponsoredClaimStatus", globalIndex)} +} + +func (_c *ClientInterface_GetSponsoredClaimStatus_Call) Run(run func(globalIndex *big.Int)) *ClientInterface_GetSponsoredClaimStatus_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*big.Int)) + }) + return _c +} + +func (_c *ClientInterface_GetSponsoredClaimStatus_Call) Return(_a0 claimsponsor.ClaimStatus, _a1 error) *ClientInterface_GetSponsoredClaimStatus_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClientInterface_GetSponsoredClaimStatus_Call) RunAndReturn(run func(*big.Int) (claimsponsor.ClaimStatus, error)) *ClientInterface_GetSponsoredClaimStatus_Call { + _c.Call.Return(run) + return _c +} + +// InjectedInfoAfterIndex provides a mock function with given fields: networkID, l1InfoTreeIndex +func (_m *ClientInterface) InjectedInfoAfterIndex(networkID uint32, l1InfoTreeIndex uint32) (*l1infotreesync.L1InfoTreeLeaf, error) { + ret := _m.Called(networkID, l1InfoTreeIndex) + + if len(ret) == 0 { + panic("no return value specified for InjectedInfoAfterIndex") + } + + var r0 *l1infotreesync.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func(uint32, uint32) 
(*l1infotreesync.L1InfoTreeLeaf, error)); ok { + return rf(networkID, l1InfoTreeIndex) + } + if rf, ok := ret.Get(0).(func(uint32, uint32) *l1infotreesync.L1InfoTreeLeaf); ok { + r0 = rf(networkID, l1InfoTreeIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func(uint32, uint32) error); ok { + r1 = rf(networkID, l1InfoTreeIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClientInterface_InjectedInfoAfterIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InjectedInfoAfterIndex' +type ClientInterface_InjectedInfoAfterIndex_Call struct { + *mock.Call +} + +// InjectedInfoAfterIndex is a helper method to define mock.On call +// - networkID uint32 +// - l1InfoTreeIndex uint32 +func (_e *ClientInterface_Expecter) InjectedInfoAfterIndex(networkID interface{}, l1InfoTreeIndex interface{}) *ClientInterface_InjectedInfoAfterIndex_Call { + return &ClientInterface_InjectedInfoAfterIndex_Call{Call: _e.mock.On("InjectedInfoAfterIndex", networkID, l1InfoTreeIndex)} +} + +func (_c *ClientInterface_InjectedInfoAfterIndex_Call) Run(run func(networkID uint32, l1InfoTreeIndex uint32)) *ClientInterface_InjectedInfoAfterIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint32), args[1].(uint32)) + }) + return _c +} + +func (_c *ClientInterface_InjectedInfoAfterIndex_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *ClientInterface_InjectedInfoAfterIndex_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClientInterface_InjectedInfoAfterIndex_Call) RunAndReturn(run func(uint32, uint32) (*l1infotreesync.L1InfoTreeLeaf, error)) *ClientInterface_InjectedInfoAfterIndex_Call { + _c.Call.Return(run) + return _c +} + +// L1InfoTreeIndexForBridge provides a mock function with given fields: networkID, depositCount +func (_m *ClientInterface) L1InfoTreeIndexForBridge(networkID uint32, depositCount uint32) 
(uint32, error) { + ret := _m.Called(networkID, depositCount) + + if len(ret) == 0 { + panic("no return value specified for L1InfoTreeIndexForBridge") + } + + var r0 uint32 + var r1 error + if rf, ok := ret.Get(0).(func(uint32, uint32) (uint32, error)); ok { + return rf(networkID, depositCount) + } + if rf, ok := ret.Get(0).(func(uint32, uint32) uint32); ok { + r0 = rf(networkID, depositCount) + } else { + r0 = ret.Get(0).(uint32) + } + + if rf, ok := ret.Get(1).(func(uint32, uint32) error); ok { + r1 = rf(networkID, depositCount) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ClientInterface_L1InfoTreeIndexForBridge_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'L1InfoTreeIndexForBridge' +type ClientInterface_L1InfoTreeIndexForBridge_Call struct { + *mock.Call +} + +// L1InfoTreeIndexForBridge is a helper method to define mock.On call +// - networkID uint32 +// - depositCount uint32 +func (_e *ClientInterface_Expecter) L1InfoTreeIndexForBridge(networkID interface{}, depositCount interface{}) *ClientInterface_L1InfoTreeIndexForBridge_Call { + return &ClientInterface_L1InfoTreeIndexForBridge_Call{Call: _e.mock.On("L1InfoTreeIndexForBridge", networkID, depositCount)} +} + +func (_c *ClientInterface_L1InfoTreeIndexForBridge_Call) Run(run func(networkID uint32, depositCount uint32)) *ClientInterface_L1InfoTreeIndexForBridge_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint32), args[1].(uint32)) + }) + return _c +} + +func (_c *ClientInterface_L1InfoTreeIndexForBridge_Call) Return(_a0 uint32, _a1 error) *ClientInterface_L1InfoTreeIndexForBridge_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ClientInterface_L1InfoTreeIndexForBridge_Call) RunAndReturn(run func(uint32, uint32) (uint32, error)) *ClientInterface_L1InfoTreeIndexForBridge_Call { + _c.Call.Return(run) + return _c +} + +// SponsorClaim provides a mock function with given fields: claim +func (_m *ClientInterface) 
SponsorClaim(claim claimsponsor.Claim) error { + ret := _m.Called(claim) + + if len(ret) == 0 { + panic("no return value specified for SponsorClaim") + } + + var r0 error + if rf, ok := ret.Get(0).(func(claimsponsor.Claim) error); ok { + r0 = rf(claim) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ClientInterface_SponsorClaim_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SponsorClaim' +type ClientInterface_SponsorClaim_Call struct { + *mock.Call +} + +// SponsorClaim is a helper method to define mock.On call +// - claim claimsponsor.Claim +func (_e *ClientInterface_Expecter) SponsorClaim(claim interface{}) *ClientInterface_SponsorClaim_Call { + return &ClientInterface_SponsorClaim_Call{Call: _e.mock.On("SponsorClaim", claim)} +} + +func (_c *ClientInterface_SponsorClaim_Call) Run(run func(claim claimsponsor.Claim)) *ClientInterface_SponsorClaim_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(claimsponsor.Claim)) + }) + return _c +} + +func (_c *ClientInterface_SponsorClaim_Call) Return(_a0 error) *ClientInterface_SponsorClaim_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ClientInterface_SponsorClaim_Call) RunAndReturn(run func(claimsponsor.Claim) error) *ClientInterface_SponsorClaim_Call { + _c.Call.Return(run) + return _c +} + +// NewClientInterface creates a new instance of ClientInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewClientInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *ClientInterface { + mock := &ClientInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/mocks/l1_info_treer.go b/rpc/mocks/l1_info_treer.go new file mode 100644 index 00000000..a4e0f66c --- /dev/null +++ b/rpc/mocks/l1_info_treer.go @@ -0,0 +1,626 @@ +// Code generated by mockery. 
DO NOT EDIT. + +package mocks + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + l1infotreesync "github.com/0xPolygon/cdk/l1infotreesync" + + mock "github.com/stretchr/testify/mock" + + types "github.com/0xPolygon/cdk/tree/types" +) + +// L1InfoTreer is an autogenerated mock type for the L1InfoTreer type +type L1InfoTreer struct { + mock.Mock +} + +type L1InfoTreer_Expecter struct { + mock *mock.Mock +} + +func (_m *L1InfoTreer) EXPECT() *L1InfoTreer_Expecter { + return &L1InfoTreer_Expecter{mock: &_m.Mock} +} + +// GetFirstInfo provides a mock function with given fields: +func (_m *L1InfoTreer) GetFirstInfo() (*l1infotreesync.L1InfoTreeLeaf, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetFirstInfo") + } + + var r0 *l1infotreesync.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func() (*l1infotreesync.L1InfoTreeLeaf, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *l1infotreesync.L1InfoTreeLeaf); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreer_GetFirstInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstInfo' +type L1InfoTreer_GetFirstInfo_Call struct { + *mock.Call +} + +// GetFirstInfo is a helper method to define mock.On call +func (_e *L1InfoTreer_Expecter) GetFirstInfo() *L1InfoTreer_GetFirstInfo_Call { + return &L1InfoTreer_GetFirstInfo_Call{Call: _e.mock.On("GetFirstInfo")} +} + +func (_c *L1InfoTreer_GetFirstInfo_Call) Run(run func()) *L1InfoTreer_GetFirstInfo_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *L1InfoTreer_GetFirstInfo_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *L1InfoTreer_GetFirstInfo_Call { + _c.Call.Return(_a0, _a1) + return 
_c +} + +func (_c *L1InfoTreer_GetFirstInfo_Call) RunAndReturn(run func() (*l1infotreesync.L1InfoTreeLeaf, error)) *L1InfoTreer_GetFirstInfo_Call { + _c.Call.Return(run) + return _c +} + +// GetFirstInfoAfterBlock provides a mock function with given fields: blockNum +func (_m *L1InfoTreer) GetFirstInfoAfterBlock(blockNum uint64) (*l1infotreesync.L1InfoTreeLeaf, error) { + ret := _m.Called(blockNum) + + if len(ret) == 0 { + panic("no return value specified for GetFirstInfoAfterBlock") + } + + var r0 *l1infotreesync.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (*l1infotreesync.L1InfoTreeLeaf, error)); ok { + return rf(blockNum) + } + if rf, ok := ret.Get(0).(func(uint64) *l1infotreesync.L1InfoTreeLeaf); ok { + r0 = rf(blockNum) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(blockNum) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreer_GetFirstInfoAfterBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstInfoAfterBlock' +type L1InfoTreer_GetFirstInfoAfterBlock_Call struct { + *mock.Call +} + +// GetFirstInfoAfterBlock is a helper method to define mock.On call +// - blockNum uint64 +func (_e *L1InfoTreer_Expecter) GetFirstInfoAfterBlock(blockNum interface{}) *L1InfoTreer_GetFirstInfoAfterBlock_Call { + return &L1InfoTreer_GetFirstInfoAfterBlock_Call{Call: _e.mock.On("GetFirstInfoAfterBlock", blockNum)} +} + +func (_c *L1InfoTreer_GetFirstInfoAfterBlock_Call) Run(run func(blockNum uint64)) *L1InfoTreer_GetFirstInfoAfterBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint64)) + }) + return _c +} + +func (_c *L1InfoTreer_GetFirstInfoAfterBlock_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *L1InfoTreer_GetFirstInfoAfterBlock_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreer_GetFirstInfoAfterBlock_Call) 
RunAndReturn(run func(uint64) (*l1infotreesync.L1InfoTreeLeaf, error)) *L1InfoTreer_GetFirstInfoAfterBlock_Call { + _c.Call.Return(run) + return _c +} + +// GetFirstL1InfoWithRollupExitRoot provides a mock function with given fields: rollupExitRoot +func (_m *L1InfoTreer) GetFirstL1InfoWithRollupExitRoot(rollupExitRoot common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error) { + ret := _m.Called(rollupExitRoot) + + if len(ret) == 0 { + panic("no return value specified for GetFirstL1InfoWithRollupExitRoot") + } + + var r0 *l1infotreesync.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func(common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error)); ok { + return rf(rollupExitRoot) + } + if rf, ok := ret.Get(0).(func(common.Hash) *l1infotreesync.L1InfoTreeLeaf); ok { + r0 = rf(rollupExitRoot) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func(common.Hash) error); ok { + r1 = rf(rollupExitRoot) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstL1InfoWithRollupExitRoot' +type L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call struct { + *mock.Call +} + +// GetFirstL1InfoWithRollupExitRoot is a helper method to define mock.On call +// - rollupExitRoot common.Hash +func (_e *L1InfoTreer_Expecter) GetFirstL1InfoWithRollupExitRoot(rollupExitRoot interface{}) *L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call { + return &L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call{Call: _e.mock.On("GetFirstL1InfoWithRollupExitRoot", rollupExitRoot)} +} + +func (_c *L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call) Run(run func(rollupExitRoot common.Hash)) *L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(common.Hash)) + }) + return _c +} + +func (_c 
*L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call) RunAndReturn(run func(common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error)) *L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call { + _c.Call.Return(run) + return _c +} + +// GetFirstVerifiedBatches provides a mock function with given fields: rollupID +func (_m *L1InfoTreer) GetFirstVerifiedBatches(rollupID uint32) (*l1infotreesync.VerifyBatches, error) { + ret := _m.Called(rollupID) + + if len(ret) == 0 { + panic("no return value specified for GetFirstVerifiedBatches") + } + + var r0 *l1infotreesync.VerifyBatches + var r1 error + if rf, ok := ret.Get(0).(func(uint32) (*l1infotreesync.VerifyBatches, error)); ok { + return rf(rollupID) + } + if rf, ok := ret.Get(0).(func(uint32) *l1infotreesync.VerifyBatches); ok { + r0 = rf(rollupID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.VerifyBatches) + } + } + + if rf, ok := ret.Get(1).(func(uint32) error); ok { + r1 = rf(rollupID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreer_GetFirstVerifiedBatches_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstVerifiedBatches' +type L1InfoTreer_GetFirstVerifiedBatches_Call struct { + *mock.Call +} + +// GetFirstVerifiedBatches is a helper method to define mock.On call +// - rollupID uint32 +func (_e *L1InfoTreer_Expecter) GetFirstVerifiedBatches(rollupID interface{}) *L1InfoTreer_GetFirstVerifiedBatches_Call { + return &L1InfoTreer_GetFirstVerifiedBatches_Call{Call: _e.mock.On("GetFirstVerifiedBatches", rollupID)} +} + +func (_c *L1InfoTreer_GetFirstVerifiedBatches_Call) Run(run func(rollupID uint32)) *L1InfoTreer_GetFirstVerifiedBatches_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint32)) + }) + return 
_c +} + +func (_c *L1InfoTreer_GetFirstVerifiedBatches_Call) Return(_a0 *l1infotreesync.VerifyBatches, _a1 error) *L1InfoTreer_GetFirstVerifiedBatches_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreer_GetFirstVerifiedBatches_Call) RunAndReturn(run func(uint32) (*l1infotreesync.VerifyBatches, error)) *L1InfoTreer_GetFirstVerifiedBatches_Call { + _c.Call.Return(run) + return _c +} + +// GetFirstVerifiedBatchesAfterBlock provides a mock function with given fields: rollupID, blockNum +func (_m *L1InfoTreer) GetFirstVerifiedBatchesAfterBlock(rollupID uint32, blockNum uint64) (*l1infotreesync.VerifyBatches, error) { + ret := _m.Called(rollupID, blockNum) + + if len(ret) == 0 { + panic("no return value specified for GetFirstVerifiedBatchesAfterBlock") + } + + var r0 *l1infotreesync.VerifyBatches + var r1 error + if rf, ok := ret.Get(0).(func(uint32, uint64) (*l1infotreesync.VerifyBatches, error)); ok { + return rf(rollupID, blockNum) + } + if rf, ok := ret.Get(0).(func(uint32, uint64) *l1infotreesync.VerifyBatches); ok { + r0 = rf(rollupID, blockNum) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.VerifyBatches) + } + } + + if rf, ok := ret.Get(1).(func(uint32, uint64) error); ok { + r1 = rf(rollupID, blockNum) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstVerifiedBatchesAfterBlock' +type L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call struct { + *mock.Call +} + +// GetFirstVerifiedBatchesAfterBlock is a helper method to define mock.On call +// - rollupID uint32 +// - blockNum uint64 +func (_e *L1InfoTreer_Expecter) GetFirstVerifiedBatchesAfterBlock(rollupID interface{}, blockNum interface{}) *L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call { + return &L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call{Call: _e.mock.On("GetFirstVerifiedBatchesAfterBlock", 
rollupID, blockNum)} +} + +func (_c *L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call) Run(run func(rollupID uint32, blockNum uint64)) *L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint32), args[1].(uint64)) + }) + return _c +} + +func (_c *L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call) Return(_a0 *l1infotreesync.VerifyBatches, _a1 error) *L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call) RunAndReturn(run func(uint32, uint64) (*l1infotreesync.VerifyBatches, error)) *L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call { + _c.Call.Return(run) + return _c +} + +// GetInfoByIndex provides a mock function with given fields: ctx, index +func (_m *L1InfoTreer) GetInfoByIndex(ctx context.Context, index uint32) (*l1infotreesync.L1InfoTreeLeaf, error) { + ret := _m.Called(ctx, index) + + if len(ret) == 0 { + panic("no return value specified for GetInfoByIndex") + } + + var r0 *l1infotreesync.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint32) (*l1infotreesync.L1InfoTreeLeaf, error)); ok { + return rf(ctx, index) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32) *l1infotreesync.L1InfoTreeLeaf); ok { + r0 = rf(ctx, index) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32) error); ok { + r1 = rf(ctx, index) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreer_GetInfoByIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetInfoByIndex' +type L1InfoTreer_GetInfoByIndex_Call struct { + *mock.Call +} + +// GetInfoByIndex is a helper method to define mock.On call +// - ctx context.Context +// - index uint32 +func (_e *L1InfoTreer_Expecter) GetInfoByIndex(ctx interface{}, index interface{}) 
*L1InfoTreer_GetInfoByIndex_Call { + return &L1InfoTreer_GetInfoByIndex_Call{Call: _e.mock.On("GetInfoByIndex", ctx, index)} +} + +func (_c *L1InfoTreer_GetInfoByIndex_Call) Run(run func(ctx context.Context, index uint32)) *L1InfoTreer_GetInfoByIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32)) + }) + return _c +} + +func (_c *L1InfoTreer_GetInfoByIndex_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *L1InfoTreer_GetInfoByIndex_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreer_GetInfoByIndex_Call) RunAndReturn(run func(context.Context, uint32) (*l1infotreesync.L1InfoTreeLeaf, error)) *L1InfoTreer_GetInfoByIndex_Call { + _c.Call.Return(run) + return _c +} + +// GetLastInfo provides a mock function with given fields: +func (_m *L1InfoTreer) GetLastInfo() (*l1infotreesync.L1InfoTreeLeaf, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetLastInfo") + } + + var r0 *l1infotreesync.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func() (*l1infotreesync.L1InfoTreeLeaf, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *l1infotreesync.L1InfoTreeLeaf); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreer_GetLastInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastInfo' +type L1InfoTreer_GetLastInfo_Call struct { + *mock.Call +} + +// GetLastInfo is a helper method to define mock.On call +func (_e *L1InfoTreer_Expecter) GetLastInfo() *L1InfoTreer_GetLastInfo_Call { + return &L1InfoTreer_GetLastInfo_Call{Call: _e.mock.On("GetLastInfo")} +} + +func (_c *L1InfoTreer_GetLastInfo_Call) Run(run func()) *L1InfoTreer_GetLastInfo_Call { + _c.Call.Run(func(args mock.Arguments) { + run() 
+ }) + return _c +} + +func (_c *L1InfoTreer_GetLastInfo_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *L1InfoTreer_GetLastInfo_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreer_GetLastInfo_Call) RunAndReturn(run func() (*l1infotreesync.L1InfoTreeLeaf, error)) *L1InfoTreer_GetLastInfo_Call { + _c.Call.Return(run) + return _c +} + +// GetLastVerifiedBatches provides a mock function with given fields: rollupID +func (_m *L1InfoTreer) GetLastVerifiedBatches(rollupID uint32) (*l1infotreesync.VerifyBatches, error) { + ret := _m.Called(rollupID) + + if len(ret) == 0 { + panic("no return value specified for GetLastVerifiedBatches") + } + + var r0 *l1infotreesync.VerifyBatches + var r1 error + if rf, ok := ret.Get(0).(func(uint32) (*l1infotreesync.VerifyBatches, error)); ok { + return rf(rollupID) + } + if rf, ok := ret.Get(0).(func(uint32) *l1infotreesync.VerifyBatches); ok { + r0 = rf(rollupID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.VerifyBatches) + } + } + + if rf, ok := ret.Get(1).(func(uint32) error); ok { + r1 = rf(rollupID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreer_GetLastVerifiedBatches_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastVerifiedBatches' +type L1InfoTreer_GetLastVerifiedBatches_Call struct { + *mock.Call +} + +// GetLastVerifiedBatches is a helper method to define mock.On call +// - rollupID uint32 +func (_e *L1InfoTreer_Expecter) GetLastVerifiedBatches(rollupID interface{}) *L1InfoTreer_GetLastVerifiedBatches_Call { + return &L1InfoTreer_GetLastVerifiedBatches_Call{Call: _e.mock.On("GetLastVerifiedBatches", rollupID)} +} + +func (_c *L1InfoTreer_GetLastVerifiedBatches_Call) Run(run func(rollupID uint32)) *L1InfoTreer_GetLastVerifiedBatches_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint32)) + }) + return _c +} + +func (_c *L1InfoTreer_GetLastVerifiedBatches_Call) Return(_a0 
*l1infotreesync.VerifyBatches, _a1 error) *L1InfoTreer_GetLastVerifiedBatches_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreer_GetLastVerifiedBatches_Call) RunAndReturn(run func(uint32) (*l1infotreesync.VerifyBatches, error)) *L1InfoTreer_GetLastVerifiedBatches_Call { + _c.Call.Return(run) + return _c +} + +// GetLocalExitRoot provides a mock function with given fields: ctx, networkID, rollupExitRoot +func (_m *L1InfoTreer) GetLocalExitRoot(ctx context.Context, networkID uint32, rollupExitRoot common.Hash) (common.Hash, error) { + ret := _m.Called(ctx, networkID, rollupExitRoot) + + if len(ret) == 0 { + panic("no return value specified for GetLocalExitRoot") + } + + var r0 common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) (common.Hash, error)); ok { + return rf(ctx, networkID, rollupExitRoot) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) common.Hash); ok { + r0 = rf(ctx, networkID, rollupExitRoot) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32, common.Hash) error); ok { + r1 = rf(ctx, networkID, rollupExitRoot) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreer_GetLocalExitRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLocalExitRoot' +type L1InfoTreer_GetLocalExitRoot_Call struct { + *mock.Call +} + +// GetLocalExitRoot is a helper method to define mock.On call +// - ctx context.Context +// - networkID uint32 +// - rollupExitRoot common.Hash +func (_e *L1InfoTreer_Expecter) GetLocalExitRoot(ctx interface{}, networkID interface{}, rollupExitRoot interface{}) *L1InfoTreer_GetLocalExitRoot_Call { + return &L1InfoTreer_GetLocalExitRoot_Call{Call: _e.mock.On("GetLocalExitRoot", ctx, networkID, rollupExitRoot)} +} + +func (_c *L1InfoTreer_GetLocalExitRoot_Call) Run(run func(ctx context.Context, networkID uint32, 
rollupExitRoot common.Hash)) *L1InfoTreer_GetLocalExitRoot_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32), args[2].(common.Hash)) + }) + return _c +} + +func (_c *L1InfoTreer_GetLocalExitRoot_Call) Return(_a0 common.Hash, _a1 error) *L1InfoTreer_GetLocalExitRoot_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreer_GetLocalExitRoot_Call) RunAndReturn(run func(context.Context, uint32, common.Hash) (common.Hash, error)) *L1InfoTreer_GetLocalExitRoot_Call { + _c.Call.Return(run) + return _c +} + +// GetRollupExitTreeMerkleProof provides a mock function with given fields: ctx, networkID, root +func (_m *L1InfoTreer) GetRollupExitTreeMerkleProof(ctx context.Context, networkID uint32, root common.Hash) (types.Proof, error) { + ret := _m.Called(ctx, networkID, root) + + if len(ret) == 0 { + panic("no return value specified for GetRollupExitTreeMerkleProof") + } + + var r0 types.Proof + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) (types.Proof, error)); ok { + return rf(ctx, networkID, root) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) types.Proof); ok { + r0 = rf(ctx, networkID, root) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.Proof) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32, common.Hash) error); ok { + r1 = rf(ctx, networkID, root) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoTreer_GetRollupExitTreeMerkleProof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRollupExitTreeMerkleProof' +type L1InfoTreer_GetRollupExitTreeMerkleProof_Call struct { + *mock.Call +} + +// GetRollupExitTreeMerkleProof is a helper method to define mock.On call +// - ctx context.Context +// - networkID uint32 +// - root common.Hash +func (_e *L1InfoTreer_Expecter) GetRollupExitTreeMerkleProof(ctx interface{}, networkID interface{}, root interface{}) 
*L1InfoTreer_GetRollupExitTreeMerkleProof_Call { + return &L1InfoTreer_GetRollupExitTreeMerkleProof_Call{Call: _e.mock.On("GetRollupExitTreeMerkleProof", ctx, networkID, root)} +} + +func (_c *L1InfoTreer_GetRollupExitTreeMerkleProof_Call) Run(run func(ctx context.Context, networkID uint32, root common.Hash)) *L1InfoTreer_GetRollupExitTreeMerkleProof_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32), args[2].(common.Hash)) + }) + return _c +} + +func (_c *L1InfoTreer_GetRollupExitTreeMerkleProof_Call) Return(_a0 types.Proof, _a1 error) *L1InfoTreer_GetRollupExitTreeMerkleProof_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoTreer_GetRollupExitTreeMerkleProof_Call) RunAndReturn(run func(context.Context, uint32, common.Hash) (types.Proof, error)) *L1InfoTreer_GetRollupExitTreeMerkleProof_Call { + _c.Call.Return(run) + return _c +} + +// NewL1InfoTreer creates a new instance of L1InfoTreer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewL1InfoTreer(t interface { + mock.TestingT + Cleanup(func()) +}) *L1InfoTreer { + mock := &L1InfoTreer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/mocks/last_ge_rer.go b/rpc/mocks/last_ge_rer.go new file mode 100644 index 00000000..d2e3068a --- /dev/null +++ b/rpc/mocks/last_ge_rer.go @@ -0,0 +1,104 @@ +// Code generated by mockery. DO NOT EDIT. 
package mocks

import (
	context "context"

	common "github.com/ethereum/go-ethereum/common"

	mock "github.com/stretchr/testify/mock"
)

// LastGERer is an autogenerated mock type for the LastGERer type
// NOTE(review): generated by mockery (see file header) — do not hand-edit;
// regenerate with mockery if the LastGERer interface changes.
type LastGERer struct {
	mock.Mock
}

// LastGERer_Expecter provides the fluent EXPECT() API over the underlying mock.
type LastGERer_Expecter struct {
	mock *mock.Mock
}

// EXPECT returns the expecter used to declare expectations in a type-safe way.
func (_m *LastGERer) EXPECT() *LastGERer_Expecter {
	return &LastGERer_Expecter{mock: &_m.Mock}
}

// GetFirstGERAfterL1InfoTreeIndex provides a mock function with given fields: ctx, atOrAfterL1InfoTreeIndex
func (_m *LastGERer) GetFirstGERAfterL1InfoTreeIndex(ctx context.Context, atOrAfterL1InfoTreeIndex uint32) (uint32, common.Hash, error) {
	// Record the invocation and fetch whatever return values were configured.
	ret := _m.Called(ctx, atOrAfterL1InfoTreeIndex)

	if len(ret) == 0 {
		panic("no return value specified for GetFirstGERAfterL1InfoTreeIndex")
	}

	var r0 uint32
	var r1 common.Hash
	var r2 error
	// Resolution order: a single function producing all three results wins;
	// otherwise each position falls back to a per-position function, then to
	// the literal stored value.
	if rf, ok := ret.Get(0).(func(context.Context, uint32) (uint32, common.Hash, error)); ok {
		return rf(ctx, atOrAfterL1InfoTreeIndex)
	}
	if rf, ok := ret.Get(0).(func(context.Context, uint32) uint32); ok {
		r0 = rf(ctx, atOrAfterL1InfoTreeIndex)
	} else {
		r0 = ret.Get(0).(uint32)
	}

	if rf, ok := ret.Get(1).(func(context.Context, uint32) common.Hash); ok {
		r1 = rf(ctx, atOrAfterL1InfoTreeIndex)
	} else {
		// A nil stored value leaves r1 as the zero common.Hash.
		if ret.Get(1) != nil {
			r1 = ret.Get(1).(common.Hash)
		}
	}

	if rf, ok := ret.Get(2).(func(context.Context, uint32) error); ok {
		r2 = rf(ctx, atOrAfterL1InfoTreeIndex)
	} else {
		r2 = ret.Error(2)
	}

	return r0, r1, r2
}

// LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstGERAfterL1InfoTreeIndex'
type LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call struct {
	*mock.Call
}

// GetFirstGERAfterL1InfoTreeIndex is a helper method to define mock.On call
// - ctx context.Context
// - atOrAfterL1InfoTreeIndex uint32
func (_e *LastGERer_Expecter) GetFirstGERAfterL1InfoTreeIndex(ctx interface{}, atOrAfterL1InfoTreeIndex interface{}) *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call {
	return &LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call{Call: _e.mock.On("GetFirstGERAfterL1InfoTreeIndex", ctx, atOrAfterL1InfoTreeIndex)}
}

// Run registers a callback invoked with the typed arguments of each matched call.
func (_c *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call) Run(run func(ctx context.Context, atOrAfterL1InfoTreeIndex uint32)) *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call {
	_c.Call.Run(func(args mock.Arguments) {
		run(args[0].(context.Context), args[1].(uint32))
	})
	return _c
}

// Return fixes the three values the mocked method will hand back.
func (_c *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call) Return(injectedL1InfoTreeIndex uint32, ger common.Hash, err error) *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call {
	_c.Call.Return(injectedL1InfoTreeIndex, ger, err)
	return _c
}

// RunAndReturn computes the return values per call via the supplied function.
func (_c *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call) RunAndReturn(run func(context.Context, uint32) (uint32, common.Hash, error)) *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call {
	_c.Call.Return(run)
	return _c
}

// NewLastGERer creates a new instance of LastGERer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
+func NewLastGERer(t interface { + mock.TestingT + Cleanup(func()) +}) *LastGERer { + mock := &LastGERer{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/types/bridge.go b/rpc/types/bridge.go new file mode 100644 index 00000000..eb8c6464 --- /dev/null +++ b/rpc/types/bridge.go @@ -0,0 +1,12 @@ +package types + +import ( + "github.com/0xPolygon/cdk/l1infotreesync" + tree "github.com/0xPolygon/cdk/tree/types" +) + +type ClaimProof struct { + ProofLocalExitRoot tree.Proof + ProofRollupExitRoot tree.Proof + L1InfoTreeLeaf l1infotreesync.L1InfoTreeLeaf +} diff --git a/scripts/local_config b/scripts/local_config index ed8aaec3..9a1f55cf 100755 --- a/scripts/local_config +++ b/scripts/local_config @@ -1,72 +1,308 @@ #!/bin/bash #Include common varaibles source $(dirname $0)/../test/scripts/env.sh +############################################################################### +function log_debug() { + echo -e "\033[0;30mDebug: $*" "\033[0m" +} +############################################################################### +function log_error() { + echo -e "\033[0;31mError: $*" "\033[0m" +} +############################################################################### +function log_fatal() { + log_error $* + exit 1 +} +############################################################################### +function ok_or_fatal(){ + if [ $? -ne 0 ]; then + log_fatal $* + fi +} +############################################################################### +function get_value_from_toml_file(){ + local _FILE="$1" + # KEY = . 
+ local _SECTION="$2" + local _KEY="$3" + local _LINE + local _inside_section=0 + local _return_next_line=0 + while read -r _LINE; do + # Clean up line from spaces and tabs + _LINE=$(echo $_LINE | tr -d '[:space:]') + #echo $_LINE + if [ $_inside_section -eq 1 ]; then + if [[ "$_LINE" == [* ]]; then + return 1 + fi + if [ $_return_next_line -eq 1 ]; then + # sed sentence remove quotes + echo $_LINE | sed 's/^[[:space:]]*"//;s/"$//' + + return 0 + fi + #local _key_splitted=(${_LINE//=/ }) + local _key_name=$(echo $_LINE | cut -f 1 -d "=") + local _key_value=$(echo $_LINE | cut -f 2- -d "=") + if [ "$_key_name" == "$_KEY" ]; then + if [ $_key_value == "[" ]; then + _return_next_line=1 + else + # sed sentence remove quotes + echo $_key_value | sed 's/^[[:space:]]*"//;s/"$//' + return 0 + fi + fi + elif [ "$_LINE" == "[${_SECTION}]" ]; then + _inside_section=1 + fi + + + done < "$_FILE" + return 2 +} +############################################################################### +function export_key_from_toml_file_or_fatal(){ + local _EXPORTED_VAR_NAME="$1" + local _FILE="$2" + local _SECTION="$3" + local _KEY="$4" + local _VALUE=$(get_value_from_toml_file $_FILE $_SECTION $_KEY) + if [ -z "$_VALUE" ]; then + log_fatal "$FUNCNAME: key $_KEY not found in section $_SECTION" + fi + export $_EXPORTED_VAR_NAME="$_VALUE" + log_debug "$_EXPORTED_VAR_NAME=${!_EXPORTED_VAR_NAME} \t\t\t# file:$_FILE section:$_SECTION key:$_KEY" +} +############################################################################### +function export_obj_key_from_toml_file_or_fatal(){ + local _EXPORTED_VAR_NAME="$1" + local _FILE="$2" + local _SECTION="$3" + local _KEY="$4" + local _OBJ_KEY="$5" + log_debug "export_obj_key_from_toml_file_or_fatal: $_EXPORTED_VAR_NAME $_FILE $_SECTION $_KEY $_OBJ_KEY" + local _VALUE=$(get_value_from_toml_file $_FILE $_SECTION $_KEY) + if [ -z "$_VALUE" ]; then + log_fatal "export_obj_key_from_toml_file_or_fatal: obj_key $_KEY not found in section [$_SECTION]" + fi + 
local _CLEAN_VALUE=$(echo $_VALUE | tr -d '{' | tr -d '}' | tr ',' '\n') + while read -r _LINE; do + local _key_splitted=(${_LINE//=/ }) + + if [ "${_key_splitted[0]}" == "$_OBJ_KEY" ]; then + local _KEY_VALUE=${_key_splitted[1]} + if [ "$_KEY_VALUE" == "[" ]; then + read -r _LINE + _KEY_VALUE=$LINE + echo "zzz $_KEY_VALUE" + fi + local _RES=$(echo $_KEY_VALUE | sed 's/^[[:space:]]*"//;s/"$//') + export $_EXPORTED_VAR_NAME="${_RES}" + log_debug "$_EXPORTED_VAR_NAME=${!_EXPORTED_VAR_NAME} \t\t\t# file:$_FILE section:$_SECTION key:$_KEY obj_key:$_OBJ_KEY" + return 0 + fi + done <<< "$_CLEAN_VALUE" + log_fatal "export_obj_key_from_toml_file_or_fatal: obj_key $_OBJ_KEY not found in section $_SECTION/ $_KEY = $_VALUE" +} ############################################################################### -# MAIN +function export_values_of_genesis(){ + local _GENESIS_FILE=$1 + if [ ! -f $_GENESIS_FILE ]; then + log_fatal "Error: genesis file not found: $_GENESIS_FILE" + fi + export l1_chain_id=$(jq -r '.L1Config.chainId' $_GENESIS_FILE | tr -d '"') + export pol_token_address=$(jq -r '.L1Config.polTokenAddress' $_GENESIS_FILE) + export zkevm_rollup_address=$(jq -r '.L1Config.polygonZkEVMAddress' $_GENESIS_FILE) + export zkevm_rollup_manager_address=$(jq -r '.L1Config.polygonRollupManagerAddress' $_GENESIS_FILE) + export zkevm_global_exit_root_address=$(jq -r '.L1Config.polygonZkEVMGlobalExitRootAddress' $_GENESIS_FILE) + export zkevm_rollup_manager_block_number=$(jq -r '.rollupManagerCreationBlockNumber' $_GENESIS_FILE) +} + ############################################################################### -set -o pipefail # enable strict command pipe error detection +function export_values_of_cdk_node_config(){ + local _CDK_CONFIG_FILE=$1 + export_key_from_toml_file_or_fatal zkevm_l2_sequencer_address $_CDK_CONFIG_FILE SequenceSender L2Coinbase + export_obj_key_from_toml_file_or_fatal zkevm_l2_sequencer_keystore_password $_CDK_CONFIG_FILE SequenceSender PrivateKey Password + 
export_key_from_toml_file_or_fatal l1_chain_id $_CDK_CONFIG_FILE SequenceSender.EthTxManager.Etherman L1ChainID + export_key_from_toml_file_or_fatal zkevm_is_validium $_CDK_CONFIG_FILE Common IsValidiumMode + export_key_from_toml_file_or_fatal zkevm_contract_versions $_CDK_CONFIG_FILE Common ContractVersions + export_key_from_toml_file_or_fatal l2_chain_id $_CDK_CONFIG_FILE Aggregator ChainID + export_key_from_toml_file_or_fatal zkevm_aggregator_port $_CDK_CONFIG_FILE Aggregator Port + export_key_from_toml_file_or_fatal zkevm_l2_agglayer_address $_CDK_CONFIG_FILE Aggregator SenderAddress + export_key_from_toml_file_or_fatal aggregator_db_name $_CDK_CONFIG_FILE Aggregator.DB Name + export_key_from_toml_file_or_fatal aggregator_db_user $_CDK_CONFIG_FILE Aggregator.DB User + export_key_from_toml_file_or_fatal aggregator_db_password $_CDK_CONFIG_FILE Aggregator.DB Password + export_obj_key_from_toml_file_or_fatal zkevm_l2_aggregator_keystore_password $_CDK_CONFIG_FILE Aggregator.EthTxManager PrivateKeys Password + + export_key_from_toml_file_or_fatal zkevm_rollup_fork_id $_CDK_CONFIG_FILE Aggregator ForkId + export is_cdk_validium=$zkevm_is_validium + export zkevm_rollup_chain_id=$l2_chain_id -which kurtosis > /dev/null -if [ $? -ne 0 ]; then - echo "kurtosis is not installed. Please install it:" + if [ "$zkevm_is_validium" == "true" ]; then + log_debug "Validium mode detected... 
Retrieving the dac_port" + export_value_from_kurtosis_or_fail dac_port zkevm-dac-001 dac + fi + export zkevm_l2_keystore_password=$zkevm_l2_sequencer_keystore_password +} +############################################################################### +# params: +# $1 -> exported variable name +# $2 -> service name +# $3...$n -> endpoint names (will try all of them until one is found) +############################################################################### +function export_value_from_kurtosis_or_fail(){ + local _EXPORTED_VAR_NAME="$1" + shift + local _SERVICE="$1" + shift + local _END_POINT + local _RESULT + log_debug "Trying to get kurtosis value:$_EXPORTED_VAR_NAME = $KURTOSIS_ENCLAVE $_SERVICE $*" + while [ ! -z $1 ]; do + _END_POINT=$1 + shift + log_debug "--- kurtosis value: $KURTOSIS_ENCLAVE $_SERVICE $_END_POINT" + _RESULT=$(kurtosis port print $KURTOSIS_ENCLAVE $_SERVICE $_END_POINT 2>/dev/null) + if [ ! -z $_RESULT ]; then + break + fi + done + export $_EXPORTED_VAR_NAME=$_RESULT + if [ -z $_EXPORTED_VAR_NAME ]; then + log_fatal "Error getting kurtosis port: $KURTOSIS_ENCLAVE $_SERVICE $_END_POINT" + fi + log_debug "$_EXPORTED_VAR_NAME=${!_EXPORTED_VAR_NAME} \t\t\t# Kurtosis $KURTOSIS_ENCLAVE $_SERVICE $_END_POINT" +} +############################################################################### +function export_portnum_from_kurtosis_or_fail(){ + local _EXPORTED_VAR_NAME="$1" + export_value_from_kurtosis_or_fail $* > /dev/null + local _VALUE + eval "_VALUE=\$$1" + # sed sentece eliminate protocol (xyz://) is have it + # kurtosis sometimes include protocol but not always + local _PORT=$(echo "$_VALUE" | sed -E 's|^[a-zA-Z]+://||' | cut -f 2 -d ":") + if [ -z $_PORT ]; then + log_fatal "Error getting port number from kurtosis: $2 $3 -> $_VALUE" + fi + export $_EXPORTED_VAR_NAME=$_PORT + log_debug "$_EXPORTED_VAR_NAME=${!_EXPORTED_VAR_NAME} \t\t\t# Kurtosis $KURTOSIS_ENCLAVE $2 $3" +} 
+############################################################################### +function export_ports_from_kurtosis(){ + export_portnum_from_kurtosis_or_fail l1_rpc_port el-1-geth-lighthouse rpc + export_portnum_from_kurtosis_or_fail zkevm_rpc_http_port cdk-erigon-node-001 rpc rpc + export_portnum_from_kurtosis_or_fail zkevm_data_streamer_port cdk-erigon-sequencer-001 data-streamer + export_portnum_from_kurtosis_or_fail aggregator_db_port postgres-001 postgres + export_portnum_from_kurtosis_or_fail agglayer_port agglayer agglayer + export aggregator_db_hostname="127.0.0.1" + export l1_rpc_url="http://localhost:${l1_rpc_port}" + export l2_rpc_url="http://localhost:${zkevm_rpc_http_port}" +} + +############################################################################### +function export_forced_values(){ + export global_log_level="debug" + export l2_rpc_name="localhost" + export sequencer_name="localhost" + export deployment_suffix="" +} +############################################################################### +function check_requirements(){ + which kurtosis > /dev/null + if [ $? -ne 0 ]; then + log_error "kurtosis is not installed. Please install it:" cat << EOF echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list - sudo apt install kurtosis-cli=0.90.1 + sudo apt install kurtosis-cli kurtosis version EOF exit 1 -fi + fi + if [ -z $TMP_CDK_FOLDER -o -z $KURTOSIS_ENCLAVE ]; then + log_fatal "TMP_CDK_FOLDER or KURTOSIS_ENCLAVE is not set. Must be set on file env.sh" + fi + kurtosis enclave inspect $KURTOSIS_ENCLAVE > /dev/null + if [ $? 
-ne 0 ]; then + log_error "Error inspecting enclave $KURTOSIS_ENCLAVE" + echo "You must start kurtosis environment before running this script" + echo "- start kurtosis:" + echo " kurtosis clean --all; kurtosis run --enclave $KURTOSIS_ENCLAVE --args-file params.yml --image-download always ." -if [ -z $TMP_CDK_FOLDER -o -z $ENCLAVE ]; then - echo "TMP_CDK_FOLDER or ENCLAVE is not set. Must be set on file env.sh" - exit 1 -fi -kurtosis enclave inspect $ENCLAVE > /dev/null -if [ $? -ne 0 ]; then - echo "Error inspecting enclave $ENCLAVE" - echo "You must start kurtosis environment before running this script" - echo "- start kurtosis:" - echo " kurtosis clean --all; kurtosis run --enclave $ENCLAVE --args-file params.yml --image-download always ." + exit 1 + fi +} +############################################################################### +function create_dest_folder(){ + export DEST=${TMP_CDK_FOLDER}/local_config + [ ! -d ${DEST} ] && mkdir -p ${DEST} + rm $DEST/* +} +############################################################################### +function download_kurtosis_artifacts(){ + kurtosis files download $KURTOSIS_ENCLAVE genesis $DEST + ok_or_fatal "Error downloading kurtosis artifact genesis to $DEST" + export genesis_file=$DEST/genesis.json + + kurtosis files download $KURTOSIS_ENCLAVE sequencer-keystore $DEST + ok_or_fatal "Error downloading kurtosis artifact sequencer-keystore to $DEST" + export zkevm_l2_sequencer_keystore_file=$DEST/sequencer.keystore + + kurtosis files download $KURTOSIS_ENCLAVE cdk-node-config-artifact $DEST + ok_or_fatal "Error downloading kurtosis artifact cdk-node-config-artifact to $DEST" - exit 1 -fi -DEST=${TMP_CDK_FOLDER}/local_config - -[ ! -d ${DEST} ] && mkdir -p ${DEST} -rm $DEST/* -kurtosis files download $ENCLAVE genesis $DEST -[ $? -ne 0 ] && echo "Error downloading genesis" && exit 1 -export genesis_file=$DEST/genesis.json -kurtosis files download $ENCLAVE sequencer-keystore $DEST -[ $? 
-ne 0 ] && echo "Error downloading sequencer-keystore" && exit 1 -export sequencer_keystore_file=$DEST/sequencer.keystore - -l1_rpc_port=$(kurtosis port print $ENCLAVE el-1-geth-lighthouse rpc | cut -f 3 -d ":") -[ $? -ne 0 ] && echo "Error getting l1_rpc_port" && exit 1 || export l1_rpc_port && echo "l1_rpc_port=$l1_rpc_port" - -zkevm_data_streamer_port=$(kurtosis port print $ENCLAVE cdk-erigon-sequencer-001 data-streamer | cut -f 3 -d ":") -[ $? -ne 0 ] && echo "Error getting zkevm_data_streamer_port" && exit 1 || export zkevm_data_streamer_port && echo "zkevm_data_streamer_port=$zkevm_data_streamer_port" - -kurtosis files download $ENCLAVE cdk-node-config-artifact $DEST -export zkevm_l2_sequencer_address=$(cat $DEST/cdk-node-config.toml |grep L2Coinbase | cut -f 2 -d "="| tr -d '"' | tr -d ' ') -export zkevm_l2_keystore_password=$(cat $DEST/cdk-node-config.toml |grep -A1 L2Coinbase | tr ',' '\n' | grep Password | cut -f 2 -d '=' | tr -d '}' | tr -d '"' | tr -d ' ') -export l1_chain_id=$(cat $DEST/cdk-node-config.toml | grep L1ChainID | cut -f 2 -d '=' | head -n 1) -echo $l1_chain_id -export zkevm_is_validium=$(cat $DEST/cdk-node-config.toml | grep IsValidiumMode | cut -f 2 -d '=') - -if [ "$zkevm_is_validium" == "true" ]; then - echo "Validium mode detected... Retrieving the dac_port" - dac_port=$(kurtosis port print $ENCLAVE zkevm-dac-001 dac | cut -f 3 -d ":") - [ $? -ne 0 ] && echo "Error getting dac_port" && exit 1 || export dac_port && echo "dac_port=$dac_port" -fi - -envsubst < test/config/test.kurtosis_template.toml > $DEST/test.kurtosis.toml + kurtosis files download $KURTOSIS_ENCLAVE aggregator-keystore $DEST + ok_or_fatal "Error downloading kurtosis artifact cdk-node-config-artifact to $DEST" + export zkevm_l2_aggregator_keystore_file=$DEST/aggregator.keystore + +} +############################################################################### +function check_generated_config_file(){ + grep "" $DEST_TEMPLATE_FILE > /dev/null + if [ $? 
-ne 1 ]; then + log_error "some values are not set, check $ORIG_TEMPLATE_FILE" + echo "" + echo "missing keys in rendered template: $DEST_TEMPLATE_FILE" + echo " " + grep "" $DEST_TEMPLATE_FILE + exit 1 + fi +} +############################################################################### +# MAIN +############################################################################### +set -o pipefail # enable strict command pipe error detection +check_requirements +create_dest_folder + +download_kurtosis_artifacts + +export_values_of_genesis $genesis_file +export_ports_from_kurtosis +export_values_of_cdk_node_config $DEST/cdk-node-config.toml +export_forced_values + +ORIG_TEMPLATE_FILE=test/config/kurtosis-cdk-node-config.toml.template +DEST_TEMPLATE_FILE=$DEST/test.kurtosis.toml + +# Generate config file +go run scripts/run_template.go $ORIG_TEMPLATE_FILE > $DEST_TEMPLATE_FILE +ok_or_fatal "Error generating template" + +check_generated_config_file + + +echo " " +echo "file generated at:" $DEST/test.kurtosis.toml echo "- to restart kurtosis:" echo " kurtosis clean --all; kurtosis run --enclave cdk-v1 --args-file params.yml --image-download always ." 
@@ -85,9 +321,9 @@ cat << EOF "cwd": "\${workspaceFolder}", "args":[ "run", - "-cfg", "$DEST/test.kurtosis.toml", + "-cfg", "$DEST_TEMPLATE_FILE", "-components", "sequence-sender,aggregator", - "-custom-network-file", "$DEST/local_config/genesis.json" ] }, EOF + diff --git a/scripts/run_template.go b/scripts/run_template.go new file mode 100644 index 00000000..c9ef58a3 --- /dev/null +++ b/scripts/run_template.go @@ -0,0 +1,57 @@ +package main + +import ( + "log" + "os" + "regexp" + "strings" + "text/template" +) + +func main() { + tmpl := template.New("t1") + content, err := readFile(os.Args[1]) + if err != nil { + log.Fatalf("Error loading template: %v", err) + } + content = replaceDotsInTemplateVariables(content) + tmpl = template.Must(tmpl.Parse(content)) + + if err := tmpl.Execute(os.Stdout, environmentToMap()); err != nil { + log.Fatalf("Error executing template: %v", err) + } +} +func replaceDotsInTemplateVariables(template string) string { + re := regexp.MustCompile(`{{\s*\.([^{}]*)\s*}}`) + result := re.ReplaceAllStringFunc(template, func(match string) string { + match = strings.ReplaceAll(match[3:], ".", "_") + return "{{." 
+ match + }) + return result +} + +func readFile(filename string) (string, error) { + content, err := os.ReadFile(filename) + if err != nil { + return "", err + } + return string(content), nil +} + +func environmentToMap() map[string]any { + envVars := make(map[string]any) + for _, e := range os.Environ() { + pair := splitAtFirst(e, '=') + envVars[pair[0]] = pair[1] + } + return envVars +} + +func splitAtFirst(s string, sep rune) [2]string { + for i, c := range s { + if c == sep { + return [2]string{s[:i], s[i+1:]} + } + } + return [2]string{s, ""} +} diff --git a/sequencesender.json b/sequencesender.json new file mode 100644 index 00000000..0967ef42 --- /dev/null +++ b/sequencesender.json @@ -0,0 +1 @@ +{} diff --git a/sequencesender/config.go b/sequencesender/config.go index 7b7aada0..f264f904 100644 --- a/sequencesender/config.go +++ b/sequencesender/config.go @@ -3,7 +3,7 @@ package sequencesender import ( "github.com/0xPolygon/cdk/config/types" "github.com/0xPolygon/cdk/log" - "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" + "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" "github.com/ethereum/go-ethereum/common" ) @@ -54,9 +54,6 @@ type Config struct { // MaxPendingTx is the maximum number of pending transactions (those that are not in a final state) MaxPendingTx uint64 - // StreamClient is the config for the stream client - StreamClient StreamClientCfg `mapstructure:"StreamClient"` - // EthTxManager is the config for the ethtxmanager EthTxManager ethtxmanager.Config `mapstructure:"EthTxManager"` @@ -68,8 +65,11 @@ type Config struct { // BlockFinality indicates the status of the blocks that will be queried in order to sync BlockFinality string `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"` //nolint:lll - // SanityCheckRPCURL is the URL of the RPC server to perform sanity check regarding the number of blocks in a batch - SanityCheckRPCURL string 
`mapstructure:"SanityCheckRPCURL"` + // RPCURL is the URL of the RPC server + RPCURL string `mapstructure:"RPCURL"` + + // GetBatchWaitInterval is the time to wait to query for a new batch when there are no more batches available + GetBatchWaitInterval types.Duration `mapstructure:"GetBatchWaitInterval"` } // StreamClientCfg contains the data streamer's configuration properties diff --git a/sequencesender/ethtx.go b/sequencesender/ethtx.go new file mode 100644 index 00000000..32bc62b4 --- /dev/null +++ b/sequencesender/ethtx.go @@ -0,0 +1,390 @@ +package sequencesender + +import ( + "context" + "encoding/json" + "errors" + "math" + "math/big" + "os" + "strings" + "sync/atomic" + "time" + + "github.com/0xPolygon/cdk/log" + "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" + "github.com/0xPolygon/zkevm-ethtx-manager/types" + "github.com/ethereum/go-ethereum/common" +) + +type ethTxData struct { + Nonce uint64 `json:"nonce"` + Status string `json:"status"` + SentL1Timestamp time.Time `json:"sentL1Timestamp"` + StatusTimestamp time.Time `json:"statusTimestamp"` + FromBatch uint64 `json:"fromBatch"` + ToBatch uint64 `json:"toBatch"` + MinedAtBlock big.Int `json:"minedAtBlock"` + OnMonitor bool `json:"onMonitor"` + To common.Address `json:"to"` + StateHistory []string `json:"stateHistory"` + Txs map[common.Hash]ethTxAdditionalData `json:"txs"` + Gas uint64 `json:"gas"` +} + +type ethTxAdditionalData struct { + GasPrice *big.Int `json:"gasPrice,omitempty"` + RevertMessage string `json:"revertMessage,omitempty"` +} + +// sendTx adds transaction to the ethTxManager to send it to L1 +func (s *SequenceSender) sendTx(ctx context.Context, resend bool, txOldHash *common.Hash, to *common.Address, + fromBatch uint64, toBatch uint64, data []byte, gas uint64) error { + // Params if new tx to send or resend a previous tx + var ( + paramTo *common.Address + paramData []byte + valueFromBatch uint64 + valueToBatch uint64 + valueToAddress common.Address + ) + + if !resend { + 
paramTo = to + paramData = data + valueFromBatch = fromBatch + valueToBatch = toBatch + } else { + if txOldHash == nil { + log.Errorf("trying to resend a tx with nil hash") + return errors.New("resend tx with nil hash monitor id") + } + oldEthTx := s.ethTransactions[*txOldHash] + paramTo = &oldEthTx.To + paramData = s.ethTxData[*txOldHash] + valueFromBatch = oldEthTx.FromBatch + valueToBatch = oldEthTx.ToBatch + } + if paramTo != nil { + valueToAddress = *paramTo + } + + // Add sequence tx + txHash, err := s.ethTxManager.AddWithGas(ctx, paramTo, big.NewInt(0), paramData, s.cfg.GasOffset, nil, gas) + if err != nil { + log.Errorf("error adding sequence to ethtxmanager: %v", err) + return err + } + + // Add new eth tx + txData := ethTxData{ + SentL1Timestamp: time.Now(), + StatusTimestamp: time.Now(), + Status: "*new", + FromBatch: valueFromBatch, + ToBatch: valueToBatch, + OnMonitor: true, + To: valueToAddress, + Gas: gas, + } + + // Add tx to internal structure + s.mutexEthTx.Lock() + s.ethTransactions[txHash] = &txData + txResults := make(map[common.Hash]types.TxResult, 0) + s.copyTxData(txHash, paramData, txResults) + err = s.getResultAndUpdateEthTx(ctx, txHash) + if err != nil { + log.Errorf("error getting result for tx %v: %v", txHash, err) + } + if !resend { + atomic.StoreUint64(&s.latestSentToL1Batch, valueToBatch) + } else { + s.ethTransactions[*txOldHash].Status = "*resent" + } + s.mutexEthTx.Unlock() + + // Save sent sequences + err = s.saveSentSequencesTransactions(ctx) + if err != nil { + log.Errorf("error saving tx sequence sent, error: %v", err) + } + return nil +} + +// purgeEthTx purges transactions from memory structures +func (s *SequenceSender) purgeEthTx(ctx context.Context) { + // If sequence sending is stopped, do not purge + if atomic.LoadUint32(&s.seqSendingStopped) == 1 { + return + } + + // Purge old transactions that are finalized + s.mutexEthTx.Lock() + timePurge := time.Now().Add(-s.cfg.WaitPeriodPurgeTxFile.Duration) + toPurge := 
make([]common.Hash, 0) + for hash, data := range s.ethTransactions { + if !data.StatusTimestamp.Before(timePurge) { + continue + } + + if !data.OnMonitor || data.Status == types.MonitoredTxStatusFinalized.String() { + toPurge = append(toPurge, hash) + + // Remove from tx monitor + if data.OnMonitor { + err := s.ethTxManager.Remove(ctx, hash) + if err != nil { + log.Warnf("error removing monitor tx %v from ethtxmanager: %v", hash, err) + } else { + log.Infof("removed monitor tx %v from ethtxmanager", hash) + } + } + } + } + + if len(toPurge) > 0 { + var firstPurged uint64 = math.MaxUint64 + var lastPurged uint64 + for i := 0; i < len(toPurge); i++ { + if s.ethTransactions[toPurge[i]].Nonce < firstPurged { + firstPurged = s.ethTransactions[toPurge[i]].Nonce + } + if s.ethTransactions[toPurge[i]].Nonce > lastPurged { + lastPurged = s.ethTransactions[toPurge[i]].Nonce + } + delete(s.ethTransactions, toPurge[i]) + delete(s.ethTxData, toPurge[i]) + } + log.Infof("txs purged count: %d, fromNonce: %d, toNonce: %d", len(toPurge), firstPurged, lastPurged) + } + s.mutexEthTx.Unlock() +} + +// syncEthTxResults syncs results from L1 for transactions in the memory structure +func (s *SequenceSender) syncEthTxResults(ctx context.Context) (uint64, error) { //nolint:unparam + s.mutexEthTx.Lock() + var ( + txPending uint64 + txSync uint64 + ) + for hash, data := range s.ethTransactions { + if data.Status == types.MonitoredTxStatusFinalized.String() { + continue + } + + err := s.getResultAndUpdateEthTx(ctx, hash) + if err != nil { + log.Errorf("error getting result for tx %v: %v", hash, err) + } + txSync++ + txStatus := s.ethTransactions[hash].Status + // Count if it is not in a final state + if s.ethTransactions[hash].OnMonitor && + txStatus != types.MonitoredTxStatusFailed.String() && + txStatus != types.MonitoredTxStatusSafe.String() && + txStatus != types.MonitoredTxStatusFinalized.String() { + txPending++ + } + } + s.mutexEthTx.Unlock() + + // Save updated sequences transactions 
+ err := s.saveSentSequencesTransactions(ctx) + if err != nil { + log.Errorf("error saving tx sequence, error: %v", err) + } + + log.Infof("%d tx results synchronized (%d in pending state)", txSync, txPending) + return txPending, nil +} + +// syncAllEthTxResults syncs all tx results from L1 +func (s *SequenceSender) syncAllEthTxResults(ctx context.Context) error { + // Get all results + results, err := s.ethTxManager.ResultsByStatus(ctx, nil) + if err != nil { + log.Warnf("error getting results for all tx: %v", err) + return err + } + + // Check and update tx status + numResults := len(results) + s.mutexEthTx.Lock() + for _, result := range results { + txSequence, exists := s.ethTransactions[result.ID] + if !exists { + log.Debugf("transaction %v missing in memory structure. Adding it", result.ID) + // No info: from/to batch and the sent timestamp + s.ethTransactions[result.ID] = ðTxData{ + SentL1Timestamp: time.Time{}, + StatusTimestamp: time.Now(), + OnMonitor: true, + Status: "*missing", + } + txSequence = s.ethTransactions[result.ID] + } + + s.updateEthTxResult(txSequence, result) + } + s.mutexEthTx.Unlock() + + // Save updated sequences transactions + err = s.saveSentSequencesTransactions(ctx) + if err != nil { + log.Errorf("error saving tx sequence, error: %v", err) + } + + log.Infof("%d tx results synchronized", numResults) + return nil +} + +// copyTxData copies tx data in the internal structure +func (s *SequenceSender) copyTxData( + txHash common.Hash, txData []byte, txsResults map[common.Hash]types.TxResult, +) { + s.ethTxData[txHash] = make([]byte, len(txData)) + copy(s.ethTxData[txHash], txData) + + s.ethTransactions[txHash].Txs = make(map[common.Hash]ethTxAdditionalData, 0) + for hash, result := range txsResults { + var gasPrice *big.Int + if result.Tx != nil { + gasPrice = result.Tx.GasPrice() + } + + add := ethTxAdditionalData{ + GasPrice: gasPrice, + RevertMessage: result.RevertMessage, + } + s.ethTransactions[txHash].Txs[hash] = add + } +} + +// 
updateEthTxResult handles updating transaction state +func (s *SequenceSender) updateEthTxResult(txData *ethTxData, txResult types.MonitoredTxResult) { + if txData.Status != txResult.Status.String() { + log.Infof("update transaction %v to state %s", txResult.ID, txResult.Status.String()) + txData.StatusTimestamp = time.Now() + stTrans := txData.StatusTimestamp.Format("2006-01-02T15:04:05.000-07:00") + ", " + txData.Status + ", " + txResult.Status.String() + txData.Status = txResult.Status.String() + txData.StateHistory = append(txData.StateHistory, stTrans) + + // Manage according to the state + statusConsolidated := txData.Status == types.MonitoredTxStatusSafe.String() || + txData.Status == types.MonitoredTxStatusFinalized.String() + if txData.Status == types.MonitoredTxStatusFailed.String() { + s.logFatalf("transaction %v result failed!", txResult.ID) + } else if statusConsolidated && txData.ToBatch >= atomic.LoadUint64(&s.latestVirtualBatchNumber) { + s.latestVirtualTime = txData.StatusTimestamp + } + } + + // Update info received from L1 + txData.Nonce = txResult.Nonce + if txResult.To != nil { + txData.To = *txResult.To + } + if txResult.MinedAtBlockNumber != nil { + txData.MinedAtBlock = *txResult.MinedAtBlockNumber + } + s.copyTxData(txResult.ID, txResult.Data, txResult.Txs) +} + +// getResultAndUpdateEthTx updates the tx status from the ethTxManager +func (s *SequenceSender) getResultAndUpdateEthTx(ctx context.Context, txHash common.Hash) error { + txData, exists := s.ethTransactions[txHash] + if !exists { + s.logger.Errorf("transaction %v not found in memory", txHash) + return errors.New("transaction not found in memory structure") + } + + txResult, err := s.ethTxManager.Result(ctx, txHash) + switch { + case errors.Is(err, ethtxmanager.ErrNotFound): + s.logger.Infof("transaction %v does not exist in ethtxmanager. 
Marking it", txHash) + txData.OnMonitor = false + // Resend tx + errSend := s.sendTx(ctx, true, &txHash, nil, 0, 0, nil, txData.Gas) + if errSend == nil { + txData.OnMonitor = false + } + + case err != nil: + s.logger.Errorf("error getting result for tx %v: %v", txHash, err) + return err + + default: + s.updateEthTxResult(txData, txResult) + } + + return nil +} + +// loadSentSequencesTransactions loads the file into the memory structure +func (s *SequenceSender) loadSentSequencesTransactions() error { + // Check if file exists + if _, err := os.Stat(s.cfg.SequencesTxFileName); os.IsNotExist(err) { + log.Infof("file not found %s: %v", s.cfg.SequencesTxFileName, err) + return nil + } else if err != nil { + log.Errorf("error opening file %s: %v", s.cfg.SequencesTxFileName, err) + return err + } + + // Read file + data, err := os.ReadFile(s.cfg.SequencesTxFileName) + if err != nil { + log.Errorf("error reading file %s: %v", s.cfg.SequencesTxFileName, err) + return err + } + + // Restore memory structure + s.mutexEthTx.Lock() + err = json.Unmarshal(data, &s.ethTransactions) + s.mutexEthTx.Unlock() + if err != nil { + log.Errorf("error decoding data from %s: %v", s.cfg.SequencesTxFileName, err) + return err + } + + return nil +} + +// saveSentSequencesTransactions saves memory structure into persistent file +func (s *SequenceSender) saveSentSequencesTransactions(ctx context.Context) error { + var err error + + // Purge tx + s.purgeEthTx(ctx) + + // Create file + fileName := s.cfg.SequencesTxFileName[0:strings.IndexRune(s.cfg.SequencesTxFileName, '.')] + ".tmp" + s.sequencesTxFile, err = os.Create(fileName) + if err != nil { + log.Errorf("error creating file %s: %v", fileName, err) + return err + } + defer s.sequencesTxFile.Close() + + // Write data JSON encoded + encoder := json.NewEncoder(s.sequencesTxFile) + encoder.SetIndent("", " ") + s.mutexEthTx.Lock() + err = encoder.Encode(s.ethTransactions) + s.mutexEthTx.Unlock() + if err != nil { + log.Errorf("error writing 
file %s: %v", fileName, err) + return err + } + + // Rename the new file + err = os.Rename(fileName, s.cfg.SequencesTxFileName) + if err != nil { + log.Errorf("error renaming file %s to %s: %v", fileName, s.cfg.SequencesTxFileName, err) + return err + } + + return nil +} diff --git a/sequencesender/ethtx_test.go b/sequencesender/ethtx_test.go new file mode 100644 index 00000000..06afb640 --- /dev/null +++ b/sequencesender/ethtx_test.go @@ -0,0 +1,786 @@ +package sequencesender + +import ( + "context" + "encoding/json" + "errors" + "math/big" + "os" + "testing" + "time" + + "github.com/0xPolygon/cdk/log" + "github.com/0xPolygon/cdk/sequencesender/mocks" + "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" + ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func Test_sendTx(t *testing.T) { + t.Parallel() + + addr := common.BytesToAddress([]byte{1, 2, 3}) + hash := common.HexToHash("0x1") + oldHash := common.HexToHash("0x2") + + type args struct { + resend bool + txOldHash *common.Hash + to *common.Address + fromBatch uint64 + toBatch uint64 + data []byte + gas uint64 + } + + type state struct { + currentNonce uint64 + ethTxData map[common.Hash][]byte + ethTransactions map[common.Hash]*ethTxData + latestSentToL1Batch uint64 + } + + tests := []struct { + name string + args args + state state + getEthTxManager func(t *testing.T) *mocks.EthTxManagerMock + expectedState state + expectedErr error + }{ + { + name: "successfully sent", + args: args{ + resend: false, + txOldHash: nil, + to: &addr, + fromBatch: 1, + toBatch: 2, + data: []byte("test"), + gas: 100500, + }, + getEthTxManager: func(t *testing.T) *mocks.EthTxManagerMock { + t.Helper() + + mngr := mocks.NewEthTxManagerMock(t) + mngr.On("AddWithGas", mock.Anything, &addr, big.NewInt(0), []byte("test"), uint64(0), mock.Anything, uint64(100500)).Return(hash, nil) + 
mngr.On("Result", mock.Anything, hash).Return(ethtxtypes.MonitoredTxResult{ + ID: hash, + Data: []byte{1, 2, 3}, + }, nil) + return mngr + }, + state: state{ + currentNonce: 10, + ethTxData: map[common.Hash][]byte{ + hash: {}, + }, + ethTransactions: map[common.Hash]*ethTxData{ + hash: {}, + }, + latestSentToL1Batch: 0, + }, + expectedState: state{ + currentNonce: 11, + ethTxData: map[common.Hash][]byte{ + hash: {1, 2, 3}, + }, + ethTransactions: map[common.Hash]*ethTxData{ + hash: { + SentL1Timestamp: now, + StatusTimestamp: now, + FromBatch: 1, + ToBatch: 2, + OnMonitor: true, + To: addr, + Gas: 100500, + StateHistory: []string{now.Format("2006-01-02T15:04:05.000-07:00") + ", *new, "}, + Txs: map[common.Hash]ethTxAdditionalData{}, + }, + }, + latestSentToL1Batch: 2, + }, + expectedErr: nil, + }, + { + name: "successfully sent with resend", + args: args{ + resend: true, + txOldHash: &oldHash, + gas: 100500, + }, + getEthTxManager: func(t *testing.T) *mocks.EthTxManagerMock { + t.Helper() + + mngr := mocks.NewEthTxManagerMock(t) + mngr.On("AddWithGas", mock.Anything, &addr, big.NewInt(0), []byte(nil), uint64(0), mock.Anything, uint64(100500)).Return(hash, nil) + mngr.On("Result", mock.Anything, hash).Return(ethtxtypes.MonitoredTxResult{ + ID: hash, + Data: []byte{1, 2, 3}, + }, nil) + return mngr + }, + state: state{ + ethTxData: map[common.Hash][]byte{ + hash: []byte("test"), + }, + ethTransactions: map[common.Hash]*ethTxData{ + oldHash: { + To: addr, + Nonce: 10, + FromBatch: 1, + ToBatch: 2, + }, + }, + latestSentToL1Batch: 0, + }, + expectedState: state{ + currentNonce: 0, + ethTxData: map[common.Hash][]byte{ + hash: {1, 2, 3}, + }, + ethTransactions: map[common.Hash]*ethTxData{ + hash: { + SentL1Timestamp: now, + StatusTimestamp: now, + FromBatch: 1, + ToBatch: 2, + OnMonitor: true, + To: addr, + Gas: 100500, + StateHistory: []string{now.Format("2006-01-02T15:04:05.000-07:00") + ", *new, "}, + Txs: map[common.Hash]ethTxAdditionalData{}, + }, + }, + 
latestSentToL1Batch: 0, + }, + expectedErr: nil, + }, + { + name: "add with gas returns error", + args: args{ + resend: true, + txOldHash: &oldHash, + gas: 100500, + }, + getEthTxManager: func(t *testing.T) *mocks.EthTxManagerMock { + t.Helper() + + mngr := mocks.NewEthTxManagerMock(t) + mngr.On("AddWithGas", mock.Anything, &addr, big.NewInt(0), []byte(nil), uint64(0), mock.Anything, uint64(100500)).Return(nil, errors.New("failed to add with gas")) + return mngr + }, + state: state{ + ethTxData: map[common.Hash][]byte{ + hash: []byte("test"), + }, + ethTransactions: map[common.Hash]*ethTxData{ + oldHash: { + To: addr, + Nonce: 10, + FromBatch: 1, + ToBatch: 2, + }, + }, + latestSentToL1Batch: 0, + }, + expectedErr: errors.New("failed to add with gas"), + }, + { + name: "empty old hash", + args: args{ + resend: true, + gas: 100500, + }, + getEthTxManager: func(t *testing.T) *mocks.EthTxManagerMock { + t.Helper() + + mngr := mocks.NewEthTxManagerMock(t) + return mngr + }, + state: state{ + ethTxData: map[common.Hash][]byte{ + hash: []byte("test"), + }, + ethTransactions: map[common.Hash]*ethTxData{ + oldHash: { + To: addr, + Nonce: 10, + FromBatch: 1, + ToBatch: 2, + }, + }, + latestSentToL1Batch: 0, + }, + expectedErr: errors.New("resend tx with nil hash monitor id"), + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + tmpFile, err := os.CreateTemp(os.TempDir(), tt.name+".tmp") + require.NoError(t, err) + defer os.RemoveAll(tmpFile.Name() + ".tmp") + + ss := SequenceSender{ + ethTxData: tt.state.ethTxData, + ethTransactions: tt.state.ethTransactions, + ethTxManager: tt.getEthTxManager(t), + latestSentToL1Batch: tt.state.latestSentToL1Batch, + cfg: Config{ + SequencesTxFileName: tmpFile.Name() + ".tmp", + }, + logger: log.GetDefaultLogger(), + } + + err = ss.sendTx(context.Background(), tt.args.resend, tt.args.txOldHash, tt.args.to, tt.args.fromBatch, tt.args.toBatch, tt.args.data, tt.args.gas) + if 
tt.expectedErr != nil { + require.Equal(t, tt.expectedErr, err) + } else { + require.NoError(t, err) + require.Equal(t, tt.expectedState.ethTxData, ss.ethTxData) + require.Equal(t, len(tt.expectedState.ethTransactions), len(ss.ethTransactions)) + for k, v := range tt.expectedState.ethTransactions { + require.Equal(t, v.Gas, ss.ethTransactions[k].Gas) + require.Equal(t, v.To, ss.ethTransactions[k].To) + require.Equal(t, v.Nonce, ss.ethTransactions[k].Nonce) + require.Equal(t, v.Status, ss.ethTransactions[k].Status) + require.Equal(t, v.FromBatch, ss.ethTransactions[k].FromBatch) + require.Equal(t, v.ToBatch, ss.ethTransactions[k].ToBatch) + require.Equal(t, v.OnMonitor, ss.ethTransactions[k].OnMonitor) + } + require.Equal(t, tt.expectedState.latestSentToL1Batch, ss.latestSentToL1Batch) + } + }) + } +} + +func Test_purgeEthTx(t *testing.T) { + t.Parallel() + + firstTimestamp := time.Now().Add(-time.Hour) + secondTimestamp := time.Now().Add(time.Hour) + + tests := []struct { + name string + seqSendingStopped uint32 + ethTransactions map[common.Hash]*ethTxData + ethTxData map[common.Hash][]byte + getEthTxManager func(t *testing.T) *mocks.EthTxManagerMock + sequenceList []uint64 + expectedEthTransactions map[common.Hash]*ethTxData + expectedEthTxData map[common.Hash][]byte + }{ + { + name: "sequence sender stopped", + seqSendingStopped: 1, + ethTransactions: map[common.Hash]*ethTxData{ + common.HexToHash("0x1"): { + StatusTimestamp: firstTimestamp, + OnMonitor: true, + Status: ethtxtypes.MonitoredTxStatusFinalized.String(), + }, + }, + ethTxData: map[common.Hash][]byte{ + common.HexToHash("0x1"): {1, 2, 3}, + }, + getEthTxManager: func(t *testing.T) *mocks.EthTxManagerMock { + t.Helper() + + return mocks.NewEthTxManagerMock(t) + }, + sequenceList: []uint64{1, 2}, + expectedEthTransactions: map[common.Hash]*ethTxData{ + common.HexToHash("0x1"): { + StatusTimestamp: firstTimestamp, + OnMonitor: true, + Status: ethtxtypes.MonitoredTxStatusFinalized.String(), + }, + }, + 
expectedEthTxData: map[common.Hash][]byte{ + common.HexToHash("0x1"): {1, 2, 3}, + }, + }, + { + name: "transactions purged", + ethTransactions: map[common.Hash]*ethTxData{ + common.HexToHash("0x1"): { + StatusTimestamp: firstTimestamp, + OnMonitor: true, + Status: ethtxtypes.MonitoredTxStatusFinalized.String(), + }, + common.HexToHash("0x2"): { + StatusTimestamp: secondTimestamp, + }, + }, + ethTxData: map[common.Hash][]byte{ + common.HexToHash("0x1"): {1, 2, 3}, + common.HexToHash("0x2"): {4, 5, 6}, + }, + getEthTxManager: func(t *testing.T) *mocks.EthTxManagerMock { + t.Helper() + + mngr := mocks.NewEthTxManagerMock(t) + mngr.On("Remove", mock.Anything, common.HexToHash("0x1")).Return(nil) + return mngr + }, + sequenceList: []uint64{1, 2}, + expectedEthTransactions: map[common.Hash]*ethTxData{ + common.HexToHash("0x2"): { + StatusTimestamp: secondTimestamp, + }, + }, + expectedEthTxData: map[common.Hash][]byte{ + common.HexToHash("0x2"): {4, 5, 6}, + }, + }, + { + name: "removed with error", + ethTransactions: map[common.Hash]*ethTxData{ + common.HexToHash("0x1"): { + StatusTimestamp: firstTimestamp, + OnMonitor: true, + Status: ethtxtypes.MonitoredTxStatusFinalized.String(), + }, + common.HexToHash("0x2"): { + StatusTimestamp: secondTimestamp, + }, + }, + ethTxData: map[common.Hash][]byte{ + common.HexToHash("0x1"): {1, 2, 3}, + common.HexToHash("0x2"): {4, 5, 6}, + }, + getEthTxManager: func(t *testing.T) *mocks.EthTxManagerMock { + t.Helper() + + mngr := mocks.NewEthTxManagerMock(t) + mngr.On("Remove", mock.Anything, common.HexToHash("0x1")).Return(errors.New("test err")) + return mngr + }, + sequenceList: []uint64{1, 2}, + expectedEthTransactions: map[common.Hash]*ethTxData{ + common.HexToHash("0x2"): { + StatusTimestamp: secondTimestamp, + }, + }, + expectedEthTxData: map[common.Hash][]byte{ + common.HexToHash("0x2"): {4, 5, 6}, + }, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + mngr := 
tt.getEthTxManager(t) + ss := SequenceSender{ + seqSendingStopped: tt.seqSendingStopped, + ethTransactions: tt.ethTransactions, + ethTxData: tt.ethTxData, + ethTxManager: mngr, + logger: log.GetDefaultLogger(), + } + + ss.purgeEthTx(context.Background()) + + mngr.AssertExpectations(t) + require.Equal(t, tt.expectedEthTransactions, ss.ethTransactions) + require.Equal(t, tt.expectedEthTxData, ss.ethTxData) + }) + } +} + +func Test_syncEthTxResults(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + ethTransactions map[common.Hash]*ethTxData + getEthTxManager func(t *testing.T) *mocks.EthTxManagerMock + + expectErr error + expectPendingTxs uint64 + }{ + { + name: "successfully synced", + ethTransactions: map[common.Hash]*ethTxData{ + common.HexToHash("0x1"): { + StatusTimestamp: time.Now(), + OnMonitor: true, + Status: ethtxtypes.MonitoredTxStatusCreated.String(), + }, + }, + getEthTxManager: func(t *testing.T) *mocks.EthTxManagerMock { + t.Helper() + + mngr := mocks.NewEthTxManagerMock(t) + mngr.On("Result", mock.Anything, common.HexToHash("0x1")).Return(ethtxtypes.MonitoredTxResult{ + ID: common.HexToHash("0x1"), + Data: []byte{1, 2, 3}, + }, nil) + return mngr + }, + expectPendingTxs: 1, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + tmpFile, err := os.CreateTemp(os.TempDir(), tt.name) + require.NoError(t, err) + + mngr := tt.getEthTxManager(t) + ss := SequenceSender{ + ethTransactions: tt.ethTransactions, + ethTxManager: mngr, + ethTxData: make(map[common.Hash][]byte), + cfg: Config{ + SequencesTxFileName: tmpFile.Name() + ".tmp", + }, + logger: log.GetDefaultLogger(), + } + + pendingTxs, err := ss.syncEthTxResults(context.Background()) + if tt.expectErr != nil { + require.Equal(t, tt.expectErr, err) + } else { + require.NoError(t, err) + require.Equal(t, tt.expectPendingTxs, pendingTxs) + } + + mngr.AssertExpectations(t) + + err = os.RemoveAll(tmpFile.Name() + ".tmp") + 
require.NoError(t, err) + }) + } +} + +func Test_syncAllEthTxResults(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + ethTransactions map[common.Hash]*ethTxData + getEthTxManager func(t *testing.T) *mocks.EthTxManagerMock + + expectErr error + expectPendingTxs uint64 + }{ + { + name: "successfully synced", + ethTransactions: map[common.Hash]*ethTxData{ + common.HexToHash("0x1"): { + StatusTimestamp: time.Now(), + OnMonitor: true, + Status: ethtxtypes.MonitoredTxStatusCreated.String(), + }, + }, + getEthTxManager: func(t *testing.T) *mocks.EthTxManagerMock { + t.Helper() + + mngr := mocks.NewEthTxManagerMock(t) + mngr.On("ResultsByStatus", mock.Anything, []ethtxtypes.MonitoredTxStatus(nil)).Return([]ethtxtypes.MonitoredTxResult{ + { + ID: common.HexToHash("0x1"), + Data: []byte{1, 2, 3}, + }, + }, nil) + return mngr + }, + expectPendingTxs: 1, + }, + { + name: "successfully synced with missing tx", + ethTransactions: map[common.Hash]*ethTxData{}, + getEthTxManager: func(t *testing.T) *mocks.EthTxManagerMock { + t.Helper() + + mngr := mocks.NewEthTxManagerMock(t) + mngr.On("ResultsByStatus", mock.Anything, []ethtxtypes.MonitoredTxStatus(nil)).Return([]ethtxtypes.MonitoredTxResult{ + { + ID: common.HexToHash("0x1"), + Data: []byte{1, 2, 3}, + }, + }, nil) + return mngr + }, + expectPendingTxs: 1, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + tmpFile, err := os.CreateTemp(os.TempDir(), tt.name) + require.NoError(t, err) + + mngr := tt.getEthTxManager(t) + ss := SequenceSender{ + ethTransactions: tt.ethTransactions, + ethTxManager: mngr, + ethTxData: make(map[common.Hash][]byte), + cfg: Config{ + SequencesTxFileName: tmpFile.Name() + ".tmp", + }, + logger: log.GetDefaultLogger(), + } + + err = ss.syncAllEthTxResults(context.Background()) + if tt.expectErr != nil { + require.Equal(t, tt.expectErr, err) + } else { + require.NoError(t, err) + } + + mngr.AssertExpectations(t) + + err = 
os.RemoveAll(tmpFile.Name() + ".tmp") + require.NoError(t, err) + }) + } +} + +func Test_copyTxData(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + txHash common.Hash + txData []byte + txsResults map[common.Hash]ethtxtypes.TxResult + ethTxData map[common.Hash][]byte + ethTransactions map[common.Hash]*ethTxData + expectedRthTxData map[common.Hash][]byte + expectedEthTransactions map[common.Hash]*ethTxData + }{ + { + name: "successfully copied", + txHash: common.HexToHash("0x1"), + txData: []byte{1, 2, 3}, + txsResults: map[common.Hash]ethtxtypes.TxResult{ + common.HexToHash("0x1"): {}, + }, + ethTxData: map[common.Hash][]byte{ + common.HexToHash("0x1"): {0, 2, 3}, + }, + ethTransactions: map[common.Hash]*ethTxData{ + common.HexToHash("0x1"): {}, + }, + expectedRthTxData: map[common.Hash][]byte{ + common.HexToHash("0x1"): {1, 2, 3}, + }, + expectedEthTransactions: map[common.Hash]*ethTxData{ + common.HexToHash("0x1"): { + Txs: map[common.Hash]ethTxAdditionalData{ + common.HexToHash("0x1"): {}, + }, + }, + }, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + s := SequenceSender{ + ethTxData: tt.ethTxData, + ethTransactions: tt.ethTransactions, + } + + s.copyTxData(tt.txHash, tt.txData, tt.txsResults) + require.Equal(t, tt.expectedRthTxData, s.ethTxData) + require.Equal(t, tt.expectedEthTransactions, s.ethTransactions) + }) + } +} + +func Test_getResultAndUpdateEthTx(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + hash common.Hash + ethTransactions map[common.Hash]*ethTxData + ethTxData map[common.Hash][]byte + getEthTxManager func(t *testing.T) *mocks.EthTxManagerMock + expectedErr error + }{ + { + name: "successfully updated", + hash: common.HexToHash("0x1"), + ethTransactions: map[common.Hash]*ethTxData{ + common.HexToHash("0x1"): {}, + }, + ethTxData: map[common.Hash][]byte{ + common.HexToHash("0x1"): {}, + }, + getEthTxManager: func(t *testing.T) 
*mocks.EthTxManagerMock { + t.Helper() + + mngr := mocks.NewEthTxManagerMock(t) + mngr.On("Result", mock.Anything, common.HexToHash("0x1")).Return(ethtxtypes.MonitoredTxResult{ + ID: common.HexToHash("0x1"), + Data: []byte{1, 2, 3}, + }, nil) + return mngr + }, + expectedErr: nil, + }, + { + name: "not found", + hash: common.HexToHash("0x1"), + ethTransactions: map[common.Hash]*ethTxData{ + common.HexToHash("0x1"): { + Gas: 100500, + }, + }, + ethTxData: map[common.Hash][]byte{ + common.HexToHash("0x1"): {}, + }, + getEthTxManager: func(t *testing.T) *mocks.EthTxManagerMock { + t.Helper() + + mngr := mocks.NewEthTxManagerMock(t) + mngr.On("Result", mock.Anything, common.HexToHash("0x1")).Return(ethtxtypes.MonitoredTxResult{}, ethtxmanager.ErrNotFound) + mngr.On("AddWithGas", mock.Anything, mock.Anything, big.NewInt(0), mock.Anything, mock.Anything, mock.Anything, uint64(100500)).Return(common.Hash{}, nil) + mngr.On("Result", mock.Anything, common.Hash{}).Return(ethtxtypes.MonitoredTxResult{ + ID: common.HexToHash("0x1"), + Data: []byte{1, 2, 3}, + }, nil) + return mngr + }, + expectedErr: nil, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + tmpFile, err := os.CreateTemp(os.TempDir(), tt.name+".tmp") + require.NoError(t, err) + defer os.RemoveAll(tmpFile.Name() + ".tmp") + + ss := SequenceSender{ + ethTransactions: tt.ethTransactions, + ethTxData: tt.ethTxData, + ethTxManager: tt.getEthTxManager(t), + cfg: Config{ + SequencesTxFileName: tmpFile.Name() + ".tmp", + }, + logger: log.GetDefaultLogger(), + } + + err = ss.getResultAndUpdateEthTx(context.Background(), tt.hash) + if tt.expectedErr != nil { + require.Equal(t, tt.expectedErr, err) + } else { + require.NoError(t, err) + } + }) + } +} + +func Test_loadSentSequencesTransactions(t *testing.T) { + t.Parallel() + + tx := ðTxData{ + FromBatch: 1, + ToBatch: 2, + OnMonitor: true, + To: common.BytesToAddress([]byte{1, 2, 3}), + Gas: 100500, + StateHistory: 
[]string{"2021-09-01T15:04:05.000-07:00, *new, "}, + Txs: map[common.Hash]ethTxAdditionalData{}, + } + + tests := []struct { + name string + getFilename func(t *testing.T) string + expectEthTransactions map[common.Hash]*ethTxData + expectErr error + }{ + { + name: "successfully loaded", + getFilename: func(t *testing.T) string { + t.Helper() + + tmpFile, err := os.CreateTemp(os.TempDir(), "test") + require.NoError(t, err) + + ethTxDataBytes, err := json.Marshal(map[common.Hash]*ethTxData{ + common.HexToHash("0x1"): tx, + }) + require.NoError(t, err) + + _, err = tmpFile.Write(ethTxDataBytes) + require.NoError(t, err) + + t.Cleanup(func() { + err := os.Remove(tmpFile.Name()) + require.NoError(t, err) + }) + + return tmpFile.Name() + }, + expectEthTransactions: map[common.Hash]*ethTxData{ + common.HexToHash("0x1"): tx, + }, + }, + { + name: "file does not exist", + getFilename: func(t *testing.T) string { + t.Helper() + + return "does not exist.tmp" + }, + expectEthTransactions: map[common.Hash]*ethTxData{}, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + s := SequenceSender{ + cfg: Config{ + SequencesTxFileName: tt.getFilename(t), + }, + ethTransactions: map[common.Hash]*ethTxData{}, + logger: log.GetDefaultLogger(), + } + + err := s.loadSentSequencesTransactions() + if tt.expectErr != nil { + require.Equal(t, tt.expectErr, err) + } else { + require.NoError(t, err) + require.Equal(t, tt.expectEthTransactions, s.ethTransactions) + } + }) + } +} diff --git a/sequencesender/mocks/mock_etherman.go b/sequencesender/mocks/mock_etherman.go new file mode 100644 index 00000000..46a70170 --- /dev/null +++ b/sequencesender/mocks/mock_etherman.go @@ -0,0 +1,147 @@ +// Code generated by mockery v2.40.1. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + big "math/big" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// EthermanMock is an autogenerated mock type for the Etherman type +type EthermanMock struct { + mock.Mock +} + +// CurrentNonce provides a mock function with given fields: ctx, address +func (_m *EthermanMock) CurrentNonce(ctx context.Context, address common.Address) (uint64, error) { + ret := _m.Called(ctx, address) + + if len(ret) == 0 { + panic("no return value specified for CurrentNonce") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address) (uint64, error)); ok { + return rf(ctx, address) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address) uint64); ok { + r0 = rf(ctx, address) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address) error); ok { + r1 = rf(ctx, address) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EstimateGas provides a mock function with given fields: ctx, from, to, value, data +func (_m *EthermanMock) EstimateGas(ctx context.Context, from common.Address, to *common.Address, value *big.Int, data []byte) (uint64, error) { + ret := _m.Called(ctx, from, to, value, data) + + if len(ret) == 0 { + panic("no return value specified for EstimateGas") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *common.Address, *big.Int, []byte) (uint64, error)); ok { + return rf(ctx, from, to, value, data) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Address, *common.Address, *big.Int, []byte) uint64); ok { + r0 = rf(ctx, from, to, value, data) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Address, *common.Address, *big.Int, []byte) error); ok { + r1 = rf(ctx, from, to, value, data) + } else 
{ + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetLatestBatchNumber provides a mock function with given fields: +func (_m *EthermanMock) GetLatestBatchNumber() (uint64, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetLatestBatchNumber") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func() (uint64, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetLatestBlockHeader provides a mock function with given fields: ctx +func (_m *EthermanMock) GetLatestBlockHeader(ctx context.Context) (*types.Header, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetLatestBlockHeader") + } + + var r0 *types.Header + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*types.Header, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *types.Header); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Header) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewEthermanMock creates a new instance of EthermanMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewEthermanMock(t interface { + mock.TestingT + Cleanup(func()) +}) *EthermanMock { + mock := &EthermanMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/sequencesender/mocks/mock_ethtxmanager.go b/sequencesender/mocks/mock_ethtxmanager.go new file mode 100644 index 00000000..f3b456a4 --- /dev/null +++ b/sequencesender/mocks/mock_ethtxmanager.go @@ -0,0 +1,146 @@ +// Code generated by mockery v2.40.1. DO NOT EDIT. + +package mocks + +import ( + context "context" + big "math/big" + + ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// EthTxManagerMock is an autogenerated mock type for the EthTxManager type +type EthTxManagerMock struct { + mock.Mock +} + +// AddWithGas provides a mock function with given fields: ctx, to, value, data, gasOffset, sidecar, gas +func (_m *EthTxManagerMock) AddWithGas(ctx context.Context, to *common.Address, value *big.Int, data []byte, gasOffset uint64, sidecar *types.BlobTxSidecar, gas uint64) (common.Hash, error) { + ret := _m.Called(ctx, to, value, data, gasOffset, sidecar, gas) + + if len(ret) == 0 { + panic("no return value specified for AddWithGas") + } + + var r0 common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar, uint64) (common.Hash, error)); ok { + return rf(ctx, to, value, data, gasOffset, sidecar, gas) + } + if rf, ok := ret.Get(0).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar, uint64) common.Hash); ok { + r0 = rf(ctx, to, value, data, gasOffset, sidecar, gas) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar, uint64) error); ok 
{ + r1 = rf(ctx, to, value, data, gasOffset, sidecar, gas) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Remove provides a mock function with given fields: ctx, hash +func (_m *EthTxManagerMock) Remove(ctx context.Context, hash common.Hash) error { + ret := _m.Called(ctx, hash) + + if len(ret) == 0 { + panic("no return value specified for Remove") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) error); ok { + r0 = rf(ctx, hash) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Result provides a mock function with given fields: ctx, hash +func (_m *EthTxManagerMock) Result(ctx context.Context, hash common.Hash) (ethtxtypes.MonitoredTxResult, error) { + ret := _m.Called(ctx, hash) + + if len(ret) == 0 { + panic("no return value specified for Result") + } + + var r0 ethtxtypes.MonitoredTxResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (ethtxtypes.MonitoredTxResult, error)); ok { + return rf(ctx, hash) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) ethtxtypes.MonitoredTxResult); ok { + r0 = rf(ctx, hash) + } else { + r0 = ret.Get(0).(ethtxtypes.MonitoredTxResult) + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, hash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ResultsByStatus provides a mock function with given fields: ctx, status +func (_m *EthTxManagerMock) ResultsByStatus(ctx context.Context, status []ethtxtypes.MonitoredTxStatus) ([]ethtxtypes.MonitoredTxResult, error) { + ret := _m.Called(ctx, status) + + if len(ret) == 0 { + panic("no return value specified for ResultsByStatus") + } + + var r0 []ethtxtypes.MonitoredTxResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []ethtxtypes.MonitoredTxStatus) ([]ethtxtypes.MonitoredTxResult, error)); ok { + return rf(ctx, status) + } + if rf, ok := ret.Get(0).(func(context.Context, []ethtxtypes.MonitoredTxStatus) 
[]ethtxtypes.MonitoredTxResult); ok { + r0 = rf(ctx, status) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]ethtxtypes.MonitoredTxResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []ethtxtypes.MonitoredTxStatus) error); ok { + r1 = rf(ctx, status) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Start provides a mock function with given fields: +func (_m *EthTxManagerMock) Start() { + _m.Called() +} + +// NewEthTxManagerMock creates a new instance of EthTxManagerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEthTxManagerMock(t interface { + mock.TestingT + Cleanup(func()) +}) *EthTxManagerMock { + mock := &EthTxManagerMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/sequencesender/mocks/mock_txbuilder.go b/sequencesender/mocks/mock_txbuilder.go new file mode 100644 index 00000000..0607313b --- /dev/null +++ b/sequencesender/mocks/mock_txbuilder.go @@ -0,0 +1,367 @@ +// Code generated by mockery v2.40.1. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + datastream "github.com/0xPolygon/cdk/state/datastream" + + mock "github.com/stretchr/testify/mock" + + seqsendertypes "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" + + txbuilder "github.com/0xPolygon/cdk/sequencesender/txbuilder" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// TxBuilderMock is an autogenerated mock type for the TxBuilder type +type TxBuilderMock struct { + mock.Mock +} + +type TxBuilderMock_Expecter struct { + mock *mock.Mock +} + +func (_m *TxBuilderMock) EXPECT() *TxBuilderMock_Expecter { + return &TxBuilderMock_Expecter{mock: &_m.Mock} +} + +// BuildSequenceBatchesTx provides a mock function with given fields: ctx, sequences +func (_m *TxBuilderMock) BuildSequenceBatchesTx(ctx context.Context, sequences seqsendertypes.Sequence) (*types.Transaction, error) { + ret := _m.Called(ctx, sequences) + + if len(ret) == 0 { + panic("no return value specified for BuildSequenceBatchesTx") + } + + var r0 *types.Transaction + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, seqsendertypes.Sequence) (*types.Transaction, error)); ok { + return rf(ctx, sequences) + } + if rf, ok := ret.Get(0).(func(context.Context, seqsendertypes.Sequence) *types.Transaction); ok { + r0 = rf(ctx, sequences) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, seqsendertypes.Sequence) error); ok { + r1 = rf(ctx, sequences) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TxBuilderMock_BuildSequenceBatchesTx_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BuildSequenceBatchesTx' +type TxBuilderMock_BuildSequenceBatchesTx_Call struct { + *mock.Call +} + +// BuildSequenceBatchesTx is a helper method to define mock.On call +// - ctx context.Context +// - sequences seqsendertypes.Sequence +func (_e 
*TxBuilderMock_Expecter) BuildSequenceBatchesTx(ctx interface{}, sequences interface{}) *TxBuilderMock_BuildSequenceBatchesTx_Call { + return &TxBuilderMock_BuildSequenceBatchesTx_Call{Call: _e.mock.On("BuildSequenceBatchesTx", ctx, sequences)} +} + +func (_c *TxBuilderMock_BuildSequenceBatchesTx_Call) Run(run func(ctx context.Context, sequences seqsendertypes.Sequence)) *TxBuilderMock_BuildSequenceBatchesTx_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(seqsendertypes.Sequence)) + }) + return _c +} + +func (_c *TxBuilderMock_BuildSequenceBatchesTx_Call) Return(_a0 *types.Transaction, _a1 error) *TxBuilderMock_BuildSequenceBatchesTx_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *TxBuilderMock_BuildSequenceBatchesTx_Call) RunAndReturn(run func(context.Context, seqsendertypes.Sequence) (*types.Transaction, error)) *TxBuilderMock_BuildSequenceBatchesTx_Call { + _c.Call.Return(run) + return _c +} + +// NewBatchFromL2Block provides a mock function with given fields: l2Block +func (_m *TxBuilderMock) NewBatchFromL2Block(l2Block *datastream.L2Block) seqsendertypes.Batch { + ret := _m.Called(l2Block) + + if len(ret) == 0 { + panic("no return value specified for NewBatchFromL2Block") + } + + var r0 seqsendertypes.Batch + if rf, ok := ret.Get(0).(func(*datastream.L2Block) seqsendertypes.Batch); ok { + r0 = rf(l2Block) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(seqsendertypes.Batch) + } + } + + return r0 +} + +// TxBuilderMock_NewBatchFromL2Block_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewBatchFromL2Block' +type TxBuilderMock_NewBatchFromL2Block_Call struct { + *mock.Call +} + +// NewBatchFromL2Block is a helper method to define mock.On call +// - l2Block *datastream.L2Block +func (_e *TxBuilderMock_Expecter) NewBatchFromL2Block(l2Block interface{}) *TxBuilderMock_NewBatchFromL2Block_Call { + return &TxBuilderMock_NewBatchFromL2Block_Call{Call: 
_e.mock.On("NewBatchFromL2Block", l2Block)} +} + +func (_c *TxBuilderMock_NewBatchFromL2Block_Call) Run(run func(l2Block *datastream.L2Block)) *TxBuilderMock_NewBatchFromL2Block_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*datastream.L2Block)) + }) + return _c +} + +func (_c *TxBuilderMock_NewBatchFromL2Block_Call) Return(_a0 seqsendertypes.Batch) *TxBuilderMock_NewBatchFromL2Block_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *TxBuilderMock_NewBatchFromL2Block_Call) RunAndReturn(run func(*datastream.L2Block) seqsendertypes.Batch) *TxBuilderMock_NewBatchFromL2Block_Call { + _c.Call.Return(run) + return _c +} + +// NewSequence provides a mock function with given fields: ctx, batches, coinbase +func (_m *TxBuilderMock) NewSequence(ctx context.Context, batches []seqsendertypes.Batch, coinbase common.Address) (seqsendertypes.Sequence, error) { + ret := _m.Called(ctx, batches, coinbase) + + if len(ret) == 0 { + panic("no return value specified for NewSequence") + } + + var r0 seqsendertypes.Sequence + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []seqsendertypes.Batch, common.Address) (seqsendertypes.Sequence, error)); ok { + return rf(ctx, batches, coinbase) + } + if rf, ok := ret.Get(0).(func(context.Context, []seqsendertypes.Batch, common.Address) seqsendertypes.Sequence); ok { + r0 = rf(ctx, batches, coinbase) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(seqsendertypes.Sequence) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []seqsendertypes.Batch, common.Address) error); ok { + r1 = rf(ctx, batches, coinbase) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TxBuilderMock_NewSequence_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewSequence' +type TxBuilderMock_NewSequence_Call struct { + *mock.Call +} + +// NewSequence is a helper method to define mock.On call +// - ctx context.Context +// - batches []seqsendertypes.Batch +// - coinbase 
common.Address +func (_e *TxBuilderMock_Expecter) NewSequence(ctx interface{}, batches interface{}, coinbase interface{}) *TxBuilderMock_NewSequence_Call { + return &TxBuilderMock_NewSequence_Call{Call: _e.mock.On("NewSequence", ctx, batches, coinbase)} +} + +func (_c *TxBuilderMock_NewSequence_Call) Run(run func(ctx context.Context, batches []seqsendertypes.Batch, coinbase common.Address)) *TxBuilderMock_NewSequence_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]seqsendertypes.Batch), args[2].(common.Address)) + }) + return _c +} + +func (_c *TxBuilderMock_NewSequence_Call) Return(_a0 seqsendertypes.Sequence, _a1 error) *TxBuilderMock_NewSequence_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *TxBuilderMock_NewSequence_Call) RunAndReturn(run func(context.Context, []seqsendertypes.Batch, common.Address) (seqsendertypes.Sequence, error)) *TxBuilderMock_NewSequence_Call { + _c.Call.Return(run) + return _c +} + +// NewSequenceIfWorthToSend provides a mock function with given fields: ctx, sequenceBatches, l2Coinbase, batchNumber +func (_m *TxBuilderMock) NewSequenceIfWorthToSend(ctx context.Context, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address, batchNumber uint64) (seqsendertypes.Sequence, error) { + ret := _m.Called(ctx, sequenceBatches, l2Coinbase, batchNumber) + + if len(ret) == 0 { + panic("no return value specified for NewSequenceIfWorthToSend") + } + + var r0 seqsendertypes.Sequence + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []seqsendertypes.Batch, common.Address, uint64) (seqsendertypes.Sequence, error)); ok { + return rf(ctx, sequenceBatches, l2Coinbase, batchNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, []seqsendertypes.Batch, common.Address, uint64) seqsendertypes.Sequence); ok { + r0 = rf(ctx, sequenceBatches, l2Coinbase, batchNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(seqsendertypes.Sequence) + } + } + + if rf, ok := 
ret.Get(1).(func(context.Context, []seqsendertypes.Batch, common.Address, uint64) error); ok { + r1 = rf(ctx, sequenceBatches, l2Coinbase, batchNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TxBuilderMock_NewSequenceIfWorthToSend_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewSequenceIfWorthToSend' +type TxBuilderMock_NewSequenceIfWorthToSend_Call struct { + *mock.Call +} + +// NewSequenceIfWorthToSend is a helper method to define mock.On call +// - ctx context.Context +// - sequenceBatches []seqsendertypes.Batch +// - l2Coinbase common.Address +// - batchNumber uint64 +func (_e *TxBuilderMock_Expecter) NewSequenceIfWorthToSend(ctx interface{}, sequenceBatches interface{}, l2Coinbase interface{}, batchNumber interface{}) *TxBuilderMock_NewSequenceIfWorthToSend_Call { + return &TxBuilderMock_NewSequenceIfWorthToSend_Call{Call: _e.mock.On("NewSequenceIfWorthToSend", ctx, sequenceBatches, l2Coinbase, batchNumber)} +} + +func (_c *TxBuilderMock_NewSequenceIfWorthToSend_Call) Run(run func(ctx context.Context, sequenceBatches []seqsendertypes.Batch, l2Coinbase common.Address, batchNumber uint64)) *TxBuilderMock_NewSequenceIfWorthToSend_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]seqsendertypes.Batch), args[2].(common.Address), args[3].(uint64)) + }) + return _c +} + +func (_c *TxBuilderMock_NewSequenceIfWorthToSend_Call) Return(_a0 seqsendertypes.Sequence, _a1 error) *TxBuilderMock_NewSequenceIfWorthToSend_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *TxBuilderMock_NewSequenceIfWorthToSend_Call) RunAndReturn(run func(context.Context, []seqsendertypes.Batch, common.Address, uint64) (seqsendertypes.Sequence, error)) *TxBuilderMock_NewSequenceIfWorthToSend_Call { + _c.Call.Return(run) + return _c +} + +// SetCondNewSeq provides a mock function with given fields: cond +func (_m *TxBuilderMock) SetCondNewSeq(cond txbuilder.CondNewSequence) 
txbuilder.CondNewSequence { + ret := _m.Called(cond) + + if len(ret) == 0 { + panic("no return value specified for SetCondNewSeq") + } + + var r0 txbuilder.CondNewSequence + if rf, ok := ret.Get(0).(func(txbuilder.CondNewSequence) txbuilder.CondNewSequence); ok { + r0 = rf(cond) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(txbuilder.CondNewSequence) + } + } + + return r0 +} + +// TxBuilderMock_SetCondNewSeq_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetCondNewSeq' +type TxBuilderMock_SetCondNewSeq_Call struct { + *mock.Call +} + +// SetCondNewSeq is a helper method to define mock.On call +// - cond txbuilder.CondNewSequence +func (_e *TxBuilderMock_Expecter) SetCondNewSeq(cond interface{}) *TxBuilderMock_SetCondNewSeq_Call { + return &TxBuilderMock_SetCondNewSeq_Call{Call: _e.mock.On("SetCondNewSeq", cond)} +} + +func (_c *TxBuilderMock_SetCondNewSeq_Call) Run(run func(cond txbuilder.CondNewSequence)) *TxBuilderMock_SetCondNewSeq_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(txbuilder.CondNewSequence)) + }) + return _c +} + +func (_c *TxBuilderMock_SetCondNewSeq_Call) Return(_a0 txbuilder.CondNewSequence) *TxBuilderMock_SetCondNewSeq_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *TxBuilderMock_SetCondNewSeq_Call) RunAndReturn(run func(txbuilder.CondNewSequence) txbuilder.CondNewSequence) *TxBuilderMock_SetCondNewSeq_Call { + _c.Call.Return(run) + return _c +} + +// String provides a mock function with given fields: +func (_m *TxBuilderMock) String() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for String") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// TxBuilderMock_String_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'String' +type TxBuilderMock_String_Call struct { + *mock.Call +} + +// String 
is a helper method to define mock.On call +func (_e *TxBuilderMock_Expecter) String() *TxBuilderMock_String_Call { + return &TxBuilderMock_String_Call{Call: _e.mock.On("String")} +} + +func (_c *TxBuilderMock_String_Call) Run(run func()) *TxBuilderMock_String_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *TxBuilderMock_String_Call) Return(_a0 string) *TxBuilderMock_String_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *TxBuilderMock_String_Call) RunAndReturn(run func() string) *TxBuilderMock_String_Call { + _c.Call.Return(run) + return _c +} + +// NewTxBuilderMock creates a new instance of TxBuilderMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTxBuilderMock(t interface { + mock.TestingT + Cleanup(func()) +}) *TxBuilderMock { + mock := &TxBuilderMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/sequencesender/rpc.go b/sequencesender/rpc.go new file mode 100644 index 00000000..a604da37 --- /dev/null +++ b/sequencesender/rpc.go @@ -0,0 +1,95 @@ +package sequencesender + +import ( + "encoding/json" + "fmt" + "math/big" + + "github.com/0xPolygon/cdk-rpc/rpc" + "github.com/0xPolygon/cdk/log" + "github.com/0xPolygon/cdk/sequencesender/seqsendertypes/rpcbatch" + "github.com/0xPolygon/cdk/state" + "github.com/ethereum/go-ethereum/common" +) + +func getBatchFromRPC(addr string, batchNumber uint64) (*rpcbatch.RPCBatch, error) { + type zkEVMBatch struct { + Blocks []string `json:"blocks"` + BatchL2Data string `json:"batchL2Data"` + Coinbase string `json:"coinbase"` + GlobalExitRoot string `json:"globalExitRoot"` + Closed bool `json:"closed"` + Timestamp string `json:"timestamp"` + } + + zkEVMBatchData := zkEVMBatch{} + + log.Infof("Getting batch %d from RPC", batchNumber) + + response, err := rpc.JSONRPCCall(addr, 
"zkevm_getBatchByNumber", batchNumber) + if err != nil { + return nil, err + } + + // Check if the response is nil + if response.Result == nil { + return nil, state.ErrNotFound + } + + // Check if the response is an error + if response.Error != nil { + return nil, fmt.Errorf("error in the response calling zkevm_getBatchByNumber: %v", response.Error) + } + + // Get the batch number from the response hex string + err = json.Unmarshal(response.Result, &zkEVMBatchData) + if err != nil { + return nil, fmt.Errorf("error unmarshalling the batch from the response calling zkevm_getBatchByNumber: %w", err) + } + + rpcBatch, err := rpcbatch.New(batchNumber, zkEVMBatchData.Blocks, common.FromHex(zkEVMBatchData.BatchL2Data), + common.HexToHash(zkEVMBatchData.GlobalExitRoot), common.HexToAddress(zkEVMBatchData.Coinbase), zkEVMBatchData.Closed) + if err != nil { + return nil, fmt.Errorf("error creating the rpc batch: %w", err) + } + + if len(zkEVMBatchData.Blocks) > 0 { + lastL2BlockTimestamp, err := getL2BlockTimestampFromRPC(addr, zkEVMBatchData.Blocks[len(zkEVMBatchData.Blocks)-1]) + if err != nil { + return nil, fmt.Errorf("error getting the last l2 block timestamp from the rpc: %w", err) + } + rpcBatch.SetLastL2BLockTimestamp(lastL2BlockTimestamp) + } else { + log.Infof("No blocks in the batch, setting the last l2 block timestamp from the batch data") + rpcBatch.SetLastL2BLockTimestamp(new(big.Int).SetBytes(common.FromHex(zkEVMBatchData.Timestamp)).Uint64()) + } + + return rpcBatch, nil +} + +func getL2BlockTimestampFromRPC(addr, blockHash string) (uint64, error) { + type zkeEVML2Block struct { + Timestamp string `json:"timestamp"` + } + + log.Infof("Getting l2 block timestamp from RPC. 
Block hash: %s", blockHash) + + response, err := rpc.JSONRPCCall(addr, "eth_getBlockByHash", blockHash, false) + if err != nil { + return 0, err + } + + // Check if the response is an error + if response.Error != nil { + return 0, fmt.Errorf("error in the response calling eth_getBlockByHash: %v", response.Error) + } + + // Get the l2 block from the response + l2Block := zkeEVML2Block{} + err = json.Unmarshal(response.Result, &l2Block) + if err != nil { + return 0, fmt.Errorf("error unmarshalling the l2 block from the response calling eth_getBlockByHash: %w", err) + } + + return new(big.Int).SetBytes(common.FromHex(l2Block.Timestamp)).Uint64(), nil +} diff --git a/sequencesender/rpc_test.go b/sequencesender/rpc_test.go new file mode 100644 index 00000000..4774b237 --- /dev/null +++ b/sequencesender/rpc_test.go @@ -0,0 +1,115 @@ +package sequencesender + +import ( + "encoding/json" + "errors" + "net/http" + "net/http/httptest" + "testing" + + "github.com/0xPolygon/cdk-rpc/rpc" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func Test_getBatchFromRPC(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + batch uint64 + getBatchByNumberResp string + getBlockByHasResp string + getBatchByNumberErr error + getBlockByHashErr error + expectBlocks int + expectData []byte + expectTimestamp uint64 + expectErr error + }{ + { + name: "successfully fetched", + getBatchByNumberResp: `{"jsonrpc":"2.0","id":1,"result":{"blocks":["1", "2", "3"],"batchL2Data":"0x1234567"}}`, + getBlockByHasResp: `{"jsonrpc":"2.0","id":1,"result":{"timestamp":"0x123456"}}`, + batch: 0, + expectBlocks: 3, + expectData: common.FromHex("0x1234567"), + expectTimestamp: 1193046, + expectErr: nil, + }, + { + name: "invalid json", + getBatchByNumberResp: `{"jsonrpc":"2.0","id":1,"result":{"blocks":invalid,"batchL2Data":"test"}}`, + batch: 0, + expectBlocks: 3, + expectData: nil, + expectErr: errors.New("invalid character 'i' looking for beginning of 
value"), + }, + { + name: "wrong json", + getBatchByNumberResp: `{"jsonrpc":"2.0","id":1,"result":{"blocks":"invalid","batchL2Data":"test"}}`, + batch: 0, + expectBlocks: 3, + expectData: nil, + expectErr: errors.New("error unmarshalling the batch from the response calling zkevm_getBatchByNumber: json: cannot unmarshal string into Go struct field zkEVMBatch.blocks of type []string"), + }, + { + name: "error in the response", + getBatchByNumberResp: `{"jsonrpc":"2.0","id":1,"result":null,"error":{"code":-32602,"message":"Invalid params"}}`, + batch: 0, + expectBlocks: 0, + expectData: nil, + expectErr: errors.New("error in the response calling zkevm_getBatchByNumber: &{-32602 Invalid params }"), + }, + { + name: "http failed", + getBatchByNumberErr: errors.New("failed to fetch"), + batch: 0, + expectBlocks: 0, + expectData: nil, + expectErr: errors.New("invalid status code, expected: 200, found: 500"), + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var req rpc.Request + err := json.NewDecoder(r.Body).Decode(&req) + require.NoError(t, err) + + switch req.Method { + case "zkevm_getBatchByNumber": + if tt.getBatchByNumberErr != nil { + http.Error(w, tt.getBatchByNumberErr.Error(), http.StatusInternalServerError) + return + } + + _, _ = w.Write([]byte(tt.getBatchByNumberResp)) + case "eth_getBlockByHash": + if tt.getBlockByHashErr != nil { + http.Error(w, tt.getBlockByHashErr.Error(), http.StatusInternalServerError) + return + } + _, _ = w.Write([]byte(tt.getBlockByHasResp)) + default: + http.Error(w, "method not found", http.StatusNotFound) + } + })) + defer srv.Close() + + rpcBatch, err := getBatchFromRPC(srv.URL, tt.batch) + if tt.expectErr != nil { + require.Equal(t, tt.expectErr.Error(), err.Error()) + } else { + require.NoError(t, err) + require.Equal(t, tt.expectTimestamp, rpcBatch.LastL2BLockTimestamp()) + 
require.Equal(t, tt.expectData, rpcBatch.L2Data()) + } + }) + } +} diff --git a/sequencesender/seqsendertypes/rpcbatch/rpcbatch.go b/sequencesender/seqsendertypes/rpcbatch/rpcbatch.go new file mode 100644 index 00000000..fafc1841 --- /dev/null +++ b/sequencesender/seqsendertypes/rpcbatch/rpcbatch.go @@ -0,0 +1,132 @@ +package rpcbatch + +import ( + "fmt" + + "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" + "github.com/ethereum/go-ethereum/common" +) + +type RPCBatch struct { + batchNumber uint64 `json:"batchNumber"` + blockHashes []string `json:"blocks"` + batchL2Data []byte `json:"batchL2Data"` + globalExitRoot common.Hash `json:"globalExitRoot"` + coinbase common.Address `json:"coinbase"` + closed bool `json:"closed"` + lastL2BlockTimestamp uint64 `json:"lastL2BlockTimestamp"` + l1InfoTreeIndex uint32 `json:"l1InfoTreeIndex"` +} + +func New(batchNumber uint64, blockHashes []string, batchL2Data []byte, globalExitRoot common.Hash, + coinbase common.Address, closed bool) (*RPCBatch, error) { + return &RPCBatch{ + batchNumber: batchNumber, + blockHashes: blockHashes, + batchL2Data: batchL2Data, + globalExitRoot: globalExitRoot, + coinbase: coinbase, + closed: closed, + }, nil +} + +// DeepCopy +func (b *RPCBatch) DeepCopy() seqsendertypes.Batch { + return &RPCBatch{ + batchNumber: b.batchNumber, + blockHashes: b.blockHashes, + batchL2Data: b.batchL2Data, + globalExitRoot: b.globalExitRoot, + coinbase: b.coinbase, + closed: b.closed, + lastL2BlockTimestamp: b.lastL2BlockTimestamp, + l1InfoTreeIndex: b.l1InfoTreeIndex, + } +} + +// LastCoinbase +func (b *RPCBatch) LastCoinbase() common.Address { + return b.coinbase +} + +// ForcedBatchTimestamp +func (b *RPCBatch) ForcedBatchTimestamp() uint64 { + return 0 +} + +// ForcedGlobalExitRoot +func (b *RPCBatch) ForcedGlobalExitRoot() common.Hash { + return common.Hash{} +} + +// ForcedBlockHashL1 +func (b *RPCBatch) ForcedBlockHashL1() common.Hash { + return common.Hash{} +} + +// L2Data +func (b *RPCBatch) 
L2Data() []byte { + return b.batchL2Data +} + +// LastL2BLockTimestamp +func (b *RPCBatch) LastL2BLockTimestamp() uint64 { + return b.lastL2BlockTimestamp +} + +// BatchNumber +func (b *RPCBatch) BatchNumber() uint64 { + return b.batchNumber +} + +// GlobalExitRoot +func (b *RPCBatch) GlobalExitRoot() common.Hash { + return b.globalExitRoot +} + +// L1InfoTreeIndex +func (b *RPCBatch) L1InfoTreeIndex() uint32 { + return b.l1InfoTreeIndex +} + +// SetL2Data +func (b *RPCBatch) SetL2Data(data []byte) { + b.batchL2Data = data +} + +// SetLastCoinbase +func (b *RPCBatch) SetLastCoinbase(address common.Address) { + b.coinbase = address +} + +// SetLastL2BLockTimestamp +func (b *RPCBatch) SetLastL2BLockTimestamp(ts uint64) { + b.lastL2BlockTimestamp = ts +} + +// SetL1InfoTreeIndex +func (b *RPCBatch) SetL1InfoTreeIndex(index uint32) { + b.l1InfoTreeIndex = index +} + +// String +func (b *RPCBatch) String() string { + return fmt.Sprintf( + "Batch/RPC: LastCoinbase: %s, ForcedBatchTimestamp: %d, ForcedGlobalExitRoot: %x, ForcedBlockHashL1: %x"+ + ", L2Data: %x, LastL2BLockTimestamp: %d, BatchNumber: %d, GlobalExitRoot: %x, L1InfoTreeIndex: %d", + b.LastCoinbase().String(), + b.ForcedBatchTimestamp(), + b.ForcedGlobalExitRoot().String(), + b.ForcedBlockHashL1().String(), + b.L2Data(), + b.LastL2BLockTimestamp(), + b.BatchNumber(), + b.GlobalExitRoot().String(), + b.L1InfoTreeIndex(), + ) +} + +// IsClosed +func (b *RPCBatch) IsClosed() bool { + return b.closed +} diff --git a/sequencesender/sequencesender.go b/sequencesender/sequencesender.go index 3431d3fe..468866c2 100644 --- a/sequencesender/sequencesender.go +++ b/sequencesender/sequencesender.go @@ -2,84 +2,80 @@ package sequencesender import ( "context" - "encoding/json" "errors" "fmt" - "math" "math/big" "os" - "strings" "sync" + "sync/atomic" "time" - "github.com/0xPolygon/cdk-rpc/rpc" "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" + 
"github.com/0xPolygon/cdk/sequencesender/seqsendertypes/rpcbatch" "github.com/0xPolygon/cdk/sequencesender/txbuilder" "github.com/0xPolygon/cdk/state" - "github.com/0xPolygon/cdk/state/datastream" - "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" - "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" - ethtxlog "github.com/0xPolygonHermez/zkevm-ethtx-manager/log" + "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" + ethtxlog "github.com/0xPolygon/zkevm-ethtx-manager/log" + ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" "github.com/ethereum/go-ethereum/common" - "google.golang.org/protobuf/proto" + "github.com/ethereum/go-ethereum/core/types" ) +const ten = 10 + +// EthTxManager represents the eth tx manager interface +type EthTxManager interface { + Start() + AddWithGas( + ctx context.Context, + to *common.Address, + value *big.Int, + data []byte, + gasOffset uint64, + sidecar *types.BlobTxSidecar, + gas uint64, + ) (common.Hash, error) + Remove(ctx context.Context, hash common.Hash) error + ResultsByStatus(ctx context.Context, status []ethtxtypes.MonitoredTxStatus) ([]ethtxtypes.MonitoredTxResult, error) + Result(ctx context.Context, hash common.Hash) (ethtxtypes.MonitoredTxResult, error) +} + +// Etherman represents the etherman behaviour +type Etherman interface { + CurrentNonce(ctx context.Context, address common.Address) (uint64, error) + GetLatestBlockHeader(ctx context.Context) (*types.Header, error) + EstimateGas(ctx context.Context, from common.Address, to *common.Address, value *big.Int, data []byte) (uint64, error) + GetLatestBatchNumber() (uint64, error) +} + // SequenceSender represents a sequence sender type SequenceSender struct { - cfg Config - logger *log.Logger - ethTxManager *ethtxmanager.Client - etherman *etherman.Client - currentNonce uint64 - nonceMutex sync.Mutex - latestVirtualBatch uint64 // Latest virtualized batch obtained from L1 - latestVirtualTime time.Time // Latest virtual batch timestamp - 
latestSentToL1Batch uint64 // Latest batch sent to L1 - wipBatch uint64 // Work in progress batch - sequenceList []uint64 // Sequence of batch number to be send to L1 - sequenceData map[uint64]*sequenceData // All the batch data indexed by batch number - mutexSequence sync.Mutex // Mutex to access sequenceData and sequenceList - ethTransactions map[common.Hash]*ethTxData // All the eth tx sent to L1 indexed by hash - ethTxData map[common.Hash][]byte // Tx data send to or received from L1 - mutexEthTx sync.Mutex // Mutex to access ethTransactions - sequencesTxFile *os.File // Persistence of sent transactions - validStream bool // Not valid while receiving data before the desired batch - fromStreamBatch uint64 // Initial batch to connect to the streaming - latestStreamBatch uint64 // Latest batch received by the streaming - seqSendingStopped bool // If there is a critical error - prevStreamEntry *datastreamer.FileEntry - streamClient *datastreamer.StreamClient - TxBuilder txbuilder.TxBuilder - latestVirtualBatchLock sync.Mutex + cfg Config + logger *log.Logger + ethTxManager EthTxManager + etherman Etherman + latestVirtualBatchNumber uint64 // Latest virtualized batch obtained from L1 + latestVirtualTime time.Time // Latest virtual batch timestamp + latestSentToL1Batch uint64 // Latest batch sent to L1 + sequenceList []uint64 // Sequence of batch number to be send to L1 + sequenceData map[uint64]*sequenceData // All the batch data indexed by batch number + mutexSequence sync.Mutex // Mutex to access sequenceData and sequenceList + ethTransactions map[common.Hash]*ethTxData // All the eth tx sent to L1 indexed by hash + ethTxData map[common.Hash][]byte // Tx data send to or received from L1 + mutexEthTx sync.Mutex // Mutex to access ethTransactions + sequencesTxFile *os.File // Persistence of sent transactions + validStream bool // Not valid while receiving data before the desired batch + seqSendingStopped uint32 // If there is a critical error + TxBuilder 
txbuilder.TxBuilder + latestVirtualBatchLock sync.Mutex } type sequenceData struct { batchClosed bool batch seqsendertypes.Batch batchRaw *state.BatchRawV2 - batchType datastream.BatchType -} - -type ethTxData struct { - Nonce uint64 `json:"nonce"` - Status string `json:"status"` - SentL1Timestamp time.Time `json:"sentL1Timestamp"` - StatusTimestamp time.Time `json:"statusTimestamp"` - FromBatch uint64 `json:"fromBatch"` - ToBatch uint64 `json:"toBatch"` - MinedAtBlock big.Int `json:"minedAtBlock"` - OnMonitor bool `json:"onMonitor"` - To common.Address `json:"to"` - StateHistory []string `json:"stateHistory"` - Txs map[common.Hash]ethTxAdditionalData `json:"txs"` - Gas uint64 `json:"gas"` -} - -type ethTxAdditionalData struct { - GasPrice *big.Int `json:"gasPrice,omitempty"` - RevertMessage string `json:"revertMessage,omitempty"` } // New inits sequence sender @@ -87,16 +83,14 @@ func New(cfg Config, logger *log.Logger, etherman *etherman.Client, txBuilder txbuilder.TxBuilder) (*SequenceSender, error) { // Create sequencesender s := SequenceSender{ - cfg: cfg, - logger: logger, - etherman: etherman, - ethTransactions: make(map[common.Hash]*ethTxData), - ethTxData: make(map[common.Hash][]byte), - sequenceData: make(map[uint64]*sequenceData), - validStream: false, - latestStreamBatch: 0, - seqSendingStopped: false, - TxBuilder: txBuilder, + cfg: cfg, + logger: logger, + etherman: etherman, + ethTransactions: make(map[common.Hash]*ethTxData), + ethTxData: make(map[common.Hash][]byte), + sequenceData: make(map[uint64]*sequenceData), + validStream: false, + TxBuilder: txBuilder, } logger.Infof("TxBuilder configuration: %s", txBuilder.String()) @@ -121,16 +115,6 @@ func New(cfg Config, logger *log.Logger, return nil, err } - // Create datastream client - s.streamClient, err = datastreamer.NewClient(s.cfg.StreamClient.Server, 1) - if err != nil { - s.logger.Fatalf("failed to create stream client, error: %v", err) - } else { - s.logger.Infof("new stream client") - } - // 
Set func to handle the streaming - s.streamClient.SetProcessEntryFunc(s.handleReceivedDataStream) - return &s, nil } @@ -139,19 +123,8 @@ func (s *SequenceSender) Start(ctx context.Context) { // Start ethtxmanager client go s.ethTxManager.Start() - // Get current nonce - var err error - s.nonceMutex.Lock() - s.currentNonce, err = s.etherman.CurrentNonce(ctx, s.cfg.L2Coinbase) - if err != nil { - s.logger.Fatalf("failed to get current nonce from %v, error: %v", s.cfg.L2Coinbase, err) - } else { - s.logger.Infof("current nonce for %v is %d", s.cfg.L2Coinbase, s.currentNonce) - } - s.nonceMutex.Unlock() - // Get latest virtual state batch from L1 - err = s.updateLatestVirtualBatch() + err := s.getLatestVirtualBatch() if err != nil { s.logger.Fatalf("error getting latest sequenced batch, error: %v", err) } @@ -162,39 +135,87 @@ func (s *SequenceSender) Start(ctx context.Context) { s.logger.Fatalf("failed to sync monitored tx results, error: %v", err) } - // Start datastream client - err = s.streamClient.Start() - if err != nil { - s.logger.Fatalf("failed to start stream client, error: %v", err) - } + // Current batch to sequence + atomic.StoreUint64(&s.latestSentToL1Batch, atomic.LoadUint64(&s.latestVirtualBatchNumber)) - // Set starting point of the streaming - s.fromStreamBatch = s.latestVirtualBatch + // Start retrieving batches from RPC + go func() { + err := s.batchRetrieval(ctx) + if err != nil { + s.logFatalf("error retrieving batches from RPC: %v", err) + } + }() - bookmark := &datastream.BookMark{ - Type: datastream.BookmarkType_BOOKMARK_TYPE_BATCH, - Value: s.fromStreamBatch, - } + // Start sequence sending + go s.sequenceSending(ctx) +} - marshalledBookmark, err := proto.Marshal(bookmark) - if err != nil { - s.logger.Fatalf("failed to marshal bookmark, error: %v", err) - } +// batchRetrieval keeps reading batches from the RPC +func (s *SequenceSender) batchRetrieval(ctx context.Context) error { + ticker := time.NewTicker(s.cfg.GetBatchWaitInterval.Duration) 
+ defer ticker.Stop() - s.logger.Infof("stream client from bookmark %v", bookmark) + currentBatchNumber := atomic.LoadUint64(&s.latestVirtualBatchNumber) + 1 + for { + select { + case <-ctx.Done(): + s.logger.Info("context cancelled, stopping batch retrieval") + return ctx.Err() + default: + // Try to retrieve batch from RPC + rpcBatch, err := getBatchFromRPC(s.cfg.RPCURL, currentBatchNumber) + if err != nil { + if errors.Is(err, ethtxmanager.ErrNotFound) { + s.logger.Infof("batch %d not found in RPC", currentBatchNumber) + } else { + s.logger.Errorf("error getting batch %d from RPC: %v", currentBatchNumber, err) + } + <-ticker.C + continue + } - // Current batch to sequence - s.wipBatch = s.latestVirtualBatch + 1 - s.latestSentToL1Batch = s.latestVirtualBatch + // Check if the batch is closed + if !rpcBatch.IsClosed() { + s.logger.Infof("batch %d is not closed yet", currentBatchNumber) + <-ticker.C + continue + } - // Start sequence sending - go s.sequenceSending(ctx) + // Process and decode the batch + if err := s.populateSequenceData(rpcBatch, currentBatchNumber); err != nil { + return err + } + + // Increment the batch number for the next iteration + currentBatchNumber++ + } + } +} + +func (s *SequenceSender) populateSequenceData(rpcBatch *rpcbatch.RPCBatch, batchNumber uint64) error { + s.mutexSequence.Lock() + defer s.mutexSequence.Unlock() + + s.sequenceList = append(s.sequenceList, batchNumber) - // Start receiving the streaming - err = s.streamClient.ExecCommandStartBookmark(marshalledBookmark) + // Decode batch to retrieve the l1 info tree index + batchRaw, err := state.DecodeBatchV2(rpcBatch.L2Data()) if err != nil { - s.logger.Fatalf("failed to connect to the streaming: %v", err) + s.logger.Errorf("Failed to decode batch data for batch %d, err: %v", batchNumber, err) + return err + } + + if len(batchRaw.Blocks) > 0 { + rpcBatch.SetL1InfoTreeIndex(batchRaw.Blocks[len(batchRaw.Blocks)-1].IndexL1InfoTree) } + + s.sequenceData[batchNumber] = &sequenceData{ 
+ batchClosed: rpcBatch.IsClosed(), + batch: rpcBatch, + batchRaw: batchRaw, + } + + return nil } // sequenceSending starts loop to check if there are sequences to send and sends them if it's convenient @@ -208,17 +229,18 @@ func (s *SequenceSender) sequenceSending(ctx context.Context) { // purgeSequences purges batches from memory structures func (s *SequenceSender) purgeSequences() { // If sequence sending is stopped, do not purge - if s.seqSendingStopped { + if atomic.LoadUint32(&s.seqSendingStopped) == 1 { return } // Purge the information of batches that are already virtualized s.mutexSequence.Lock() + defer s.mutexSequence.Unlock() truncateUntil := 0 toPurge := make([]uint64, 0) for i := 0; i < len(s.sequenceList); i++ { batchNumber := s.sequenceList[i] - if batchNumber <= s.latestVirtualBatch { + if batchNumber <= atomic.LoadUint64(&s.latestVirtualBatchNumber) { truncateUntil = i + 1 toPurge = append(toPurge, batchNumber) } @@ -240,221 +262,13 @@ func (s *SequenceSender) purgeSequences() { } s.logger.Infof("batches purged count: %d, fromBatch: %d, toBatch: %d", len(toPurge), firstPurged, lastPurged) } - s.mutexSequence.Unlock() -} - -// purgeEthTx purges transactions from memory structures -func (s *SequenceSender) purgeEthTx(ctx context.Context) { - // If sequence sending is stopped, do not purge - if s.seqSendingStopped { - return - } - - // Purge old transactions that are finalized - s.mutexEthTx.Lock() - timePurge := time.Now().Add(-s.cfg.WaitPeriodPurgeTxFile.Duration) - toPurge := make([]common.Hash, 0) - for hash, data := range s.ethTransactions { - if !data.StatusTimestamp.Before(timePurge) { - continue - } - - if !data.OnMonitor || data.Status == ethtxmanager.MonitoredTxStatusFinalized.String() { - toPurge = append(toPurge, hash) - - // Remove from tx monitor - if data.OnMonitor { - err := s.ethTxManager.Remove(ctx, hash) - if err != nil { - s.logger.Warnf("error removing monitor tx %v from ethtxmanager: %v", hash, err) - } else { - 
s.logger.Infof("removed monitor tx %v from ethtxmanager", hash) - } - } - } - } - - if len(toPurge) > 0 { - var firstPurged uint64 = math.MaxUint64 - var lastPurged uint64 - for i := 0; i < len(toPurge); i++ { - if s.ethTransactions[toPurge[i]].Nonce < firstPurged { - firstPurged = s.ethTransactions[toPurge[i]].Nonce - } - if s.ethTransactions[toPurge[i]].Nonce > lastPurged { - lastPurged = s.ethTransactions[toPurge[i]].Nonce - } - delete(s.ethTransactions, toPurge[i]) - delete(s.ethTxData, toPurge[i]) - } - s.logger.Infof("txs purged count: %d, fromNonce: %d, toNonce: %d", len(toPurge), firstPurged, lastPurged) - } - s.mutexEthTx.Unlock() -} - -// syncEthTxResults syncs results from L1 for transactions in the memory structure -func (s *SequenceSender) syncEthTxResults(ctx context.Context) (uint64, error) { //nolint:unparam - s.mutexEthTx.Lock() - var txPending uint64 - var txSync uint64 - for hash, data := range s.ethTransactions { - if data.Status == ethtxmanager.MonitoredTxStatusFinalized.String() { - continue - } - - _ = s.getResultAndUpdateEthTx(ctx, hash) - txSync++ - txStatus := s.ethTransactions[hash].Status - // Count if it is not in a final state - if s.ethTransactions[hash].OnMonitor && - txStatus != ethtxmanager.MonitoredTxStatusFailed.String() && - txStatus != ethtxmanager.MonitoredTxStatusSafe.String() && - txStatus != ethtxmanager.MonitoredTxStatusFinalized.String() { - txPending++ - } - } - s.mutexEthTx.Unlock() - - // Save updated sequences transactions - err := s.saveSentSequencesTransactions(ctx) - if err != nil { - s.logger.Errorf("error saving tx sequence, error: %v", err) - } - - s.logger.Infof("%d tx results synchronized (%d in pending state)", txSync, txPending) - return txPending, nil -} - -// syncAllEthTxResults syncs all tx results from L1 -func (s *SequenceSender) syncAllEthTxResults(ctx context.Context) error { - // Get all results - results, err := s.ethTxManager.ResultsByStatus(ctx, nil) - if err != nil { - s.logger.Warnf("error 
getting results for all tx: %v", err) - return err - } - - // Check and update tx status - numResults := len(results) - s.mutexEthTx.Lock() - for _, result := range results { - txSequence, exists := s.ethTransactions[result.ID] - if !exists { - s.logger.Infof("transaction %v missing in memory structure. Adding it", result.ID) - // No info: from/to batch and the sent timestamp - s.ethTransactions[result.ID] = ðTxData{ - SentL1Timestamp: time.Time{}, - StatusTimestamp: time.Now(), - OnMonitor: true, - Status: "*missing", - } - txSequence = s.ethTransactions[result.ID] - } - - s.updateEthTxResult(txSequence, result) - } - s.mutexEthTx.Unlock() - - // Save updated sequences transactions - err = s.saveSentSequencesTransactions(ctx) - if err != nil { - s.logger.Errorf("error saving tx sequence, error: %v", err) - } - - s.logger.Infof("%d tx results synchronized", numResults) - return nil -} - -// copyTxData copies tx data in the internal structure -func (s *SequenceSender) copyTxData( - txHash common.Hash, txData []byte, txsResults map[common.Hash]ethtxmanager.TxResult, -) { - s.ethTxData[txHash] = make([]byte, len(txData)) - copy(s.ethTxData[txHash], txData) - - s.ethTransactions[txHash].Txs = make(map[common.Hash]ethTxAdditionalData, 0) - for hash, result := range txsResults { - var gasPrice *big.Int - if result.Tx != nil { - gasPrice = result.Tx.GasPrice() - } - - add := ethTxAdditionalData{ - GasPrice: gasPrice, - RevertMessage: result.RevertMessage, - } - s.ethTransactions[txHash].Txs[hash] = add - } -} - -// updateEthTxResult handles updating transaction state -func (s *SequenceSender) updateEthTxResult(txData *ethTxData, txResult ethtxmanager.MonitoredTxResult) { - if txData.Status != txResult.Status.String() { - s.logger.Infof("update transaction %v to state %s", txResult.ID, txResult.Status.String()) - txData.StatusTimestamp = time.Now() - stTrans := txData.StatusTimestamp.Format("2006-01-02T15:04:05.000-07:00") + ", " + - txData.Status + ", " + 
txResult.Status.String() - - txData.Status = txResult.Status.String() - txData.StateHistory = append(txData.StateHistory, stTrans) - - // Manage according to the state - statusConsolidated := txData.Status == ethtxmanager.MonitoredTxStatusSafe.String() || - txData.Status == ethtxmanager.MonitoredTxStatusFinalized.String() - - if txData.Status == ethtxmanager.MonitoredTxStatusFailed.String() { - s.logFatalf("transaction %v result failed!") - } else if statusConsolidated && txData.ToBatch >= s.latestVirtualBatch { - s.latestVirtualTime = txData.StatusTimestamp - } - } - - // Update info received from L1 - txData.Nonce = txResult.Nonce - if txResult.To != nil { - txData.To = *txResult.To - } - if txResult.MinedAtBlockNumber != nil { - txData.MinedAtBlock = *txResult.MinedAtBlockNumber - } - s.copyTxData(txResult.ID, txResult.Data, txResult.Txs) -} - -// getResultAndUpdateEthTx updates the tx status from the ethTxManager -func (s *SequenceSender) getResultAndUpdateEthTx(ctx context.Context, txHash common.Hash) error { - txData, exists := s.ethTransactions[txHash] - if !exists { - s.logger.Errorf("transaction %v not found in memory", txHash) - return errors.New("transaction not found in memory structure") - } - - txResult, err := s.ethTxManager.Result(ctx, txHash) - switch { - case errors.Is(err, ethtxmanager.ErrNotFound): - s.logger.Infof("transaction %v does not exist in ethtxmanager. 
Marking it", txHash) - txData.OnMonitor = false - // Resend tx - errSend := s.sendTx(ctx, true, &txHash, nil, 0, 0, nil, txData.Gas) - if errSend == nil { - txData.OnMonitor = false - } - - case err != nil: - s.logger.Errorf("error getting result for tx %v: %v", txHash, err) - return err - - default: - s.updateEthTxResult(txData, txResult) - } - - return nil } // tryToSendSequence checks if there is a sequence and it's worth it to send to L1 func (s *SequenceSender) tryToSendSequence(ctx context.Context) { // Update latest virtual batch s.logger.Infof("updating virtual batch") - err := s.updateLatestVirtualBatch() + err := s.getLatestVirtualBatch() if err != nil { return } @@ -467,7 +281,7 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) { } // Check if the sequence sending is stopped - if s.seqSendingStopped { + if atomic.LoadUint32(&s.seqSendingStopped) == 1 { s.logger.Warnf("sending is stopped!") return } @@ -489,13 +303,12 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) { } // Send sequences to L1 - firstSequence := sequence.FirstBatch() - lastSequence := sequence.LastBatch() - lastL2BlockTimestamp := lastSequence.LastL2BLockTimestamp() + firstBatch := sequence.FirstBatch() + lastBatch := sequence.LastBatch() + lastL2BlockTimestamp := lastBatch.LastL2BLockTimestamp() s.logger.Debugf(sequence.String()) - s.logger.Infof("sending sequences to L1. From batch %d to batch %d", - firstSequence.BatchNumber(), lastSequence.BatchNumber()) + s.logger.Infof("sending sequences to L1. 
From batch %d to batch %d", firstBatch.BatchNumber(), lastBatch.BatchNumber()) // Wait until last L1 block timestamp is L1BlockTimestampMargin seconds above the timestamp // of the last L2 block in the sequence @@ -508,13 +321,13 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) { return } - elapsed, waitTime := s.marginTimeElapsed(lastL2BlockTimestamp, lastL1BlockHeader.Time, timeMargin) + elapsed, waitTime := marginTimeElapsed(lastL2BlockTimestamp, lastL1BlockHeader.Time, timeMargin) if !elapsed { s.logger.Infof("waiting at least %d seconds to send sequences, time difference between last L1 block %d (ts: %d) "+ "and last L2 block %d (ts: %d) in the sequence is lower than %d seconds", waitTime, lastL1BlockHeader.Number, lastL1BlockHeader.Time, - lastSequence.BatchNumber(), lastL2BlockTimestamp, timeMargin, + lastBatch.BatchNumber(), lastL2BlockTimestamp, timeMargin, ) time.Sleep(time.Duration(waitTime) * time.Second) } else { @@ -522,7 +335,7 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) { "in the sequence is greater than %d seconds", lastL1BlockHeader.Number, lastL1BlockHeader.Time, - lastSequence.BatchNumber, + lastBatch.BatchNumber, lastL2BlockTimestamp, timeMargin, ) @@ -535,28 +348,25 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) { for { currentTime := uint64(time.Now().Unix()) - elapsed, waitTime := s.marginTimeElapsed(lastL2BlockTimestamp, currentTime, timeMargin) + elapsed, waitTime := marginTimeElapsed(lastL2BlockTimestamp, currentTime, timeMargin) // Wait if the time difference is less than L1BlockTimestampMargin if !elapsed { s.logger.Infof("waiting at least %d seconds to send sequences, time difference between now (ts: %d) "+ "and last L2 block %d (ts: %d) in the sequence is lower than %d seconds", - waitTime, currentTime, lastSequence.BatchNumber, lastL2BlockTimestamp, timeMargin) + waitTime, currentTime, lastBatch.BatchNumber, lastL2BlockTimestamp, timeMargin) 
time.Sleep(time.Duration(waitTime) * time.Second) } else { s.logger.Infof("sending sequences now, time difference between now (ts: %d) and last L2 block %d (ts: %d) "+ "in the sequence is also greater than %d seconds", - currentTime, lastSequence.BatchNumber, lastL2BlockTimestamp, timeMargin) + currentTime, lastBatch.BatchNumber, lastL2BlockTimestamp, timeMargin) break } } // Send sequences to L1 s.logger.Debugf(sequence.String()) - s.logger.Infof( - "sending sequences to L1. From batch %d to batch %d", - firstSequence.BatchNumber(), lastSequence.BatchNumber(), - ) + s.logger.Infof("sending sequences to L1. From batch %d to batch %d", firstBatch.BatchNumber(), lastBatch.BatchNumber()) tx, err := s.TxBuilder.BuildSequenceBatchesTx(ctx, sequence) if err != nil { @@ -565,12 +375,12 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) { } // Get latest virtual state batch from L1 - err = s.updateLatestVirtualBatch() + err = s.getLatestVirtualBatch() if err != nil { s.logger.Fatalf("error getting latest sequenced batch, error: %v", err) } - sequence.SetLastVirtualBatchNumber(s.latestVirtualBatch) + sequence.SetLastVirtualBatchNumber(atomic.LoadUint64(&s.latestVirtualBatchNumber)) txToEstimateGas, err := s.TxBuilder.BuildSequenceBatchesTx(ctx, sequence) if err != nil { @@ -585,7 +395,7 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) { } // Add sequence tx - err = s.sendTx(ctx, false, nil, tx.To(), firstSequence.BatchNumber(), lastSequence.BatchNumber(), tx.Data(), gas) + err = s.sendTx(ctx, false, nil, tx.To(), firstBatch.BatchNumber(), lastBatch.BatchNumber(), tx.Data(), gas) if err != nil { return } @@ -594,86 +404,6 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) { s.purgeSequences() } -// sendTx adds transaction to the ethTxManager to send it to L1 -func (s *SequenceSender) sendTx( - ctx context.Context, resend bool, txOldHash *common.Hash, to *common.Address, - fromBatch uint64, toBatch uint64, data []byte, gas 
uint64, -) error { - // Params if new tx to send or resend a previous tx - var paramTo *common.Address - var paramNonce *uint64 - var paramData []byte - var valueFromBatch uint64 - var valueToBatch uint64 - var valueToAddress common.Address - - if !resend { - s.nonceMutex.Lock() - nonce := s.currentNonce - s.currentNonce++ - s.nonceMutex.Unlock() - paramNonce = &nonce - paramTo = to - paramData = data - valueFromBatch = fromBatch - valueToBatch = toBatch - } else { - if txOldHash == nil { - s.logger.Errorf("trying to resend a tx with nil hash") - return errors.New("resend tx with nil hash monitor id") - } - paramTo = &s.ethTransactions[*txOldHash].To - paramNonce = &s.ethTransactions[*txOldHash].Nonce - paramData = s.ethTxData[*txOldHash] - valueFromBatch = s.ethTransactions[*txOldHash].FromBatch - valueToBatch = s.ethTransactions[*txOldHash].ToBatch - } - if paramTo != nil { - valueToAddress = *paramTo - } - - // Add sequence tx - txHash, err := s.ethTxManager.AddWithGas(ctx, paramTo, paramNonce, big.NewInt(0), paramData, s.cfg.GasOffset, nil, gas) - if err != nil { - s.logger.Errorf("error adding sequence to ethtxmanager: %v", err) - return err - } - - // Add new eth tx - txData := ethTxData{ - SentL1Timestamp: time.Now(), - StatusTimestamp: time.Now(), - Status: "*new", - FromBatch: valueFromBatch, - ToBatch: valueToBatch, - OnMonitor: true, - To: valueToAddress, - Gas: gas, - } - - // Add tx to internal structure - s.mutexEthTx.Lock() - s.ethTransactions[txHash] = &txData - txResults := make(map[common.Hash]ethtxmanager.TxResult, 0) - s.copyTxData(txHash, paramData, txResults) - _ = s.getResultAndUpdateEthTx(ctx, txHash) - if !resend { - s.latestSentToL1Batch = valueToBatch - } else { - s.ethTransactions[*txOldHash].Status = "*resent" - } - s.mutexEthTx.Unlock() - - // Save sent sequences - err = s.saveSentSequencesTransactions(ctx) - if err != nil { - s.logger.Errorf("error saving tx sequence sent, error: %v", err) - } - return nil -} - -// getSequencesToSend 
generates sequences to be sent to L1. -// Empty array means there are no sequences to send or it's not worth sending func (s *SequenceSender) getSequencesToSend(ctx context.Context) (seqsendertypes.Sequence, error) { // Add sequences until too big for a single L1 tx or last batch is reached s.mutexSequence.Lock() @@ -682,7 +412,8 @@ func (s *SequenceSender) getSequencesToSend(ctx context.Context) (seqsendertypes sequenceBatches := make([]seqsendertypes.Batch, 0) for i := 0; i < len(s.sequenceList); i++ { batchNumber := s.sequenceList[i] - if batchNumber <= s.latestVirtualBatch || batchNumber <= s.latestSentToL1Batch { + if batchNumber <= atomic.LoadUint64(&s.latestVirtualBatchNumber) || + batchNumber <= atomic.LoadUint64(&s.latestSentToL1Batch) { continue } @@ -695,12 +426,6 @@ func (s *SequenceSender) getSequencesToSend(ctx context.Context) (seqsendertypes ) } - // Check if batch is closed - if !s.sequenceData[batchNumber].batchClosed { - // Reached current wip batch - break - } - // New potential batch to add to the sequence batch := s.sequenceData[batchNumber].batch.DeepCopy() @@ -751,525 +476,40 @@ func (s *SequenceSender) getSequencesToSend(ctx context.Context) (seqsendertypes return nil, nil } -// loadSentSequencesTransactions loads the file into the memory structure -func (s *SequenceSender) loadSentSequencesTransactions() error { - // Check if file exists - if _, err := os.Stat(s.cfg.SequencesTxFileName); os.IsNotExist(err) { - s.logger.Infof("file not found %s: %v", s.cfg.SequencesTxFileName, err) - return nil - } else if err != nil { - s.logger.Errorf("error opening file %s: %v", s.cfg.SequencesTxFileName, err) - return err - } - - // Read file - data, err := os.ReadFile(s.cfg.SequencesTxFileName) - if err != nil { - s.logger.Errorf("error reading file %s: %v", s.cfg.SequencesTxFileName, err) - return err - } - - // Restore memory structure - s.mutexEthTx.Lock() - err = json.Unmarshal(data, &s.ethTransactions) - s.mutexEthTx.Unlock() - if err != nil { - 
s.logger.Errorf("error decoding data from %s: %v", s.cfg.SequencesTxFileName, err) - return err - } - - return nil -} +// getLatestVirtualBatch queries the value in L1 and updates the latest virtual batch field +func (s *SequenceSender) getLatestVirtualBatch() error { + s.latestVirtualBatchLock.Lock() + defer s.latestVirtualBatchLock.Unlock() -// saveSentSequencesTransactions saves memory structure into persistent file -func (s *SequenceSender) saveSentSequencesTransactions(ctx context.Context) error { + // Get latest virtual state batch from L1 var err error - // Purge tx - s.purgeEthTx(ctx) - - // Ceate file - fileName := s.cfg.SequencesTxFileName[0:strings.IndexRune(s.cfg.SequencesTxFileName, '.')] + ".tmp" - s.sequencesTxFile, err = os.Create(fileName) - if err != nil { - s.logger.Errorf("error creating file %s: %v", fileName, err) - return err - } - defer s.sequencesTxFile.Close() - - // Write data JSON encoded - encoder := json.NewEncoder(s.sequencesTxFile) - encoder.SetIndent("", " ") - s.mutexEthTx.Lock() - err = encoder.Encode(s.ethTransactions) - s.mutexEthTx.Unlock() - if err != nil { - s.logger.Errorf("error writing file %s: %v", fileName, err) - return err - } - - // Rename the new file - err = os.Rename(fileName, s.cfg.SequencesTxFileName) + latestVirtualBatchNumber, err := s.etherman.GetLatestBatchNumber() if err != nil { - s.logger.Errorf("error renaming file %s to %s: %v", fileName, s.cfg.SequencesTxFileName, err) - return err - } - - return nil -} - -func (s *SequenceSender) entryTypeToString(entryType datastream.EntryType) string { - switch entryType { - case datastream.EntryType_ENTRY_TYPE_BATCH_START: - return "BatchStart" - case datastream.EntryType_ENTRY_TYPE_L2_BLOCK: - return "L2Block" - case datastream.EntryType_ENTRY_TYPE_TRANSACTION: - return "Transaction" - case datastream.EntryType_ENTRY_TYPE_BATCH_END: - return "BatchEnd" - default: - return fmt.Sprintf("%d", entryType) - } -} - -// handleReceivedDataStream manages the events received 
by the streaming -func (s *SequenceSender) handleReceivedDataStream( - entry *datastreamer.FileEntry, client *datastreamer.StreamClient, server *datastreamer.StreamServer, -) error { - dsType := datastream.EntryType(entry.Type) - - var prevEntryType datastream.EntryType - if s.prevStreamEntry != nil { - prevEntryType = datastream.EntryType(s.prevStreamEntry.Type) - } - - switch dsType { - case datastream.EntryType_ENTRY_TYPE_L2_BLOCK: - // Handle stream entry: L2Block - l2Block := &datastream.L2Block{} - - err := proto.Unmarshal(entry.Data, l2Block) - if err != nil { - s.logger.Errorf("error unmarshalling L2Block: %v", err) - return err - } - - s.logger.Infof("received L2Block entry, l2Block.Number: %d, l2Block.BatchNumber: %d, entry.Number: %d", - l2Block.Number, l2Block.BatchNumber, entry.Number, - ) - - // Sanity checks - if s.prevStreamEntry != nil && - !(prevEntryType == datastream.EntryType_ENTRY_TYPE_BATCH_START || - prevEntryType == datastream.EntryType_ENTRY_TYPE_L2_BLOCK || - prevEntryType == datastream.EntryType_ENTRY_TYPE_TRANSACTION) { - s.logger.Fatalf("unexpected L2Block entry received, entry.Number: %d, l2Block.Number: %d, "+ - "prevEntry: %s, prevEntry.Number: %d", - entry.Number, - l2Block.Number, - s.entryTypeToString(prevEntryType), - s.prevStreamEntry.Number, - ) - } else if prevEntryType == datastream.EntryType_ENTRY_TYPE_L2_BLOCK { - prevL2Block := &datastream.L2Block{} - - err := proto.Unmarshal(s.prevStreamEntry.Data, prevL2Block) - if err != nil { - s.logger.Errorf("error unmarshalling prevL2Block: %v", err) - return err - } - if l2Block.Number != prevL2Block.Number+1 { - s.logger.Fatalf("unexpected L2Block number %d received, it should be %d, entry.Number: %d, prevEntry.Number: %d", - l2Block.Number, prevL2Block.Number+1, entry.Number, s.prevStreamEntry.Number) - } - } - - switch { - case l2Block.BatchNumber <= s.fromStreamBatch: - // Already virtualized - if l2Block.BatchNumber != s.latestStreamBatch { - s.logger.Infof("skipped! 
batch already virtualized, number %d", l2Block.BatchNumber) - } - - case !s.validStream && l2Block.BatchNumber == s.fromStreamBatch+1: - // Initial case after startup - s.addNewSequenceBatch(l2Block) - s.validStream = true - - case l2Block.BatchNumber > s.wipBatch: - // Handle whether it's only a new block or also a new batch - // Create new sequential batch - s.addNewSequenceBatch(l2Block) - } - - // Latest stream batch - s.latestStreamBatch = l2Block.BatchNumber - if !s.validStream { - return nil - } - - // Add L2 block - s.addNewBatchL2Block(l2Block) - - s.prevStreamEntry = entry - - case datastream.EntryType_ENTRY_TYPE_TRANSACTION: - // Handle stream entry: Transaction - if !s.validStream { - return nil - } - - l2Tx := &datastream.Transaction{} - err := proto.Unmarshal(entry.Data, l2Tx) - if err != nil { - s.logger.Errorf("error unmarshalling Transaction: %v", err) - return err - } - - s.logger.Debugf( - "received Transaction entry, tx.L2BlockNumber: %d, tx.Index: %d, entry.Number: %d", - l2Tx.L2BlockNumber, l2Tx.Index, entry.Number, - ) - - // Sanity checks - if !(prevEntryType == datastream.EntryType_ENTRY_TYPE_L2_BLOCK || - prevEntryType == datastream.EntryType_ENTRY_TYPE_TRANSACTION) { - s.logger.Fatalf("unexpected Transaction entry received, entry.Number: %d, transaction.L2BlockNumber: %d, "+ - "transaction.Index: %d, prevEntry: %s, prevEntry.Number: %d", - entry.Number, l2Tx.L2BlockNumber, l2Tx.Index, s.entryTypeToString(prevEntryType), s.prevStreamEntry.Number) - } - - // Sanity check: tx should be decodable - _, err = state.DecodeTx(common.Bytes2Hex(l2Tx.Encoded)) - if err != nil { - s.logger.Fatalf("error decoding tx during sanity check: %v", err) - } - - // Add tx data - s.addNewBlockTx(l2Tx) - - s.prevStreamEntry = entry - - case datastream.EntryType_ENTRY_TYPE_BATCH_START: - // Handle stream entry: BatchStart - if !s.validStream { - return nil - } - - batch := &datastream.BatchStart{} - err := proto.Unmarshal(entry.Data, batch) - if err != nil { - 
s.logger.Errorf("error unmarshalling BatchStart: %v", err) - return err - } - - s.logger.Infof("received BatchStart entry, batchStart.Number: %d, entry.Number: %d", batch.Number, entry.Number) - - // Add batch start data - s.addInfoSequenceBatchStart(batch) - - s.prevStreamEntry = entry - - case datastream.EntryType_ENTRY_TYPE_BATCH_END: - // Handle stream entry: BatchEnd - if !s.validStream { - return nil - } - - batch := &datastream.BatchEnd{} - err := proto.Unmarshal(entry.Data, batch) - if err != nil { - s.logger.Errorf("error unmarshalling BatchEnd: %v", err) - return err - } - - s.logger.Infof("received BatchEnd entry, batchEnd.Number: %d, entry.Number: %d", batch.Number, entry.Number) - - // Sanity checks - if !(prevEntryType == datastream.EntryType_ENTRY_TYPE_L2_BLOCK || - prevEntryType == datastream.EntryType_ENTRY_TYPE_TRANSACTION) { - s.logger.Fatalf( - "unexpected BatchEnd entry received, entry.Number: %d, batchEnd.Number: %d, "+ - "prevEntry.Type: %s, prevEntry.Number: %d", - entry.Number, batch.Number, s.entryTypeToString(prevEntryType), s.prevStreamEntry.Number) - } - - // Add batch end data - s.addInfoSequenceBatchEnd(batch) - - // Close current wip batch - err = s.closeSequenceBatch() - if err != nil { - s.logger.Fatalf("error closing wip batch") - return err - } - - s.prevStreamEntry = entry - } - - return nil -} - -// closeSequenceBatch closes the current batch -func (s *SequenceSender) closeSequenceBatch() error { - s.mutexSequence.Lock() - defer s.mutexSequence.Unlock() - - s.logger.Infof("closing batch %d", s.wipBatch) - - data := s.sequenceData[s.wipBatch] - if data != nil { - data.batchClosed = true - - batchL2Data, err := state.EncodeBatchV2(data.batchRaw) - if err != nil { - s.logger.Errorf("error closing and encoding the batch %d: %v", s.wipBatch, err) - return err - } - - data.batch.SetL2Data(batchL2Data) - } else { - s.logger.Fatalf("wipBatch %d not found in sequenceData slice", s.wipBatch) + s.logger.Errorf("error getting latest 
virtual batch, error: %v", err) + return errors.New("fail to get latest virtual batch") } - // Sanity Check - if s.cfg.SanityCheckRPCURL != "" { - rpcNumberOfBlocks, batchL2Data, err := s.getBatchFromRPC(s.wipBatch) - if err != nil { - s.logger.Fatalf("error getting batch number from RPC while trying to perform sanity check: %v", err) - } else { - dsNumberOfBlocks := len(s.sequenceData[s.wipBatch].batchRaw.Blocks) - if rpcNumberOfBlocks != dsNumberOfBlocks { - s.logger.Fatalf( - "number of blocks in batch %d (%d) does not match the number of blocks in the batch from the RPC (%d)", - s.wipBatch, dsNumberOfBlocks, rpcNumberOfBlocks, - ) - } - - if data.batchType == datastream.BatchType_BATCH_TYPE_REGULAR && - common.Bytes2Hex(data.batch.L2Data()) != batchL2Data { - s.logger.Infof("datastream batchL2Data: %s", common.Bytes2Hex(data.batch.L2Data())) - s.logger.Infof("RPC batchL2Data: %s", batchL2Data) - s.logger.Fatalf("batchL2Data in batch %d does not match batchL2Data from the RPC (%d)", s.wipBatch) - } + atomic.StoreUint64(&s.latestVirtualBatchNumber, latestVirtualBatchNumber) - s.logger.Infof("sanity check of batch %d against RPC successful", s.wipBatch) - } - } else { - s.logger.Warnf("config param SanityCheckRPCURL not set, sanity check with RPC can't be done") - } + s.logger.Infof("latest virtual batch is %d", latestVirtualBatchNumber) return nil } -func (s *SequenceSender) getBatchFromRPC(batchNumber uint64) (int, string, error) { - type zkEVMBatch struct { - Blocks []string `mapstructure:"blocks"` - BatchL2Data string `mapstructure:"batchL2Data"` - } - - zkEVMBatchData := zkEVMBatch{} - - response, err := rpc.JSONRPCCall(s.cfg.SanityCheckRPCURL, "zkevm_getBatchByNumber", batchNumber) - if err != nil { - return 0, "", err - } - - // Check if the response is an error - if response.Error != nil { - return 0, "", fmt.Errorf("error in the response calling zkevm_getBatchByNumber: %v", response.Error) - } - - // Get the batch number from the response hex string - err 
= json.Unmarshal(response.Result, &zkEVMBatchData) - if err != nil { - return 0, "", fmt.Errorf( - "error unmarshalling the batch number from the response calling zkevm_getBatchByNumber: %w", - err, - ) - } - - return len(zkEVMBatchData.Blocks), zkEVMBatchData.BatchL2Data, nil -} - -// addNewSequenceBatch adds a new batch to the sequence -func (s *SequenceSender) addNewSequenceBatch(l2Block *datastream.L2Block) { - s.mutexSequence.Lock() - s.logger.Infof("...new batch, number %d", l2Block.BatchNumber) - - if l2Block.BatchNumber > s.wipBatch+1 { - s.logFatalf("new batch number (%d) is not consecutive to the current one (%d)", l2Block.BatchNumber, s.wipBatch) - } else if l2Block.BatchNumber < s.wipBatch { - s.logFatalf("new batch number (%d) is lower than the current one (%d)", l2Block.BatchNumber, s.wipBatch) - } - - batch := s.TxBuilder.NewBatchFromL2Block(l2Block) - - // Add to the list - s.sequenceList = append(s.sequenceList, l2Block.BatchNumber) - - // Create initial data - batchRaw := state.BatchRawV2{} - data := sequenceData{ - batchClosed: false, - batch: batch, - batchRaw: &batchRaw, - } - s.sequenceData[l2Block.BatchNumber] = &data - - // Update wip batch - s.wipBatch = l2Block.BatchNumber - s.mutexSequence.Unlock() -} - -// addInfoSequenceBatchStart adds info from the batch start -func (s *SequenceSender) addInfoSequenceBatchStart(batch *datastream.BatchStart) { - s.mutexSequence.Lock() - s.logger.Infof( - "batch %d (%s) Start: type %d forkId %d chainId %d", - batch.Number, datastream.BatchType_name[int32(batch.Type)], batch.Type, batch.ForkId, batch.ChainId, - ) - - // Current batch - data := s.sequenceData[s.wipBatch] - if data != nil { - wipBatch := data.batch - if wipBatch.BatchNumber()+1 != batch.Number { - s.logFatalf( - "batch start number (%d) does not match the current consecutive one (%d)", - batch.Number, wipBatch.BatchNumber, - ) - } - data.batchType = batch.Type - } - - s.mutexSequence.Unlock() -} - -// addInfoSequenceBatchEnd adds info from 
the batch end -func (s *SequenceSender) addInfoSequenceBatchEnd(batch *datastream.BatchEnd) { - s.mutexSequence.Lock() - - // Current batch - data := s.sequenceData[s.wipBatch] - if data != nil { - wipBatch := data.batch - if wipBatch.BatchNumber() == batch.Number { - // wipBatch.StateRoot = common.BytesToHash(batch) TODO: check if this is needed - } else { - s.logFatalf("batch end number (%d) does not match the current one (%d)", batch.Number, wipBatch.BatchNumber) - } - } - - s.mutexSequence.Unlock() -} - -// addNewBatchL2Block adds a new L2 block to the work in progress batch -func (s *SequenceSender) addNewBatchL2Block(l2Block *datastream.L2Block) { - s.mutexSequence.Lock() - s.logger.Infof(".....new L2 block, number %d (batch %d)", l2Block.Number, l2Block.BatchNumber) - - // Current batch - data := s.sequenceData[s.wipBatch] - if data != nil { - wipBatchRaw := data.batchRaw - data.batch.SetLastL2BLockTimestamp(l2Block.Timestamp) - // Sanity check: should be the same coinbase within the batch - if common.BytesToAddress(l2Block.Coinbase) != data.batch.LastCoinbase() { - s.logFatalf( - "coinbase changed within the batch! 
(Previous %v, Current %v)", - data.batch.LastCoinbase, common.BytesToAddress(l2Block.Coinbase), - ) - } - data.batch.SetLastCoinbase(common.BytesToAddress(l2Block.Coinbase)) - data.batch.SetL1InfoTreeIndex(l2Block.L1InfotreeIndex) - // New L2 block raw - newBlockRaw := state.L2BlockRaw{} - - // Add L2 block - wipBatchRaw.Blocks = append(wipBatchRaw.Blocks, newBlockRaw) - // Get current L2 block - _, blockRaw := s.getWipL2Block() - if blockRaw == nil { - s.logger.Debugf("wip block %d not found!") - return - } - - // Fill in data - blockRaw.DeltaTimestamp = l2Block.DeltaTimestamp - blockRaw.IndexL1InfoTree = l2Block.L1InfotreeIndex - } - - s.mutexSequence.Unlock() -} - -// addNewBlockTx adds a new Tx to the current L2 block -func (s *SequenceSender) addNewBlockTx(l2Tx *datastream.Transaction) { - s.mutexSequence.Lock() - s.logger.Debugf("........new tx, length %d EGP %d SR %x..", - len(l2Tx.Encoded), l2Tx.EffectiveGasPricePercentage, l2Tx.ImStateRoot[:8], - ) - - // Current L2 block - _, blockRaw := s.getWipL2Block() - - // New Tx raw - tx, err := state.DecodeTx(common.Bytes2Hex(l2Tx.Encoded)) - if err != nil { - s.logger.Fatalf("error decoding tx: %v", err) - return - } - - l2TxRaw := state.L2TxRaw{ - EfficiencyPercentage: uint8(l2Tx.EffectiveGasPricePercentage), - TxAlreadyEncoded: false, - Tx: tx, - } - - // Add Tx - blockRaw.Transactions = append(blockRaw.Transactions, l2TxRaw) - s.mutexSequence.Unlock() -} - -// getWipL2Block returns index of the array and pointer to the current L2 block (helper func) -func (s *SequenceSender) getWipL2Block() (uint64, *state.L2BlockRaw) { //nolint:unparam - // Current batch - var wipBatchRaw *state.BatchRawV2 - if s.sequenceData[s.wipBatch] != nil { - wipBatchRaw = s.sequenceData[s.wipBatch].batchRaw - } - - // Current wip block - if len(wipBatchRaw.Blocks) > 0 { - blockIndex := uint64(len(wipBatchRaw.Blocks)) - 1 - return blockIndex, &wipBatchRaw.Blocks[blockIndex] - } else { - return 0, nil - } -} - -// 
updateLatestVirtualBatch queries the value in L1 and updates the latest virtual batch field -func (s *SequenceSender) updateLatestVirtualBatch() error { - s.latestVirtualBatchLock.Lock() - defer s.latestVirtualBatchLock.Unlock() - - // Get latest virtual state batch from L1 - var err error - - s.latestVirtualBatch, err = s.etherman.GetLatestBatchNumber() - if err != nil { - s.logger.Errorf("error getting latest virtual batch, error: %v", err) - return errors.New("fail to get latest virtual batch") - } else { - s.logger.Infof("latest virtual batch is %d", s.latestVirtualBatch) +// logFatalf logs error, activates flag to stop sequencing, and remains in an infinite loop +func (s *SequenceSender) logFatalf(template string, args ...interface{}) { + atomic.StoreUint32(&s.seqSendingStopped, 1) + for { + s.logger.Errorf(template, args...) + s.logger.Errorf("sequence sending stopped.") + time.Sleep(ten * time.Second) } - return nil } // marginTimeElapsed checks if the time between currentTime and l2BlockTimestamp is greater than timeMargin. // If it's greater returns true, otherwise it returns false and the waitTime needed to achieve this timeMargin -func (s *SequenceSender) marginTimeElapsed( +func marginTimeElapsed( l2BlockTimestamp uint64, currentTime uint64, timeMargin int64, ) (bool, int64) { // Check the time difference between L2 block and currentTime @@ -1290,60 +530,8 @@ func (s *SequenceSender) marginTimeElapsed( waitTime = timeMargin - timeDiff } return false, waitTime - } else { // timeDiff is greater than timeMargin - return true, 0 - } -} - -// logFatalf logs error, activates flag to stop sequencing, and remains in an infinite loop -func (s *SequenceSender) logFatalf(template string, args ...interface{}) { - s.seqSendingStopped = true - s.logger.Errorf(template, args...) 
- s.logger.Errorf("sequence sending stopped.") - for { - time.Sleep(1 * time.Second) - } -} - -// printBatch prints data from batch raw V2 -func printBatch(raw *state.BatchRawV2, showBlock bool, showTx bool) { - // Total amount of L2 tx in the batch - totalL2Txs := 0 - for k := 0; k < len(raw.Blocks); k++ { - totalL2Txs += len(raw.Blocks[k].Transactions) } - log.Debugf("// #blocks: %d, #L2txs: %d", len(raw.Blocks), totalL2Txs) - - // Blocks info - if showBlock { - numBlocks := len(raw.Blocks) - var firstBlock *state.L2BlockRaw - var lastBlock *state.L2BlockRaw - if numBlocks > 0 { - firstBlock = &raw.Blocks[0] - } - if numBlocks > 1 { - lastBlock = &raw.Blocks[numBlocks-1] - } - if firstBlock != nil { - log.Debugf("// block first (indL1info: %d, delta-timestamp: %d, #L2txs: %d)", - firstBlock.IndexL1InfoTree, firstBlock.DeltaTimestamp, len(firstBlock.Transactions), - ) - // Tx info - if showTx { - for iTx, tx := range firstBlock.Transactions { - v, r, s := tx.Tx.RawSignatureValues() - log.Debugf("// tx(%d) effPct: %d, encoded: %t, v: %v, r: %v, s: %v", - iTx, tx.EfficiencyPercentage, tx.TxAlreadyEncoded, v, r, s, - ) - } - } - } - if lastBlock != nil { - log.Debugf("// block last (indL1info: %d, delta-timestamp: %d, #L2txs: %d)", - lastBlock.DeltaTimestamp, lastBlock.DeltaTimestamp, len(lastBlock.Transactions), - ) - } - } + // timeDiff is greater than timeMargin + return true, 0 } diff --git a/sequencesender/sequencesender_test.go b/sequencesender/sequencesender_test.go index c16fda42..432c5d39 100644 --- a/sequencesender/sequencesender_test.go +++ b/sequencesender/sequencesender_test.go @@ -1,12 +1,25 @@ package sequencesender import ( + "errors" + "math/big" + "os" "testing" + "time" + types2 "github.com/0xPolygon/cdk/config/types" + "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/log" + "github.com/0xPolygon/cdk/sequencesender/mocks" + "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" + "github.com/0xPolygon/cdk/sequencesender/txbuilder" 
"github.com/0xPolygon/cdk/state" + ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "golang.org/x/net/context" ) const ( @@ -15,7 +28,15 @@ const ( txStreamEncoded3 = "b8b402f8b101268505d21dba0085076c363d8982dc60941929761e87667283f087ea9ab8370c174681b4e980b844095ea7b300000000000000000000000080a64c6d7f12c47b7c66c5b4e20e72bc1fcd5d9effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc001a0dd4db494969139a120e8721842455ec13f82757a4fc49b66d447c7d32d095a1da06ef54068a9aa67ecc4f52d885299a04feb6f3531cdfc771f1412cd3331d1ba4c" ) -func TestStreamTx(t *testing.T) { +var ( + now = time.Now() +) + +func TestMain(t *testing.M) { + t.Run() +} + +func Test_encoding(t *testing.T) { tx1, err := state.DecodeTx(txStreamEncoded1) require.NoError(t, err) tx2, err := state.DecodeTx(txStreamEncoded2) @@ -61,13 +82,515 @@ func TestStreamTx(t *testing.T) { }, } - printBatch(&batch, true, true) - encodedBatch, err := state.EncodeBatchV2(&batch) require.NoError(t, err) decodedBatch, err := state.DecodeBatchV2(encodedBatch) require.NoError(t, err) - printBatch(decodedBatch, true, true) + require.Equal(t, batch.String(), decodedBatch.String()) +} + +func Test_Start(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + getEthTxManager func(t *testing.T) *mocks.EthTxManagerMock + getEtherman func(t *testing.T) *mocks.EthermanMock + batchWaitDuration types2.Duration + expectNonce uint64 + expectLastVirtualBatch uint64 + expectFromStreamBatch uint64 + expectWipBatch uint64 + expectLatestSentToL1Batch uint64 + }{ + { + name: "successfully started", + getEtherman: func(t *testing.T) *mocks.EthermanMock { + t.Helper() + + mngr := mocks.NewEthermanMock(t) + mngr.On("GetLatestBatchNumber").Return(uint64(1), nil) + return mngr + }, + getEthTxManager: func(t *testing.T) *mocks.EthTxManagerMock { + 
t.Helper() + + mngr := mocks.NewEthTxManagerMock(t) + mngr.On("Start").Return(nil) + mngr.On("ResultsByStatus", mock.Anything, []ethtxtypes.MonitoredTxStatus(nil)).Return(nil, nil) + return mngr + }, + batchWaitDuration: types2.NewDuration(time.Millisecond), + expectNonce: 3, + expectLastVirtualBatch: 1, + expectFromStreamBatch: 1, + expectWipBatch: 2, + expectLatestSentToL1Batch: 1, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + tmpFile, err := os.CreateTemp(os.TempDir(), tt.name+".tmp") + require.NoError(t, err) + defer os.RemoveAll(tmpFile.Name() + ".tmp") + + s := SequenceSender{ + etherman: tt.getEtherman(t), + ethTxManager: tt.getEthTxManager(t), + cfg: Config{ + SequencesTxFileName: tmpFile.Name() + ".tmp", + GetBatchWaitInterval: tt.batchWaitDuration, + }, + logger: log.GetDefaultLogger(), + } + + ctx, cancel := context.WithCancel(context.Background()) + s.Start(ctx) + time.Sleep(time.Second) + cancel() + time.Sleep(time.Second) + + require.Equal(t, tt.expectLatestSentToL1Batch, s.latestSentToL1Batch) + }) + } +} + +func Test_purgeSequences(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + seqSendingStopped uint32 + sequenceList []uint64 + sequenceData map[uint64]*sequenceData + latestVirtualBatchNumber uint64 + expectedSequenceList []uint64 + expectedSequenceData map[uint64]*sequenceData + }{ + { + name: "sequences purged when seqSendingStopped", + seqSendingStopped: 1, + sequenceList: []uint64{1, 2}, + sequenceData: map[uint64]*sequenceData{ + 1: {}, + 2: {}, + }, + expectedSequenceList: []uint64{1, 2}, + expectedSequenceData: map[uint64]*sequenceData{ + 1: {}, + 2: {}, + }, + }, + { + name: "no sequences purged", + seqSendingStopped: 0, + sequenceList: []uint64{4, 5}, + sequenceData: map[uint64]*sequenceData{ + 4: {}, + 5: {}, + }, + expectedSequenceList: []uint64{4, 5}, + expectedSequenceData: map[uint64]*sequenceData{ + 4: {}, + 5: {}, + }, + }, + { + name: "sequences 
purged", + seqSendingStopped: 0, + sequenceList: []uint64{4, 5, 6}, + sequenceData: map[uint64]*sequenceData{ + 4: {}, + 5: {}, + 6: {}, + }, + latestVirtualBatchNumber: 5, + expectedSequenceList: []uint64{6}, + expectedSequenceData: map[uint64]*sequenceData{ + 6: {}, + }, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ss := SequenceSender{ + seqSendingStopped: tt.seqSendingStopped, + sequenceList: tt.sequenceList, + sequenceData: tt.sequenceData, + latestVirtualBatchNumber: tt.latestVirtualBatchNumber, + logger: log.GetDefaultLogger(), + } + + ss.purgeSequences() + + require.Equal(t, tt.expectedSequenceList, ss.sequenceList) + require.Equal(t, tt.expectedSequenceData, ss.sequenceData) + }) + } +} + +func Test_tryToSendSequence(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + getEthTxManager func(t *testing.T) *mocks.EthTxManagerMock + getEtherman func(t *testing.T) *mocks.EthermanMock + getTxBuilder func(t *testing.T) *mocks.TxBuilderMock + maxPendingTxn uint64 + sequenceList []uint64 + latestSentToL1Batch uint64 + sequenceData map[uint64]*sequenceData + ethTransactions map[common.Hash]*ethTxData + ethTxData map[common.Hash][]byte + + expectErr error + }{ + { + name: "successfully sent", + getEtherman: func(t *testing.T) *mocks.EthermanMock { + t.Helper() + + mngr := mocks.NewEthermanMock(t) + mngr.On("GetLatestBatchNumber").Return(uint64(1), nil) + return mngr + }, + getEthTxManager: func(t *testing.T) *mocks.EthTxManagerMock { + t.Helper() + + mngr := mocks.NewEthTxManagerMock(t) + return mngr + }, + getTxBuilder: func(t *testing.T) *mocks.TxBuilderMock { + t.Helper() + + mngr := mocks.NewTxBuilderMock(t) + mngr.On("NewSequenceIfWorthToSend", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(txbuilder.NewBananaSequence(etherman.SequenceBanana{}), nil) + return mngr + }, + maxPendingTxn: 10, + sequenceList: []uint64{2}, + latestSentToL1Batch: 1, + sequenceData: 
map[uint64]*sequenceData{ + 2: { + batchClosed: true, + batch: txbuilder.NewBananaBatch(ðerman.Batch{}), + }, + }, + }, + { + name: "successfully sent new sequence", + getEtherman: func(t *testing.T) *mocks.EthermanMock { + t.Helper() + + mngr := mocks.NewEthermanMock(t) + mngr.On("GetLatestBatchNumber").Return(uint64(1), nil) + mngr.On("GetLatestBlockHeader", mock.Anything).Return(&types.Header{ + Number: big.NewInt(1), + }, nil) + mngr.On("EstimateGas", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(uint64(100500), nil) + return mngr + }, + getEthTxManager: func(t *testing.T) *mocks.EthTxManagerMock { + t.Helper() + + mngr := mocks.NewEthTxManagerMock(t) + mngr.On("AddWithGas", mock.Anything, mock.Anything, big.NewInt(0), mock.Anything, mock.Anything, mock.Anything, uint64(100500)).Return(common.Hash{}, nil) + mngr.On("Result", mock.Anything, common.Hash{}).Return(ethtxtypes.MonitoredTxResult{ + ID: common.Hash{}, + Data: []byte{1, 2, 3}, + }, nil) + return mngr + }, + getTxBuilder: func(t *testing.T) *mocks.TxBuilderMock { + t.Helper() + + mngr := mocks.NewTxBuilderMock(t) + mngr.On("NewSequenceIfWorthToSend", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, nil) + mngr.On("NewSequence", mock.Anything, mock.Anything, mock.Anything).Return(txbuilder.NewBananaSequence(etherman.SequenceBanana{ + Batches: []etherman.Batch{{ + BatchNumber: 2, + }}, + }), nil) + mngr.On("BuildSequenceBatchesTx", mock.Anything, mock.Anything).Return(types.NewTx(&types.LegacyTx{}), nil) + return mngr + }, + maxPendingTxn: 10, + sequenceList: []uint64{2}, + latestSentToL1Batch: 1, + sequenceData: map[uint64]*sequenceData{ + 2: { + batchClosed: true, + batch: txbuilder.NewBananaBatch(ðerman.Batch{}), + }, + }, + ethTransactions: map[common.Hash]*ethTxData{}, + ethTxData: map[common.Hash][]byte{}, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + tmpFile, err := 
os.CreateTemp(os.TempDir(), tt.name+".tmp") + require.NoError(t, err) + defer os.RemoveAll(tmpFile.Name() + ".tmp") + + s := SequenceSender{ + ethTxManager: tt.getEthTxManager(t), + etherman: tt.getEtherman(t), + TxBuilder: tt.getTxBuilder(t), + cfg: Config{ + SequencesTxFileName: tmpFile.Name() + ".tmp", + MaxPendingTx: tt.maxPendingTxn, + }, + sequenceList: tt.sequenceList, + latestSentToL1Batch: tt.latestSentToL1Batch, + sequenceData: tt.sequenceData, + ethTransactions: tt.ethTransactions, + ethTxData: tt.ethTxData, + logger: log.GetDefaultLogger(), + } + + s.tryToSendSequence(context.Background()) + }) + } +} + +func Test_getSequencesToSend(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + sequenceList []uint64 + latestSentToL1Batch uint64 + forkUpgradeBatchNumber uint64 + sequenceData map[uint64]*sequenceData + getTxBuilder func(t *testing.T) *mocks.TxBuilderMock + expectedSequence seqsendertypes.Sequence + expectedErr error + }{ + { + name: "successfully get sequence", + sequenceList: []uint64{2}, + latestSentToL1Batch: 1, + sequenceData: map[uint64]*sequenceData{ + 2: { + batchClosed: true, + batch: txbuilder.NewBananaBatch(ðerman.Batch{}), + }, + }, + getTxBuilder: func(t *testing.T) *mocks.TxBuilderMock { + t.Helper() + + mngr := mocks.NewTxBuilderMock(t) + mngr.On("NewSequenceIfWorthToSend", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(txbuilder.NewBananaSequence(etherman.SequenceBanana{ + Batches: []etherman.Batch{{ + BatchNumber: 2, + }}, + }), nil) + return mngr + }, + expectedSequence: txbuilder.NewBananaSequence(etherman.SequenceBanana{ + Batches: []etherman.Batch{{ + BatchNumber: 2, + }}, + }), + expectedErr: nil, + }, + { + name: "different coinbase", + sequenceList: []uint64{2, 3}, + latestSentToL1Batch: 1, + sequenceData: map[uint64]*sequenceData{ + 2: { + batchClosed: true, + batch: txbuilder.NewBananaBatch(ðerman.Batch{}), + }, + 3: { + batchClosed: true, + batch: txbuilder.NewBananaBatch(ðerman.Batch{ 
+ LastCoinbase: common.HexToAddress("0x2"), + }), + }, + }, + getTxBuilder: func(t *testing.T) *mocks.TxBuilderMock { + t.Helper() + + mngr := mocks.NewTxBuilderMock(t) + mngr.On("NewSequenceIfWorthToSend", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, nil) + mngr.On("NewSequence", mock.Anything, mock.Anything, mock.Anything).Return(txbuilder.NewBananaSequence(etherman.SequenceBanana{ + Batches: []etherman.Batch{{ + BatchNumber: 2, + }}, + }), nil) + return mngr + }, + expectedSequence: txbuilder.NewBananaSequence(etherman.SequenceBanana{ + Batches: []etherman.Batch{{ + BatchNumber: 2, + }}, + }), + expectedErr: nil, + }, + { + name: "NewSequenceIfWorthToSend return error", + sequenceList: []uint64{2}, + latestSentToL1Batch: 1, + sequenceData: map[uint64]*sequenceData{ + 2: { + batchClosed: true, + batch: txbuilder.NewBananaBatch(ðerman.Batch{}), + }, + }, + getTxBuilder: func(t *testing.T) *mocks.TxBuilderMock { + t.Helper() + + mngr := mocks.NewTxBuilderMock(t) + mngr.On("NewSequenceIfWorthToSend", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("test error")) + return mngr + }, + expectedErr: errors.New("test error"), + }, + { + name: "fork upgrade", + sequenceList: []uint64{2}, + latestSentToL1Batch: 1, + forkUpgradeBatchNumber: 2, + sequenceData: map[uint64]*sequenceData{ + 2: { + batchClosed: true, + batch: txbuilder.NewBananaBatch(ðerman.Batch{}), + }, + }, + getTxBuilder: func(t *testing.T) *mocks.TxBuilderMock { + t.Helper() + + mngr := mocks.NewTxBuilderMock(t) + mngr.On("NewSequenceIfWorthToSend", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, nil) + mngr.On("NewSequence", mock.Anything, mock.Anything, mock.Anything).Return(txbuilder.NewBananaSequence(etherman.SequenceBanana{ + Batches: []etherman.Batch{{ + BatchNumber: 2, + }}, + }), nil) + return mngr + }, + expectedSequence: txbuilder.NewBananaSequence(etherman.SequenceBanana{ + Batches: []etherman.Batch{{ + 
BatchNumber: 2, + }}, + }), + expectedErr: nil, + }, + { + name: "fork upgrade passed", + sequenceList: []uint64{2}, + latestSentToL1Batch: 1, + forkUpgradeBatchNumber: 1, + sequenceData: map[uint64]*sequenceData{ + 2: { + batchClosed: true, + batch: txbuilder.NewBananaBatch(ðerman.Batch{}), + }, + }, + getTxBuilder: func(t *testing.T) *mocks.TxBuilderMock { + t.Helper() + + mngr := mocks.NewTxBuilderMock(t) + return mngr + }, + expectedErr: errors.New("aborting sequencing process as we reached the batch 2 where a new forkid is applied (upgrade)"), + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ss := SequenceSender{ + sequenceList: tt.sequenceList, + latestSentToL1Batch: tt.latestSentToL1Batch, + cfg: Config{ + ForkUpgradeBatchNumber: tt.forkUpgradeBatchNumber, + }, + sequenceData: tt.sequenceData, + TxBuilder: tt.getTxBuilder(t), + logger: log.GetDefaultLogger(), + } + + sequence, err := ss.getSequencesToSend(context.Background()) + if tt.expectedErr != nil { + require.Equal(t, tt.expectedErr, err) + } else { + require.NoError(t, err) + require.Equal(t, tt.expectedSequence, sequence) + } + }) + } +} + +func Test_marginTimeElapsed(t *testing.T) { + t.Parallel() + + type args struct { + l2BlockTimestamp uint64 + currentTime uint64 + timeMargin int64 + } + tests := []struct { + name string + args args + want bool + want1 int64 + }{ + { + name: "time elapsed", + args: args{ + l2BlockTimestamp: 100, + currentTime: 200, + timeMargin: 50, + }, + want: true, + want1: 0, + }, + { + name: "time not elapsed", + args: args{ + l2BlockTimestamp: 100, + currentTime: 200, + timeMargin: 150, + }, + want: false, + want1: 50, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + got, got1 := marginTimeElapsed(tt.args.l2BlockTimestamp, tt.args.currentTime, tt.args.timeMargin) + require.Equal(t, tt.want, got, "marginTimeElapsed() got = %v, want %v", got, tt.want) + 
require.Equal(t, tt.want1, got1, "marginTimeElapsed() got1 = %v, want %v", got1, tt.want1) + }) + } } diff --git a/sequencesender/txbuilder/banana_base.go b/sequencesender/txbuilder/banana_base.go index 7b451ed8..2868bb4b 100644 --- a/sequencesender/txbuilder/banana_base.go +++ b/sequencesender/txbuilder/banana_base.go @@ -2,6 +2,7 @@ package txbuilder import ( "context" + "errors" "fmt" "math/big" @@ -27,6 +28,7 @@ type globalExitRootBananaContractor interface { type l1InfoSyncer interface { GetLatestInfoUntilBlock(ctx context.Context, blockNum uint64) (*l1infotreesync.L1InfoTreeLeaf, error) + GetInitL1InfoRootMap(ctx context.Context) (*l1infotreesync.L1InfoTreeInitial, error) } type l1Client interface { @@ -74,39 +76,90 @@ func (t *TxBuilderBananaBase) NewBatchFromL2Block(l2Block *datastream.L2Block) s return NewBananaBatch(batch) } -func (t *TxBuilderBananaBase) NewSequence( - ctx context.Context, batches []seqsendertypes.Batch, coinbase common.Address, -) (seqsendertypes.Sequence, error) { - ethBatches := toEthermanBatches(batches) - sequence := etherman.NewSequenceBanana(ethBatches, coinbase) - var greatestL1Index uint32 - for _, b := range sequence.Batches { - if greatestL1Index < b.L1InfoTreeIndex { - greatestL1Index = b.L1InfoTreeIndex +func getHighestL1InfoIndex(batches []etherman.Batch) uint32 { + var highestL1Index uint32 + for _, b := range batches { + if highestL1Index < b.L1InfoTreeIndex { + highestL1Index = b.L1InfoTreeIndex } } + return highestL1Index +} + +// Returns CounterL1InfoRoot to use for this batch +func (t *TxBuilderBananaBase) GetCounterL1InfoRoot(ctx context.Context, highestL1IndexInBatch uint32) (uint32, error) { header, err := t.ethClient.HeaderByNumber(ctx, t.blockFinality) if err != nil { - return nil, fmt.Errorf("error calling HeaderByNumber, with block finality %d: %w", t.blockFinality.Int64(), err) + return 0, fmt.Errorf("error calling HeaderByNumber, with block finality %d: %w", t.blockFinality.Int64(), err) } + var 
resL1InfoCounter uint32 + info, err := t.l1InfoTree.GetLatestInfoUntilBlock(ctx, header.Number.Uint64()) + if err == nil { + resL1InfoCounter = info.L1InfoTreeIndex + 1 + } + if errors.Is(err, l1infotreesync.ErrNotFound) { + // There are no L1 Info tree leaves yet, so we can try to use L1InfoRootMap event + l1infotreeInitial, err := t.l1InfoTree.GetInitL1InfoRootMap(ctx) + if l1infotreeInitial == nil || err != nil { + return 0, fmt.Errorf("error no leaves on L1InfoTree yet and GetInitL1InfoRootMap fails: %w", err) + } + // We use this leaf as first one + resL1InfoCounter = l1infotreeInitial.LeafCount + } else if err != nil { + return 0, fmt.Errorf("error calling GetLatestInfoUntilBlock with block num %d: %w", header.Number.Uint64(), err) + } + // special case: there are no leaves in L1InfoTree yet + if resL1InfoCounter == 0 && highestL1IndexInBatch == 0 { + log.Infof("No L1 Info tree leaves yet, batch use no leaf") + return resL1InfoCounter, nil + } + if resL1InfoCounter > highestL1IndexInBatch { + return resL1InfoCounter, nil + } + + return 0, fmt.Errorf( + "sequence contained an L1 Info tree index (%d) that is greater than the one synced with the desired finality (%d)", + highestL1IndexInBatch, resL1InfoCounter, + ) +} + +func (t *TxBuilderBananaBase) CheckL1InfoTreeLeafCounterVsInitL1InfoMap(ctx context.Context, leafCounter uint32) error { + l1infotreeInitial, err := t.l1InfoTree.GetInitL1InfoRootMap(ctx) if err != nil { - return nil, fmt.Errorf("error calling GetLatestInfoUntilBlock with block num %d: %w", header.Number.Uint64(), err) - } - if info.L1InfoTreeIndex >= greatestL1Index { - sequence.CounterL1InfoRoot = info.L1InfoTreeIndex + 1 - } else { - return nil, fmt.Errorf( - "sequence contained an L1 Info tree index (%d) that is greater than the one synced with the desired finality (%d)", - greatestL1Index, info.L1InfoTreeIndex, - ) + return fmt.Errorf("l1InfoTree.GetInitL1InfoRootMap fails: %w", err) + } + if l1infotreeInitial == nil { + log.Warnf("No 
InitL1InfoRootMap found, skipping check") + return nil + } + if leafCounter < l1infotreeInitial.LeafCount { + return fmt.Errorf("cant use this leafCounter because is previous to first value on contract Map"+ + "leafCounter(%d) < l1infotreeInitial.LeafCount(%d)", leafCounter, l1infotreeInitial.LeafCount) } + return nil +} +func (t *TxBuilderBananaBase) NewSequence( + ctx context.Context, batches []seqsendertypes.Batch, coinbase common.Address, +) (seqsendertypes.Sequence, error) { + ethBatches := toEthermanBatches(batches) + sequence := etherman.NewSequenceBanana(ethBatches, coinbase) + greatestL1Index := getHighestL1InfoIndex(sequence.Batches) + + counterL1InfoRoot, err := t.GetCounterL1InfoRoot(ctx, greatestL1Index) + if err != nil { + return nil, err + } + sequence.CounterL1InfoRoot = counterL1InfoRoot l1InfoRoot, err := t.getL1InfoRoot(sequence.CounterL1InfoRoot) if err != nil { return nil, err } - + err = t.CheckL1InfoTreeLeafCounterVsInitL1InfoMap(ctx, sequence.CounterL1InfoRoot) + if err != nil { + return nil, err + } sequence.L1InfoRoot = l1InfoRoot accInputHash, err := t.rollupContract.LastAccInputHash(&bind.CallOpts{Pending: false}) @@ -134,10 +187,26 @@ func (t *TxBuilderBananaBase) NewSequence( sequence.OldAccInputHash = oldAccInputHash sequence.AccInputHash = accInputHash + + err = SequenceSanityCheck(sequence) + if err != nil { + return nil, fmt.Errorf("sequenceSanityCheck fails. 
Err: %w", err) + } res := NewBananaSequence(*sequence) return res, nil } +func SequenceSanityCheck(seq *etherman.SequenceBanana) error { + maxL1InfoIndex, err := calculateMaxL1InfoTreeIndexInsideSequence(seq) + if err != nil { + return err + } + if seq.CounterL1InfoRoot < maxL1InfoIndex+1 { + return fmt.Errorf("wrong CounterL1InfoRoot(%d): BatchL2Data (max=%d) ", seq.CounterL1InfoRoot, maxL1InfoIndex) + } + return nil +} + func (t *TxBuilderBananaBase) getL1InfoRoot(counterL1InfoRoot uint32) (common.Hash, error) { return t.globalExitRootContract.L1InfoRootMap(&bind.CallOpts{Pending: false}, counterL1InfoRoot) } diff --git a/sequencesender/txbuilder/banana_base_test.go b/sequencesender/txbuilder/banana_base_test.go index af4b05c0..44d7a7b1 100644 --- a/sequencesender/txbuilder/banana_base_test.go +++ b/sequencesender/txbuilder/banana_base_test.go @@ -2,14 +2,17 @@ package txbuilder_test import ( "context" + "fmt" "math/big" "testing" + "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/l1infotreesync" "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" "github.com/0xPolygon/cdk/sequencesender/txbuilder" "github.com/0xPolygon/cdk/sequencesender/txbuilder/mocks_txbuilder" + "github.com/0xPolygon/cdk/state" "github.com/0xPolygon/cdk/state/datastream" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" @@ -28,11 +31,19 @@ func TestBananaBaseNewSequenceEmpty(t *testing.T) { Return(&l1infotreesync.L1InfoTreeLeaf{L1InfoTreeIndex: 69}, nil) lastAcc := common.HexToHash("0x8aca9664752dbae36135fd0956c956fc4a370feeac67485b49bcd4b99608ae41") testData.rollupContract.EXPECT().LastAccInputHash(mock.Anything).Return(lastAcc, nil) + testData.l1InfoTreeSync.EXPECT().GetInitL1InfoRootMap(mock.Anything).Return(nil, nil) seq, err := testData.sut.NewSequence(context.TODO(), nil, common.Address{}) require.NotNil(t, seq) require.NoError(t, err) - // TODO check values - // require.Equal(t, lastAcc, 
seq.LastAccInputHash()) +} + +func TestBananaBaseNewSequenceErrorHeaderByNumber(t *testing.T) { + testData := newBananaBaseTestData(t) + testData.l1Client.On("HeaderByNumber", mock.Anything, mock.Anything). + Return(nil, fmt.Errorf("error")) + seq, err := testData.sut.NewSequence(context.TODO(), nil, common.Address{}) + require.Nil(t, seq) + require.Error(t, err) } func TestBananaBaseNewBatchFromL2Block(t *testing.T) { @@ -64,6 +75,8 @@ func TestBananaBaseNewSequenceBatch(t *testing.T) { Coinbase: []byte{1, 2, 3}, GlobalExitRoot: []byte{4, 5, 6}, } + testData.l1InfoTreeSync.EXPECT().GetInitL1InfoRootMap(mock.Anything).Return(nil, nil).Once() + batch := testData.sut.NewBatchFromL2Block(l2Block) batches := []seqsendertypes.Batch{batch} lastAcc := common.HexToHash("0x8aca9664752dbae36135fd0956c956fc4a370feeac67485b49bcd4b99608ae41") @@ -79,6 +92,76 @@ func TestBananaBaseNewSequenceBatch(t *testing.T) { // TODO: check that the seq have the right values } +func TestBananaSanityCheck(t *testing.T) { + batch := state.BatchRawV2{ + Blocks: []state.L2BlockRaw{ + { + BlockNumber: 1, + ChangeL2BlockHeader: state.ChangeL2BlockHeader{ + DeltaTimestamp: 1, + IndexL1InfoTree: 1, + }, + }, + }, + } + data, err := state.EncodeBatchV2(&batch) + require.NoError(t, err) + require.NotNil(t, data) + seq := etherman.SequenceBanana{ + CounterL1InfoRoot: 2, + Batches: []etherman.Batch{ + { + L2Data: data, + }, + }, + } + err = txbuilder.SequenceSanityCheck(&seq) + require.NoError(t, err, "inside batchl2data max is 1 and counter is 2 (2>=1+1)") + seq.CounterL1InfoRoot = 1 + err = txbuilder.SequenceSanityCheck(&seq) + require.Error(t, err, "inside batchl2data max is 1 and counter is 1. 
The batchl2data is not included in counter") +} + +func TestBananaSanityCheckNilSeq(t *testing.T) { + err := txbuilder.SequenceSanityCheck(nil) + require.Error(t, err, "nil sequence") +} + +func TestBananaEmptyL1InfoTree(t *testing.T) { + testData := newBananaBaseTestData(t) + + testData.l1Client.On("HeaderByNumber", mock.Anything, mock.Anything). + Return(&types.Header{Number: big.NewInt(69)}, nil) + testData.l1InfoTreeSync.EXPECT().GetLatestInfoUntilBlock(testData.ctx, uint64(69)).Return(nil, l1infotreesync.ErrNotFound) + testData.l1InfoTreeSync.EXPECT().GetInitL1InfoRootMap(testData.ctx).Return(&l1infotreesync.L1InfoTreeInitial{LeafCount: 10}, nil) + + leafCounter, err := testData.sut.GetCounterL1InfoRoot(testData.ctx, 0) + require.NoError(t, err) + require.Equal(t, uint32(10), leafCounter) +} + +func TestCheckL1InfoTreeLeafCounterVsInitL1InfoMap(t *testing.T) { + testData := newBananaBaseTestData(t) + + testData.l1InfoTreeSync.EXPECT().GetInitL1InfoRootMap(testData.ctx).Return(&l1infotreesync.L1InfoTreeInitial{LeafCount: 10}, nil) + err := testData.sut.CheckL1InfoTreeLeafCounterVsInitL1InfoMap(testData.ctx, 10) + require.NoError(t, err, "10 == 10 so is accepted") + + err = testData.sut.CheckL1InfoTreeLeafCounterVsInitL1InfoMap(testData.ctx, 9) + require.Error(t, err, "9 < 10 so is rejected") + + err = testData.sut.CheckL1InfoTreeLeafCounterVsInitL1InfoMap(testData.ctx, 11) + require.NoError(t, err, "11 > 10 so is accepted") +} + +func TestCheckL1InfoTreeLeafCounterVsInitL1InfoMapNotFound(t *testing.T) { + testData := newBananaBaseTestData(t) + + testData.l1InfoTreeSync.EXPECT().GetInitL1InfoRootMap(testData.ctx).Return(nil, nil) + err := testData.sut.CheckL1InfoTreeLeafCounterVsInitL1InfoMap(testData.ctx, 10) + require.NoError(t, err, "10 == 10 so is accepted") +} + type testDataBananaBase struct { rollupContract *mocks_txbuilder.RollupBananaBaseContractor getContract *mocks_txbuilder.GlobalExitRootBananaContractor @@ -86,6 +169,7 @@ type testDataBananaBase 
struct { sut *txbuilder.TxBuilderBananaBase l1InfoTreeSync *mocks_txbuilder.L1InfoSyncer l1Client *mocks_txbuilder.L1Client + ctx context.Context } func newBananaBaseTestData(t *testing.T) *testDataBananaBase { @@ -110,5 +194,6 @@ func newBananaBaseTestData(t *testing.T) *testDataBananaBase { sut: sut, l1InfoTreeSync: l1InfoSyncer, l1Client: l1Client, + ctx: context.TODO(), } } diff --git a/sequencesender/txbuilder/banana_types.go b/sequencesender/txbuilder/banana_types.go index c09095b6..c69d2876 100644 --- a/sequencesender/txbuilder/banana_types.go +++ b/sequencesender/txbuilder/banana_types.go @@ -5,6 +5,7 @@ import ( "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" + "github.com/0xPolygon/cdk/state" "github.com/ethereum/go-ethereum/common" ) @@ -147,3 +148,37 @@ func (b *BananaSequence) LastVirtualBatchNumber() uint64 { func (b *BananaSequence) SetLastVirtualBatchNumber(batchNumber uint64) { b.SequenceBanana.LastVirtualBatchNumber = batchNumber } + +func calculateMaxL1InfoTreeIndexInsideL2Data(l2data []byte) (uint32, error) { + batchRawV2, err := state.DecodeBatchV2(l2data) + if err != nil { + return 0, fmt.Errorf("calculateMaxL1InfoTreeIndexInsideL2Data: error decoding batchL2Data, err:%w", err) + } + if batchRawV2 == nil { + return 0, fmt.Errorf("calculateMaxL1InfoTreeIndexInsideL2Data: batchRawV2 is nil") + } + maxIndex := uint32(0) + for _, block := range batchRawV2.Blocks { + if block.IndexL1InfoTree > maxIndex { + maxIndex = block.IndexL1InfoTree + } + } + return maxIndex, nil +} + +func calculateMaxL1InfoTreeIndexInsideSequence(seq *etherman.SequenceBanana) (uint32, error) { + if seq == nil { + return 0, fmt.Errorf("calculateMaxL1InfoTreeIndexInsideSequence: seq is nil") + } + maxIndex := uint32(0) + for _, batch := range seq.Batches { + index, err := calculateMaxL1InfoTreeIndexInsideL2Data(batch.L2Data) + if err != nil { + return 0, fmt.Errorf("calculateMaxL1InfoTreeIndexInsideBatches: error getting batch 
L1InfoTree , err:%w", err) + } + if index > maxIndex { + maxIndex = index + } + } + return maxIndex, nil +} diff --git a/sequencesender/txbuilder/banana_validium_test.go b/sequencesender/txbuilder/banana_validium_test.go index 8f764595..71f059b9 100644 --- a/sequencesender/txbuilder/banana_validium_test.go +++ b/sequencesender/txbuilder/banana_validium_test.go @@ -34,6 +34,8 @@ func TestBananaValidiumBuildSequenceBatchesTxSequenceErrorsFromDA(t *testing.T) Return(&types.Header{Number: big.NewInt(69)}, nil) testData.l1InfoTreeSync.On("GetLatestInfoUntilBlock", mock.Anything, mock.Anything). Return(&l1infotreesync.L1InfoTreeLeaf{L1InfoTreeIndex: 7}, nil) + testData.l1InfoTreeSync.EXPECT().GetInitL1InfoRootMap(mock.Anything).Return(nil, nil) + seq, err := newSequenceBananaValidiumForTest(testData) require.NoError(t, err) ctx := context.TODO() @@ -53,6 +55,8 @@ func TestBananaValidiumBuildSequenceBatchesTxSequenceDAOk(t *testing.T) { Return(&types.Header{Number: big.NewInt(69)}, nil) testData.l1InfoTreeSync.On("GetLatestInfoUntilBlock", mock.Anything, mock.Anything). Return(&l1infotreesync.L1InfoTreeLeaf{L1InfoTreeIndex: 7}, nil) + testData.l1InfoTreeSync.EXPECT().GetInitL1InfoRootMap(mock.Anything).Return(nil, nil) + seq, err := newSequenceBananaValidiumForTest(testData) require.NoError(t, err) ctx := context.TODO() diff --git a/sequencesender/txbuilder/banana_zkevm_test.go b/sequencesender/txbuilder/banana_zkevm_test.go index a4ff4bd7..4570729e 100644 --- a/sequencesender/txbuilder/banana_zkevm_test.go +++ b/sequencesender/txbuilder/banana_zkevm_test.go @@ -40,6 +40,8 @@ func TestBananaZkevmBuildSequenceBatchesTxOk(t *testing.T) { Return(&types.Header{Number: big.NewInt(69)}, nil) testData.l1InfoTreeSync.On("GetLatestInfoUntilBlock", mock.Anything, mock.Anything). 
Return(&l1infotreesync.L1InfoTreeLeaf{L1InfoTreeIndex: 7}, nil) + testData.l1InfoTreeSync.EXPECT().GetInitL1InfoRootMap(mock.Anything).Return(nil, nil) + seq, err := newSequenceBananaZKEVMForTest(testData) require.NoError(t, err) @@ -61,6 +63,8 @@ func TestBananaZkevmBuildSequenceBatchesTxErr(t *testing.T) { Return(&types.Header{Number: big.NewInt(69)}, nil) testData.l1InfoTreeSync.On("GetLatestInfoUntilBlock", mock.Anything, mock.Anything). Return(&l1infotreesync.L1InfoTreeLeaf{L1InfoTreeIndex: 7}, nil) + testData.l1InfoTreeSync.EXPECT().GetInitL1InfoRootMap(mock.Anything).Return(nil, nil) + seq, err := newSequenceBananaZKEVMForTest(testData) require.NoError(t, err) diff --git a/sequencesender/txbuilder/mocks_txbuilder/l1_info_syncer.go b/sequencesender/txbuilder/mocks_txbuilder/l1_info_syncer.go index 65bf9394..12d641a8 100644 --- a/sequencesender/txbuilder/mocks_txbuilder/l1_info_syncer.go +++ b/sequencesender/txbuilder/mocks_txbuilder/l1_info_syncer.go @@ -22,6 +22,64 @@ func (_m *L1InfoSyncer) EXPECT() *L1InfoSyncer_Expecter { return &L1InfoSyncer_Expecter{mock: &_m.Mock} } +// GetInitL1InfoRootMap provides a mock function with given fields: ctx +func (_m *L1InfoSyncer) GetInitL1InfoRootMap(ctx context.Context) (*l1infotreesync.L1InfoTreeInitial, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetInitL1InfoRootMap") + } + + var r0 *l1infotreesync.L1InfoTreeInitial + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*l1infotreesync.L1InfoTreeInitial, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *l1infotreesync.L1InfoTreeInitial); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeInitial) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// L1InfoSyncer_GetInitL1InfoRootMap_Call is a *mock.Call that shadows Run/Return 
methods with type explicit version for method 'GetInitL1InfoRootMap' +type L1InfoSyncer_GetInitL1InfoRootMap_Call struct { + *mock.Call +} + +// GetInitL1InfoRootMap is a helper method to define mock.On call +// - ctx context.Context +func (_e *L1InfoSyncer_Expecter) GetInitL1InfoRootMap(ctx interface{}) *L1InfoSyncer_GetInitL1InfoRootMap_Call { + return &L1InfoSyncer_GetInitL1InfoRootMap_Call{Call: _e.mock.On("GetInitL1InfoRootMap", ctx)} +} + +func (_c *L1InfoSyncer_GetInitL1InfoRootMap_Call) Run(run func(ctx context.Context)) *L1InfoSyncer_GetInitL1InfoRootMap_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *L1InfoSyncer_GetInitL1InfoRootMap_Call) Return(_a0 *l1infotreesync.L1InfoTreeInitial, _a1 error) *L1InfoSyncer_GetInitL1InfoRootMap_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L1InfoSyncer_GetInitL1InfoRootMap_Call) RunAndReturn(run func(context.Context) (*l1infotreesync.L1InfoTreeInitial, error)) *L1InfoSyncer_GetInitL1InfoRootMap_Call { + _c.Call.Return(run) + return _c +} + // GetLatestInfoUntilBlock provides a mock function with given fields: ctx, blockNum func (_m *L1InfoSyncer) GetLatestInfoUntilBlock(ctx context.Context, blockNum uint64) (*l1infotreesync.L1InfoTreeLeaf, error) { ret := _m.Called(ctx, blockNum) diff --git a/sonar-project.properties b/sonar-project.properties index 559f7073..815d53a8 100644 --- a/sonar-project.properties +++ b/sonar-project.properties @@ -7,11 +7,11 @@ sonar.projectName=cdk sonar.organization=0xpolygon sonar.sources=. -sonar.exclusions=**/test/**,**/vendor/**,**/mocks/**,**/build/**,**/target/**,**/proto/include/**,**/*.pb.go,**/docs/**,**/*.sql +sonar.exclusions=**/test/**,**/vendor/**,**/mocks/**,**/build/**,**/target/**,**/proto/include/**,**/*.pb.go,**/docs/**,**/*.sql,**/mocks_*/*, scripts/** sonar.tests=. 
sonar.test.inclusions=**/*_test.go -sonar.test.exclusions=**/vendor/**,**/docs/**,**/mocks/**,**/*.pb.go,**/*.yml,**/*.yaml,**/*.json,**/*.xml,**/*.toml +sonar.test.exclusions=**/vendor/**,**/docs/**,**/mocks/**,**/*.pb.go,**/*.yml,**/*.yaml,**/*.json,**/*.xml,**/*.toml,**/mocks_*/* sonar.issue.enforceSemantic=true # ===================================================== diff --git a/sync/evmdownloader.go b/sync/evmdownloader.go index c9c4e661..13539f2f 100644 --- a/sync/evmdownloader.go +++ b/sync/evmdownloader.go @@ -2,6 +2,7 @@ package sync import ( "context" + "errors" "math/big" "time" @@ -24,7 +25,7 @@ type EVMDownloaderInterface interface { WaitForNewBlocks(ctx context.Context, lastBlockSeen uint64) (newLastBlock uint64) GetEventsByBlockRange(ctx context.Context, fromBlock, toBlock uint64) []EVMBlock GetLogs(ctx context.Context, fromBlock, toBlock uint64) []types.Log - GetBlockHeader(ctx context.Context, blockNum uint64) EVMBlockHeader + GetBlockHeader(ctx context.Context, blockNum uint64) (EVMBlockHeader, bool) } type LogAppenderMap map[common.Hash]func(b *EVMBlock, l types.Log) error @@ -101,8 +102,13 @@ func (d *EVMDownloader) Download(ctx context.Context, fromBlock uint64, download if len(blocks) == 0 || blocks[len(blocks)-1].Num < toBlock { // Indicate the last downloaded block if there are not events on it d.log.Debugf("sending block %d to the driver (without events)", toBlock) + header, isCanceled := d.GetBlockHeader(ctx, toBlock) + if isCanceled { + return + } + downloadedCh <- EVMBlock{ - EVMBlockHeader: d.GetBlockHeader(ctx, toBlock), + EVMBlockHeader: header, } } fromBlock = toBlock + 1 @@ -170,44 +176,53 @@ func (d *EVMDownloaderImplementation) WaitForNewBlocks( } func (d *EVMDownloaderImplementation) GetEventsByBlockRange(ctx context.Context, fromBlock, toBlock uint64) []EVMBlock { - blocks := []EVMBlock{} - logs := d.GetLogs(ctx, fromBlock, toBlock) - for _, l := range logs { - if len(blocks) == 0 || blocks[len(blocks)-1].Num < l.BlockNumber { - 
b := d.GetBlockHeader(ctx, l.BlockNumber) - if b.Hash != l.BlockHash { - d.log.Infof( - "there has been a block hash change between the event query and the block query "+ - "for block %d: %s vs %s. Retrying.", - l.BlockNumber, b.Hash, l.BlockHash, - ) - return d.GetEventsByBlockRange(ctx, fromBlock, toBlock) + select { + case <-ctx.Done(): + return nil + default: + blocks := []EVMBlock{} + logs := d.GetLogs(ctx, fromBlock, toBlock) + for _, l := range logs { + if len(blocks) == 0 || blocks[len(blocks)-1].Num < l.BlockNumber { + b, canceled := d.GetBlockHeader(ctx, l.BlockNumber) + if canceled { + return nil + } + + if b.Hash != l.BlockHash { + d.log.Infof( + "there has been a block hash change between the event query and the block query "+ + "for block %d: %s vs %s. Retrying.", + l.BlockNumber, b.Hash, l.BlockHash, + ) + return d.GetEventsByBlockRange(ctx, fromBlock, toBlock) + } + blocks = append(blocks, EVMBlock{ + EVMBlockHeader: EVMBlockHeader{ + Num: l.BlockNumber, + Hash: l.BlockHash, + Timestamp: b.Timestamp, + ParentHash: b.ParentHash, + }, + Events: []interface{}{}, + }) } - blocks = append(blocks, EVMBlock{ - EVMBlockHeader: EVMBlockHeader{ - Num: l.BlockNumber, - Hash: l.BlockHash, - Timestamp: b.Timestamp, - ParentHash: b.ParentHash, - }, - Events: []interface{}{}, - }) - } - for { - attempts := 0 - err := d.appender[l.Topics[0]](&blocks[len(blocks)-1], l) - if err != nil { - attempts++ - d.log.Error("error trying to append log: ", err) - d.rh.Handle("getLogs", attempts) - continue + for { + attempts := 0 + err := d.appender[l.Topics[0]](&blocks[len(blocks)-1], l) + if err != nil { + attempts++ + d.log.Error("error trying to append log: ", err) + d.rh.Handle("getLogs", attempts) + continue + } + break } - break } - } - return blocks + return blocks + } } func (d *EVMDownloaderImplementation) GetLogs(ctx context.Context, fromBlock, toBlock uint64) []types.Log { @@ -224,6 +239,11 @@ func (d *EVMDownloaderImplementation) GetLogs(ctx context.Context, 
fromBlock, to for { unfilteredLogs, err = d.ethClient.FilterLogs(ctx, query) if err != nil { + if errors.Is(err, context.Canceled) { + // context is canceled, we don't want to fatal on max attempts in this case + return nil + } + attempts++ d.log.Error("error calling FilterLogs to eth client: ", err) d.rh.Handle("getLogs", attempts) @@ -243,11 +263,16 @@ func (d *EVMDownloaderImplementation) GetLogs(ctx context.Context, fromBlock, to return logs } -func (d *EVMDownloaderImplementation) GetBlockHeader(ctx context.Context, blockNum uint64) EVMBlockHeader { +func (d *EVMDownloaderImplementation) GetBlockHeader(ctx context.Context, blockNum uint64) (EVMBlockHeader, bool) { attempts := 0 for { header, err := d.ethClient.HeaderByNumber(ctx, new(big.Int).SetUint64(blockNum)) if err != nil { + if errors.Is(err, context.Canceled) { + // context is canceled, we don't want to fatal on max attempts in this case + return EVMBlockHeader{}, true + } + attempts++ d.log.Errorf("error getting block header for block %d, err: %v", blockNum, err) d.rh.Handle("getBlockHeader", attempts) @@ -258,6 +283,6 @@ func (d *EVMDownloaderImplementation) GetBlockHeader(ctx context.Context, blockN Hash: header.Hash(), ParentHash: header.ParentHash, Timestamp: header.Time, - } + }, false } } diff --git a/sync/evmdownloader_test.go b/sync/evmdownloader_test.go index 59c43b8f..04c92e72 100644 --- a/sync/evmdownloader_test.go +++ b/sync/evmdownloader_test.go @@ -222,9 +222,9 @@ func TestDownload(t *testing.T) { } expectedBlocks = append(expectedBlocks, b1) d.On("GetEventsByBlockRange", mock.Anything, uint64(0), uint64(1)). - Return([]EVMBlock{}) + Return([]EVMBlock{}, false) d.On("GetBlockHeader", mock.Anything, uint64(1)). - Return(b1.EVMBlockHeader) + Return(b1.EVMBlockHeader, false) // iteration 1: wait for next block to be created d.On("WaitForNewBlocks", mock.Anything, uint64(1)). 
@@ -240,7 +240,7 @@ func TestDownload(t *testing.T) { } expectedBlocks = append(expectedBlocks, b2) d.On("GetEventsByBlockRange", mock.Anything, uint64(2), uint64(2)). - Return([]EVMBlock{b2}) + Return([]EVMBlock{b2}, false) // iteration 3: wait for next block to be created (jump to block 8) d.On("WaitForNewBlocks", mock.Anything, uint64(2)). @@ -270,9 +270,9 @@ func TestDownload(t *testing.T) { } expectedBlocks = append(expectedBlocks, b6, b7, b8) d.On("GetEventsByBlockRange", mock.Anything, uint64(3), uint64(8)). - Return([]EVMBlock{b6, b7}) + Return([]EVMBlock{b6, b7}, false) d.On("GetBlockHeader", mock.Anything, uint64(8)). - Return(b8.EVMBlockHeader) + Return(b8.EVMBlockHeader, false) // iteration 5: wait for next block to be created (jump to block 30) d.On("WaitForNewBlocks", mock.Anything, uint64(8)). @@ -288,9 +288,9 @@ func TestDownload(t *testing.T) { } expectedBlocks = append(expectedBlocks, b19) d.On("GetEventsByBlockRange", mock.Anything, uint64(9), uint64(19)). - Return([]EVMBlock{}) + Return([]EVMBlock{}, false) d.On("GetBlockHeader", mock.Anything, uint64(19)). - Return(b19.EVMBlockHeader) + Return(b19.EVMBlockHeader, false) // iteration 7: from block 20 to 30, events on last block b30 := EVMBlock{ @@ -302,7 +302,7 @@ func TestDownload(t *testing.T) { } expectedBlocks = append(expectedBlocks, b30) d.On("GetEventsByBlockRange", mock.Anything, uint64(20), uint64(30)). - Return([]EVMBlock{b30}) + Return([]EVMBlock{b30}, false) // iteration 8: wait for next block to be created (jump to block 35) d.On("WaitForNewBlocks", mock.Anything, uint64(30)). 
@@ -369,14 +369,16 @@ func TestGetBlockHeader(t *testing.T) { // at first attempt clientMock.On("HeaderByNumber", ctx, blockNumBig).Return(returnedBlock, nil).Once() - actualBlock := d.GetBlockHeader(ctx, blockNum) + actualBlock, isCanceled := d.GetBlockHeader(ctx, blockNum) assert.Equal(t, expectedBlock, actualBlock) + assert.False(t, isCanceled) // after error from client clientMock.On("HeaderByNumber", ctx, blockNumBig).Return(nil, errors.New("foo")).Once() clientMock.On("HeaderByNumber", ctx, blockNumBig).Return(returnedBlock, nil).Once() - actualBlock = d.GetBlockHeader(ctx, blockNum) + actualBlock, isCanceled = d.GetBlockHeader(ctx, blockNum) assert.Equal(t, expectedBlock, actualBlock) + assert.False(t, isCanceled) } func buildAppender() LogAppenderMap { diff --git a/sync/evmdriver.go b/sync/evmdriver.go index ae7388e0..4e195af2 100644 --- a/sync/evmdriver.go +++ b/sync/evmdriver.go @@ -71,6 +71,7 @@ reset: attempts int err error ) + for { lastProcessedBlock, err = d.processor.GetLastProcessedBlock(ctx) if err != nil { @@ -84,18 +85,23 @@ reset: cancellableCtx, cancel := context.WithCancel(ctx) defer cancel() + log.Info("Starting sync...", " lastProcessedBlock", lastProcessedBlock) // start downloading downloadCh := make(chan EVMBlock, d.downloadBufferSize) - go d.downloader.Download(cancellableCtx, lastProcessedBlock, downloadCh) + go d.downloader.Download(cancellableCtx, lastProcessedBlock+1, downloadCh) for { select { + case <-ctx.Done(): + d.log.Info("sync stopped due to context done") + cancel() + return case b := <-downloadCh: - d.log.Debug("handleNewBlock") + d.log.Debug("handleNewBlock", " blockNum: ", b.Num, " blockHash: ", b.Hash) d.handleNewBlock(ctx, b) case firstReorgedBlock := <-d.reorgSub.ReorgedBlock: - d.log.Debug("handleReorg") - d.handleReorg(ctx, cancel, downloadCh, firstReorgedBlock) + d.log.Debug("handleReorg from block: ", firstReorgedBlock) + d.handleReorg(ctx, cancel, firstReorgedBlock) goto reset } } @@ -130,15 +136,10 @@ func (d 
*EVMDriver) handleNewBlock(ctx context.Context, b EVMBlock) { } } -func (d *EVMDriver) handleReorg( - ctx context.Context, cancel context.CancelFunc, downloadCh chan EVMBlock, firstReorgedBlock uint64, -) { +func (d *EVMDriver) handleReorg(ctx context.Context, cancel context.CancelFunc, firstReorgedBlock uint64) { // stop downloader cancel() - _, ok := <-downloadCh - for ok { - _, ok = <-downloadCh - } + // handle reorg attempts := 0 for { diff --git a/sync/evmdriver_test.go b/sync/evmdriver_test.go index 907dac28..c17370e1 100644 --- a/sync/evmdriver_test.go +++ b/sync/evmdriver_test.go @@ -198,36 +198,19 @@ func TestHandleReorg(t *testing.T) { // happy path _, cancel := context.WithCancel(ctx) - downloadCh := make(chan EVMBlock) firstReorgedBlock := uint64(5) pm.On("Reorg", ctx, firstReorgedBlock).Return(nil) - go driver.handleReorg(ctx, cancel, downloadCh, firstReorgedBlock) - close(downloadCh) + go driver.handleReorg(ctx, cancel, firstReorgedBlock) done := <-reorgProcessed require.True(t, done) - // download ch sends some garbage - _, cancel = context.WithCancel(ctx) - downloadCh = make(chan EVMBlock) - firstReorgedBlock = uint64(6) - pm.On("Reorg", ctx, firstReorgedBlock).Return(nil) - go driver.handleReorg(ctx, cancel, downloadCh, firstReorgedBlock) - downloadCh <- EVMBlock{} - downloadCh <- EVMBlock{} - downloadCh <- EVMBlock{} - close(downloadCh) - done = <-reorgProcessed - require.True(t, done) - // processor fails 2 times _, cancel = context.WithCancel(ctx) - downloadCh = make(chan EVMBlock) firstReorgedBlock = uint64(7) pm.On("Reorg", ctx, firstReorgedBlock).Return(errors.New("foo")).Once() pm.On("Reorg", ctx, firstReorgedBlock).Return(errors.New("foo")).Once() pm.On("Reorg", ctx, firstReorgedBlock).Return(nil).Once() - go driver.handleReorg(ctx, cancel, downloadCh, firstReorgedBlock) - close(downloadCh) + go driver.handleReorg(ctx, cancel, firstReorgedBlock) done = <-reorgProcessed require.True(t, done) } diff --git a/sync/mock_downloader_test.go 
b/sync/mock_downloader_test.go index c965efb6..f28045b5 100644 --- a/sync/mock_downloader_test.go +++ b/sync/mock_downloader_test.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.40.1. DO NOT EDIT. +// Code generated by mockery v2.45.0. DO NOT EDIT. package sync @@ -20,7 +20,7 @@ func (_m *EVMDownloaderMock) Download(ctx context.Context, fromBlock uint64, dow } // GetBlockHeader provides a mock function with given fields: ctx, blockNum -func (_m *EVMDownloaderMock) GetBlockHeader(ctx context.Context, blockNum uint64) EVMBlockHeader { +func (_m *EVMDownloaderMock) GetBlockHeader(ctx context.Context, blockNum uint64) (EVMBlockHeader, bool) { ret := _m.Called(ctx, blockNum) if len(ret) == 0 { @@ -28,13 +28,23 @@ func (_m *EVMDownloaderMock) GetBlockHeader(ctx context.Context, blockNum uint64 } var r0 EVMBlockHeader + var r1 bool + if rf, ok := ret.Get(0).(func(context.Context, uint64) (EVMBlockHeader, bool)); ok { + return rf(ctx, blockNum) + } if rf, ok := ret.Get(0).(func(context.Context, uint64) EVMBlockHeader); ok { r0 = rf(ctx, blockNum) } else { r0 = ret.Get(0).(EVMBlockHeader) } - return r0 + if rf, ok := ret.Get(1).(func(context.Context, uint64) bool); ok { + r1 = rf(ctx, blockNum) + } else { + r1 = ret.Get(1).(bool) + } + + return r0, r1 } // GetEventsByBlockRange provides a mock function with given fields: ctx, fromBlock, toBlock diff --git a/sync/mock_l2_test.go b/sync/mock_l2_test.go index 78d75191..7a4bae36 100644 --- a/sync/mock_l2_test.go +++ b/sync/mock_l2_test.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.40.1. DO NOT EDIT. +// Code generated by mockery v2.39.0. DO NOT EDIT. package sync diff --git a/sync/mock_processor_test.go b/sync/mock_processor_test.go index 8e562e9b..afbb34cb 100644 --- a/sync/mock_processor_test.go +++ b/sync/mock_processor_test.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.40.1. DO NOT EDIT. +// Code generated by mockery v2.39.0. DO NOT EDIT. 
package sync diff --git a/sync/mock_reorgdetector_test.go b/sync/mock_reorgdetector_test.go index 52cd0cd0..9689f7e7 100644 --- a/sync/mock_reorgdetector_test.go +++ b/sync/mock_reorgdetector_test.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.40.1. DO NOT EDIT. +// Code generated by mockery v2.39.0. DO NOT EDIT. package sync diff --git a/test/Makefile b/test/Makefile index 0c50ec35..a1b51bb1 100644 --- a/test/Makefile +++ b/test/Makefile @@ -1,5 +1,7 @@ .PHONY: generate-mocks -generate-mocks: generate-mocks-bridgesync generate-mocks-reorgdetector generate-mocks-sequencesender generate-mocks-da generate-mocks-l1infotreesync generate-mocks-helpers generate-mocks-sync +generate-mocks: generate-mocks-bridgesync generate-mocks-reorgdetector generate-mocks-sequencesender \ + generate-mocks-da generate-mocks-l1infotreesync generate-mocks-helpers \ + generate-mocks-sync generate-mocks-l1infotreesync generate-mocks-aggregator .PHONY: generate-mocks-bridgesync @@ -15,21 +17,66 @@ COMMON_MOCKERY_PARAMS=--disable-version-string --with-expecter --exported generate-mocks-sequencesender: ## Generates mocks for sequencesender, using mockery tool rm -Rf ../sequencesender/txbuilder/mocks_txbuilder export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --all --case snake --dir ../sequencesender/txbuilder --output ../sequencesender/txbuilder/mocks_txbuilder --outpkg mocks_txbuilder ${COMMON_MOCKERY_PARAMS} + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthTxManager --dir=../sequencesender --output=../sequencesender/mocks --outpkg=mocks --structname=EthTxMngrMock --filename=mock_ethtxmanager.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Etherman --dir=../sequencesender --output=../sequencesender/mocks --outpkg=mocks --structname=EthermanMock --filename=mock_etherman.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=StreamClient --dir=../sequencesender 
--output=../sequencesender/mocks --outpkg=mocks --structname=StreamClientMock --filename=mock_streamclient.go .PHONY: generate-mocks-da generate-mocks-da: ## Generates mocks for dataavailability, using mockery tool rm -Rf ../dataavailability/mocks_da export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --all --case snake --dir ../dataavailability --output ../dataavailability/mocks_da --outpkg mocks_da ${COMMON_MOCKERY_PARAMS} +.PHONY: generate-mocks-rpc +generate-mocks-rpc: ## Generates mocks for rpc, using mockery tool + rm -Rf ../rpc/mocks + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --all --case snake --dir ../rpc --output ../rpc/mocks --outpkg mocks ${COMMON_MOCKERY_PARAMS} -.PHONY: test-e2e-elderberry-validium -test-e2e-elderberry-validium: stop ## Runs e2e tests checking elderberry/validium - ./run-e2e.sh cdk-validium +.PHONY: generate-mocks-l1infotreesync +generate-mocks-l1infotreesync: ## Generates mocks for l1infotreesync, using mockery tool + rm -Rf ../l1infotreesync/mocks + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --all --case snake --dir ../l1infotreesync --output ../l1infotreesync/mocks --outpkg mocks_l1infotreesync ${COMMON_MOCKERY_PARAMS} + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ReorgDetector --dir=../sync --output=../l1infotreesync --outpkg=l1infotreesync --structname=ReorgDetectorMock --filename=mock_reorgdetector_test.go + +.PHONY: generate-mocks-aggoracle +generate-mocks-helpers: ## Generates mocks for helpers , using mockery tool + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthTxManager --dir=../aggoracle/chaingersender --output=./helpers --outpkg=helpers --structname=EthTxManagerMock --filename=mock_ethtxmanager.go + +.PHONY: generate-mocks-sync +generate-mocks-sync: ## Generates mocks for sync, using mockery tool + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthClienter --dir=../sync 
--output=../sync --outpkg=sync --inpackage --structname=L2Mock --filename=mock_l2_test.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=evmDownloaderFull --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=EVMDownloaderMock --filename=mock_downloader_test.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=processorInterface --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=ProcessorMock --filename=mock_processor_test.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ReorgDetector --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=ReorgDetectorMock --filename=mock_reorgdetector_test.go + +.PHONY: generate-mocks-aggregator +generate-mocks-aggregator: ## Generates mocks for aggregator, using mockery tool + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ProverInterface --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=ProverInterfaceMock --filename=mock_prover.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Etherman --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=EthermanMock --filename=mock_etherman.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=StateInterface --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=StateInterfaceMock --filename=mock_state.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=AgglayerClientInterface --dir=../aggregator/agglayer --output=../aggregator/mocks --outpkg=mocks --structname=AgglayerClientInterfaceMock --filename=mock_agglayer_client.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Synchronizer --srcpkg=github.com/0xPolygonHermez/zkevm-synchronizer-l1/synchronizer --output=../aggregator/mocks --outpkg=mocks --structname=SynchronizerInterfaceMock 
--filename=mock_synchronizer.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=StreamClient --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=StreamClientMock --filename=mock_streamclient.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthTxManagerClient --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=EthTxManagerClientMock --filename=mock_eth_tx_manager.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Tx --srcpkg=github.com/jackc/pgx/v4 --output=../aggregator/mocks --outpkg=mocks --structname=DbTxMock --filename=mock_dbtx.go + +.PHONY: test-e2e-fork9-validium +test-e2e-fork9-validium: stop + ./run-e2e.sh fork9 cdk-validium + bats . + +.PHONY: test-e2e-fork11-rollup +test-e2e-fork11-rollup: stop + ./run-e2e.sh fork11 rollup + bats . + +.PHONY: test-e2e-fork12-validium +test-e2e-fork12-validium: stop + ./run-e2e.sh fork12 cdk-validium bats . -.PHONY: test-e2e-elderberry-rollup -test-e2e-elderberry-rollup: stop ## Runs e2e tests checking elderberry/rollup - ./run-e2e.sh rollup +.PHONY: test-e2e-fork12-rollup +test-e2e-fork12-rollup: stop + ./run-e2e.sh fork12 rollup bats . 
.PHONY: stop @@ -47,17 +94,3 @@ help: ## Prints this help @grep -h -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) \ | sort \ | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' -.PHONY: generate-mocks-l1infotreesync -generate-mocks-l1infotreesync: ## Generates mocks for l1infotreesync , using mockery tool - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ReorgDetector --dir=../sync --output=../l1infotreesync --outpkg=l1infotreesync --structname=ReorgDetectorMock --filename=mock_reorgdetector_test.go - -.PHONY: generate-mocks-aggoracle -generate-mocks-helpers: ## Generates mocks for helpers , using mockery tool - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthTxManager --dir=../aggoracle/chaingersender --output=./helpers --outpkg=helpers --structname=EthTxManagerMock --filename=mock_ethtxmanager.go - -.PHONY: generate-mocks-sync -generate-mocks-sync: ## Generates mocks for sync, using mockery tool - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthClienter --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=L2Mock --filename=mock_l2_test.go - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=evmDownloaderFull --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=EVMDownloaderMock --filename=mock_downloader_test.go - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=processorInterface --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=ProcessorMock --filename=mock_processor_test.go - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ReorgDetector --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=ReorgDetectorMock --filename=mock_reorgdetector_test.go diff --git a/test/access-list-e2e.bats b/test/access-list-e2e.bats index c47b004a..83947c03 100644 --- a/test/access-list-e2e.bats +++ b/test/access-list-e2e.bats @@ 
-3,14 +3,11 @@ setup() { load 'helpers/common' _common_setup - readonly enclave=${ENCLAVE:-cdk-v1} - readonly sequencer=${KURTOSIS_NODE:-cdk-erigon-sequencer-001} - readonly node=${KURTOSIS_NODE:-cdk-erigon-node-001} - readonly rpc_url=${RPC_URL:-$(kurtosis port print "$enclave" "$node" http-rpc)} - readonly key=${SENDER_key:-"12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625"} + readonly erigon_sequencer_node=${KURTOSIS_ERIGON_SEQUENCER:-cdk-erigon-sequencer-001} + readonly kurtosis_sequencer_wrapper=${KURTOSIS_SEQUENCER_WRAPPER:-"kurtosis service exec $enclave $erigon_sequencer_node"} + readonly key=${SENDER_PRIVATE_KEY:-"12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625"} readonly receiver=${RECEIVER:-"0x85dA99c8a7C2C95964c8EfD687E95E632Fc533D6"} readonly data_dir=${ACL_DATA_DIR:-"/home/erigon/data/dynamic-kurtosis-sequencer/txpool/acls"} - readonly kurtosis_sequencer_wrapper=${KURTOSIS_WRAPPER:-"kurtosis service exec $enclave $sequencer"} } teardown() { @@ -36,7 +33,7 @@ set_acl_mode() { @test "Test Block List - Sending regular transaction when address not in block list" { local value="10ether" run set_acl_mode "blocklist" - run sendTx $key $receiver $value + run send_tx $l2_rpc_url $key $receiver $value assert_success assert_output --regexp "Transaction successful \(transaction hash: 0x[a-fA-F0-9]{64}\)" @@ -45,7 +42,7 @@ set_acl_mode() { @test "Test Block List - Sending contracts deploy transaction when address not in block list" { local contract_artifact="./contracts/erc20mock/ERC20Mock.json" run set_acl_mode "blocklist" - run deployContract $key $contract_artifact + run deploy_contract $l2_rpc_url $key $contract_artifact assert_success @@ -59,7 +56,7 @@ set_acl_mode() { run set_acl_mode "blocklist" run add_to_access_list "blocklist" "sendTx" - run sendTx $key $receiver $value + run send_tx $l2_rpc_url $key $receiver $value assert_failure assert_output --partial "sender disallowed to send tx by ACL policy" @@ -70,7 +67,7 @@ 
set_acl_mode() { run set_acl_mode "blocklist" run add_to_access_list "blocklist" "deploy" - run deployContract $key $contract_artifact + run deploy_contract $l2_rpc_url $key $contract_artifact assert_failure assert_output --partial "sender disallowed to deploy contract by ACL policy" @@ -80,7 +77,7 @@ set_acl_mode() { local value="10ether" run set_acl_mode "allowlist" - run sendTx $key $receiver $value + run send_tx $l2_rpc_url $key $receiver $value assert_failure assert_output --partial "sender disallowed to send tx by ACL policy" @@ -90,7 +87,7 @@ set_acl_mode() { local contract_artifact="./contracts/erc20mock/ERC20Mock.json" run set_acl_mode "allowlist" - run deployContract $key $contract_artifact + run deploy_contract $l2_rpc_url $key $contract_artifact assert_failure assert_output --partial "sender disallowed to deploy contract by ACL policy" @@ -99,10 +96,10 @@ set_acl_mode() { @test "Test Allow List - Sending regular transaction when address is in allow list" { local value="10ether" - run set_acl_mode "allowlist" - run add_to_access_list "allowlist" "sendTx" - run sendTx $key $receiver $value - + run set_acl_mode "allowlist" + run add_to_access_list "allowlist" "sendTx" + run send_tx $l2_rpc_url $key $receiver $value + assert_success assert_output --regexp "Transaction successful \(transaction hash: 0x[a-fA-F0-9]{64}\)" } @@ -110,9 +107,9 @@ set_acl_mode() { @test "Test Allow List - Sending contracts deploy transaction when address is in allow list" { local contract_artifact="./contracts/erc20mock/ERC20Mock.json" - run set_acl_mode "allowlist" - run add_to_access_list "allowlist" "deploy" - run deployContract $key $contract_artifact + run set_acl_mode "allowlist" + run add_to_access_list "allowlist" "deploy" + run deploy_contract $l2_rpc_url $key $contract_artifact assert_success diff --git a/test/aggoraclehelpers/aggoracle_e2e.go b/test/aggoraclehelpers/aggoracle_e2e.go new file mode 100644 index 00000000..be362ccc --- /dev/null +++ 
b/test/aggoraclehelpers/aggoracle_e2e.go @@ -0,0 +1,199 @@ +package aggoraclehelpers + +import ( + "context" + "path" + "testing" + "time" + + "github.com/0xPolygon/cdk-contracts-tooling/contracts/elderberry-paris/polygonzkevmbridgev2" + gerContractL1 "github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/globalexitrootnopush0" + gerContractEVMChain "github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/pessimisticglobalexitrootnopush0" + "github.com/0xPolygon/cdk/aggoracle" + "github.com/0xPolygon/cdk/aggoracle/chaingersender" + "github.com/0xPolygon/cdk/etherman" + "github.com/0xPolygon/cdk/l1infotreesync" + "github.com/0xPolygon/cdk/log" + "github.com/0xPolygon/cdk/reorgdetector" + "github.com/0xPolygon/cdk/test/helpers" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient/simulated" + "github.com/stretchr/testify/require" +) + +const ( + NetworkIDL2 = uint32(1) + syncBlockChunkSize = 10 + retries = 3 + periodRetry = time.Millisecond * 100 +) + +type AggoracleWithEVMChainEnv struct { + L1Client *simulated.Backend + L2Client *simulated.Backend + L1InfoTreeSync *l1infotreesync.L1InfoTreeSync + GERL1Contract *gerContractL1.Globalexitrootnopush0 + GERL1Addr common.Address + GERL2Contract *gerContractEVMChain.Pessimisticglobalexitrootnopush0 + GERL2Addr common.Address + AuthL1 *bind.TransactOpts + AuthL2 *bind.TransactOpts + AggOracle *aggoracle.AggOracle + AggOracleSender aggoracle.ChainSender + ReorgDetector *reorgdetector.ReorgDetector + BridgeL1Contract *polygonzkevmbridgev2.Polygonzkevmbridgev2 + BridgeL1Addr common.Address + BridgeL2Contract *polygonzkevmbridgev2.Polygonzkevmbridgev2 + BridgeL2Addr common.Address + NetworkIDL2 uint32 + EthTxManMockL2 *helpers.EthTxManagerMock +} + +func SetupAggoracleWithEVMChain(t *testing.T) *AggoracleWithEVMChainEnv { + t.Helper() + + ctx := context.Background() + l1Client, syncer, 
gerL1Contract, gerL1Addr, bridgeL1Contract, bridgeL1Addr, authL1, rd := CommonSetup(t) + sender, l2Client, gerL2Contract, gerL2Addr, bridgeL2Contract, bridgeL2Addr, authL2, ethTxManMockL2 := EVMSetup(t) + oracle, err := aggoracle.New( + log.GetDefaultLogger(), sender, + l1Client.Client(), syncer, + etherman.LatestBlock, time.Millisecond*20) //nolint:mnd + require.NoError(t, err) + go oracle.Start(ctx) + + return &AggoracleWithEVMChainEnv{ + L1Client: l1Client, + L2Client: l2Client, + L1InfoTreeSync: syncer, + GERL1Contract: gerL1Contract, + GERL1Addr: gerL1Addr, + GERL2Contract: gerL2Contract, + GERL2Addr: gerL2Addr, + AuthL1: authL1, + AuthL2: authL2, + AggOracle: oracle, + AggOracleSender: sender, + ReorgDetector: rd, + BridgeL1Contract: bridgeL1Contract, + BridgeL1Addr: bridgeL1Addr, + BridgeL2Contract: bridgeL2Contract, + BridgeL2Addr: bridgeL2Addr, + NetworkIDL2: NetworkIDL2, + EthTxManMockL2: ethTxManMockL2, + } +} + +func CommonSetup(t *testing.T) ( + *simulated.Backend, + *l1infotreesync.L1InfoTreeSync, + *gerContractL1.Globalexitrootnopush0, + common.Address, + *polygonzkevmbridgev2.Polygonzkevmbridgev2, + common.Address, + *bind.TransactOpts, + *reorgdetector.ReorgDetector, +) { + t.Helper() + + // Config and spin up + ctx := context.Background() + + // Simulated L1 + l1Client, authL1, gerL1Addr, gerL1Contract, bridgeL1Addr, bridgeL1Contract := newSimulatedL1(t) + + // Reorg detector + dbPathReorgDetector := t.TempDir() + reorg, err := reorgdetector.New(l1Client.Client(), reorgdetector.Config{DBPath: dbPathReorgDetector}) + require.NoError(t, err) + + // Syncer + dbPathSyncer := path.Join(t.TempDir(), "file::memory:?cache=shared") + syncer, err := l1infotreesync.New(ctx, dbPathSyncer, + gerL1Addr, common.Address{}, + syncBlockChunkSize, etherman.LatestBlock, + reorg, l1Client.Client(), + time.Millisecond, 0, periodRetry, retries, l1infotreesync.FlagAllowWrongContractsAddrs) + require.NoError(t, err) + go syncer.Start(ctx) + + return l1Client, syncer, 
gerL1Contract, gerL1Addr, bridgeL1Contract, bridgeL1Addr, authL1, reorg +} + +func EVMSetup(t *testing.T) ( + aggoracle.ChainSender, + *simulated.Backend, + *gerContractEVMChain.Pessimisticglobalexitrootnopush0, + common.Address, + *polygonzkevmbridgev2.Polygonzkevmbridgev2, + common.Address, + *bind.TransactOpts, + *helpers.EthTxManagerMock, +) { + t.Helper() + + l2Client, authL2, gerL2Addr, gerL2Sc, bridgeL2Addr, bridgeL2Sc := newSimulatedEVMAggSovereignChain(t) + ethTxManMock := helpers.NewEthTxManMock(t, l2Client, authL2) + sender, err := chaingersender.NewEVMChainGERSender(log.GetDefaultLogger(), + gerL2Addr, authL2.From, l2Client.Client(), ethTxManMock, 0, time.Millisecond*50) //nolint:mnd + require.NoError(t, err) + + return sender, l2Client, gerL2Sc, gerL2Addr, bridgeL2Sc, bridgeL2Addr, authL2, ethTxManMock +} + +func newSimulatedL1(t *testing.T) ( + *simulated.Backend, + *bind.TransactOpts, + common.Address, + *gerContractL1.Globalexitrootnopush0, + common.Address, + *polygonzkevmbridgev2.Polygonzkevmbridgev2, +) { + t.Helper() + + client, setup := helpers.SimulatedBackend(t, nil, 0) + + precalculatedAddr := crypto.CreateAddress(setup.DeployerAuth.From, 2) //nolint:mnd + + gerAddr, _, gerContract, err := gerContractL1.DeployGlobalexitrootnopush0(setup.DeployerAuth, client.Client(), + setup.UserAuth.From, setup.EBZkevmBridgeProxyAddr) + require.NoError(t, err) + client.Commit() + + require.Equal(t, precalculatedAddr, gerAddr) + + return client, setup.UserAuth, gerAddr, gerContract, setup.EBZkevmBridgeProxyAddr, setup.EBZkevmBridgeProxyContract +} + +func newSimulatedEVMAggSovereignChain(t *testing.T) ( + *simulated.Backend, + *bind.TransactOpts, + common.Address, + *gerContractEVMChain.Pessimisticglobalexitrootnopush0, + common.Address, + *polygonzkevmbridgev2.Polygonzkevmbridgev2, +) { + t.Helper() + + client, setup := helpers.SimulatedBackend(t, nil, NetworkIDL2) + + precalculatedAddr := crypto.CreateAddress(setup.DeployerAuth.From, 2) //nolint:mnd + + 
gerAddr, _, gerContract, err := gerContractEVMChain.DeployPessimisticglobalexitrootnopush0( + setup.DeployerAuth, client.Client(), setup.UserAuth.From) + require.NoError(t, err) + client.Commit() + + globalExitRootSetterRole := common.HexToHash("0x7b95520991dfda409891be0afa2635b63540f92ee996fda0bf695a166e5c5176") + _, err = gerContract.GrantRole(setup.DeployerAuth, globalExitRootSetterRole, setup.UserAuth.From) + require.NoError(t, err) + client.Commit() + + hasRole, _ := gerContract.HasRole(&bind.CallOpts{Pending: false}, globalExitRootSetterRole, setup.UserAuth.From) + require.True(t, hasRole) + require.Equal(t, precalculatedAddr, gerAddr) + + return client, setup.UserAuth, gerAddr, gerContract, setup.EBZkevmBridgeProxyAddr, setup.EBZkevmBridgeProxyContract +} diff --git a/test/basic-e2e.bats b/test/basic-e2e.bats index cbd845f5..1024ac4a 100644 --- a/test/basic-e2e.bats +++ b/test/basic-e2e.bats @@ -3,47 +3,193 @@ setup() { load 'helpers/common' _common_setup - readonly enclave=${ENCLAVE:-cdk-v1} - readonly node=${KURTOSIS_NODE:-cdk-erigon-node-001} - readonly rpc_url=${RPC_URL:-$(kurtosis port print "$enclave" "$node" http-rpc)} - readonly private_key=${SENDER_PRIVATE_KEY:-"12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625"} + readonly sender_private_key=${SENDER_PRIVATE_KEY:-"12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625"} readonly receiver=${RECEIVER:-"0x85dA99c8a7C2C95964c8EfD687E95E632Fc533D6"} } @test "Send EOA transaction" { + local sender_addr=$(cast wallet address --private-key "$sender_private_key") + local initial_nonce=$(cast nonce "$sender_addr" --rpc-url "$l2_rpc_url") || { + echo "Failed to retrieve nonce for sender: $sender_addr using RPC URL: $l2_rpc_url" + return 1 + } local value="10ether" - run sendTx "$private_key" "$receiver" "$value" + # case 1: Transaction successful sender has sufficient balance + run send_tx "$l2_rpc_url" "$sender_private_key" "$receiver" "$value" assert_success assert_output --regexp 
"Transaction successful \(transaction hash: 0x[a-fA-F0-9]{64}\)" + + # case 2: Transaction rejected as sender attempts to transfer more than it has in its wallet. + # Transaction will fail pre-validation check on the node and will be dropped subsequently from the pool + # without recording it on the chain and hence nonce will not change + local sender_balance=$(cast balance "$sender_addr" --ether --rpc-url "$l2_rpc_url") || { + echo "Failed to retrieve balance for sender: $sender_addr using RPC URL: $l2_rpc_url" + return 1 + } + local excessive_value=$(echo "$sender_balance + 1" | bc)"ether" + run send_tx "$l2_rpc_url" "$sender_private_key" "$receiver" "$excessive_value" + assert_failure + + # Check whether the sender's nonce was updated correctly + local final_nonce=$(cast nonce "$sender_addr" --rpc-url "$l2_rpc_url") || { + echo "Failed to retrieve nonce for sender: $sender_addr using RPC URL: $l2_rpc_url" + return 1 + } + assert_equal "$final_nonce" "$(echo "$initial_nonce + 1" | bc)" } -@test "Deploy ERC20Mock contract" { +@test "Test ERC20Mock contract" { local contract_artifact="./contracts/erc20mock/ERC20Mock.json" + wallet_A_output=$(cast wallet new) + address_A=$(echo "$wallet_A_output" | grep "Address" | awk '{print $2}') + address_A_private_key=$(echo "$wallet_A_output" | grep "Private key" | awk '{print $3}') + address_B=$(cast wallet new | grep "Address" | awk '{print $2}') # Deploy ERC20Mock - run deployContract "$private_key" "$contract_artifact" + run deploy_contract "$l2_rpc_url" "$sender_private_key" "$contract_artifact" assert_success contract_addr=$(echo "$output" | tail -n 1) # Mint ERC20 tokens - local mintFnSig="function mint(address receiver, uint256 amount)" local amount="5" - run sendTx "$private_key" "$contract_addr" "$mintFnSig" "$receiver" "$amount" + run send_tx "$l2_rpc_url" "$sender_private_key" "$contract_addr" "$mint_fn_sig" "$address_A" "$amount" assert_success assert_output --regexp "Transaction successful \(transaction hash: 
0x[a-fA-F0-9]{64}\)" - # Assert that balance is correct - local balanceOfFnSig="function balanceOf(address) (uint256)" - run queryContract "$contract_addr" "$balanceOfFnSig" "$receiver" + ## Case 2: Insufficient gas scenario => Transactions fails + # nonce would not increase since transaction fails at the node's pre-validation check + # Get bytecode from the contract artifact + local bytecode=$(jq -r .bytecode "$contract_artifact") + if [[ -z "$bytecode" || "$bytecode" == "null" ]]; then + echo "Error: Failed to read bytecode from $contract_artifact" + return 1 + fi + + # Estimate gas, gas price and gas cost + local gas_units=$(cast estimate --rpc-url "$l2_rpc_url" --create "$bytecode") + gas_units=$(echo "scale=0; $gas_units / 2" | bc) + local gas_price=$(cast gas-price --rpc-url "$l2_rpc_url") + local value=$(echo "$gas_units * $gas_price" | bc) + local value_ether=$(cast to-unit "$value" ether)"ether" + + # Transfer only half amount of tokens needed for contract deployment fees + cast_output=$(cast send --rpc-url "$l2_rpc_url" --private-key "$sender_private_key" "$address_A" --value "$value_ether" --legacy 2>&1) + if [[ $? -ne 0 ]]; then + echo "Error: Failed to send transaction. Output:" + echo "$cast_output" + return 1 + fi + + # Fetch initial nonce for address_A + local address_A_initial_nonce=$(cast nonce "$address_A" --rpc-url "$l2_rpc_url") || return 1 + # Attempt to deploy contract with insufficient gas + run deploy_contract "$l2_rpc_url" "$address_A_private_key" "$contract_artifact" + assert_failure + + ## Case 3: Transaction should fail as address_A tries to transfer more tokens than it has + # nonce would not increase + # Transfer funds for gas fees to address_A + value_ether="4ether" + cast_output=$(cast send --rpc-url "$l2_rpc_url" --private-key "$sender_private_key" "$address_A" --value "$value_ether" --legacy 2>&1) + if [[ $? -ne 0 ]]; then + echo "Error: Failed to send transaction. 
Output:" + echo "$cast_output" + return 1 + fi + + # Fetch balance of address_A to simulate excessive transfer + run query_contract "$l2_rpc_url" "$contract_addr" "$balance_of_fn_sig" "$address_A" + assert_success + local address_A_Balance=$(echo "$output" | tail -n 1) + address_A_Balance=$(echo "$address_A_Balance" | xargs) + + # Set excessive amount for transfer + local excessive_amount=$(echo "$address_A_Balance + 1" | bc) + + # Attempt transfer of excessive amount from address_A to address_B + local tranferFnSig="transfer(address,uint256)" + run send_tx "$l2_rpc_url" "$address_A_private_key" "$contract_addr" "$tranferFnSig" "$address_B" "$excessive_amount" + assert_failure + + # Verify balance of address_A after failed transaction + run query_contract "$l2_rpc_url" "$contract_addr" "$balance_of_fn_sig" "$address_A" + assert_success + address_A_BalanceAfterFailedTx=$(echo "$output" | tail -n 1) + address_A_BalanceAfterFailedTx=$(echo "$address_A_BalanceAfterFailedTx" | xargs) + + # Ensure balance is unchanged + assert_equal "$address_A_BalanceAfterFailedTx" "$address_A_Balance" + + # Verify balance of address_B is still zero + run query_contract "$l2_rpc_url" "$contract_addr" "$balance_of_fn_sig" "$address_B" assert_success - receiverBalance=$(echo "$output" | tail -n 1) + local address_B_Balance=$(echo "$output" | tail -n 1) + address_B_Balance=$(echo "$address_B_Balance" | xargs) - # Convert balance and amount to a standard format for comparison (e.g., remove any leading/trailing whitespace) - receiverBalance=$(echo "$receiverBalance" | xargs) - amount=$(echo "$amount" | xargs) + assert_equal "$address_B_Balance" "0" - # Check if the balance is equal to the amount - assert_equal "$receiverBalance" "$amount" + # Nonce should not increase + local address_A_final_nonce=$(cast nonce "$address_A" --rpc-url "$l2_rpc_url") || { + echo "Failed to retrieve nonce for sender: $address_A using RPC URL: $l2_rpc_url" + return 1 + } + assert_equal "$address_A_final_nonce" 
"$address_A_initial_nonce" } + + +@test "Deploy and test UniswapV3 contract" { + # Generate new key pair + wallet_A_output=$(cast wallet new) + address_A=$(echo "$wallet_A_output" | grep "Address" | awk '{print $2}') + address_A_private_key=$(echo "$wallet_A_output" | grep "Private key" | awk '{print $3}') + + # Transfer funds for gas + local value_ether="50ether" + cast_output=$(cast send --rpc-url "$l2_rpc_url" --private-key "$sender_private_key" "$address_A" --value "$value_ether" --legacy 2>&1) + if [[ $? -ne 0 ]]; then + echo "Error: Failed to send transaction. Output:" + echo "$cast_output" + return 1 + fi + + run polycli loadtest uniswapv3 --legacy -v 600 --rpc-url $l2_rpc_url --private-key $address_A_private_key + assert_success + + # Remove ANSI escape codes from the output + output=$(echo "$output" | sed -r "s/\x1B\[[0-9;]*[mGKH]//g") + + # Check if all required Uniswap contracts were deployed + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=WETH9" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=UniswapV3Factory" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=UniswapInterfaceMulticall" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=ProxyAdmin" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=TickLens" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=NFTDescriptor" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=NonfungibleTokenPositionDescriptor" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=TransparentUpgradeableProxy" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=NonfungiblePositionManager" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=V3Migrator" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=UniswapV3Staker" + assert_output --regexp "Contract deployed 
address=0x[a-fA-F0-9]{40} name=QuoterV2" + assert_output --regexp "Contract deployed address=0x[a-fA-F0-9]{40} name=SwapRouter02" + + # Check if ERC20 tokens were minted + assert_output --regexp "Minted tokens amount=[0-9]+ recipient=0x[a-fA-F0-9]{40} token=SwapperA" + assert_output --regexp "Minted tokens amount=[0-9]+ recipient=0x[a-fA-F0-9]{40} token=SwapperB" + + # Check if liquidity pool was created and initialized + assert_output --regexp "Pool created and initialized fees=[0-9]+" + + # Check if liquidity was provided to the pool + assert_output --regexp "Liquidity provided to the pool liquidity=[0-9]+" + + # Check if transaction got executed successfully + assert_output --regexp "Starting main load test loop currentNonce=[0-9]+" + assert_output --regexp "Finished main load test loop lastNonce=[0-9]+ startNonce=[0-9]+" + assert_output --regexp "Got final block number currentNonce=[0-9]+ final block number=[0-9]+" + assert_output --regexp "Num errors numErrors=0" + assert_output --regexp "Finished" +} + diff --git a/test/bridge-e2e.bats b/test/bridge-e2e.bats index 98443b3b..f5391d1c 100644 --- a/test/bridge-e2e.bats +++ b/test/bridge-e2e.bats @@ -1,43 +1,42 @@ setup() { load 'helpers/common-setup' _common_setup + load 'helpers/common' + load 'helpers/lxly-bridge-test' - readonly data_availability_mode=${DATA_AVAILABILITY_MODE:-"cdk-validium"} - $PROJECT_ROOT/test/scripts/kurtosis_prepare_params_yml.sh ../kurtosis-cdk $data_availability_mode - [ $? -ne 0 ] && echo "Error preparing params.yml" && exit 1 + if [ -z "$BRIDGE_ADDRESS" ]; then + local combined_json_file="/opt/zkevm/combined.json" + echo "BRIDGE_ADDRESS env variable is not provided, resolving the bridge address from the Kurtosis CDK '$combined_json_file'" >&3 - # Check if the genesis file is already downloaded - if [ ! -f "./tmp/cdk/genesis/genesis.json" ]; then - mkdir -p ./tmp/cdk - kurtosis files download cdk-v1 genesis ./tmp/cdk/genesis - [ $? 
-ne 0 ] && echo "Error downloading genesis file" && exit 1 + # Fetching the combined JSON output and filtering to get polygonZkEVMBridgeAddress + combined_json_output=$($contracts_service_wrapper "cat $combined_json_file" | tail -n +2) + bridge_default_address=$(echo "$combined_json_output" | jq -r .polygonZkEVMBridgeAddress) + BRIDGE_ADDRESS=$bridge_default_address fi - # Download the genesis file - readonly bridge_default_address=$(jq -r ".genesis[] | select(.contractName == \"PolygonZkEVMBridge proxy\") | .address" ./tmp/cdk/genesis/genesis.json) - - readonly skey=${SENDER_PRIVATE_KEY:-"12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625"} - readonly destination_net=${DESTINATION_NET:-"1"} - readonly destination_addr=${DESTINATION_ADDRESS:-"0x0bb7AA0b4FdC2D2862c088424260e99ed6299148"} - readonly ether_value=${ETHER_VALUE:-"0.0200000054"} - readonly token_addr=${TOKEN_ADDRESS:-"0x0000000000000000000000000000000000000000"} + + echo "Bridge address=$BRIDGE_ADDRESS" >&3 + + readonly sender_private_key=${SENDER_PRIVATE_KEY:-"12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625"} + destination_net=${DESTINATION_NET:-"1"} + destination_addr=${DESTINATION_ADDRESS:-"0x0bb7AA0b4FdC2D2862c088424260e99ed6299148"} + ether_value=${ETHER_VALUE:-"0.0200000054"} + amount=$(cast to-wei $ether_value ether) + token_addr=${TOKEN_ADDRESS:-"0x0000000000000000000000000000000000000000"} readonly is_forced=${IS_FORCED:-"true"} - readonly bridge_addr=${BRIDGE_ADDRESS:-$bridge_default_address} + readonly bridge_addr=$BRIDGE_ADDRESS readonly meta_bytes=${META_BYTES:-"0x"} - readonly l1_rpc_url=${L1_ETH_RPC_URL:-"$(kurtosis port print cdk-v1 el-1-geth-lighthouse rpc)"} - readonly l2_rpc_url=${L2_ETH_RPC_URL:-"$(kurtosis port print cdk-v1 cdk-erigon-node-001 http-rpc)"} - readonly bridge_api_url=${BRIDGE_API_URL:-"$(kurtosis port print cdk-v1 zkevm-bridge-service-001 rpc)"} + readonly l1_rpc_url=${L1_ETH_RPC_URL:-"$(kurtosis port print $enclave el-1-geth-lighthouse 
rpc)"} + readonly bridge_api_url=${BRIDGE_API_URL:-"$(kurtosis port print $enclave zkevm-bridge-service-001 rpc)"} readonly dry_run=${DRY_RUN:-"false"} - - readonly amount=$(cast to-wei $ether_value ether) - readonly current_addr="$(cast wallet address --private-key $skey)" - readonly l1_rpc_network_id=$(cast call --rpc-url $l1_rpc_url $bridge_addr 'networkID()(uint32)') - readonly l2_rpc_network_id=$(cast call --rpc-url $l2_rpc_url $bridge_addr 'networkID()(uint32)') + readonly sender_addr="$(cast wallet address --private-key $sender_private_key)" + readonly l1_rpc_network_id=$(cast call --rpc-url $l1_rpc_url $bridge_addr 'networkID() (uint32)') + readonly l2_rpc_network_id=$(cast call --rpc-url $l2_rpc_url $bridge_addr 'networkID() (uint32)') + gas_price=$(cast gas-price --rpc-url "$l2_rpc_url") } @test "Run deposit" { - load 'helpers/lxly-bridge-test' echo "Running LxLy deposit" >&3 run deposit assert_success @@ -45,27 +44,82 @@ setup() { } @test "Run claim" { - load 'helpers/lxly-bridge-test' - echo "Running LxLy claim" + echo "Running LxLy claim" >&3 + + timeout="120" + claim_frequency="10" + run wait_for_claim "$timeout" "$claim_frequency" + assert_success +} + +@test "Custom native token transfer" { + # Use GAS_TOKEN_ADDR if provided, otherwise retrieve from file + if [[ -n "$GAS_TOKEN_ADDR" ]]; then + echo "Using provided GAS_TOKEN_ADDR: $GAS_TOKEN_ADDR" >&3 + local gas_token_addr="$GAS_TOKEN_ADDR" + else + echo "GAS_TOKEN_ADDR not provided, retrieving from rollup parameters file." 
>&3 + readonly rollup_params_file=/opt/zkevm/create_rollup_parameters.json + run bash -c "$contracts_service_wrapper 'cat $rollup_params_file' | tail -n +2 | jq -r '.gasTokenAddress'" + assert_success + assert_output --regexp "0x[a-fA-F0-9]{40}" + local gas_token_addr=$output + fi + + echo "Gas token addr $gas_token_addr, L1 RPC: $l1_rpc_url" >&3 + + # Set receiver address and query for its initial native token balance on the L2 + receiver=${RECEIVER:-"0x85dA99c8a7C2C95964c8EfD687E95E632Fc533D6"} + local initial_receiver_balance=$(cast balance --ether "$receiver" --rpc-url "$l2_rpc_url") + echo "Initial receiver balance of native token on L2 $initial_receiver_balance" >&3 - # The script timeout (in seconds). + # Query for initial sender balance + run query_contract "$l1_rpc_url" "$gas_token_addr" "$balance_of_fn_sig" "$sender_addr" + assert_success + local gas_token_init_sender_balance=$(echo "$output" | tail -n 1 | awk '{print $1}') + echo "Initial sender balance $gas_token_init_sender_balance" of gas token on L1 >&3 + + # Mint gas token on L1 + local tokens_amount="0.1ether" + local wei_amount=$(cast --to-unit $tokens_amount wei) + local minter_key=${MINTER_KEY:-"42b6e34dc21598a807dc19d7784c71b2a7a01f6480dc6f58258f78e539f1a1fa"} + run mint_erc20_tokens "$l1_rpc_url" "$gas_token_addr" "$minter_key" "$sender_addr" "$tokens_amount" + assert_success + + # Assert that balance of gas token (on the L1) is correct + run query_contract "$l1_rpc_url" "$gas_token_addr" "$balance_of_fn_sig" "$sender_addr" + assert_success + local gas_token_final_sender_balance=$(echo "$output" | + tail -n 1 | + awk '{print $1}') + local expected_balance=$(echo "$gas_token_init_sender_balance + $wei_amount" | + bc | + awk '{print $1}') + + echo "Sender balance ($sender_addr) (gas token L1): $gas_token_final_sender_balance" >&3 + assert_equal "$gas_token_final_sender_balance" "$expected_balance" + + # Send approve transaction to the gas token on L1 + deposit_ether_value="0.1ether" + run 
send_tx "$l1_rpc_url" "$sender_private_key" "$gas_token_addr" "$approve_fn_sig" "$bridge_addr" "$deposit_ether_value" + assert_success + assert_output --regexp "Transaction successful \(transaction hash: 0x[a-fA-F0-9]{64}\)" + + # Deposit + token_addr=$gas_token_addr + destination_addr=$receiver + destination_net=$l2_rpc_network_id + amount=$wei_amount + run deposit + assert_success + + # Claim deposits (settle them on the L2) timeout="120" - start_time=$(date +%s) - end_time=$((start_time + timeout)) - - while true; do - current_time=$(date +%s) - if ((current_time > end_time)); then - echo "[$(date '+%Y-%m-%d %H:%M:%S')] ❌ Exiting... Timeout reached!" - exit 1 - fi - - run claim - if [ $status -eq 0 ]; then - break - fi - sleep 10 - done - + claim_frequency="10" + run wait_for_claim "$timeout" "$claim_frequency" + assert_success + + # Validate that the native token of receiver on L2 has increased by the bridge tokens amount + run verify_native_token_balance "$l2_rpc_url" "$receiver" "$initial_receiver_balance" "$tokens_amount" assert_success } diff --git a/test/combinations/fork11-rollup.yml b/test/combinations/fork11-rollup.yml new file mode 100644 index 00000000..653adc9d --- /dev/null +++ b/test/combinations/fork11-rollup.yml @@ -0,0 +1,11 @@ +args: + zkevm_contracts_image: leovct/zkevm-contracts:v7.0.0-rc.2-fork.11 + zkevm_prover_image: hermeznetwork/zkevm-prover:v7.0.2-fork.11 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.0 + zkevm_node_image: hermeznetwork/zkevm-node:v0.7.0-fork11-RC1 + cdk_node_image: cdk + zkevm_use_gas_token_contract: true + additional_services: + - pless_zkevm_node + data_availability_mode: rollup + sequencer_type: erigon diff --git a/test/combinations/fork12-cdk-validium.yml b/test/combinations/fork12-cdk-validium.yml new file mode 100644 index 00000000..ed618754 --- /dev/null +++ b/test/combinations/fork12-cdk-validium.yml @@ -0,0 +1,8 @@ +args: + zkevm_contracts_image: leovct/zkevm-contracts:v8.0.0-rc.4-fork.12 + 
zkevm_prover_image: hermeznetwork/zkevm-prover:v8.0.0-RC12-fork.12 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.0 + cdk_node_image: cdk + zkevm_use_gas_token_contract: true + data_availability_mode: cdk-validium + sequencer_type: erigon diff --git a/test/combinations/fork12-rollup.yml b/test/combinations/fork12-rollup.yml new file mode 100644 index 00000000..c97a25cf --- /dev/null +++ b/test/combinations/fork12-rollup.yml @@ -0,0 +1,8 @@ +args: + zkevm_contracts_image: leovct/zkevm-contracts:v8.0.0-rc.4-fork.12 + zkevm_prover_image: hermeznetwork/zkevm-prover:v8.0.0-RC12-fork.12 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.0 + cdk_node_image: cdk + zkevm_use_gas_token_contract: true + data_availability_mode: rollup + sequencer_type: erigon diff --git a/test/combinations/fork9-cdk-validium.yml b/test/combinations/fork9-cdk-validium.yml new file mode 100644 index 00000000..f60fec9c --- /dev/null +++ b/test/combinations/fork9-cdk-validium.yml @@ -0,0 +1,12 @@ +args: + zkevm_contracts_image: leovct/zkevm-contracts:v6.0.0-rc.1-fork.9 + zkevm_prover_image: hermeznetwork/zkevm-prover:v6.0.4 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.0 + zkevm_node_image: hermeznetwork/zkevm-node:v0.7.3-RC1 + cdk_validium_node_image: 0xpolygon/cdk-validium-node:0.7.0-cdk + cdk_node_image: cdk + zkevm_use_gas_token_contract: true + additional_services: + - pless_zkevm_node + data_availability_mode: cdk-validium + sequencer_type: erigon diff --git a/test/config/kurtosis-cdk-node-config.toml.template b/test/config/kurtosis-cdk-node-config.toml.template new file mode 100644 index 00000000..15948b60 --- /dev/null +++ b/test/config/kurtosis-cdk-node-config.toml.template @@ -0,0 +1,174 @@ +ForkUpgradeBatchNumber = 0 +ForkUpgradeNewForkId = 0 + +[Common] +IsValidiumMode = {{.is_cdk_validium}} + +{{if eq .zkevm_rollup_fork_id "12"}} +ContractVersions = "banana" +{{else}} +ContractVersions = "elderberry" +{{end}} + +[Etherman] +URL = "{{.l1_rpc_url}}" + +[Log] 
+Environment = "development" # "production" or "development" +Level = "{{.global_log_level}}" +Outputs = ["stderr"] + +[SequenceSender] +WaitPeriodSendSequence = "15s" +LastBatchVirtualizationTimeMaxWaitPeriod = "10s" +MaxTxSizeForL1 = 131072 +L2Coinbase = "{{.zkevm_l2_sequencer_address}}" +PrivateKey = {Path = "{{or .zkevm_l2_sequencer_keystore_file "/etc/cdk/sequencer.keystore"}}", Password = "{{.zkevm_l2_keystore_password}}"} +SequencesTxFileName = "/data/sequencesender.json" +GasOffset = 80000 +WaitPeriodPurgeTxFile = "15m" +MaxPendingTx = 1 +{{if eq .zkevm_rollup_fork_id "12"}} +MaxBatchesForL1 = 300 +BlockFinality="FinalizedBlock" +RPCURL = "http://{{.l2_rpc_name}}{{.deployment_suffix}}:{{.zkevm_rpc_http_port}}" +GetBatchWaitInterval = "10s" +{{end}} + [SequenceSender.StreamClient] + Server = "{{.sequencer_name}}{{.deployment_suffix}}:{{.zkevm_data_streamer_port}}" + [SequenceSender.EthTxManager] + FrequencyToMonitorTxs = "1s" + WaitTxToBeMined = "2m" + ConsolidationL1ConfirmationBlocks = 5 + {{if eq .zkevm_rollup_fork_id "12"}} + FinalizedStatusL1NumberOfBlocks = 10 + WaitReceiptMaxTime = "250ms" + WaitReceiptCheckInterval = "8s" + {{else}} + FinalizationL1ConfirmationBlocks = 10 + WaitReceiptToBeGenerated = "8s" + {{end}} + PrivateKeys = [ + {Path = "{{or .zkevm_l2_sequencer_keystore_file "/etc/cdk/sequencer.keystore"}}", Password = "{{.zkevm_l2_keystore_password}}"}, + ] + ForcedGas = 0 + GasPriceMarginFactor = 1 + MaxGasPriceLimit = 0 + PersistenceFilename = "/data/ethtxmanager.json" + [SequenceSender.EthTxManager.Etherman] + URL = "{{.l1_rpc_url}}" + L1ChainID = {{.l1_chain_id}} + HTTPHeaders = [] + +[Aggregator] + FinalProofSanityCheckEnabled = false + Host = "0.0.0.0" + Port = "{{.zkevm_aggregator_port}}" + RetryTime = "30s" + VerifyProofInterval = "30s" + ProofStatePollingInterval = "5s" + TxProfitabilityCheckerType = "acceptall" + TxProfitabilityMinReward = "1.1" + IntervalAfterWhichBatchConsolidateAnyway = "0s" + ChainID = 
"{{.zkevm_rollup_chain_id}}" + ForkId = {{.zkevm_rollup_fork_id}} + CleanupLockedProofsInterval = "2m0s" + GeneratingProofCleanupThreshold = "10m" + GasOffset = 150000 + UpgradeEtrogBatchNumber = "{{.zkevm_rollup_manager_block_number}}" + WitnessURL = "http://{{.l2_rpc_name}}{{.deployment_suffix}}:{{.zkevm_rpc_http_port}}" + {{if .is_cdk_validium}} + SenderAddress = "{{.zkevm_l2_agglayer_address}}" + SettlementBackend = "agglayer" + AggLayerTxTimeout = "600s" + AggLayerURL = "http://agglayer:{{.agglayer_port}}" + {{else}} + SenderAddress = "{{.zkevm_l2_aggregator_address}}" + {{end}} + + {{if eq .zkevm_rollup_fork_id "12"}} + UseL1BatchData = true + UseFullWitness = false + MaxWitnessRetrievalWorkers = 2 + SyncModeOnlyEnabled = false + {{end}} + + [Aggregator.SequencerPrivateKey] + Path = "/etc/cdk/sequencer.keystore" + Password = "{{.zkevm_l2_keystore_password}}" + [Aggregator.DB] + Name = "{{.aggregator_db.name}}" + User = "{{.aggregator_db.user}}" + Password = "{{.aggregator_db.password}}" + Host = "{{.aggregator_db.hostname}}" + Port = "{{.aggregator_db.port}}" + EnableLog = false + MaxConns = 200 + [Aggregator.Log] + Environment = "development" # "production" or "development" + Level = "{{.global_log_level}}" + Outputs = ["stderr"] + [Aggregator.StreamClient] + Server = "{{.sequencer_name}}{{.deployment_suffix}}:{{.zkevm_data_streamer_port}}" + [Aggregator.EthTxManager] + FrequencyToMonitorTxs = "1s" + WaitTxToBeMined = "2m" + + {{if eq .zkevm_rollup_fork_id "12"}} + WaitReceiptMaxTime = "250ms" + WaitReceiptCheckInterval = "1s" + {{else}} + GetReceiptMaxTime = "250ms" + GetReceiptWaitInterval = "1s" + {{end}} + + PrivateKeys = [ + {Path = "{{or .zkevm_l2_aggregator_keystore_file "/etc/cdk/aggregator.keystore"}}", Password = "{{.zkevm_l2_keystore_password}}"}, + ] + ForcedGas = 0 + GasPriceMarginFactor = 1 + MaxGasPriceLimit = 0 + PersistenceFilename = "" + ReadPendingL1Txs = false + SafeStatusL1NumberOfBlocks = 0 + FinalizedStatusL1NumberOfBlocks = 0 + 
[Aggregator.EthTxManager.Etherman] + URL = "{{.l1_rpc_url}}" + L1ChainID = {{.l1_chain_id}} + HTTPHeaders = [] + [Aggregator.Synchronizer] + [Aggregator.Synchronizer.SQLDB] + DriverName = "sqlite3" + DataSource = "file:/data/aggregator_sync_db.sqlite" + [Aggregator.Synchronizer.Synchronizer] + SyncInterval = "10s" + SyncChunkSize = 1000 + GenesisBlockNumber = "{{.zkevm_rollup_manager_block_number}}" + SyncUpToBlock = "latest" + BlockFinality = "latest" + OverrideStorageCheck = false + [Aggregator.Synchronizer.Etherman] + [Aggregator.Synchronizer.Etherman.Validium] + Enabled = {{.is_cdk_validium}} + + +[L1InfoTreeSync] +DBPath = "/tmp/L1InfoTreeSync" # TODO: put a more realisitic path here +GlobalExitRootAddr = "{{.zkevm_global_exit_root_address}}" +RollupManagerAddr = "{{.zkevm_rollup_manager_address}}" +SyncBlockChunkSize = 10 +BlockFinality = "LatestBlock" +URLRPCL1 = "{{.l1_rpc_url}}" +WaitForNewBlocksPeriod = "1s" +InitialBlock = "{{.zkevm_rollup_manager_block_number}}" + +[NetworkConfig.L1] +{{if eq .zkevm_rollup_fork_id "12"}} +L1ChainID = "{{.l1_chain_id}}" +{{else}} +ChainID = "{{.l1_chain_id}}" +{{end}} +PolAddr = "{{.pol_token_address}}" +ZkEVMAddr = "{{.zkevm_rollup_address}}" +RollupManagerAddr = "{{.zkevm_rollup_manager_address}}" +GlobalExitRootManagerAddr = "{{.zkevm_global_exit_root_address}}" diff --git a/test/config/test.config.toml b/test/config/test.config.toml index 92707e39..61fd4401 100644 --- a/test/config/test.config.toml +++ b/test/config/test.config.toml @@ -13,7 +13,8 @@ SequencesTxFileName = "sequencesender.json" GasOffset = 80000 WaitPeriodPurgeTxFile = "60m" MaxPendingTx = 1 -SanityCheckRPCURL = "http://127.0.0.1:8123" +RPCURL = "http://127.0.0.1:8123" +GetBatchWaitInterval = "10s" [SequenceSender.StreamClient] Server = "127.0.0.1:6900" [SequenceSender.EthTxManager] @@ -27,7 +28,7 @@ SanityCheckRPCURL = "http://127.0.0.1:8123" ForcedGas = 0 GasPriceMarginFactor = 1 MaxGasPriceLimit = 0 - PersistenceFilename = "ethtxmanager.json" + 
StoragePath = "ethtxmanager.db" ReadPendingL1Txs = false SafeStatusL1NumberOfBlocks = 5 FinalizedStatusL1NumberOfBlocks = 10 @@ -85,7 +86,7 @@ SequencerPrivateKey = {} ForcedGas = 0 GasPriceMarginFactor = 1 MaxGasPriceLimit = 0 - PersistenceFilename = "" + StoragePath = "" ReadPendingL1Txs = false SafeStatusL1NumberOfBlocks = 0 FinalizedStatusL1NumberOfBlocks = 0 diff --git a/test/config/test.kurtosis_template.toml b/test/config/test.kurtosis_template.toml deleted file mode 100644 index 66471c6a..00000000 --- a/test/config/test.kurtosis_template.toml +++ /dev/null @@ -1,126 +0,0 @@ -ForkUpgradeBatchNumber = 0 -ForkUpgradeNewForkId = 0 - -[Common] -IsValidiumMode = ${zkevm_is_validium} -ContractVersions = "elderberry" -[Common.Translator] - FullMatchRules = [ - {Old="http://zkevm-dac-001:8484", New="http://127.0.0.1:${dac_port}"}, - ] - -[Log] -Environment = "development" # "production" or "development" -Level = "info" -Outputs = ["stderr"] - -[SequenceSender] -WaitPeriodSendSequence = "15s" -LastBatchVirtualizationTimeMaxWaitPeriod = "10s" -L1BlockTimestampMargin = "30s" -MaxTxSizeForL1 = 131072 -MaxBatchesForL1 = 2 -L2Coinbase = "${zkevm_l2_sequencer_address}" -PrivateKey = {Path = "${sequencer_keystore_file}", Password = "${zkevm_l2_keystore_password}"} - -SequencesTxFileName = "sequencesender.json" -GasOffset = 80000 -WaitPeriodPurgeTxFile = "15m" -MaxPendingTx = 1 -SanityCheckRPCURL = "http://127.0.0.1:8123" - [SequenceSender.StreamClient] - Server = "127.0.0.1:${zkevm_data_streamer_port}" - [SequenceSender.EthTxManager] - FrequencyToMonitorTxs = "1s" - WaitTxToBeMined = "2m" - GetReceiptMaxTime = "250ms" - GetReceiptWaitInterval = "1s" - PrivateKeys = [ - {Path = "${sequencer_keystore_file}", Password = "${zkevm_l2_keystore_password}"}, - ] - ForcedGas = 0 - GasPriceMarginFactor = 1 - MaxGasPriceLimit = 0 - PersistenceFilename = "ethtxmanager.json" - ReadPendingL1Txs = false - SafeStatusL1NumberOfBlocks = 0 - FinalizedStatusL1NumberOfBlocks = 0 - 
[SequenceSender.EthTxManager.Etherman] - URL = "http://127.0.0.1:${l1_rpc_port}" - MultiGasProvider = false - L1ChainID = ${l1_chain_id} -[Aggregator] -Host = "0.0.0.0" -Port = 50081 -RetryTime = "5s" -VerifyProofInterval = "10s" -TxProfitabilityCheckerType = "acceptall" -TxProfitabilityMinReward = "1.1" -ProofStatePollingInterval = "5s" -SenderAddress = "" -CleanupLockedProofsInterval = "2m" -GeneratingProofCleanupThreshold = "10m" -BatchProofSanityCheckEnabled = true -ForkId = 9 -GasOffset = 0 -WitnessURL = "localhost:8123" -UseL1BatchData = true -UseFullWitness = false -SettlementBackend = "l1" -AggLayerTxTimeout = "5m" -AggLayerURL = "" -MaxWitnessRetrievalWorkers = 2 -SyncModeOnlyEnabled = false -SequencerPrivateKey = {} - [Aggregator.DB] - Name = "aggregator_db" - User = "aggregator_user" - Password = "aggregator_password" - Host = "cdk-aggregator-db" - Port = "5432" - EnableLog = false - MaxConns = 200 - [Aggregator.Log] - Environment = "development" # "production" or "development" - Level = "info" - Outputs = ["stderr"] - [Aggregator.StreamClient] - Server = "localhost:6900" - [Aggregator.EthTxManager] - FrequencyToMonitorTxs = "1s" - WaitTxToBeMined = "2m" - GetReceiptMaxTime = "250ms" - GetReceiptWaitInterval = "1s" - PrivateKeys = [ - {Path = "/pk/aggregator.keystore", Password = "testonly"}, - ] - ForcedGas = 0 - GasPriceMarginFactor = 1 - MaxGasPriceLimit = 0 - PersistenceFilename = "" - ReadPendingL1Txs = false - SafeStatusL1NumberOfBlocks = 0 - FinalizedStatusL1NumberOfBlocks = 0 - [Aggregator.EthTxManager.Etherman] - URL = "" - L1ChainID = ${l1_chain_id} - HTTPHeaders = [] - [Aggregator.Synchronizer] - [Aggregator.Synchronizer.DB] - Name = "sync_db" - User = "sync_user" - Password = "sync_password" - Host = "cdk-l1-sync-db" - Port = "5432" - EnableLog = false - MaxConns = 10 - [Aggregator.Synchronizer.Synchronizer] - SyncInterval = "10s" - SyncChunkSize = 1000 - GenesisBlockNumber = 5511080 - SyncUpToBlock = "finalized" - BlockFinality = "finalized" 
- OverrideStorageCheck = false - [Aggregator.Synchronizer.Etherman] - [Aggregator.Synchronizer.Etherman.Validium] - Enabled = ${zkevm_is_validium} diff --git a/test/contracts/abi/claimmocktest.abi b/test/contracts/abi/claimmocktest.abi new file mode 100644 index 00000000..14e67686 --- /dev/null +++ b/test/contracts/abi/claimmocktest.abi @@ -0,0 +1 @@ +[{"inputs":[{"internalType":"contract IClaimMock","name":"_claimMock","type":"address"},{"internalType":"contract IClaimMockCaller","name":"_claimMockCaller","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"inputs":[{"internalType":"bytes","name":"claim1","type":"bytes"},{"internalType":"bytes","name":"claim2","type":"bytes"},{"internalType":"bool[2]","name":"reverted","type":"bool[2]"}],"name":"claim2TestInternal","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes","name":"claim1","type":"bytes"},{"internalType":"bytes","name":"claim2","type":"bytes"},{"internalType":"bytes","name":"claim3","type":"bytes"},{"internalType":"bool[3]","name":"reverted","type":"bool[3]"}],"name":"claim3TestInternal","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"claimMock","outputs":[{"internalType":"contract IClaimMock","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"claimMockCaller","outputs":[{"internalType":"contract IClaimMockCaller","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"bytes","name":"claim","type":"bytes"},{"internalType":"bool","name":"reverted","type":"bool"}],"name":"claimTestInternal","outputs":[],"stateMutability":"nonpayable","type":"function"}] \ No newline at end of file diff --git a/test/contracts/bin/claimmocktest.bin b/test/contracts/bin/claimmocktest.bin new file mode 100644 index 00000000..6bb6d170 --- /dev/null +++ b/test/contracts/bin/claimmocktest.bin @@ -0,0 +1 @@ 
+60c0346100a157601f61072e38819003918201601f19168301916001600160401b038311848410176100a65780849260409485528339810103126100a15780516001600160a01b039182821682036100a1576020015191821682036100a15760a05260805260405161067190816100bd82396080518181816102d5015281816103870152818161046a015261054e015260a05181818161031a01526105c20152f35b600080fd5b634e487b7160e01b600052604160045260246000fdfe6080604052600436101561001257600080fd5b6000803560e01c90816348f0c6801461006a575080636e53085414610065578063837a84701461006057806383f5b0061461005b57639bee34681461005657600080fd5b610349565b610304565b6102bf565b610217565b346100f45760c03660031901126100f45767ffffffffffffffff6004358181116100f05761009c903690600401610142565b6024358281116100ec576100b4903690600401610142565b916044359081116100ec576100cd903690600401610142565b36608312156100ec576100e9926100e3366101c6565b92610533565b80f35b8380fd5b8280fd5b80fd5b634e487b7160e01b600052604160045260246000fd5b67ffffffffffffffff811161012157604052565b6100f7565b6040810190811067ffffffffffffffff82111761012157604052565b81601f820112156101a55780359067ffffffffffffffff928383116101215760405193601f8401601f19908116603f011685019081118582101761012157604052828452602083830101116101a557816000926020809301838601378301015290565b600080fd5b6024359081151582036101a557565b359081151582036101a557565b90604051916060830183811067ffffffffffffffff821117610121576040528260c49182116101a5576064905b8282106101ff57505050565b6020809161020c846101b9565b8152019101906101f3565b346101a55760803660031901126101a55767ffffffffffffffff6004358181116101a557610249903690600401610142565b906024359081116101a557610262903690600401610142565b36606312156101a5576040519061027882610126565b819260843681116101a5576044945b81861061029c57505061029a9350610467565b005b602080916102a9886101b9565b815201950194610287565b60009103126101a557565b346101a55760003660031901126101a5576040517f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03168152602090f35b346101a55760003660031901126101a5576040517f000000000000000000000000000000000
00000000000000000000000000000006001600160a01b03168152602090f35b346101a557600060403660031901126100f45760043567ffffffffffffffff81116103f75761037c903690600401610142565b816103856101aa565b7f00000000000000000000000000000000000000000000000000000000000000006001600160a01b0316803b156100f0576103d793836040518096819582946327e3584360e01b84526004840161043b565b03925af180156103f2576103e9575080f35b6100e99061010d565b61045b565b5080fd5b919082519283825260005b848110610427575050826000602080949584010152601f8019910116010190565b602081830181015184830182015201610406565b906104536020919493946040845260408401906103fb565b931515910152565b6040513d6000823e3d90fd5b917f00000000000000000000000000000000000000000000000000000000000000006001600160a01b031691823b156101a557604051631cf865cf60e01b815260806004820152938492916104d6916104c49060848601906103fb565b848103600319016024860152906103fb565b90600090604484015b60028310610517575050509181600081819503925af180156103f2576105025750565b8061050f6105159261010d565b806102b4565b565b81511515815286945060019290920191602091820191016104df565b91926000906020810151610632575b80516001600160a01b037f000000000000000000000000000000000000000000000000000000000000000081169490911515853b156101a557600061059d91604051809381926327e3584360e01b9b8c84526004840161043b565b0381838a5af19283156103f25760409560208094610aac9460009761061f575b5001917f0000000000000000000000000000000000000000000000000000000000000000165af1500151151590803b156101a55761060e93600080946040519687958694859384526004840161043b565b03925af180156103f2576105025750565b8061050f61062c9261010d565b386105bd565b6001915061054256fea264697066735822122091357ca0b4807d5579dc633a7d2a9263efbfe31944c644c21b7ccf83594a9e2c64736f6c63430008120033 \ No newline at end of file diff --git a/test/contracts/bind.sh b/test/contracts/bind.sh index 957fd956..25ddd782 100755 --- a/test/contracts/bind.sh +++ b/test/contracts/bind.sh @@ -10,4 +10,5 @@ gen() { gen verifybatchesmock gen claimmock -gen claimmockcaller \ No newline at end of file +gen claimmockcaller +gen 
claimmocktest \ No newline at end of file diff --git a/test/contracts/claimmocktest/ClaimMockTest.sol b/test/contracts/claimmocktest/ClaimMockTest.sol new file mode 100644 index 00000000..81f748a7 --- /dev/null +++ b/test/contracts/claimmocktest/ClaimMockTest.sol @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: AGPL-3.0 + +pragma solidity 0.8.18; + +interface IClaimMock { + event ClaimEvent(uint256 globalIndex, uint32 originNetwork, address originAddress, address destinationAddress, uint256 amount); + function claimAsset(bytes32[32] calldata smtProofLocalExitRoot,bytes32[32] calldata smtProofRollupExitRoot,uint256 globalIndex,bytes32 mainnetExitRoot,bytes32 rollupExitRoot,uint32 originNetwork,address originTokenAddress,uint32 destinationNetwork,address destinationAddress,uint256 amount,bytes calldata metadata) external; + function claimMessage(bytes32[32] calldata smtProofLocalExitRoot,bytes32[32] calldata smtProofRollupExitRoot,uint256 globalIndex,bytes32 mainnetExitRoot,bytes32 rollupExitRoot,uint32 originNetwork,address originAddress,uint32 destinationNetwork,address destinationAddress,uint256 amount,bytes calldata metadata) external; +} + +interface IClaimMockCaller { + function claimAsset(bytes32[32] calldata smtProofLocalExitRoot, bytes32[32] calldata smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originTokenAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes calldata metadata, bool reverted) external; + function claimMessage(bytes32[32] calldata smtProofLocalExitRoot, bytes32[32] calldata smtProofRollupExitRoot, uint256 globalIndex, bytes32 mainnetExitRoot, bytes32 rollupExitRoot, uint32 originNetwork, address originAddress, uint32 destinationNetwork, address destinationAddress, uint256 amount, bytes calldata metadata, bool reverted) external; + function claimBytes(bytes memory claim, bool reverted) external; + function claim2Bytes(bytes memory claim1, 
bytes memory claim2, bool[2] memory reverted) external; +} + +contract ClaimMockTest { + IClaimMockCaller public immutable claimMockCaller; + IClaimMock public immutable claimMock; + + uint8 constant _DEPOSIT_CONTRACT_TREE_DEPTH = 32; + + constructor( + IClaimMock _claimMock, + IClaimMockCaller _claimMockCaller + ) { + claimMock = _claimMock; + claimMockCaller = _claimMockCaller; + } + + function claimTestInternal(bytes memory claim, bool reverted) external { + claimMockCaller.claimBytes(claim, reverted); + } + + function claim2TestInternal(bytes memory claim1, bytes memory claim2, bool[2] memory reverted) external { + claimMockCaller.claim2Bytes(claim1, claim2, reverted); + } + + function claim3TestInternal(bytes memory claim1, bytes memory claim2, bytes memory claim3, bool[3] memory reverted) external { + address addr = address(claimMock); + uint256 value1 = 0; + if(reverted[1]) { + value1 = 1; + } + claimMockCaller.claimBytes(claim1, reverted[0]); + assembly { + let success1 := call(gas(), addr, value1, add(claim2, 32), 0xaac, 0x20, 0) + } + claimMockCaller.claimBytes(claim3, reverted[2]); + } + +} \ No newline at end of file diff --git a/test/contracts/claimmocktest/claimmocktest.go b/test/contracts/claimmocktest/claimmocktest.go new file mode 100644 index 00000000..2b4494a1 --- /dev/null +++ b/test/contracts/claimmocktest/claimmocktest.go @@ -0,0 +1,328 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package claimmocktest + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +// ClaimmocktestMetaData contains all meta data concerning the Claimmocktest contract. +var ClaimmocktestMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"contractIClaimMock\",\"name\":\"_claimMock\",\"type\":\"address\"},{\"internalType\":\"contractIClaimMockCaller\",\"name\":\"_claimMockCaller\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"claim1\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"claim2\",\"type\":\"bytes\"},{\"internalType\":\"bool[2]\",\"name\":\"reverted\",\"type\":\"bool[2]\"}],\"name\":\"claim2TestInternal\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"claim1\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"claim2\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"claim3\",\"type\":\"bytes\"},{\"internalType\":\"bool[3]\",\"name\":\"reverted\",\"type\":\"bool[3]\"}],\"name\":\"claim3TestInternal\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"claimMock\",\"outputs\":[{\"internalType\":\"contractIClaimMock\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"claimMockCaller\",\"outputs\":[{\"internalType\":\"contractIClaimMockCaller\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"claim\",\"type\":\"bytes\"},{\"internalType\":\"bool\",\"name\":\"reverted\",\"type\":\"bool\"}],\"name\":\"claimTestInternal\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: 
"0x60c0346100a157601f61072e38819003918201601f19168301916001600160401b038311848410176100a65780849260409485528339810103126100a15780516001600160a01b039182821682036100a1576020015191821682036100a15760a05260805260405161067190816100bd82396080518181816102d5015281816103870152818161046a015261054e015260a05181818161031a01526105c20152f35b600080fd5b634e487b7160e01b600052604160045260246000fdfe6080604052600436101561001257600080fd5b6000803560e01c90816348f0c6801461006a575080636e53085414610065578063837a84701461006057806383f5b0061461005b57639bee34681461005657600080fd5b610349565b610304565b6102bf565b610217565b346100f45760c03660031901126100f45767ffffffffffffffff6004358181116100f05761009c903690600401610142565b6024358281116100ec576100b4903690600401610142565b916044359081116100ec576100cd903690600401610142565b36608312156100ec576100e9926100e3366101c6565b92610533565b80f35b8380fd5b8280fd5b80fd5b634e487b7160e01b600052604160045260246000fd5b67ffffffffffffffff811161012157604052565b6100f7565b6040810190811067ffffffffffffffff82111761012157604052565b81601f820112156101a55780359067ffffffffffffffff928383116101215760405193601f8401601f19908116603f011685019081118582101761012157604052828452602083830101116101a557816000926020809301838601378301015290565b600080fd5b6024359081151582036101a557565b359081151582036101a557565b90604051916060830183811067ffffffffffffffff821117610121576040528260c49182116101a5576064905b8282106101ff57505050565b6020809161020c846101b9565b8152019101906101f3565b346101a55760803660031901126101a55767ffffffffffffffff6004358181116101a557610249903690600401610142565b906024359081116101a557610262903690600401610142565b36606312156101a5576040519061027882610126565b819260843681116101a5576044945b81861061029c57505061029a9350610467565b005b602080916102a9886101b9565b815201950194610287565b60009103126101a557565b346101a55760003660031901126101a5576040517f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03168152602090f35b346101a55760003660031901126101a5576040517f0000000000000000000000000000000
0000000000000000000000000000000006001600160a01b03168152602090f35b346101a557600060403660031901126100f45760043567ffffffffffffffff81116103f75761037c903690600401610142565b816103856101aa565b7f00000000000000000000000000000000000000000000000000000000000000006001600160a01b0316803b156100f0576103d793836040518096819582946327e3584360e01b84526004840161043b565b03925af180156103f2576103e9575080f35b6100e99061010d565b61045b565b5080fd5b919082519283825260005b848110610427575050826000602080949584010152601f8019910116010190565b602081830181015184830182015201610406565b906104536020919493946040845260408401906103fb565b931515910152565b6040513d6000823e3d90fd5b917f00000000000000000000000000000000000000000000000000000000000000006001600160a01b031691823b156101a557604051631cf865cf60e01b815260806004820152938492916104d6916104c49060848601906103fb565b848103600319016024860152906103fb565b90600090604484015b60028310610517575050509181600081819503925af180156103f2576105025750565b8061050f6105159261010d565b806102b4565b565b81511515815286945060019290920191602091820191016104df565b91926000906020810151610632575b80516001600160a01b037f000000000000000000000000000000000000000000000000000000000000000081169490911515853b156101a557600061059d91604051809381926327e3584360e01b9b8c84526004840161043b565b0381838a5af19283156103f25760409560208094610aac9460009761061f575b5001917f0000000000000000000000000000000000000000000000000000000000000000165af1500151151590803b156101a55761060e93600080946040519687958694859384526004840161043b565b03925af180156103f2576105025750565b8061050f61062c9261010d565b386105bd565b6001915061054256fea264697066735822122091357ca0b4807d5579dc633a7d2a9263efbfe31944c644c21b7ccf83594a9e2c64736f6c63430008120033", +} + +// ClaimmocktestABI is the input ABI used to generate the binding from. +// Deprecated: Use ClaimmocktestMetaData.ABI instead. +var ClaimmocktestABI = ClaimmocktestMetaData.ABI + +// ClaimmocktestBin is the compiled bytecode used for deploying new contracts. 
+// Deprecated: Use ClaimmocktestMetaData.Bin instead. +var ClaimmocktestBin = ClaimmocktestMetaData.Bin + +// DeployClaimmocktest deploys a new Ethereum contract, binding an instance of Claimmocktest to it. +func DeployClaimmocktest(auth *bind.TransactOpts, backend bind.ContractBackend, _claimMock common.Address, _claimMockCaller common.Address) (common.Address, *types.Transaction, *Claimmocktest, error) { + parsed, err := ClaimmocktestMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(ClaimmocktestBin), backend, _claimMock, _claimMockCaller) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &Claimmocktest{ClaimmocktestCaller: ClaimmocktestCaller{contract: contract}, ClaimmocktestTransactor: ClaimmocktestTransactor{contract: contract}, ClaimmocktestFilterer: ClaimmocktestFilterer{contract: contract}}, nil +} + +// Claimmocktest is an auto generated Go binding around an Ethereum contract. +type Claimmocktest struct { + ClaimmocktestCaller // Read-only binding to the contract + ClaimmocktestTransactor // Write-only binding to the contract + ClaimmocktestFilterer // Log filterer for contract events +} + +// ClaimmocktestCaller is an auto generated read-only Go binding around an Ethereum contract. +type ClaimmocktestCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// ClaimmocktestTransactor is an auto generated write-only Go binding around an Ethereum contract. +type ClaimmocktestTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// ClaimmocktestFilterer is an auto generated log filtering Go binding around an Ethereum contract events. 
+type ClaimmocktestFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// ClaimmocktestSession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. +type ClaimmocktestSession struct { + Contract *Claimmocktest // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// ClaimmocktestCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type ClaimmocktestCallerSession struct { + Contract *ClaimmocktestCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// ClaimmocktestTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. +type ClaimmocktestTransactorSession struct { + Contract *ClaimmocktestTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// ClaimmocktestRaw is an auto generated low-level Go binding around an Ethereum contract. +type ClaimmocktestRaw struct { + Contract *Claimmocktest // Generic contract binding to access the raw methods on +} + +// ClaimmocktestCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. +type ClaimmocktestCallerRaw struct { + Contract *ClaimmocktestCaller // Generic read-only contract binding to access the raw methods on +} + +// ClaimmocktestTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. 
+type ClaimmocktestTransactorRaw struct { + Contract *ClaimmocktestTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewClaimmocktest creates a new instance of Claimmocktest, bound to a specific deployed contract. +func NewClaimmocktest(address common.Address, backend bind.ContractBackend) (*Claimmocktest, error) { + contract, err := bindClaimmocktest(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &Claimmocktest{ClaimmocktestCaller: ClaimmocktestCaller{contract: contract}, ClaimmocktestTransactor: ClaimmocktestTransactor{contract: contract}, ClaimmocktestFilterer: ClaimmocktestFilterer{contract: contract}}, nil +} + +// NewClaimmocktestCaller creates a new read-only instance of Claimmocktest, bound to a specific deployed contract. +func NewClaimmocktestCaller(address common.Address, caller bind.ContractCaller) (*ClaimmocktestCaller, error) { + contract, err := bindClaimmocktest(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &ClaimmocktestCaller{contract: contract}, nil +} + +// NewClaimmocktestTransactor creates a new write-only instance of Claimmocktest, bound to a specific deployed contract. +func NewClaimmocktestTransactor(address common.Address, transactor bind.ContractTransactor) (*ClaimmocktestTransactor, error) { + contract, err := bindClaimmocktest(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &ClaimmocktestTransactor{contract: contract}, nil +} + +// NewClaimmocktestFilterer creates a new log filterer instance of Claimmocktest, bound to a specific deployed contract. 
+func NewClaimmocktestFilterer(address common.Address, filterer bind.ContractFilterer) (*ClaimmocktestFilterer, error) { + contract, err := bindClaimmocktest(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &ClaimmocktestFilterer{contract: contract}, nil +} + +// bindClaimmocktest binds a generic wrapper to an already deployed contract. +func bindClaimmocktest(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := ClaimmocktestMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_Claimmocktest *ClaimmocktestRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Claimmocktest.Contract.ClaimmocktestCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_Claimmocktest *ClaimmocktestRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Claimmocktest.Contract.ClaimmocktestTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_Claimmocktest *ClaimmocktestRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Claimmocktest.Contract.ClaimmocktestTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. 
The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_Claimmocktest *ClaimmocktestCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Claimmocktest.Contract.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_Claimmocktest *ClaimmocktestTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Claimmocktest.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_Claimmocktest *ClaimmocktestTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Claimmocktest.Contract.contract.Transact(opts, method, params...) +} + +// ClaimMock is a free data retrieval call binding the contract method 0x83f5b006. +// +// Solidity: function claimMock() view returns(address) +func (_Claimmocktest *ClaimmocktestCaller) ClaimMock(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _Claimmocktest.contract.Call(opts, &out, "claimMock") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +// ClaimMock is a free data retrieval call binding the contract method 0x83f5b006. +// +// Solidity: function claimMock() view returns(address) +func (_Claimmocktest *ClaimmocktestSession) ClaimMock() (common.Address, error) { + return _Claimmocktest.Contract.ClaimMock(&_Claimmocktest.CallOpts) +} + +// ClaimMock is a free data retrieval call binding the contract method 0x83f5b006. 
+// +// Solidity: function claimMock() view returns(address) +func (_Claimmocktest *ClaimmocktestCallerSession) ClaimMock() (common.Address, error) { + return _Claimmocktest.Contract.ClaimMock(&_Claimmocktest.CallOpts) +} + +// ClaimMockCaller is a free data retrieval call binding the contract method 0x837a8470. +// +// Solidity: function claimMockCaller() view returns(address) +func (_Claimmocktest *ClaimmocktestCaller) ClaimMockCaller(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _Claimmocktest.contract.Call(opts, &out, "claimMockCaller") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +// ClaimMockCaller is a free data retrieval call binding the contract method 0x837a8470. +// +// Solidity: function claimMockCaller() view returns(address) +func (_Claimmocktest *ClaimmocktestSession) ClaimMockCaller() (common.Address, error) { + return _Claimmocktest.Contract.ClaimMockCaller(&_Claimmocktest.CallOpts) +} + +// ClaimMockCaller is a free data retrieval call binding the contract method 0x837a8470. +// +// Solidity: function claimMockCaller() view returns(address) +func (_Claimmocktest *ClaimmocktestCallerSession) ClaimMockCaller() (common.Address, error) { + return _Claimmocktest.Contract.ClaimMockCaller(&_Claimmocktest.CallOpts) +} + +// Claim2TestInternal is a paid mutator transaction binding the contract method 0x6e530854. +// +// Solidity: function claim2TestInternal(bytes claim1, bytes claim2, bool[2] reverted) returns() +func (_Claimmocktest *ClaimmocktestTransactor) Claim2TestInternal(opts *bind.TransactOpts, claim1 []byte, claim2 []byte, reverted [2]bool) (*types.Transaction, error) { + return _Claimmocktest.contract.Transact(opts, "claim2TestInternal", claim1, claim2, reverted) +} + +// Claim2TestInternal is a paid mutator transaction binding the contract method 0x6e530854. 
+// +// Solidity: function claim2TestInternal(bytes claim1, bytes claim2, bool[2] reverted) returns() +func (_Claimmocktest *ClaimmocktestSession) Claim2TestInternal(claim1 []byte, claim2 []byte, reverted [2]bool) (*types.Transaction, error) { + return _Claimmocktest.Contract.Claim2TestInternal(&_Claimmocktest.TransactOpts, claim1, claim2, reverted) +} + +// Claim2TestInternal is a paid mutator transaction binding the contract method 0x6e530854. +// +// Solidity: function claim2TestInternal(bytes claim1, bytes claim2, bool[2] reverted) returns() +func (_Claimmocktest *ClaimmocktestTransactorSession) Claim2TestInternal(claim1 []byte, claim2 []byte, reverted [2]bool) (*types.Transaction, error) { + return _Claimmocktest.Contract.Claim2TestInternal(&_Claimmocktest.TransactOpts, claim1, claim2, reverted) +} + +// Claim3TestInternal is a paid mutator transaction binding the contract method 0x48f0c680. +// +// Solidity: function claim3TestInternal(bytes claim1, bytes claim2, bytes claim3, bool[3] reverted) returns() +func (_Claimmocktest *ClaimmocktestTransactor) Claim3TestInternal(opts *bind.TransactOpts, claim1 []byte, claim2 []byte, claim3 []byte, reverted [3]bool) (*types.Transaction, error) { + return _Claimmocktest.contract.Transact(opts, "claim3TestInternal", claim1, claim2, claim3, reverted) +} + +// Claim3TestInternal is a paid mutator transaction binding the contract method 0x48f0c680. +// +// Solidity: function claim3TestInternal(bytes claim1, bytes claim2, bytes claim3, bool[3] reverted) returns() +func (_Claimmocktest *ClaimmocktestSession) Claim3TestInternal(claim1 []byte, claim2 []byte, claim3 []byte, reverted [3]bool) (*types.Transaction, error) { + return _Claimmocktest.Contract.Claim3TestInternal(&_Claimmocktest.TransactOpts, claim1, claim2, claim3, reverted) +} + +// Claim3TestInternal is a paid mutator transaction binding the contract method 0x48f0c680. 
+// +// Solidity: function claim3TestInternal(bytes claim1, bytes claim2, bytes claim3, bool[3] reverted) returns() +func (_Claimmocktest *ClaimmocktestTransactorSession) Claim3TestInternal(claim1 []byte, claim2 []byte, claim3 []byte, reverted [3]bool) (*types.Transaction, error) { + return _Claimmocktest.Contract.Claim3TestInternal(&_Claimmocktest.TransactOpts, claim1, claim2, claim3, reverted) +} + +// ClaimTestInternal is a paid mutator transaction binding the contract method 0x9bee3468. +// +// Solidity: function claimTestInternal(bytes claim, bool reverted) returns() +func (_Claimmocktest *ClaimmocktestTransactor) ClaimTestInternal(opts *bind.TransactOpts, claim []byte, reverted bool) (*types.Transaction, error) { + return _Claimmocktest.contract.Transact(opts, "claimTestInternal", claim, reverted) +} + +// ClaimTestInternal is a paid mutator transaction binding the contract method 0x9bee3468. +// +// Solidity: function claimTestInternal(bytes claim, bool reverted) returns() +func (_Claimmocktest *ClaimmocktestSession) ClaimTestInternal(claim []byte, reverted bool) (*types.Transaction, error) { + return _Claimmocktest.Contract.ClaimTestInternal(&_Claimmocktest.TransactOpts, claim, reverted) +} + +// ClaimTestInternal is a paid mutator transaction binding the contract method 0x9bee3468. 
+// +// Solidity: function claimTestInternal(bytes claim, bool reverted) returns() +func (_Claimmocktest *ClaimmocktestTransactorSession) ClaimTestInternal(claim []byte, reverted bool) (*types.Transaction, error) { + return _Claimmocktest.Contract.ClaimTestInternal(&_Claimmocktest.TransactOpts, claim, reverted) +} diff --git a/test/contracts/compile.sh b/test/contracts/compile.sh index faeba125..7dd357a9 100755 --- a/test/contracts/compile.sh +++ b/test/contracts/compile.sh @@ -13,5 +13,12 @@ mv -f ClaimMock.bin bin/claimmock.bin docker run --rm -v $(pwd):/contracts ethereum/solc:0.8.18-alpine - /contracts/claimmockcaller/ClaimMockCaller.sol -o /contracts --abi --bin --overwrite --optimize --via-ir mv -f ClaimMockCaller.abi abi/claimmockcaller.abi mv -f ClaimMockCaller.bin bin/claimmockcaller.bin + +docker run --rm -v $(pwd):/contracts ethereum/solc:0.8.18-alpine - /contracts/claimmocktest/ClaimMockTest.sol -o /contracts --abi --bin --overwrite --optimize --via-ir +mv -f ClaimMockTest.abi abi/claimmocktest.abi +mv -f ClaimMockTest.bin bin/claimmocktest.bin + rm -f IClaimMock.abi -rm -f IClaimMock.bin \ No newline at end of file +rm -f IClaimMock.bin +rm -f IClaimMockCaller.abi +rm -f IClaimMockCaller.bin \ No newline at end of file diff --git a/test/helpers/aggoracle_e2e.go b/test/helpers/aggoracle_e2e.go deleted file mode 100644 index 8b5073fb..00000000 --- a/test/helpers/aggoracle_e2e.go +++ /dev/null @@ -1,390 +0,0 @@ -package helpers - -import ( - "context" - "errors" - "fmt" - "math/big" - "path" - "testing" - "time" - - "github.com/0xPolygon/cdk-contracts-tooling/contracts/elderberry-paris/polygonzkevmbridgev2" - gerContractL1 "github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/globalexitrootnopush0" - gerContractEVMChain "github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/pessimisticglobalexitrootnopush0" - "github.com/0xPolygon/cdk/aggoracle" - "github.com/0xPolygon/cdk/aggoracle/chaingersender" - "github.com/0xPolygon/cdk/etherman" - 
"github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/reorgdetector" - "github.com/0xPolygon/cdk/test/contracts/transparentupgradableproxy" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethclient/simulated" - "github.com/stretchr/testify/require" -) - -const ( - NetworkIDL2 = uint32(1) - chainID = 1337 - initialBalance = "10000000000000000000000000" - blockGasLimit = uint64(999999999999999999) - syncBlockChunkSize = 10 - retries = 3 - periodRetry = time.Millisecond * 100 -) - -type AggoracleWithEVMChainEnv struct { - L1Client *simulated.Backend - L2Client *simulated.Backend - L1InfoTreeSync *l1infotreesync.L1InfoTreeSync - GERL1Contract *gerContractL1.Globalexitrootnopush0 - GERL1Addr common.Address - GERL2Contract *gerContractEVMChain.Pessimisticglobalexitrootnopush0 - GERL2Addr common.Address - AuthL1 *bind.TransactOpts - AuthL2 *bind.TransactOpts - AggOracle *aggoracle.AggOracle - AggOracleSender aggoracle.ChainSender - ReorgDetector *reorgdetector.ReorgDetector - BridgeL1Contract *polygonzkevmbridgev2.Polygonzkevmbridgev2 - BridgeL1Addr common.Address - BridgeL2Contract *polygonzkevmbridgev2.Polygonzkevmbridgev2 - BridgeL2Addr common.Address - NetworkIDL2 uint32 - EthTxManMockL2 *EthTxManagerMock -} - -func SetupAggoracleWithEVMChain(t *testing.T) *AggoracleWithEVMChainEnv { - t.Helper() - - ctx := context.Background() - l1Client, syncer, gerL1Contract, gerL1Addr, bridgeL1Contract, bridgeL1Addr, authL1, rd := CommonSetup(t) - sender, l2Client, gerL2Contract, gerL2Addr, bridgeL2Contract, bridgeL2Addr, authL2, ethTxManMockL2 := EVMSetup(t) - oracle, err := aggoracle.New(log.GetDefaultLogger(), sender, l1Client.Client(), syncer, etherman.LatestBlock, time.Millisecond*20) //nolint:mnd - require.NoError(t, err) - go oracle.Start(ctx) - - return 
&AggoracleWithEVMChainEnv{ - L1Client: l1Client, - L2Client: l2Client, - L1InfoTreeSync: syncer, - GERL1Contract: gerL1Contract, - GERL1Addr: gerL1Addr, - GERL2Contract: gerL2Contract, - GERL2Addr: gerL2Addr, - AuthL1: authL1, - AuthL2: authL2, - AggOracle: oracle, - AggOracleSender: sender, - ReorgDetector: rd, - BridgeL1Contract: bridgeL1Contract, - BridgeL1Addr: bridgeL1Addr, - BridgeL2Contract: bridgeL2Contract, - BridgeL2Addr: bridgeL2Addr, - NetworkIDL2: NetworkIDL2, - EthTxManMockL2: ethTxManMockL2, - } -} - -func CommonSetup(t *testing.T) ( - *simulated.Backend, - *l1infotreesync.L1InfoTreeSync, - *gerContractL1.Globalexitrootnopush0, - common.Address, - *polygonzkevmbridgev2.Polygonzkevmbridgev2, - common.Address, - *bind.TransactOpts, - *reorgdetector.ReorgDetector, -) { - t.Helper() - - // Config and spin up - ctx := context.Background() - // Simulated L1 - privateKeyL1, err := crypto.GenerateKey() - require.NoError(t, err) - authL1, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(chainID)) - require.NoError(t, err) - l1Client, gerL1Addr, gerL1Contract, bridgeL1Addr, bridgeL1Contract, err := newSimulatedL1(authL1) - require.NoError(t, err) - // Reorg detector - dbPathReorgDetector := t.TempDir() - reorg, err := reorgdetector.New(l1Client.Client(), reorgdetector.Config{DBPath: dbPathReorgDetector}) - require.NoError(t, err) - // Syncer - dbPathSyncer := path.Join(t.TempDir(), "file::memory:?cache=shared") - syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerL1Addr, common.Address{}, syncBlockChunkSize, etherman.LatestBlock, reorg, l1Client.Client(), time.Millisecond, 0, periodRetry, retries) - require.NoError(t, err) - go syncer.Start(ctx) - - return l1Client, syncer, gerL1Contract, gerL1Addr, bridgeL1Contract, bridgeL1Addr, authL1, reorg -} - -func EVMSetup(t *testing.T) ( - aggoracle.ChainSender, - *simulated.Backend, - *gerContractEVMChain.Pessimisticglobalexitrootnopush0, - common.Address, - 
*polygonzkevmbridgev2.Polygonzkevmbridgev2, - common.Address, - *bind.TransactOpts, - *EthTxManagerMock, -) { - t.Helper() - - privateKeyL2, err := crypto.GenerateKey() - require.NoError(t, err) - authL2, err := bind.NewKeyedTransactorWithChainID(privateKeyL2, big.NewInt(chainID)) - require.NoError(t, err) - l2Client, gerL2Addr, gerL2Sc, bridgeL2Addr, bridgeL2Sc, err := newSimulatedEVMAggSovereignChain(authL2) - require.NoError(t, err) - ethTxManMock := NewEthTxManMock(t, l2Client, authL2) - sender, err := chaingersender.NewEVMChainGERSender(log.GetDefaultLogger(), - gerL2Addr, authL2.From, l2Client.Client(), ethTxManMock, 0, time.Millisecond*50) //nolint:mnd - require.NoError(t, err) - - return sender, l2Client, gerL2Sc, gerL2Addr, bridgeL2Sc, bridgeL2Addr, authL2, ethTxManMock -} - -func newSimulatedL1(auth *bind.TransactOpts) ( - client *simulated.Backend, - gerAddr common.Address, - gerContract *gerContractL1.Globalexitrootnopush0, - bridgeAddr common.Address, - bridgeContract *polygonzkevmbridgev2.Polygonzkevmbridgev2, - err error, -) { - ctx := context.Background() - - privateKeyL1, err := crypto.GenerateKey() - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to generate private key: %w", err) - } - - authDeployer, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(chainID)) - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to create transactor: %w", err) - } - - balance, _ := new(big.Int).SetString(initialBalance, 10) //nolint:mnd - address := auth.From - genesisAlloc := map[common.Address]types.Account{ - address: { - Balance: balance, - }, - authDeployer.From: { - Balance: balance, - }, - } - - client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit)) - - bridgeImplementationAddr, _, _, err := polygonzkevmbridgev2.DeployPolygonzkevmbridgev2(authDeployer, client.Client()) - if err != nil { - return nil, 
common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to deploy bridge implementation: %w", err) - } - client.Commit() - - nonce, err := client.Client().PendingNonceAt(ctx, authDeployer.From) - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get pending nonce: %w", err) - } - precalculatedAddr := crypto.CreateAddress(authDeployer.From, nonce+1) - bridgeABI, err := polygonzkevmbridgev2.Polygonzkevmbridgev2MetaData.GetAbi() - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get bridge ABI: %w", err) - } - if bridgeABI == nil { - err = errors.New("GetABI returned nil") - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get bridge ABI: %w", err) - } - - dataCallProxy, err := bridgeABI.Pack("initialize", - uint32(0), // networkIDMainnet - common.Address{}, // gasTokenAddressMainnet" - uint32(0), // gasTokenNetworkMainnet - precalculatedAddr, - common.Address{}, - []byte{}, // gasTokenMetadata - ) - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to pack data for proxy initialization: %w", err) - } - - bridgeAddr, _, _, err = transparentupgradableproxy.DeployTransparentupgradableproxy( - authDeployer, - client.Client(), - bridgeImplementationAddr, - authDeployer.From, - dataCallProxy, - ) - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to deploy transparent upgradable proxy: %w", err) - } - client.Commit() - - bridgeContract, err = polygonzkevmbridgev2.NewPolygonzkevmbridgev2(bridgeAddr, client.Client()) - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to create bridge contract instance: %w", err) - } - - checkGERAddr, err := bridgeContract.GlobalExitRootManager(&bind.CallOpts{Pending: false}) - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to 
get Global Exit Root Manager: %w", err) - } - if precalculatedAddr != checkGERAddr { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf( - "error deploying bridge, unexpected GER addr. Expected %s. Actual %s", - precalculatedAddr.Hex(), checkGERAddr.Hex(), - ) - } - - gerAddr, _, gerContract, err = gerContractL1.DeployGlobalexitrootnopush0(authDeployer, client.Client(), auth.From, bridgeAddr) - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to deploy GER contract: %w", err) - } - client.Commit() - - if precalculatedAddr != gerAddr { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf( - "error calculating GER address. Expected %s. Actual %s", - precalculatedAddr.Hex(), gerAddr.Hex(), - ) - } - - return client, gerAddr, gerContract, bridgeAddr, bridgeContract, nil -} - -func newSimulatedEVMAggSovereignChain(auth *bind.TransactOpts) ( - client *simulated.Backend, - gerAddr common.Address, - gerContract *gerContractEVMChain.Pessimisticglobalexitrootnopush0, - bridgeAddr common.Address, - bridgeContract *polygonzkevmbridgev2.Polygonzkevmbridgev2, - err error, -) { - ctx := context.Background() - - privateKeyL1, err := crypto.GenerateKey() - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to generate private key: %w", err) - } - - authDeployer, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(chainID)) - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to create transactor: %w", err) - } - - balance, _ := new(big.Int).SetString(initialBalance, 10) //nolint:mnd - address := auth.From - precalculatedBridgeAddr := crypto.CreateAddress(authDeployer.From, 1) - - genesisAlloc := map[common.Address]types.Account{ - address: { - Balance: balance, - }, - authDeployer.From: { - Balance: balance, - }, - precalculatedBridgeAddr: { - Balance: balance, - }, - } - - const blockGasLimit = 
uint64(999999999999999999) - client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit)) - - bridgeImplementationAddr, _, _, err := polygonzkevmbridgev2.DeployPolygonzkevmbridgev2(authDeployer, client.Client()) - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to deploy bridge implementation: %w", err) - } - client.Commit() - - nonce, err := client.Client().PendingNonceAt(ctx, authDeployer.From) - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get pending nonce: %w", err) - } - precalculatedAddr := crypto.CreateAddress(authDeployer.From, nonce+1) - - bridgeABI, err := polygonzkevmbridgev2.Polygonzkevmbridgev2MetaData.GetAbi() - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get bridge ABI: %w", err) - } - if bridgeABI == nil { - err = errors.New("GetABI returned nil") - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get bridge ABI: %w", err) - } - - dataCallProxy, err := bridgeABI.Pack("initialize", - NetworkIDL2, - common.Address{}, // gasTokenAddressMainnet" - uint32(0), // gasTokenNetworkMainnet - precalculatedAddr, - common.Address{}, - []byte{}, // gasTokenMetadata - ) - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to pack data for proxy initialization: %w", err) - } - - bridgeAddr, _, _, err = transparentupgradableproxy.DeployTransparentupgradableproxy( - authDeployer, - client.Client(), - bridgeImplementationAddr, - authDeployer.From, - dataCallProxy, - ) - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to deploy transparent upgradable proxy: %w", err) - } - if bridgeAddr != precalculatedBridgeAddr { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf( - "error calculating bridge addr. Expected: %s. 
Actual: %s", - precalculatedBridgeAddr, bridgeAddr, - ) - } - client.Commit() - - bridgeContract, err = polygonzkevmbridgev2.NewPolygonzkevmbridgev2(bridgeAddr, client.Client()) - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to create bridge contract instance: %w", err) - } - - checkGERAddr, err := bridgeContract.GlobalExitRootManager(&bind.CallOpts{}) - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get Global Exit Root Manager: %w", err) - } - if precalculatedAddr != checkGERAddr { - return nil, common.Address{}, nil, common.Address{}, nil, errors.New( - "error deploying bridge, unexpected GER Manager address", - ) - } - - gerAddr, _, gerContract, err = gerContractEVMChain.DeployPessimisticglobalexitrootnopush0(authDeployer, client.Client(), auth.From) - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to deploy GER contract: %w", err) - } - client.Commit() - - globalExitRootSetterRole := common.HexToHash("0x7b95520991dfda409891be0afa2635b63540f92ee996fda0bf695a166e5c5176") - _, err = gerContract.GrantRole(authDeployer, globalExitRootSetterRole, auth.From) - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to grant role to GER contract: %w", err) - } - client.Commit() - - hasRole, _ := gerContract.HasRole(&bind.CallOpts{Pending: false}, globalExitRootSetterRole, auth.From) - if !hasRole { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to set role for GER contract") - } - - if precalculatedAddr != gerAddr { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("error calculating GER address") - } - - return client, gerAddr, gerContract, bridgeAddr, bridgeContract, nil -} diff --git a/test/helpers/common-setup.bash b/test/helpers/common-setup.bash index b7691366..dac81beb 100644 --- a/test/helpers/common-setup.bash +++ 
b/test/helpers/common-setup.bash @@ -3,11 +3,24 @@ _common_setup() { bats_load_library 'bats-support' bats_load_library 'bats-assert' - + # get the containing directory of this file # use $BATS_TEST_FILENAME instead of ${BASH_SOURCE[0]} or $0, # as those will point to the bats executable's location or the preprocessed file respectively - PROJECT_ROOT="$( cd "$( dirname "$BATS_TEST_FILENAME" )/.." >/dev/null 2>&1 && pwd )" + PROJECT_ROOT="$(cd "$(dirname "$BATS_TEST_FILENAME")/.." >/dev/null 2>&1 && pwd)" # make executables in src/ visible to PATH PATH="$PROJECT_ROOT/src:$PATH" + + # ERC20 contracts function signatures + readonly mint_fn_sig="function mint(address,uint256)" + readonly balance_of_fn_sig="function balanceOf(address) (uint256)" + readonly approve_fn_sig="function approve(address,uint256)" + + + # Kurtosis enclave and service identifiers + readonly enclave=${KURTOSIS_ENCLAVE:-cdk} + readonly contracts_container=${KURTOSIS_CONTRACTS:-contracts-001} + readonly contracts_service_wrapper=${KURTOSIS_CONTRACTS_WRAPPER:-"kurtosis service exec $enclave $contracts_container"} + readonly erigon_rpc_node=${KURTOSIS_ERIGON_RPC:-cdk-erigon-node-001} + readonly l2_rpc_url=${L2_ETH_RPC_URL:-"$(kurtosis port print $enclave $erigon_rpc_node rpc)"} } diff --git a/test/helpers/common.bash b/test/helpers/common.bash index 15057d17..a5ed751c 100644 --- a/test/helpers/common.bash +++ b/test/helpers/common.bash @@ -1,12 +1,13 @@ #!/usr/bin/env bash -function deployContract() { - local private_key="$1" - local contract_artifact="$2" +function deploy_contract() { + local rpc_url="$1" + local private_key="$2" + local contract_artifact="$3" # Check if rpc_url is available if [[ -z "$rpc_url" ]]; then - echo "Error: rpc_url environment variable is not set." + echo "Error: rpc_url parameter is not set." 
return 1 fi @@ -16,13 +17,13 @@ function deployContract() { fi # Get the sender address - local senderAddr=$(cast wallet address "$private_key") + local sender=$(cast wallet address "$private_key") if [[ $? -ne 0 ]]; then echo "Error: Failed to retrieve sender address." return 1 fi - echo "Attempting to deploy contract artifact '$contract_artifact' to $rpc_url (sender: $senderAddr)" >&3 + echo "Attempting to deploy contract artifact '$contract_artifact' to $rpc_url (sender: $sender)" >&3 # Get bytecode from the contract artifact local bytecode=$(jq -r .bytecode "$contract_artifact") @@ -32,8 +33,15 @@ function deployContract() { fi # Send the transaction and capture the output + gas_price=$(cast gas-price --rpc-url "$rpc_url") + local comp_gas_price=$(bc -l <<< "$gas_price * 1.5" | sed 's/\..*//') + if [[ $? -ne 0 ]]; then + echo "Failed to calculate gas price" >&3 + exit 1 + fi local cast_output=$(cast send --rpc-url "$rpc_url" \ --private-key "$private_key" \ + --gas-price $comp_gas_price \ --legacy \ --create "$bytecode" \ 2>&1) @@ -69,98 +77,141 @@ function deployContract() { return 0 } -function sendTx() { - # Check if at least 3 arguments are provided - if [[ $# -lt 3 ]]; then - echo "Usage: sendTx [ ...]" +function send_tx() { + # Check if at least 4 arguments are provided + if [[ $# -lt 4 ]]; then + echo "Usage: send_tx [ ...]" return 1 fi - local private_key="$1" # Sender private key - local account_addr="$2" # Receiver address - local value_or_function_sig="$3" # Value or function signature + local rpc_url="$1" # RPC URL + local private_key="$2" # Sender private key + local receiver_addr="$3" # Receiver address + local value_or_function_sig="$4" # Value or function signature # Error handling: Ensure the receiver is a valid Ethereum address - if [[ ! "$account_addr" =~ ^0x[a-fA-F0-9]{40}$ ]]; then - echo "Error: Invalid receiver address '$account_addr'." + if [[ ! 
"$receiver_addr" =~ ^0x[a-fA-F0-9]{40}$ ]]; then + echo "Error: Invalid receiver address '$receiver_addr'." return 1 fi - shift 3 # Shift the first 3 arguments (private_key, account_addr, value_or_function_sig) + shift 4 # Shift the first 4 arguments (rpc_url, private_key, receiver_addr, value_or_function_sig) + local params=("$@") # Collect all remaining arguments as function parameters + + # Get sender address from private key + local sender + sender=$(cast wallet address "$private_key") || { + echo "Error: Failed to extract the sender address." + return 1 + } + + # Check if the value_or_function_sig is a numeric value (Ether to be transferred) + if [[ "$value_or_function_sig" =~ ^[0-9]+(\.[0-9]+)?(ether)?$ ]]; then + # Case: Ether transfer (EOA transaction) + # Get initial ether balances of sender and receiver + local sender_addr=$(cast wallet address --private-key "$private_key") + local sender_initial_balance receiver_initial_balance + sender_initial_balance=$(cast balance "$sender_addr" --ether --rpc-url "$rpc_url") || return 1 + receiver_initial_balance=$(cast balance "$receiver_addr" --ether --rpc-url "$rpc_url") || return 1 + + send_eoa_transaction "$private_key" "$receiver_addr" "$value_or_function_sig" "$sender_addr" "$sender_initial_balance" "$receiver_initial_balance" + else + # Case: Smart contract interaction (contract interaction with function signature and parameters) + send_smart_contract_transaction "$private_key" "$receiver_addr" "$value_or_function_sig" "${params[@]}" + fi +} - local senderAddr - senderAddr=$(cast wallet address "$private_key") +function send_eoa_transaction() { + local private_key="$1" + local receiver_addr="$2" + local value="$3" + local sender="$4" + local sender_initial_balance="$5" + local receiver_initial_balance="$6" + + echo "Sending EOA transaction (from: $sender, rpc url: $rpc_url) to: $receiver_addr with value: $value" >&3 + + # Send transaction via cast + local cast_output tx_hash + gas_price=$(cast gas-price 
--rpc-url "$rpc_url") + local comp_gas_price=$(bc -l <<< "$gas_price * 1.5" | sed 's/\..*//') + if [[ $? -ne 0 ]]; then + echo "Failed to calculate gas price" >&3 + exit 1 + fi + echo "cast send --gas-price $comp_gas_price --rpc-url $rpc_url --private-key $private_key $receiver_addr --value $value --legacy" >&3 + cast_output=$(cast send --gas-price $comp_gas_price --rpc-url "$rpc_url" --private-key "$private_key" "$receiver_addr" --value "$value" --legacy 2>&1) if [[ $? -ne 0 ]]; then - echo "Error: Failed to extract the sender address for $private_key" + echo "Error: Failed to send transaction. Output:" + echo "$cast_output" return 1 fi - # Check if the first remaining argument is a numeric value (Ether to be transferred) - if [[ "$value_or_function_sig" =~ ^[0-9]+(ether)?$ ]]; then - # Case: EOA transaction (Ether transfer) - echo "Sending EOA transaction (RPC URL: $rpc_url, sender: $senderAddr) to: $account_addr " \ - "with value: $value_or_function_sig" >&3 + tx_hash=$(extract_tx_hash "$cast_output") + [[ -z "$tx_hash" ]] && { + echo "Error: Failed to extract transaction hash." + return 1 + } - cast_output=$(cast send --rpc-url "$rpc_url" \ - --private-key "$private_key" \ - "$account_addr" --value "$value_or_function_sig" \ - --legacy \ - 2>&1) - else - # Case: Smart contract transaction (contract interaction with function signature and parameters) - local params=("$@") # Collect all remaining arguments as function parameters + check_balances "$sender" "$receiver_addr" "$value" "$tx_hash" "$sender_initial_balance" "$receiver_initial_balance" + if [[ $? -ne 0 ]]; then + echo "Error: Balance not updated correctly." + return 1 + fi - echo "Function signature: '$value_or_function_sig'" >&3 + echo "Transaction successful (transaction hash: $tx_hash)" +} - # Verify if the function signature starts with "function" - if [[ ! "$value_or_function_sig" =~ ^function\ .+\(.+\)$ ]]; then - echo "Error: Invalid function signature format '$value_or_function_sig'." 
- return 1 - fi +function send_smart_contract_transaction() { + local private_key="$1" + local receiver_addr="$2" + local function_sig="$3" + shift 3 + local params=("$@") - echo "Sending smart contract transaction (RPC URL: $rpc_url, sender: $senderAddr) to $account_addr" \ - "with function signature: '$value_or_function_sig' and params: ${params[*]}" >&3 + echo "Sending smart contract transaction to $receiver_addr with function signature: '$function_sig' and params: ${params[*]}" >&3 - # Send the smart contract interaction using cast - cast_output=$(cast send --rpc-url "$rpc_url" \ - --private-key "$private_key" \ - "$account_addr" "$value_or_function_sig" "${params[@]}" \ - --legacy \ - 2>&1) + # Send the smart contract interaction using cast + local cast_output tx_hash + gas_price=$(cast gas-price --rpc-url "$rpc_url") + local comp_gas_price=$(bc -l <<< "$gas_price * 1.5" | sed 's/\..*//') + if [[ $? -ne 0 ]]; then + echo "Failed to calculate gas price" >&3 + exit 1 fi - - # Check if the transaction was successful + cast_output=$(cast send "$receiver_addr" --rpc-url "$rpc_url" --private-key "$private_key" --gas-price $comp_gas_price --legacy "$function_sig" "${params[@]}" 2>&1) if [[ $? -ne 0 ]]; then - echo "Error: Failed to send transaction. The cast send output:" + echo "Error: Failed to send transaction. Output:" echo "$cast_output" return 1 fi - # Extract the transaction hash from the output - local tx_hash=$(echo "$cast_output" | grep 'transactionHash' | awk '{print $2}' | tail -n 1) - echo "Tx hash: $tx_hash" - - if [[ -z "$tx_hash" ]]; then + tx_hash=$(extract_tx_hash "$cast_output") + [[ -z "$tx_hash" ]] && { echo "Error: Failed to extract transaction hash." 
return 1 - fi + } echo "Transaction successful (transaction hash: $tx_hash)" +} - return 0 +function extract_tx_hash() { + local cast_output="$1" + echo "$cast_output" | grep 'transactionHash' | awk '{print $2}' | tail -n 1 } -function queryContract() { - local addr="$1" # Contract address - local funcSignature="$2" # Function signature - shift 2 # Shift past the first two arguments +function query_contract() { + local rpc_url="$1" # RPC URL + local addr="$2" # Contract address + local funcSignature="$3" # Function signature + shift 3 # Shift past the first 3 arguments local params=("$@") # Collect remaining arguments as parameters array echo "Querying state of $addr account (RPC URL: $rpc_url) with function signature: '$funcSignature' and params: ${params[*]}" >&3 - # Check if rpc_url is available + # Check if rpc url is available if [[ -z "$rpc_url" ]]; then - echo "Error: rpc_url environment variable is not set." + echo "Error: rpc_url parameter is not provided." return 1 fi @@ -186,3 +237,110 @@ function queryContract() { return 0 } + +function check_balances() { + local sender="$1" + local receiver="$2" + local amount="$3" + local tx_hash="$4" + local sender_initial_balance="$5" + local receiver_initial_balance="$6" + + # Ethereum address regex: 0x followed by 40 hexadecimal characters + if [[ ! "$sender" =~ ^0x[a-fA-F0-9]{40}$ ]]; then + echo "Error: Invalid sender address '$sender'." + return 1 + fi + + if [[ ! "$receiver" =~ ^0x[a-fA-F0-9]{40}$ ]]; then + echo "Error: Invalid receiver address '$receiver'." + return 1 + fi + + # Transaction hash regex: 0x followed by 64 hexadecimal characters + if [[ ! "$tx_hash" =~ ^0x[a-fA-F0-9]{64}$ ]]; then + echo "Error: Invalid transaction hash: $tx_hash". 
+        return 1
+    fi
+
+    local sender_final_balance=$(cast balance "$sender" --ether --rpc-url "$rpc_url") || return 1
+    local tx_output=$(cast tx "$tx_hash" --rpc-url "$rpc_url")
+    local gas_used=$(echo "$tx_output" | grep '^gas ' | awk '{print $2}')
+    local gas_price=$(echo "$tx_output" | grep '^gasPrice' | awk '{print $2}')
+    local gas_fee=$(echo "$gas_used * $gas_price" | bc)
+    local gas_fee_in_ether=$(cast to-unit "$gas_fee" ether)
+
+    local sender_balance_change=$(echo "$sender_initial_balance - $sender_final_balance" | bc)
+    echo "Sender balance changed by: '$sender_balance_change' ether"
+    echo "Gas fee paid: '$gas_fee_in_ether' ether"
+
+    local receiver_final_balance=$(cast balance "$receiver" --ether --rpc-url "$rpc_url") || return 1
+    local receiver_balance_change=$(echo "$receiver_final_balance - $receiver_initial_balance" | bc)
+    echo "Receiver balance changed by: '$receiver_balance_change' ether"
+
+    # Trim 'ether' suffix from amount to get the numeric part
+    local value_in_ether=$(echo "$amount" | sed 's/ether$//')
+
+    if [ "$(echo "$receiver_balance_change == $value_in_ether" | bc -l)" -ne 1 ]; then
+        echo "Error: receiver balance updated incorrectly. Expected: $value_in_ether, Actual: $receiver_balance_change"
+        return 1
+    fi
+
+    # Calculate expected sender balance change
+    local expected_sender_change=$(echo "$value_in_ether + $gas_fee_in_ether" | bc)
+    if [ "$(echo "$sender_balance_change == $expected_sender_change" | bc -l)" -ne 1 ]; then
+        echo "Error: sender balance updated incorrectly. 
Expected: $expected_sender_change, Actual: $sender_balance_change" + return 1 + fi +} + +function verify_native_token_balance() { + local rpc_url="$1" # RPC URL + local account="$2" # account address + local initial_balance="$3" # initial balance in Ether (decimal) + local ether_amount="$4" # amount to be added (in Ether, decimal) + + # Convert initial balance and amount to wei (no decimals) + local initial_balance_wei=$(cast --to-wei "$initial_balance") + + # Trim 'ether' from ether_amount if it exists + ether_amount=$(echo "$ether_amount" | sed 's/ether//') + local amount_wei=$(cast --to-wei "$ether_amount") + + # Get final balance in wei (after the operation) + local final_balance_wei=$(cast balance "$account" --rpc-url "$rpc_url" | awk '{print $1}') + + # Calculate expected final balance (initial_balance + amount) + local expected_final_balance_wei=$(echo "$initial_balance_wei + $amount_wei" | bc) + + # Check if final_balance matches the expected final balance + if [ "$(echo "$final_balance_wei == $expected_final_balance_wei" | bc)" -eq 1 ]; then + echo "✅ Balance verification successful: final balance is correct." + else + echo "❌ Balance verification failed: expected $expected_final_balance_wei but got $final_balance_wei." 
+ exit 1 + fi +} + +function mint_erc20_tokens() { + local rpc_url="$1" # The L1 RPC URL + local erc20_token_addr="$2" # The gas token contract address + local minter_private_key="$3" # The minter private key + local receiver="$4" # The receiver address (for minted tokens) + local tokens_amount="$5" # The amount of tokens to transfer (e.g., "0.1ether") + + # Query the erc20 token balance of the sender + run query_contract "$rpc_url" "$erc20_token_addr" "$balance_of_fn_sig" "$sender_addr" + assert_success + local erc20_token_balance=$(echo "$output" | tail -n 1) + + # Log the account's current gas token balance + echo "Initial account balance: $erc20_token_balance wei" >&3 + + # Convert tokens_amount to Wei for comparison + local wei_amount=$(cast --to-unit "$tokens_amount" wei) + + # Mint the required tokens by sending a transaction + run send_tx "$rpc_url" "$minter_private_key" "$erc20_token_addr" "$mint_fn_sig" "$receiver" "$tokens_amount" + assert_success +} diff --git a/test/helpers/ethtxmanmock_e2e.go b/test/helpers/ethtxmanmock_e2e.go index b6753c22..0c8ee0f8 100644 --- a/test/helpers/ethtxmanmock_e2e.go +++ b/test/helpers/ethtxmanmock_e2e.go @@ -6,7 +6,7 @@ import ( "testing" "github.com/0xPolygon/cdk/log" - "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" + ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" @@ -24,13 +24,13 @@ func NewEthTxManMock( const ( ArgToIndex = 1 - ArgDataIndex = 4 + ArgDataIndex = 3 ZeroValue = 0 ) ethTxMock := NewEthTxManagerMock(t) ethTxMock.On( - "Add", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + "Add", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
Run(func(args mock.Arguments) { ctx := context.Background() nonce, err := client.Client().PendingNonceAt(ctx, auth.From) @@ -97,7 +97,7 @@ func NewEthTxManMock( Return(common.Hash{}, nil) // res, err := c.ethTxMan.Result(ctx, id) ethTxMock.On("Result", mock.Anything, mock.Anything). - Return(ethtxmanager.MonitoredTxResult{Status: ethtxmanager.MonitoredTxStatusMined}, nil) + Return(ethtxtypes.MonitoredTxResult{Status: ethtxtypes.MonitoredTxStatusMined}, nil) return ethTxMock } diff --git a/test/helpers/lxly-bridge-test.bash b/test/helpers/lxly-bridge-test.bash index bbaf45e1..700e7ad2 100644 --- a/test/helpers/lxly-bridge-test.bash +++ b/test/helpers/lxly-bridge-test.bash @@ -1,25 +1,26 @@ #!/usr/bin/env bash # Error code reference https://hackmd.io/WwahVBZERJKdfK3BbKxzQQ -function deposit () { +function deposit() { readonly deposit_sig='bridgeAsset(uint32,address,uint256,address,bool,bytes)' if [[ $token_addr == "0x0000000000000000000000000000000000000000" ]]; then - echo "Checking the current ETH balance: " >&3 - cast balance -e --rpc-url $l1_rpc_url $current_addr >&3 + echo "The ETH balance for sender "$sender_addr":" >&3 + cast balance -e --rpc-url $l1_rpc_url $sender_addr >&3 else - echo "Checking the current token balance for token at $token_addr: " >&3 - cast call --rpc-url $l1_rpc_url $token_addr 'balanceOf(address)(uint256)' $current_addr >&3 + echo "The "$token_addr" token balance for sender "$sender_addr":" >&3 + balance_wei=$(cast call --rpc-url "$l1_rpc_url" "$token_addr" "$balance_of_fn_sig" "$sender_addr") + echo "$(cast --from-wei "$balance_wei")" >&3 fi - echo "Attempting to deposit $amount wei to net $destination_net for token $token_addr" >&3 + echo "Attempting to deposit $amount [wei] to $destination_addr, token $token_addr (sender=$sender_addr, network id=$destination_net, rpc url=$l1_rpc_url)" >&3 if [[ $dry_run == "true" ]]; then cast calldata $deposit_sig $destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes else if [[ 
$token_addr == "0x0000000000000000000000000000000000000000" ]]; then - cast send --legacy --private-key $skey --value $amount --rpc-url $l1_rpc_url $bridge_addr $deposit_sig $destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes + cast send --legacy --private-key $sender_private_key --value $amount --rpc-url $l1_rpc_url $bridge_addr $deposit_sig $destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes else - cast send --legacy --private-key $skey --rpc-url $l1_rpc_url $bridge_addr $deposit_sig $destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes + cast send --legacy --private-key $sender_private_key --rpc-url $l1_rpc_url $bridge_addr $deposit_sig $destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes fi fi } @@ -30,7 +31,7 @@ function claim() { readonly claimable_deposit_file=$(mktemp) echo "Getting full list of deposits" >&3 curl -s "$bridge_api_url/bridges/$destination_addr?limit=100&offset=0" | jq '.' | tee $bridge_deposit_file - + echo "Looking for claimable deposits" >&3 jq '[.deposits[] | select(.ready_for_claim == true and .claim_tx_hash == "" and .dest_net == '$destination_net')]' $bridge_deposit_file | tee $claimable_deposit_file readonly claimable_count=$(jq '. | length' $claimable_deposit_file) @@ -40,10 +41,11 @@ function claim() { echo "We have no claimable deposits at this time" >&3 exit 1 fi - + echo "We have $claimable_count claimable deposits on network $destination_net. Let's get this party started." 
>&3 readonly current_deposit=$(mktemp) readonly current_proof=$(mktemp) + local gas_price_factor=1 while read deposit_idx; do echo "Starting claim for tx index: "$deposit_idx >&3 echo "Deposit info:" >&3 @@ -69,9 +71,37 @@ function claim() { cast calldata $claim_sig "$in_merkle_proof" "$in_rollup_merkle_proof" $in_global_index $in_main_exit_root $in_rollup_exit_root $in_orig_net $in_orig_addr $in_dest_net $in_dest_addr $in_amount $in_metadata cast call --rpc-url $l2_rpc_url $bridge_addr $claim_sig "$in_merkle_proof" "$in_rollup_merkle_proof" $in_global_index $in_main_exit_root $in_rollup_exit_root $in_orig_net $in_orig_addr $in_dest_net $in_dest_addr $in_amount $in_metadata else - cast send --legacy --rpc-url $l2_rpc_url --private-key $skey $bridge_addr $claim_sig "$in_merkle_proof" "$in_rollup_merkle_proof" $in_global_index $in_main_exit_root $in_rollup_exit_root $in_orig_net $in_orig_addr $in_dest_net $in_dest_addr $in_amount $in_metadata + local comp_gas_price=$(bc -l <<< "$gas_price * 1.5" | sed 's/\..*//') + if [[ $? 
-ne 0 ]]; then + echo "Failed to calculate gas price" >&3 + exit 1 + fi + + echo "cast send --legacy --gas-price $comp_gas_price --rpc-url $l2_rpc_url --private-key $sender_private_key $bridge_addr \"$claim_sig\" \"$in_merkle_proof\" \"$in_rollup_merkle_proof\" $in_global_index $in_main_exit_root $in_rollup_exit_root $in_orig_net $in_orig_addr $in_dest_net $in_dest_addr $in_amount $in_metadata" >&3 + cast send --legacy --gas-price $comp_gas_price --rpc-url $l2_rpc_url --private-key $sender_private_key $bridge_addr "$claim_sig" "$in_merkle_proof" "$in_rollup_merkle_proof" $in_global_index $in_main_exit_root $in_rollup_exit_root $in_orig_net $in_orig_addr $in_dest_net $in_dest_addr $in_amount $in_metadata + fi + + done < <(seq 0 $((claimable_count - 1))) +} + +function wait_for_claim() { + local timeout="$1" # timeout (in seconds) + local claim_frequency="$2" # claim frequency (in seconds) + local start_time=$(date +%s) + local end_time=$((start_time + timeout)) + + while true; do + local current_time=$(date +%s) + if ((current_time > end_time)); then + echo "[$(date '+%Y-%m-%d %H:%M:%S')] ❌ Exiting... Timeout reached!" + exit 1 fi + run claim + if [ $status -eq 0 ]; then + break + fi - done < <(seq 0 $((claimable_count - 1)) ) + sleep "$claim_frequency" + done } diff --git a/test/helpers/mock_ethtxmanager.go b/test/helpers/mock_ethtxmanager.go index 848992f4..adb21f26 100644 --- a/test/helpers/mock_ethtxmanager.go +++ b/test/helpers/mock_ethtxmanager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.40.1. DO NOT EDIT. +// Code generated by mockery v2.45.0. DO NOT EDIT. 
package helpers @@ -9,7 +9,7 @@ import ( context "context" - ethtxmanager "github.com/0xPolygonHermez/zkevm-ethtx-manager/ethtxmanager" + ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" mock "github.com/stretchr/testify/mock" @@ -21,9 +21,9 @@ type EthTxManagerMock struct { mock.Mock } -// Add provides a mock function with given fields: ctx, to, forcedNonce, value, data, gasOffset, sidecar -func (_m *EthTxManagerMock) Add(ctx context.Context, to *common.Address, forcedNonce *uint64, value *big.Int, data []byte, gasOffset uint64, sidecar *types.BlobTxSidecar) (common.Hash, error) { - ret := _m.Called(ctx, to, forcedNonce, value, data, gasOffset, sidecar) +// Add provides a mock function with given fields: ctx, to, value, data, gasOffset, sidecar +func (_m *EthTxManagerMock) Add(ctx context.Context, to *common.Address, value *big.Int, data []byte, gasOffset uint64, sidecar *types.BlobTxSidecar) (common.Hash, error) { + ret := _m.Called(ctx, to, value, data, gasOffset, sidecar) if len(ret) == 0 { panic("no return value specified for Add") @@ -31,19 +31,19 @@ func (_m *EthTxManagerMock) Add(ctx context.Context, to *common.Address, forcedN var r0 common.Hash var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *common.Address, *uint64, *big.Int, []byte, uint64, *types.BlobTxSidecar) (common.Hash, error)); ok { - return rf(ctx, to, forcedNonce, value, data, gasOffset, sidecar) + if rf, ok := ret.Get(0).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar) (common.Hash, error)); ok { + return rf(ctx, to, value, data, gasOffset, sidecar) } - if rf, ok := ret.Get(0).(func(context.Context, *common.Address, *uint64, *big.Int, []byte, uint64, *types.BlobTxSidecar) common.Hash); ok { - r0 = rf(ctx, to, forcedNonce, value, data, gasOffset, sidecar) + if rf, ok := ret.Get(0).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar) common.Hash); ok { + r0 = rf(ctx, to, value, data, gasOffset, 
sidecar) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(common.Hash) } } - if rf, ok := ret.Get(1).(func(context.Context, *common.Address, *uint64, *big.Int, []byte, uint64, *types.BlobTxSidecar) error); ok { - r1 = rf(ctx, to, forcedNonce, value, data, gasOffset, sidecar) + if rf, ok := ret.Get(1).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar) error); ok { + r1 = rf(ctx, to, value, data, gasOffset, sidecar) } else { r1 = ret.Error(1) } @@ -70,22 +70,22 @@ func (_m *EthTxManagerMock) Remove(ctx context.Context, id common.Hash) error { } // Result provides a mock function with given fields: ctx, id -func (_m *EthTxManagerMock) Result(ctx context.Context, id common.Hash) (ethtxmanager.MonitoredTxResult, error) { +func (_m *EthTxManagerMock) Result(ctx context.Context, id common.Hash) (ethtxtypes.MonitoredTxResult, error) { ret := _m.Called(ctx, id) if len(ret) == 0 { panic("no return value specified for Result") } - var r0 ethtxmanager.MonitoredTxResult + var r0 ethtxtypes.MonitoredTxResult var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (ethtxmanager.MonitoredTxResult, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (ethtxtypes.MonitoredTxResult, error)); ok { return rf(ctx, id) } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) ethtxmanager.MonitoredTxResult); ok { + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) ethtxtypes.MonitoredTxResult); ok { r0 = rf(ctx, id) } else { - r0 = ret.Get(0).(ethtxmanager.MonitoredTxResult) + r0 = ret.Get(0).(ethtxtypes.MonitoredTxResult) } if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { @@ -98,27 +98,27 @@ func (_m *EthTxManagerMock) Result(ctx context.Context, id common.Hash) (ethtxma } // ResultsByStatus provides a mock function with given fields: ctx, statuses -func (_m *EthTxManagerMock) ResultsByStatus(ctx context.Context, statuses []ethtxmanager.MonitoredTxStatus) 
([]ethtxmanager.MonitoredTxResult, error) { +func (_m *EthTxManagerMock) ResultsByStatus(ctx context.Context, statuses []ethtxtypes.MonitoredTxStatus) ([]ethtxtypes.MonitoredTxResult, error) { ret := _m.Called(ctx, statuses) if len(ret) == 0 { panic("no return value specified for ResultsByStatus") } - var r0 []ethtxmanager.MonitoredTxResult + var r0 []ethtxtypes.MonitoredTxResult var r1 error - if rf, ok := ret.Get(0).(func(context.Context, []ethtxmanager.MonitoredTxStatus) ([]ethtxmanager.MonitoredTxResult, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, []ethtxtypes.MonitoredTxStatus) ([]ethtxtypes.MonitoredTxResult, error)); ok { return rf(ctx, statuses) } - if rf, ok := ret.Get(0).(func(context.Context, []ethtxmanager.MonitoredTxStatus) []ethtxmanager.MonitoredTxResult); ok { + if rf, ok := ret.Get(0).(func(context.Context, []ethtxtypes.MonitoredTxStatus) []ethtxtypes.MonitoredTxResult); ok { r0 = rf(ctx, statuses) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]ethtxmanager.MonitoredTxResult) + r0 = ret.Get(0).([]ethtxtypes.MonitoredTxResult) } } - if rf, ok := ret.Get(1).(func(context.Context, []ethtxmanager.MonitoredTxStatus) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, []ethtxtypes.MonitoredTxStatus) error); ok { r1 = rf(ctx, statuses) } else { r1 = ret.Error(1) diff --git a/test/helpers/simulated.go b/test/helpers/simulated.go index eb4cab20..d85baf92 100644 --- a/test/helpers/simulated.go +++ b/test/helpers/simulated.go @@ -1,8 +1,24 @@ package helpers import ( + "math/big" + "testing" + + "github.com/0xPolygon/cdk-contracts-tooling/contracts/elderberry-paris/polygonzkevmbridgev2" + "github.com/0xPolygon/cdk/test/contracts/transparentupgradableproxy" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethclient/simulated" 
"github.com/ethereum/go-ethereum/rpc" + "github.com/stretchr/testify/require" +) + +const ( + defaultBlockGasLimit = uint64(999999999999999999) + defaultBalance = "10000000000000000000000000" + chainID = 1337 ) type ClientRenamed simulated.Client @@ -14,3 +30,106 @@ type TestClient struct { func (tc TestClient) Client() *rpc.Client { return nil } + +// SimulatedBackendSetup defines the setup for a simulated backend. +type SimulatedBackendSetup struct { + UserAuth *bind.TransactOpts + DeployerAuth *bind.TransactOpts + EBZkevmBridgeAddr common.Address + EBZkevmBridgeContract *polygonzkevmbridgev2.Polygonzkevmbridgev2 + EBZkevmBridgeProxyAddr common.Address + EBZkevmBridgeProxyContract *polygonzkevmbridgev2.Polygonzkevmbridgev2 +} + +// SimulatedBackend creates a simulated backend with two accounts: user and deployer. +func SimulatedBackend( + t *testing.T, + balances map[common.Address]types.Account, + ebZkevmBridgeNetwork uint32, +) (*simulated.Backend, *SimulatedBackendSetup) { + t.Helper() + + // Define default balance + balance, ok := new(big.Int).SetString(defaultBalance, 10) //nolint:mnd + require.Truef(t, ok, "failed to set balance") + + // Create user + userPK, err := crypto.GenerateKey() + require.NoError(t, err) + userAuth, err := bind.NewKeyedTransactorWithChainID(userPK, big.NewInt(chainID)) + require.NoError(t, err) + + // Create deployer + deployerPK, err := crypto.GenerateKey() + require.NoError(t, err) + deployerAuth, err := bind.NewKeyedTransactorWithChainID(deployerPK, big.NewInt(chainID)) + require.NoError(t, err) + precalculatedBridgeAddr := crypto.CreateAddress(deployerAuth.From, 1) + + // Define balances map + if balances == nil { + balances = make(map[common.Address]types.Account) + } + balances[userAuth.From] = types.Account{Balance: balance} + balances[deployerAuth.From] = types.Account{Balance: balance} + balances[precalculatedBridgeAddr] = types.Account{Balance: balance} + + client := simulated.NewBackend(balances, 
simulated.WithBlockGasLimit(defaultBlockGasLimit)) + + // Mine the first block + client.Commit() + + // MUST BE DEPLOYED FIRST + // Deploy zkevm bridge contract + ebZkevmBridgeAddr, _, ebZkevmBridgeContract, err := polygonzkevmbridgev2.DeployPolygonzkevmbridgev2(deployerAuth, client.Client()) + require.NoError(t, err) + client.Commit() + + // Create proxy contract for the bridge + var ebZkevmBridgeProxyAddr common.Address + var ebZkevmBridgeProxyContract *polygonzkevmbridgev2.Polygonzkevmbridgev2 + { + precalculatedAddr := crypto.CreateAddress(deployerAuth.From, 2) //nolint:mnd + + bridgeABI, err := polygonzkevmbridgev2.Polygonzkevmbridgev2MetaData.GetAbi() + require.NoError(t, err) + require.NotNil(t, bridgeABI) + + dataCallProxy, err := bridgeABI.Pack("initialize", + ebZkevmBridgeNetwork, + common.Address{}, // gasTokenAddressMainnet + uint32(0), // gasTokenNetworkMainnet + precalculatedAddr, + common.Address{}, + []byte{}, // gasTokenMetadata + ) + require.NoError(t, err) + + ebZkevmBridgeProxyAddr, _, _, err = transparentupgradableproxy.DeployTransparentupgradableproxy( + deployerAuth, + client.Client(), + ebZkevmBridgeAddr, + deployerAuth.From, + dataCallProxy, + ) + require.NoError(t, err) + require.Equal(t, precalculatedBridgeAddr, ebZkevmBridgeProxyAddr) + client.Commit() + + ebZkevmBridgeProxyContract, err = polygonzkevmbridgev2.NewPolygonzkevmbridgev2(ebZkevmBridgeProxyAddr, client.Client()) + require.NoError(t, err) + + checkGERAddr, err := ebZkevmBridgeProxyContract.GlobalExitRootManager(&bind.CallOpts{}) + require.NoError(t, err) + require.Equal(t, precalculatedAddr, checkGERAddr) + } + + return client, &SimulatedBackendSetup{ + UserAuth: userAuth, + DeployerAuth: deployerAuth, + EBZkevmBridgeAddr: ebZkevmBridgeAddr, + EBZkevmBridgeContract: ebZkevmBridgeContract, + EBZkevmBridgeProxyAddr: ebZkevmBridgeProxyAddr, + EBZkevmBridgeProxyContract: ebZkevmBridgeProxyContract, + } +} diff --git a/test/run-e2e.sh b/test/run-e2e.sh index 6a29e416..08a6b2cd 
100755 --- a/test/run-e2e.sh +++ b/test/run-e2e.sh @@ -1,13 +1,19 @@ #!/bin/bash source $(dirname $0)/scripts/env.sh -FORK=elderberry -DATA_AVAILABILITY_MODE=$1 + +FORK=$1 +if [ -z $FORK ]; then + echo "Missing FORK: ['fork9', 'fork12']" + exit 1 +fi + +DATA_AVAILABILITY_MODE=$2 if [ -z $DATA_AVAILABILITY_MODE ]; then echo "Missing DATA_AVAILABILITY_MODE: ['rollup', 'cdk-validium']" exit 1 fi -BASE_FOLDER=$(dirname $0) +BASE_FOLDER=$(dirname $0) docker images -q cdk:latest > /dev/null if [ $? -ne 0 ] ; then echo "Building cdk:latest" @@ -18,8 +24,7 @@ else echo "docker cdk:latest already exists" fi -$BASE_FOLDER/scripts/kurtosis_prepare_params_yml.sh "$KURTOSIS_FOLDER" $DATA_AVAILABILITY_MODE -[ $? -ne 0 ] && echo "Error preparing params.yml" && exit 1 - kurtosis clean --all -kurtosis run --enclave cdk-v1 --args-file $DEST_KURTOSIS_PARAMS_YML --image-download always $KURTOSIS_FOLDER +echo "Override cdk config file" +cp $BASE_FOLDER/config/kurtosis-cdk-node-config.toml.template $KURTOSIS_FOLDER/templates/trusted-node/cdk-node-config.toml +kurtosis run --enclave cdk --args-file "combinations/$FORK-$DATA_AVAILABILITY_MODE.yml" --image-download always $KURTOSIS_FOLDER diff --git a/test/scripts/batch_verification_monitor.sh b/test/scripts/batch_verification_monitor.sh index 9dc18e64..9c923888 100755 --- a/test/scripts/batch_verification_monitor.sh +++ b/test/scripts/batch_verification_monitor.sh @@ -17,7 +17,7 @@ timeout="$2" start_time=$(date +%s) end_time=$((start_time + timeout)) -rpc_url="$(kurtosis port print cdk-v1 cdk-erigon-node-001 http-rpc)" +rpc_url="$(kurtosis port print cdk cdk-erigon-node-001 rpc)" while true; do verified_batches="$(cast to-dec "$(cast rpc --rpc-url "$rpc_url" zkevm_verifiedBatchNumber | sed 's/"//g')")" diff --git a/test/scripts/env.sh b/test/scripts/env.sh index b81c18a4..063b7d61 100644 --- a/test/scripts/env.sh +++ b/test/scripts/env.sh @@ -1,8 +1,7 @@ #!/bin/bash ### Common variables -ENCLAVE=cdk-v1 
-CDK_ERIGON_NODE_NAME=cdk-erigon-node-001 +KURTOSIS_ENCLAVE=cdk TMP_CDK_FOLDER=tmp/cdk DEST_KURTOSIS_PARAMS_YML=../$TMP_CDK_FOLDER/e2e-params.yml -KURTOSIS_VERSION=develop KURTOSIS_FOLDER=../kurtosis-cdk +USE_L1_GAS_TOKEN_CONTRACT=true diff --git a/test/scripts/kurtosis_prepare_params_yml.sh b/test/scripts/kurtosis_prepare_params_yml.sh deleted file mode 100755 index aa57e272..00000000 --- a/test/scripts/kurtosis_prepare_params_yml.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash -source $(dirname $0)/env.sh - -if [ -z $DEST_KURTOSIS_PARAMS_YML ]; then - echo "DEST_KURTOSIS_PARAMS_YML is not set. Must be set on file env.sh" - exit 1 -fi - -# Check if the destination params file exists and don't do nothing -if [ -f $DEST_KURTOSIS_PARAMS_YML ]; then - echo "Destination params file already exists" - exit 0 -fi - -KURTOSIS_FOLDER=$1 -if [ -z $KURTOSIS_FOLDER ]; then - echo "Missing param Kurtosis Folder" - exit 1 -fi - -DATA_AVAILABILITY_MODE=$2 -if [ -z $DATA_AVAILABILITY_MODE ]; then - echo "Missing param Data Availability Mode : [rollup, cdk-validium]" - exit 1 -fi - -mkdir -p $(dirname $DEST_KURTOSIS_PARAMS_YML) -cp $KURTOSIS_FOLDER/params.yml $DEST_KURTOSIS_PARAMS_YML -yq -Y --in-place ".args.cdk_node_image = \"cdk\"" $DEST_KURTOSIS_PARAMS_YML -yq -Y --in-place ".args.data_availability_mode = \"$DATA_AVAILABILITY_MODE\"" $DEST_KURTOSIS_PARAMS_YML diff --git a/tree/appendonlytree.go b/tree/appendonlytree.go index 20d22ec1..5b14b962 100644 --- a/tree/appendonlytree.go +++ b/tree/appendonlytree.go @@ -82,7 +82,7 @@ func (t *AppendOnlyTree) initCache(tx db.Txer) error { siblings := [types.DefaultHeight]common.Hash{} lastRoot, err := t.getLastRootWithTx(tx) if err != nil { - if errors.Is(err, ErrNotFound) { + if errors.Is(err, db.ErrNotFound) { t.lastIndex = -1 t.lastLeftCache = siblings return nil @@ -102,7 +102,7 @@ func (t *AppendOnlyTree) initCache(tx db.Txer) error { ) } if currentNode == nil { - return ErrNotFound + return db.ErrNotFound } siblings[h] = 
currentNode.Left if index&(1<<h) > 0 { @@ -113,7 +113,7 @@ func (t *AppendOnlyTree) initCache(tx db.Txer) error { } // Reverse the siblings to go from leafs to root - for i, j := 0, len(siblings)-1; i < j; i, j = i+1, j-1 { + for i, j := 0, len(siblings)-1; i < j; i, j = i+1, j-1 { siblings[i], siblings[j] = siblings[j], siblings[i] } diff --git a/tree/tree.go b/tree/tree.go index 2107ba68..0e3a0c69 100644 --- a/tree/tree.go +++ b/tree/tree.go @@ -14,8 +14,7 @@ import ( ) var ( - EmptyProof = types.Proof{} - ErrNotFound = errors.New("not found") + EmptyProof = types.Proof{} ) type Tree struct { @@ -50,7 +49,7 @@ func newTree(db *sql.DB, tablePrefix string) *Tree { } func (t *Tree) getSiblings(tx db.Querier, index uint32, root common.Hash) ( - siblings [32]common.Hash, + siblings types.Proof, hasUsedZeroHashes bool, err error, ) { @@ -60,7 +59,7 @@ func (t *Tree) getSiblings(tx db.Querier, index uint32, root common.Hash) ( var currentNode *types.TreeNode currentNode, err = t.getRHTNode(tx, currentNodeHash) if err != nil { - if errors.Is(err, ErrNotFound) { + if errors.Is(err, db.ErrNotFound) { hasUsedZeroHashes = true siblings[h] = t.zeroHashes[h] err = nil @@ -113,7 +112,7 @@ func (t *Tree) GetProof(ctx context.Context, index uint32, root common.Hash) (ty return types.Proof{}, err } if isErrNotFound { - return types.Proof{}, ErrNotFound + return types.Proof{}, db.ErrNotFound } return siblings, nil } @@ -127,7 +126,7 @@ func (t *Tree) getRHTNode(tx db.Querier, nodeHash common.Hash) (*types.TreeNode, ) if err != nil { if errors.Is(err, sql.ErrNoRows) { - return node, ErrNotFound + return node, db.ErrNotFound } return node, err } @@ -173,8 +172,11 @@ func (t *Tree) storeRoot(tx db.Txer, root types.Root) error { } // GetLastRoot returns the last processed root -func (t *Tree) GetLastRoot(ctx context.Context) (types.Root, error) { - return t.getLastRootWithTx(t.db) +func (t *Tree) GetLastRoot(tx db.Querier) (types.Root, error) { + if tx == nil { + tx = t.db + } + return 
t.getLastRootWithTx(tx) } func (t *Tree) getLastRootWithTx(tx db.Querier) (types.Root, error) { @@ -185,7 +187,7 @@ func (t *Tree) getLastRootWithTx(tx db.Querier) (types.Root, error) { ) if err != nil { if errors.Is(err, sql.ErrNoRows) { - return root, ErrNotFound + return root, db.ErrNotFound } return root, err } @@ -201,7 +203,7 @@ func (t *Tree) GetRootByIndex(ctx context.Context, index uint32) (types.Root, er index, ); err != nil { if errors.Is(err, sql.ErrNoRows) { - return root, ErrNotFound + return root, db.ErrNotFound } return root, err } @@ -209,25 +211,25 @@ func (t *Tree) GetRootByIndex(ctx context.Context, index uint32) (types.Root, er } // GetRootByHash returns the root associated to the hash -func (t *Tree) GetRootByHash(ctx context.Context, hash common.Hash) (types.Root, error) { - var root types.Root +func (t *Tree) GetRootByHash(ctx context.Context, hash common.Hash) (*types.Root, error) { + root := &types.Root{} if err := meddler.QueryRow( - t.db, &root, + t.db, root, fmt.Sprintf(`SELECT * FROM %s WHERE hash = $1;`, t.rootTable), hash.Hex(), ); err != nil { if errors.Is(err, sql.ErrNoRows) { - return root, ErrNotFound + return nil, db.ErrNotFound } - return root, err + return nil, err } return root, nil } -func (t *Tree) GetLeaf(ctx context.Context, index uint32, root common.Hash) (common.Hash, error) { +func (t *Tree) GetLeaf(tx db.Querier, index uint32, root common.Hash) (common.Hash, error) { currentNodeHash := root for h := int(types.DefaultHeight - 1); h >= 0; h-- { - currentNode, err := t.getRHTNode(t.db, currentNodeHash) + currentNode, err := t.getRHTNode(tx, currentNodeHash) if err != nil { return common.Hash{}, err } diff --git a/tree/tree_test.go b/tree/tree_test.go index b5278723..c2748856 100644 --- a/tree/tree_test.go +++ b/tree/tree_test.go @@ -2,6 +2,7 @@ package tree_test import ( "context" + "database/sql" "encoding/json" "fmt" "os" @@ -18,6 +19,88 @@ import ( "github.com/stretchr/testify/require" ) +func TestCheckExpectedRoot(t 
*testing.T) { + createTreeDB := func() *sql.DB { + dbPath := path.Join(t.TempDir(), "file::memory:?cache=shared") + log.Debug("DB created at: ", dbPath) + require.NoError(t, migrations.RunMigrations(dbPath)) + treeDB, err := db.NewSQLiteDB(dbPath) + require.NoError(t, err) + + return treeDB + } + + addLeaves := func(merkletree *tree.AppendOnlyTree, + treeDB *sql.DB, + numOfLeavesToAdd, from int) { + tx, err := db.NewTx(context.Background(), treeDB) + require.NoError(t, err) + + for i := from; i < from+numOfLeavesToAdd; i++ { + require.NoError(t, merkletree.AddLeaf(tx, uint64(i), 0, types.Leaf{ + Index: uint32(i), + Hash: common.HexToHash(fmt.Sprintf("%x", i)), + })) + } + + require.NoError(t, tx.Commit()) + } + + t.Run("Check when no reorg", func(t *testing.T) { + numOfLeavesToAdd := 10 + indexToCheck := uint32(numOfLeavesToAdd - 1) + + treeDB := createTreeDB() + merkleTree := tree.NewAppendOnlyTree(treeDB, "") + + addLeaves(merkleTree, treeDB, numOfLeavesToAdd, 0) + + expectedRoot, err := merkleTree.GetLastRoot(nil) + require.NoError(t, err) + + addLeaves(merkleTree, treeDB, numOfLeavesToAdd, numOfLeavesToAdd) + + root2, err := merkleTree.GetRootByIndex(context.Background(), indexToCheck) + require.NoError(t, err) + require.Equal(t, expectedRoot.Hash, root2.Hash) + require.Equal(t, expectedRoot.Index, root2.Index) + }) + + t.Run("Check after rebuild tree when reorg", func(t *testing.T) { + numOfLeavesToAdd := 10 + indexToCheck := uint32(numOfLeavesToAdd - 1) + treeDB := createTreeDB() + merkleTree := tree.NewAppendOnlyTree(treeDB, "") + + addLeaves(merkleTree, treeDB, numOfLeavesToAdd, 0) + + expectedRoot, err := merkleTree.GetLastRoot(nil) + require.NoError(t, err) + + addLeaves(merkleTree, treeDB, numOfLeavesToAdd, numOfLeavesToAdd) + + // reorg tree + tx, err := db.NewTx(context.Background(), treeDB) + require.NoError(t, err) + require.NoError(t, merkleTree.Reorg(tx, uint64(indexToCheck+1))) + require.NoError(t, tx.Commit()) + + // rebuild cache on adding new 
leaf + tx, err = db.NewTx(context.Background(), treeDB) + require.NoError(t, err) + require.NoError(t, merkleTree.AddLeaf(tx, uint64(indexToCheck+1), 0, types.Leaf{ + Index: indexToCheck + 1, + Hash: common.HexToHash(fmt.Sprintf("%x", indexToCheck+1)), + })) + require.NoError(t, tx.Commit()) + + root2, err := merkleTree.GetRootByIndex(context.Background(), indexToCheck) + require.NoError(t, err) + require.Equal(t, expectedRoot.Hash, root2.Hash) + require.Equal(t, expectedRoot.Index, root2.Index) + }) +} + func TestMTAddLeaf(t *testing.T) { data, err := os.ReadFile("testvectors/root-vectors.json") require.NoError(t, err) @@ -51,7 +134,7 @@ func TestMTAddLeaf(t *testing.T) { } require.NoError(t, tx.Commit()) if len(testVector.ExistingLeaves) > 0 { - root, err := merkletree.GetLastRoot(ctx) + root, err := merkletree.GetLastRoot(nil) require.NoError(t, err) require.Equal(t, common.HexToHash(testVector.CurrentRoot), root.Hash) } @@ -66,7 +149,7 @@ func TestMTAddLeaf(t *testing.T) { require.NoError(t, err) require.NoError(t, tx.Commit()) - root, err := merkletree.GetLastRoot(ctx) + root, err := merkletree.GetLastRoot(nil) require.NoError(t, err) require.Equal(t, common.HexToHash(testVector.NewRoot), root.Hash) }) @@ -102,7 +185,7 @@ func TestMTGetProof(t *testing.T) { } require.NoError(t, tx.Commit()) - root, err := tre.GetLastRoot(ctx) + root, err := tre.GetLastRoot(nil) require.NoError(t, err) expectedRoot := common.HexToHash(testVector.ExpectedRoot) require.Equal(t, expectedRoot, root.Hash) @@ -115,3 +198,13 @@ func TestMTGetProof(t *testing.T) { }) } } + +func createTreeDBForTest(t *testing.T) *sql.DB { + t.Helper() + dbPath := "file::memory:?cache=shared" + err := migrations.RunMigrations(dbPath) + require.NoError(t, err) + treeDB, err := db.NewSQLiteDB(dbPath) + require.NoError(t, err) + return treeDB +} diff --git a/tree/updatabletree.go b/tree/updatabletree.go index 3ed8b881..be861b55 100644 --- a/tree/updatabletree.go +++ b/tree/updatabletree.go @@ -23,21 +23,21 
@@ func NewUpdatableTree(db *sql.DB, dbPrefix string) *UpdatableTree { return ut } -func (t *UpdatableTree) UpsertLeaf(tx db.Txer, blockNum, blockPosition uint64, leaf types.Leaf) error { +func (t *UpdatableTree) UpsertLeaf(tx db.Txer, blockNum, blockPosition uint64, leaf types.Leaf) (common.Hash, error) { var rootHash common.Hash root, err := t.getLastRootWithTx(tx) if err != nil { - if errors.Is(err, ErrNotFound) { + if errors.Is(err, db.ErrNotFound) { rootHash = t.zeroHashes[types.DefaultHeight] } else { - return err + return common.Hash{}, err } } else { rootHash = root.Hash } siblings, _, err := t.getSiblings(tx, leaf.Index, rootHash) if err != nil { - return err + return common.Hash{}, err } currentChildHash := leaf.Hash newNodes := []types.TreeNode{} @@ -59,10 +59,10 @@ func (t *UpdatableTree) UpsertLeaf(tx db.Txer, blockNum, blockPosition uint64, l BlockNum: blockNum, BlockPosition: blockPosition, }); err != nil { - return err + return common.Hash{}, err } if err := t.storeNodes(tx, newNodes); err != nil { - return err + return common.Hash{}, err } - return nil + return currentChildHash, nil } diff --git a/tree/updatabletree_test.go b/tree/updatabletree_test.go new file mode 100644 index 00000000..a684fd0e --- /dev/null +++ b/tree/updatabletree_test.go @@ -0,0 +1,49 @@ +package tree_test + +import ( + "context" + "testing" + + "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/tree" + "github.com/0xPolygon/cdk/tree/types" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestUpdatableTreeExploratory(t *testing.T) { + treeDB := createTreeDBForTest(t) + sut := tree.NewUpdatableTree(treeDB, "") + blockNum := uint64(1) + blockPosition := uint64(1) + leaf1 := types.Leaf{ + Index: 10, + Hash: common.HexToHash("0x123456"), + } + leaf2 := types.Leaf{ + Index: 1, + Hash: common.HexToHash("0x123478"), + } + ctx := context.TODO() + + tx, err := db.NewTx(ctx, treeDB) + require.NoError(t, err) + _, err = 
sut.UpsertLeaf(tx, blockNum, blockPosition, leaf1) + require.NoError(t, err) + + root2, err := sut.UpsertLeaf(tx, blockNum, blockPosition, leaf2) + require.NoError(t, err) + leaf1get, err := sut.GetLeaf(tx, leaf1.Index, root2) + require.NoError(t, err) + require.Equal(t, leaf1.Hash, leaf1get) + // If a leaf dont exist return 'not found' error + _, err = sut.GetLeaf(tx, 99, root2) + require.ErrorIs(t, err, db.ErrNotFound) + leaf99 := types.Leaf{ + Index: 99, + Hash: common.Hash{}, // 0x00000 + } + + _, err = sut.UpsertLeaf(tx, blockNum, blockPosition, leaf99) + require.Error(t, err, "insert 0x000 doesnt change root and return UNIQUE constraint failed: root.hash") +}