diff --git a/.env b/.env
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json
index 811c773b6f5..9bae11c7b26 100644
--- a/.github/release-please/manifest.json
+++ b/.github/release-please/manifest.json
@@ -1,5 +1,5 @@
 {
-  "core": "24.24.0",
+  "core": "24.25.0",
   "prover": "16.5.0",
   "zk_toolbox": "0.1.2"
 }
diff --git a/.github/scripts/rate_limit_check.sh b/.github/scripts/rate_limit_check.sh
new file mode 100755
index 00000000000..6594c685d84
--- /dev/null
+++ b/.github/scripts/rate_limit_check.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+set -o errexit
+set -o pipefail
+
+
+api_endpoint="https://api.github.com/users/zksync-era-bot"
+wait_time=60
+max_retries=60
+retry_count=0
+
+while [[ $retry_count -lt $max_retries ]]; do
+  response=$(run_retried curl -s -w "%{http_code}" -o temp.json "$api_endpoint")
+  http_code=$(echo "$response" | tail -n1)
+
+  if [[ "$http_code" == "200" ]]; then
+    echo "Request successful. Not rate-limited."
+    cat temp.json
+    rm temp.json
+    exit 0
+  elif [[ "$http_code" == "403" ]]; then
+    rate_limit_exceeded=$(jq -r '.message' temp.json | grep -i "API rate limit exceeded" || true)
+    if [[ -n "$rate_limit_exceeded" ]]; then
+      retry_count=$((retry_count+1))
+      echo "API rate limit exceeded. Retry $retry_count of $max_retries. Retrying in $wait_time seconds..."
+      sleep $wait_time
+    else
+      echo "Request failed with HTTP status $http_code."
+      cat temp.json
+      rm temp.json
+      exit 1
+    fi
+  else
+    echo "Request failed with HTTP status $http_code."
+    cat temp.json
+    rm temp.json
+    exit 1
+  fi
+done
+
+echo "Reached the maximum number of retries ($max_retries). Exiting."
+rm temp.json
+exit 1
diff --git a/.github/workflows/build-base.yml b/.github/workflows/build-base.yml
new file mode 100644
index 00000000000..83be44c126f
--- /dev/null
+++ b/.github/workflows/build-base.yml
@@ -0,0 +1,159 @@
+name: Build zksync-build-base Docker image
+on:
+  workflow_dispatch:
+    inputs:
+      repo_ref:
+        description: "git reference of the zksync-era to build"
+        required: true
+        default: main
+jobs:
+  build-images:
+    name: Build and Push Docker Images
+    runs-on: ${{ fromJSON('["matterlabs-ci-runner-high-performance", "matterlabs-ci-runner-arm"]')[contains(matrix.arch, 'arm')] }}
+    outputs:
+      image_tag_sha: ${{ steps.get-sha.outputs.image_tag_sha }}
+    # Needed to push to GitHub Package Registry
+    permissions:
+      packages: write
+      contents: read
+    env:
+      DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
+      DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}
+      REPO_REF: ${{ github.event.inputs.repo_ref }}
+    strategy:
+      matrix:
+        name: [ build-base ]
+        repository: [ zksync-build-base ]
+        arch: [ amd64, arm64 ]
+
+    steps:
+      - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7
+        with:
+          submodules: "recursive"
+
+      - name: Login to google container registry
+        run: |
+          gcloud auth configure-docker us-docker.pkg.dev -q
+
+      - name: Login to DockerHub
+        uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
+        with:
+          username: ${{ secrets.DOCKERHUB_USER }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Get tag
+        id: get-sha
+        run: |
+          echo IMAGE_TAG_SHA=$(git rev-parse --short HEAD) >> $GITHUB_ENV
+          echo image_tag_sha=$(git rev-parse --short HEAD) >> $GITHUB_OUTPUT
+
+      - 
name: Set up Docker Buildx + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + + - name: Build and push + uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 + with: + push: true + context: . + file: docker/build-base/Dockerfile + labels: | + org.opencontainers.image.source=https://github.com/matter-labs/zksync-era + org.opencontainers.image.licenses="MIT OR Apache-2.0" + tags: | + matterlabs/zksync-build-base:${{ steps.get-sha.outputs.image_tag_sha }}-${{ matrix.arch }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/zksync-build-base:${{ steps.get-sha.outputs.image_tag_sha }}-${{ matrix.arch }} + ghcr.io/${{ github.repository_owner }}/zksync-build-base:${{ steps.get-sha.outputs.image_tag_sha }}-${{ matrix.arch }} + + multiarch_manifest: + # Needed to push to Gihub Package Registry + permissions: + packages: write + contents: read + needs: [ build-images ] + env: + IMAGE_TAG_SUFFIX: ${{ needs.build-images.outputs.image_tag_sha }} + runs-on: [ matterlabs-ci-runner-high-performance ] + steps: + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + + - name: Login to google container registry + run: | + gcloud auth configure-docker us-docker.pkg.dev -q + + - name: Login to DockerHub + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 + with: + username: ${{ secrets.DOCKERHUB_USER }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Login to GitHub Container Registry + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Create and push multi-arch manifests for Dockerhub + shell: bash + run: | + images=("zksync-build-base") + archs=("amd64" "arm64") + + for img in "${images[@]}"; do + multiarch_tag="matterlabs/zksync-build-base:latest" + individual_images=() + + for arch in "${archs[@]}"; do + TAG="$IMAGE_TAG_SUFFIX" + docker pull matterlabs/zksync-build-base:${TAG}-${arch} --platform linux/${arch} + individual_images+=("matterlabs/zksync-build-base:${TAG}-${arch}") + done + + docker buildx imagetools create --tag "${multiarch_tag}" "${individual_images[@]}" + done + + - name: Create and push multi-arch manifests for GitHub Container Registry + shell: bash + run: | + images=("zksync-build-base") + archs=("amd64" "arm64") + + for img in "${images[@]}"; do + multiarch_tag="ghcr.io/${{ github.repository_owner }}/zksync-build-base:latest" + individual_images=() + + for arch in "${archs[@]}"; do + TAG="$IMAGE_TAG_SUFFIX" + docker pull ghcr.io/${{ github.repository_owner }}/zksync-build-base:${TAG}-${arch} --platform linux/${arch} + individual_images+=("ghcr.io/${{ github.repository_owner }}/zksync-build-base:${TAG}-${arch}") + done + + docker buildx imagetools create --tag "${multiarch_tag}" "${individual_images[@]}" + done + + - name: Create and push multi-arch manifests for Google Artifact Registry + shell: bash + run: | + images=("zksync-build-base") + archs=("amd64" "arm64") + + for img in "${images[@]}"; do + multiarch_tag="us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/zksync-build-base:latest" + individual_images=() + + for arch in "${archs[@]}"; do + TAG="$IMAGE_TAG_SUFFIX" + docker pull us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/zksync-build-base:${TAG}-${arch} --platform linux/${arch} + 
individual_images+=("us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/zksync-build-base:${TAG}-${arch}") + done + + docker buildx imagetools create --tag "${multiarch_tag}" "${individual_images[@]}" + done diff --git a/.github/workflows/build-contract-verifier-template.yml b/.github/workflows/build-contract-verifier-template.yml index db7c4ba387f..bb385b2797b 100644 --- a/.github/workflows/build-contract-verifier-template.yml +++ b/.github/workflows/build-contract-verifier-template.yml @@ -113,18 +113,15 @@ jobs: ci_run git config --global --add safe.directory /usr/src/zksync/sdk/binaryen ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts ci_run git config --global --add safe.directory /usr/src/zksync/contracts - ci_run zk || true - ci_run yarn zk build + ci_run ./bin/zkt || true + ci_run ./bin/zk || true ci_run run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key - name: build contracts if: env.BUILD_CONTRACTS == 'true' run: | - ci_run zk run yarn ci_run cp etc/tokens/{test,localhost}.json - ci_run zk compiler all - ci_run zk contract build - ci_run zk f yarn run l2-contracts build + ci_run zk_supervisor contracts - name: Login to Docker registries if: ${{ inputs.action == 'push' }} diff --git a/.github/workflows/build-core-template.yml b/.github/workflows/build-core-template.yml index 7e5dcc10a93..dc46c4ba95e 100644 --- a/.github/workflows/build-core-template.yml +++ b/.github/workflows/build-core-template.yml @@ -71,11 +71,15 @@ jobs: if [ $(jq length <<<"$tags") -eq 0 ]; then echo "No tag found on all pages." echo "BUILD_CONTRACTS=true" >> "$GITHUB_ENV" + # TODO Remove it when we migrate to foundry inside contracts repository + mkdir -p contracts/l1-contracts/artifacts/ exit 0 fi filtered_tag=$(jq -r --arg commit_sha "$commit_sha" 'map(select(.commit.sha == $commit_sha)) | .[].name' <<<"$tags") if [[ ! 
-z "$filtered_tag" ]]; then echo "BUILD_CONTRACTS=false" >> "$GITHUB_ENV" + # TODO Remove it when we migrate to foundry inside contracts repository + mkdir -p contracts/l1-contracts/out break fi ((page++)) @@ -122,18 +126,15 @@ jobs: ci_run git config --global --add safe.directory /usr/src/zksync/sdk/binaryen ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts ci_run git config --global --add safe.directory /usr/src/zksync/contracts - ci_run zk || true - ci_run yarn zk build + ci_run ./bin/zk || true + ci_run ./bin/zkt || true ci_run run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key - name: build contracts if: env.BUILD_CONTRACTS == 'true' run: | - ci_run zk run yarn ci_run cp etc/tokens/{test,localhost}.json - ci_run zk compiler all - ci_run zk contract build - ci_run zk f yarn run l2-contracts build + ci_run zk_supervisor contracts - name: Login to Docker registries if: ${{ inputs.action == 'push' }} diff --git a/.github/workflows/build-docker-from-tag.yml b/.github/workflows/build-docker-from-tag.yml index 791f4411747..29d26a713d8 100644 --- a/.github/workflows/build-docker-from-tag.yml +++ b/.github/workflows/build-docker-from-tag.yml @@ -102,7 +102,7 @@ jobs: with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }}-avx512 ERA_BELLMAN_CUDA_RELEASE: ${{ vars.ERA_BELLMAN_CUDA_RELEASE }} - CUDA_ARCH: "60;70;75;89" + CUDA_ARCH: "60;70;75;80;89" WITNESS_GENERATOR_RUST_FLAGS: "-Ctarget_feature=+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl" secrets: DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} diff --git a/.github/workflows/build-local-node-docker.yml b/.github/workflows/build-local-node-docker.yml index 7f36f28f286..f664bfaaa00 100644 --- a/.github/workflows/build-local-node-docker.yml +++ b/.github/workflows/build-local-node-docker.yml @@ -16,7 +16,7 @@ on: jobs: build-images: name: Local Node - Build and Push Docker Image - runs-on: [matterlabs-ci-runner-high-performance] + runs-on: [ matterlabs-ci-runner-high-performance ] steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 with: @@ -61,9 +61,9 @@ jobs: ci_run git config --global --add safe.directory /usr/src/zksync/contracts ci_run zk + ci_run zkt ci_run cp etc/tokens/{test,localhost}.json - ci_run zk compiler all - ci_run zk contract build + ci_run zk_supervisor contracts - name: update-image run: | diff --git a/.github/workflows/build-runtime-base.yml b/.github/workflows/build-runtime-base.yml new file mode 100644 index 00000000000..eaec05bc6bc --- /dev/null +++ b/.github/workflows/build-runtime-base.yml @@ -0,0 +1,66 @@ +name: Build zksync-runtime-base Docker image +on: + workflow_dispatch: + inputs: + repo_ref: + description: "git reference of the zksync-era to build" + required: true + default: main +jobs: + build-images: + name: Build and Push Docker Images + runs-on: matterlabs-ci-runner-high-performance + outputs: + image_tag_sha: ${{ steps.get-sha.outputs.image_tag_sha }} + # Needed to push to Gihub Package Registry + permissions: + packages: write + contents: read + env: + REPO_REF: ${{ github.event.inputs.repo_ref }} + strategy: + matrix: + name: [ runtime-base ] + image_name: [ zksync-runtime-base ] + + steps: + - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 + with: + submodules: "recursive" + + - name: Login to google container registry + run: | + gcloud auth configure-docker us-docker.pkg.dev -q + + - name: Login to GitHub Container Registry + uses: 
docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Get tag + id: get-sha + run: | + echo IMAGE_TAG_SHA=$(git rev-parse --short HEAD) >> $GITHUB_ENV + echo image_tag_sha=$(git rev-parse --short HEAD) >> $GITHUB_OUTPUT + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + + - name: Set up QEMU + uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0 + + - name: Build and push + uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 + with: + push: true + context: . + platforms: arm64, amd64 + file: docker/${{ matrix.name }}/Dockerfile + labels: | + org.opencontainers.image.source=https://github.com/matter-labs/zksync-era + org.opencontainers.image.licenses="MIT OR Apache-2.0" + tags: | + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.image_name }}:latest + ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:latest diff --git a/.github/workflows/ci-common-reusable.yml b/.github/workflows/ci-common-reusable.yml index d4667a273ef..3d28df592e9 100644 --- a/.github/workflows/ci-common-reusable.yml +++ b/.github/workflows/ci-common-reusable.yml @@ -18,21 +18,24 @@ jobs: echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH echo IN_DOCKER=1 >> .env + echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env + echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env + echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env + echo "RUSTC_WRAPPER=sccache" >> .env + echo "RUSTFLAGS=--cfg=no_cuda" >> .env - name: Start services run: | run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull mkdir -p ./volumes/postgres docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d zk postgres - ci_run sccache --start-server - name: Init run: | - ci_run zk - ci_run run_retried rustup show - ci_run zk db setup + ci_run zkt # This does both linting and "building". 
We're using `zk_supervisor lint` as it's common practice within our repo
      # `zk_supervisor lint -t rs` = cargo clippy, which does cargo check behind the scenes, which is a lightweight version of cargo build
      - name: Lints
-       run: ci_run zk lint prover
+       run: ci_run zk_supervisor lint -t rs --check
+
diff --git a/.github/workflows/ci-core-lint-reusable.yml b/.github/workflows/ci-core-lint-reusable.yml
index e46a67dd8af..85e4be3ff5e 100644
--- a/.github/workflows/ci-core-lint-reusable.yml
+++ b/.github/workflows/ci-core-lint-reusable.yml
@@ -15,13 +15,17 @@ jobs:
          echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
          echo $(pwd)/bin >> $GITHUB_PATH
          echo IN_DOCKER=1 >> .env
+         echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env
+         echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env
+         echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env
+         echo "RUSTC_WRAPPER=sccache" >> .env
+         echo "RUSTFLAGS=--cfg=no_cuda" >> .env
          echo "prover_url=postgres://postgres:notsecurepassword@localhost:5432/zksync_local_prover" >> $GITHUB_ENV
          echo "core_url=postgres://postgres:notsecurepassword@localhost:5432/zksync_local" >> $GITHUB_ENV

       - name: Start services
         run: |
           ci_localnet_up
-          ci_run sccache --start-server

       - name: Build
         run: |
diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml
index c6d56b4b115..a960ac3934a 100644
--- a/.github/workflows/ci-core-reusable.yml
+++ b/.github/workflows/ci-core-reusable.yml
@@ -9,9 +9,10 @@ on:
         default: '[{ "zksolc": ["1.3.14", "1.3.16", "1.3.17", "1.3.1", "1.3.7", "1.3.18", "1.3.19", "1.3.21"] } , { "zkvyper": ["1.3.13"] }]'

 jobs:
-  # lint:
-  #   name: lint
-  #   uses: ./.github/workflows/ci-core-lint-reusable.yml
+  lint:
+    name: lint
+    uses: ./.github/workflows/ci-core-lint-reusable.yml
+
   unit-tests:
     runs-on: [ matterlabs-ci-runner-highmem-long ]
@@ -26,6 +27,10 @@
         echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
         echo $(pwd)/bin >> $GITHUB_PATH
         echo IN_DOCKER=1 >> .env
+        echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env
+        echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env
+        echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env
+        echo "RUSTC_WRAPPER=sccache" >> .env

       # TODO: Remove after we upgrade hardhat-plugins
       - name: pre-download compilers
@@ -48,7 +53,6 @@
       - name: Start services
         run: |
           ci_localnet_up
-          ci_run sccache --start-server

       - name: Init
         run: |
@@ -82,11 +86,15 @@
  #     submodules: "recursive"
  #     fetch-depth: 0

-  #  - name: Setup environment
-  #    run: |
-  #      echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
-  #      echo $(pwd)/bin >> $GITHUB_PATH
-  #      echo IN_DOCKER=1 >> .env
+      # - name: Setup environment
+      #   run: |
+      #     echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
+      #     echo $(pwd)/bin >> $GITHUB_PATH
+      #     echo IN_DOCKER=1 >> .env
+      #     echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env
+      #     echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env
+      #     echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env
+      #     echo "RUSTC_WRAPPER=sccache" >> .env

  #    - name: Loadtest configuration
  #      run: |
@@ -109,31 +117,29 @@
  #     ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts
  #     ci_run git config --global --add safe.directory /usr/src/zksync/contracts

-  #    ci_run ./bin/zkt
-  #    ci_run zk_inception chain create \
-  #      --chain-name legacy \
-  #      --chain-id sequential \
-  #      --prover-mode no-proofs \
-  #      --wallet-creation localhost \
-  #      --l1-batch-commit-data-generator-mode rollup \
-  #      
--base-token-address 0x0000000000000000000000000000000000000001 \ - # --base-token-price-nominator 1 \ - # --base-token-price-denominator 1 \ - # --set-as-default false \ - # --ignore-prerequisites \ - # --legacy-bridge - - # ci_run zk_inception ecosystem init --dev --verbose - # ci_run zk_supervisor contracts --test-contracts - - # # `sleep 60` because we need to wait until server added all the tokens - # - name: Run server - # run: | - # ci_run sed -i -e 's/mode: FULL/mode: LIGHTWEIGHT/' chains/legacy/configs/general.yaml - # ci_run sed -i -e 's/state_keeper_fast_vm_mode:.*/state_keeper_fast_vm_mode: ${{ matrix.vm_mode }}/' chains/legacy/configs/general.yaml - # ci_run sed -i -e 's/delay_interval:.*/delay_interval: 50/' chains/legacy/configs/general.yaml - # ci_run zk_inception server --uring --chain=legacy --components api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads &>server.log & - # ci_run sleep 60 + # ci_run ./bin/zkt + # ci_run zk_inception chain create \ + # --chain-name legacy \ + # --chain-id sequential \ + # --prover-mode no-proofs \ + # --wallet-creation localhost \ + # --l1-batch-commit-data-generator-mode rollup \ + # --base-token-address 0x0000000000000000000000000000000000000001 \ + # --base-token-price-nominator 1 \ + # --base-token-price-denominator 1 \ + # --set-as-default false \ + # --ignore-prerequisites \ + # --legacy-bridge + + # ci_run zk_inception ecosystem init --dev --verbose + # ci_run zk_supervisor contracts --test-contracts + + # # `sleep 60` because we need to wait until server added all the tokens + # - name: Run server + # run: | + # ci_run zk_supervisor config-writer --path ${{ matrix.vm_mode == 'NEW' && 'etc/env/file_based/overrides/tests/loadtest-new.yaml' || 'etc/env/file_based/overrides/tests/loadtest-old.yaml' }} --chain legacy + # ci_run zk_inception server --uring --chain=legacy --components api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads &>server.log & + # ci_run sleep 60 # - name: Perform loadtest # run: ci_run zk_supervisor t loadtest -v --chain=legacy @@ -148,332 +154,196 @@ jobs: # ci_run sccache --show-stats || true # ci_run cat /tmp/sccache_log.txt || true - integration: - name: Integration (consensus=${{ matrix.consensus }}, base_token=${{ matrix.base_token }}, gateway = ${{ matrix.gateway }}) - strategy: - # In matrix jobs, fail-fast is true by default. - # To be consistent with the rest of the workflow we disable it explicitly. 
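
[Editor's note: the commented-out loadtest steps above, and the integration steps below, start the server in the background and then `sleep` for a fixed interval ("sleep 60", "sleep 5"). Where a fixed sleep proves flaky, a readiness poll is an alternative. A minimal sketch in bash, assuming the server answers JSON-RPC on localhost:3050; the URL, port, and timeout are illustrative assumptions, not values taken from this diff:

    # Poll the server's JSON-RPC endpoint instead of sleeping a fixed interval.
    # URL and timeout are illustrative assumptions.
    wait_for_server() {
      local url="${1:-http://localhost:3050}"
      local deadline=$((SECONDS + 120))
      until curl -sf -X POST -H 'Content-Type: application/json' \
          -d '{"jsonrpc":"2.0","id":1,"method":"eth_chainId","params":[]}' \
          "$url" > /dev/null; do
        (( SECONDS < deadline )) || { echo "server not ready after 120s" >&2; return 1; }
        sleep 2
      done
    }

Called as `wait_for_server` right after launching the server, this returns as soon as the RPC endpoint responds, instead of always paying the full sleep.]
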
-        fail-fast: false
-      matrix:
-        # FIXME: support consensus tests for SL migration
-        consensus: [false]
-        # FIXME: support custom tokens
-        base_token: ["Eth"]
-        # FIXME: run tests even when not using synclayer
-        gateway: [true]
-    env:
-      SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip,vm_playground,da_dispatcher${{ matrix.consensus && ',consensus' || '' }}${{ matrix.base_token == 'Custom' && ',base_token_ratio_persister' || '' }}"
-
-    runs-on: [ matterlabs-ci-runner-highmem-long ]
+  integration-tests:
+    runs-on: [ matterlabs-ci-runner-ultra-performance ]

     steps:
       - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4
         with:
           submodules: "recursive"
           fetch-depth: 0
+
       - name: Setup environment
         run: |
           echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
           echo $(pwd)/bin >> $GITHUB_PATH
           echo IN_DOCKER=1 >> .env
-          echo RUN_CONTRACT_VERIFICATION_TEST=true >> .env
-          echo ZKSYNC_DEBUG_LOGS=true >> .env
-
-      - name: Download zksolc/solc and zkvyper/vyper
-        run: |
-          sudo apt update && sudo apt install wget -y
-
-          mkdir -p $(pwd)/etc/solc-bin/0.8.23
-          wget https://github.com/ethereum/solc-bin/raw/gh-pages/linux-amd64/solc-linux-amd64-v0.8.23%2Bcommit.f704f362
-          mv solc-linux-amd64-v0.8.23+commit.f704f362 $(pwd)/etc/solc-bin/0.8.23/solc
-          chmod +x $(pwd)/etc/solc-bin/0.8.23/solc
-
-          mkdir -p $(pwd)/etc/solc-bin/zkVM-0.8.23-1.0.0
-          wget https://github.com/matter-labs/era-solidity/releases/download/0.8.23-1.0.0/solc-linux-amd64-0.8.23-1.0.0 -O $(pwd)/etc/solc-bin/zkVM-0.8.23-1.0.0/solc
-          chmod +x $(pwd)/etc/solc-bin/zkVM-0.8.23-1.0.0/solc
-
-          mkdir -p $(pwd)/etc/zksolc-bin/v1.3.21
-          wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-v1.3.21
-          mv zksolc-linux-amd64-musl-v1.3.21 $(pwd)/etc/zksolc-bin/v1.3.21/zksolc
-          chmod +x $(pwd)/etc/zksolc-bin/v1.3.21/zksolc
-
-          mkdir -p $(pwd)/etc/vyper-bin/0.3.10
-          wget -O vyper0.3.10 https://github.com/vyperlang/vyper/releases/download/v0.3.10/vyper.0.3.10%2Bcommit.91361694.linux
-          mv vyper0.3.10 $(pwd)/etc/vyper-bin/0.3.10/vyper
-          chmod +x $(pwd)/etc/vyper-bin/0.3.10/vyper
-
-          mkdir -p $(pwd)/etc/zkvyper-bin/v1.3.13
-          wget https://github.com/matter-labs/zkvyper-bin/raw/main/linux-amd64/zkvyper-linux-amd64-musl-v1.3.13
-          mv zkvyper-linux-amd64-musl-v1.3.13 $(pwd)/etc/zkvyper-bin/v1.3.13/zkvyper
-          chmod +x $(pwd)/etc/zkvyper-bin/v1.3.13/zkvyper
+          echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env
+          echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env
+          echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env
+          echo "RUSTC_WRAPPER=sccache" >> .env
+          echo RUN_CONTRACT_VERIFICATION_TEST=true >> $GITHUB_ENV

       - name: Start services
         run: |
           ci_localnet_up
-          ci_run pre_download_compilers.sh
-          ci_run sccache --start-server

-      - name: Init
+      - name: Build zk_toolbox
+        run: ci_run bash -c "./bin/zkt"
+
+      - name: Create log directories
+        run: |
+          SERVER_LOGS_DIR=logs/server
+          INTEGRATION_TESTS_LOGS_DIR=logs/integration_tests
+          INTEGRATION_TESTS_EN_LOGS_DIR=logs/integration_tests/en
+          SNAPSHOT_RECOVERY_LOGS_DIR=logs/snapshot_recovery/
+          GENESIS_RECOVERY_LOGS_DIR=logs/genesis_recovery/
+          EXTERNAL_NODE_LOGS_DIR=logs/external_node
+          REVERT_LOGS_DIR=logs/revert
+
+          mkdir -p $SERVER_LOGS_DIR
+          mkdir -p $INTEGRATION_TESTS_LOGS_DIR
+          mkdir -p $INTEGRATION_TESTS_EN_LOGS_DIR
+          mkdir -p $SNAPSHOT_RECOVERY_LOGS_DIR
+          mkdir -p $GENESIS_RECOVERY_LOGS_DIR
+          mkdir -p $EXTERNAL_NODE_LOGS_DIR
+          mkdir -p $REVERT_LOGS_DIR
+
+          echo "SERVER_LOGS_DIR=$SERVER_LOGS_DIR" >> $GITHUB_ENV
+          echo "INTEGRATION_TESTS_LOGS_DIR=$INTEGRATION_TESTS_LOGS_DIR" >> $GITHUB_ENV
+          echo "INTEGRATION_TESTS_EN_LOGS_DIR=$INTEGRATION_TESTS_EN_LOGS_DIR" >> $GITHUB_ENV
+          echo "SNAPSHOT_RECOVERY_LOGS_DIR=$SNAPSHOT_RECOVERY_LOGS_DIR" >> $GITHUB_ENV
+          echo "GENESIS_RECOVERY_LOGS_DIR=$GENESIS_RECOVERY_LOGS_DIR" >> $GITHUB_ENV
+          echo "EXTERNAL_NODE_LOGS_DIR=$EXTERNAL_NODE_LOGS_DIR" >> $GITHUB_ENV
+          echo "REVERT_LOGS_DIR=$REVERT_LOGS_DIR" >> $GITHUB_ENV
+
+# FIXME: restore tests for all the various types of chains
+      - name: Initialize ecosystem
         run: |
           ci_run git config --global --add safe.directory /usr/src/zksync
-          ci_run git config --global --add safe.directory /usr/src/zksync/sdk/binaryen
           ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts
           ci_run git config --global --add safe.directory /usr/src/zksync/contracts
-          ci_run zk
-          ci_run run_retried rustup show
-          if [[ "${{ matrix.deployment_mode }}" == "Validium" ]]; then
-            ci_run zk env dev_validium_docker
-            ci_run zk config compile dev_validium_docker
-          else
-            ci_run zk config compile
-          fi
-          ci_run zk init ${{ matrix.base_token == 'Custom' && '--base-token-name BAT' || ''}} ${{ matrix.deployment_mode == 'Validium' && '--validium-mode' || ''}}
-
-      # `sleep 5` because we need to wait until server started properly
-      - name: Run server
-        run: |
-          ci_run zk server --components=$SERVER_COMPONENTS &>server.log &
-          ci_run sleep 5
-
-      - name: Run contract verifier
+          ci_run zk_inception ecosystem init --deploy-paymaster --deploy-erc20 \
+            --deploy-ecosystem --l1-rpc-url=http://localhost:8545 \
+            --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \
+            --server-db-name=zksync_server_localhost_era \
+            --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \
+            --prover-db-name=zksync_prover_localhost_era \
+            --ignore-prerequisites --verbose \
+            --observability=false
+
+      - name: Read Custom Token address and set as environment variable
         run: |
-          ci_run zk contract_verifier &>contract_verifier.log &
-          ci_run sleep 2
+          address=$(awk -F": " '/tokens:/ {found_tokens=1} found_tokens && /DAI:/ {found_dai=1} found_dai && /address:/ {print $2; exit}' ./configs/erc20.yaml)
+          echo "address=$address"
+          echo "address=$address" >> $GITHUB_ENV

-      # FIXME: do this only when the sync layer is true
-      - name: Prepare the server to be the synclayer
+      - name: Create and initialize Validium chain
         run: |
-          ci_run zk dev2 supply-rich-wallets
-          ci_run zk contract prepare-sync-layer
-          ci_run zk contract register-sync-layer-counterpart
-
-      # Note that the server will live only for 120s. Since we'll need to restart it
-      # before migrating it to the sync layer, we'll need to turn it off and update the config
-      #
-      # FIXME: The `zk server --clear-l1-txs-history` is needed to clear the history of transactions on the server side.
-      # It is a workaround and should be removed before prod.
- - name: Prepare launch sync-layer based chain + ci_run zk_inception chain create \ + --chain-name validium \ + --chain-id sequential \ + --prover-mode no-proofs \ + --wallet-creation localhost \ + --l1-batch-commit-data-generator-mode validium \ + --base-token-address 0x0000000000000000000000000000000000000001 \ + --base-token-price-nominator 1 \ + --base-token-price-denominator 1 \ + --set-as-default false \ + --ignore-prerequisites + + ci_run zk_inception chain init \ + --deploy-paymaster \ + --l1-rpc-url=http://localhost:8545 \ + --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + --server-db-name=zksync_server_localhost_validium \ + --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + --prover-db-name=zksync_prover_localhost_validium \ + --port-offset 2000 \ + --chain validium + + - name: Create and initialize chain with Custom Token run: | - ci_run zk config prepare-l1-hyperchain --env-name test-chain --chain-id 320 - ci_run zk env test-chain - ci_run zk config compile test-chain --diff 5 - ci_run zk init hyper --skip-contract-compilation-override - ci_run zk contract migrate-to-sync-layer - ci_run zk contract prepare-sync-layer-validators - ci_run zk contract update-config-for-sync-layer - ci_run sleep 120 - ci_run zk server >> server2.log 2>&1 & - ci_run sleep 5 - - - name: Server integration tests - run: ci_run zk test i server - - # FIXME: restore snapshot test - # - name: Snapshot recovery test - # # We use `yarn` directly because the test launches `zk` commands in both server and EN envs. - # # An empty topmost environment helps avoid a mess when redefining env vars shared between both envs - # # (e.g., DATABASE_URL). - # # - # # Since `base_token` doesn't meaningfully influence the test, we use it as a flag for - # # enabling / disabling tree during pruning. 
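
[Editor's note: the `Read Custom Token address` step above extracts the DAI address from the generated `configs/erc20.yaml` with a small awk state machine: the first pattern arms on `tokens:`, the second on the `DAI:` entry after it, and the third prints the next `address:` value and exits. The same address later feeds `--base-token-address ${{ env.address }}` in the custom_token chain. A self-contained way to check the program; the sample YAML below is illustrative, not the real generated file:

    # Illustrative stand-in for the generated ./configs/erc20.yaml.
    cat > /tmp/erc20-sample.yaml <<'EOF'
    tokens:
      DAI:
        name: DAI
        symbol: DAI
        decimals: 18
        address: 0x5C221E77624690FFF6dd741493D735A17716c26B
    EOF

    awk -F": " '/tokens:/ {found_tokens=1}
      found_tokens && /DAI:/ {found_dai=1}
      found_dai && /address:/ {print $2; exit}' /tmp/erc20-sample.yaml
    # prints: 0x5C221E77624690FFF6dd741493D735A17716c26B

The `-F": "` separator splits each line on colon-space, so `$2` is everything after the key, and `exit` stops at the first match.]
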
- # run: | - # if [[ "${{ matrix.deployment_mode }}" == "Validium" ]]; then - # ci_run zk config compile ext-node-validium - # ci_run zk config compile ext-node-validium-docker - # fi - # ENABLE_CONSENSUS=${{ matrix.consensus }} \ - # DEPLOYMENT_MODE=${{ matrix.deployment_mode }} \ - # SNAPSHOTS_CREATOR_VERSION=${{ matrix.deployment_mode == 'Validium' && '0' || '1' }} \ - # DISABLE_TREE_DURING_PRUNING=${{ matrix.base_token == 'Eth' }} \ - # ETH_CLIENT_WEB3_URL="http://localhost:8545" \ - # PASSED_ENV_VARS="ENABLE_CONSENSUS,DEPLOYMENT_MODE,DISABLE_TREE_DURING_PRUNING,SNAPSHOTS_CREATOR_VERSION,ETH_CLIENT_WEB3_URL" \ - # ci_run yarn recovery-test snapshot-recovery-test - - # - name: Genesis recovery test - # run: | - # ENABLE_CONSENSUS=${{ matrix.consensus }} \ - # DEPLOYMENT_MODE=${{ matrix.deployment_mode }} \ - # ETH_CLIENT_WEB3_URL="http://localhost:8545" \ - # PASSED_ENV_VARS="ENABLE_CONSENSUS,DEPLOYMENT_MODE,ETH_CLIENT_WEB3_URL" \ - # ci_run yarn recovery-test genesis-recovery-test - - # FIXME: restore fee proj tests - # - name: Fee projection tests - # run: ci_run zk test i fees - - # FIXME: restore revert tests - # - name: Run revert test - # run: | - # ci_run pkill zksync_server || true - # ci_run sleep 2 - # ENABLE_CONSENSUS=${{ matrix.consensus }} DEPLOYMENT_MODE=Rollup PASSED_ENV_VARS="ENABLE_CONSENSUS,DEPLOYMENT_MODE" ci_run zk test i revert - - # FIXME: restore upgrade test for SL - # # This test should be the last one as soon as it - # # finished bootloader will be different - # - name: Run upgrade test - # run: | - # ci_run pkill zksync_server || true - # ci_run sleep 10 - # ci_run zk test i upgrade - - - name: Show server.log logs - if: always() - run: ci_run cat server.log || true - - - name: Show server2.log logs - if: always() - run: ci_run cat server2.log || true - - - name: Show contract_verifier.log logs - if: always() - run: ci_run cat contract_verifier.log || true - - - name: Show snapshot-creator.log logs - if: always() - run: ci_run cat core/tests/recovery-test/snapshot-creator.log || true - - name: Show snapshot-recovery.log logs - if: always() - run: ci_run cat core/tests/recovery-test/snapshot-recovery.log || true - - name: Show genesis-recovery.log logs - if: always() - run: ci_run cat core/tests/recovery-test/genesis-recovery.log || true - - - name: Show revert.log logs - if: always() - run: ci_run cat logs/revert/default/server.log || true - - - name: Show upgrade.log logs - if: always() - run: ci_run cat core/tests/upgrade-test/upgrade.log || true - - - name: Show fee-projection.log logs - if: always() - run: ci_run cat core/tests/ts-integration/fees.log || true - - - name: Show sccache logs - if: always() + ci_run zk_inception chain create \ + --chain-name custom_token \ + --chain-id sequential \ + --prover-mode no-proofs \ + --wallet-creation localhost \ + --l1-batch-commit-data-generator-mode rollup \ + --base-token-address ${{ env.address }} \ + --base-token-price-nominator 3 \ + --base-token-price-denominator 2 \ + --set-as-default false \ + --ignore-prerequisites + + ci_run zk_inception chain init \ + --deploy-paymaster \ + --l1-rpc-url=http://localhost:8545 \ + --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + --server-db-name=zksync_server_localhost_custom_token \ + --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + --prover-db-name=zksync_prover_localhost_custom_token \ + --port-offset 3000 \ + --chain custom_token + + - name: Initialize gateway chain run: | - ci_run sccache --show-stats || true - ci_run 
cat /tmp/sccache_log.txt || true - - # FIXME: restore tests with EN and sync layer - # external-node: - # name: External node (consensus=${{ matrix.consensus }}, base_token=${{ matrix.base_token }}, deployment_mode=${{ matrix.deployment_mode }}) - # strategy: - # fail-fast: false - # matrix: - # consensus: [ false, true ] - # base_token: [ "Eth", "Custom" ] - # deployment_mode: [ "Rollup", "Validium" ] - # runs-on: [ matterlabs-ci-runner-highmem-long ] - - # env: - # SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip,da_dispatcher${{ matrix.consensus && ',consensus' || '' }}${{ matrix.base_token == 'Custom' && ',base_token_ratio_persister' || '' }}" - # EXT_NODE_FLAGS: "${{ matrix.consensus && '-- --enable-consensus' || '' }}" - - # steps: - # - name: Checkout code # Checks out the repository under $GITHUB_WORKSPACE, so the job can access it. - # uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 - # with: - # submodules: "recursive" - # fetch-depth: 0 - - # - name: Setup environment - # run: | - # echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV - # echo $(pwd)/bin >> $GITHUB_PATH - # echo IN_DOCKER=1 >> .env - # echo RUN_CONTRACT_VERIFICATION_TEST=true >> .env - # echo ZKSYNC_DEBUG_LOGS=true >> .env - - # - name: Start services - # run: | - # ci_localnet_up - # ci_run pre_download_compilers.sh - # ci_run sccache --start-server - - # - name: Init - # run: | - # ci_run git config --global --add safe.directory /usr/src/zksync - # ci_run git config --global --add safe.directory /usr/src/zksync/sdk/binaryen - # ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts - # ci_run git config --global --add safe.directory /usr/src/zksync/contracts - # ci_run zk - # ci_run run_retried rustup show - # if [[ "${{ matrix.deployment_mode }}" == "Rollup" ]]; then - # ci_run zk config compile - # elif [[ "${{ matrix.deployment_mode }}" == "Validium" ]]; then - # ci_run zk env dev_validium_docker - # ci_run zk config compile dev_validium_docker - # fi - # ci_run zk init ${{ matrix.base_token == 'Custom' && '--base-token-name BAT' || ''}} ${{ matrix.deployment_mode == 'Validium' && '--validium-mode' || ''}} - - # # `sleep 30` because we need to wait until server started properly - # - name: Run server - # run: | - # ci_run zk server --components=$SERVER_COMPONENTS &>>server.log & - # ci_run sleep 30 - - # - name: Run external node - # run: | - # ci_run zk env ext-node-docker - # ci_run zk db setup - # ci_run zk external-node $EXT_NODE_FLAGS &>>ext-node.log & - # ci_run sleep 30 + ci_run zk_inception chain create \ + --chain-name gateway \ + --chain-id 505 \ + --prover-mode no-proofs \ + --wallet-creation localhost \ + --l1-batch-commit-data-generator-mode rollup \ + --base-token-address 0x0000000000000000000000000000000000000001 \ + --base-token-price-nominator 1 \ + --base-token-price-denominator 1 \ + --set-as-default false \ + --ignore-prerequisites + + ci_run zk_inception chain init \ + --deploy-paymaster \ + --l1-rpc-url=http://localhost:8545 \ + --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + --server-db-name=zksync_server_localhost_gateway \ + --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + --prover-db-name=zksync_prover_localhost_gateway \ + --port-offset 4000 \ + --chain gateway \ + + ci_run zk_inception chain convert-to-gateway --chain gateway --ignore-prerequisites + + - name: Run gateway + run: | + ci_run zk_inception server 
--ignore-prerequisites --chain gateway &> ${{ env.SERVER_LOGS_DIR }}/gateway.log & - # - name: Integration tests - # run: ci_run zk test i server --testPathIgnorePatterns 'contract-verification|snapshots-creator' + ci_run sleep 5 - # - name: Run revert test - # run: | - # ENABLE_CONSENSUS=${{ matrix.consensus }} \ - # DEPLOYMENT_MODE=${{ matrix.deployment_mode }} \ - # PASSED_ENV_VARS="ENABLE_CONSENSUS,DEPLOYMENT_MODE" \ - # ci_run zk test i revert-en - - # # test terminates the nodes, so we restart them. - # if [[ "${{ matrix.deployment_mode }}" == "Rollup" ]]; then - # ZKSYNC_ENV=docker ci_run zk server --components=$SERVER_COMPONENTS &>>server.log & - # ZKSYNC_ENV=ext-node-docker ci_run zk external-node $EXT_NODE_FLAGS &>>ext-node.log & - # elif [[ "${{ matrix.deployment_mode }}" == "Validium" ]]; then - # ZKSYNC_ENV=dev_validium_docker ci_run zk server --components=$SERVER_COMPONENTS &>>server.log & - # ZKSYNC_ENV=ext-node-validium-docker ci_run zk external-node $EXT_NODE_FLAGS &>>ext-node.log & - # fi - # ci_run sleep 30 - - # - name: Run upgrade test - # run: | - # ci_run zk env docker - # CHECK_EN_URL="http://0.0.0.0:3060" ci_run zk test i upgrade + - name: Migrate chains to gateway + run: | + ci_run zk_inception chain migrate-to-gateway --chain era --gateway-chain-name gateway + ci_run zk_inception chain migrate-to-gateway --chain validium --gateway-chain-name gateway + ci_run zk_inception chain migrate-to-gateway --chain custom_token --gateway-chain-name gateway - # - name: Show server.log logs - # if: always() - # run: ci_run cat server.log || true + - name: Build test dependencies + run: | + ci_run zk_supervisor test build - # - name: Show ext-node.log logs - # if: always() - # run: ci_run cat ext-node.log || true + - name: Run chains + run: | + ci_run zk_inception server --ignore-prerequisites --chain era &> ${{ env.SERVER_LOGS_DIR }}/rollup.log & + ci_run zk_inception server --ignore-prerequisites --chain validium &> ${{ env.SERVER_LOGS_DIR }}/validium.log & + ci_run zk_inception server --ignore-prerequisites --chain custom_token &> ${{ env.SERVER_LOGS_DIR }}/custom_token.log & - # - name: Show contract_verifier.log logs - # if: always() - # run: ci_run cat ext-node.log || true + ci_run sleep 5 - # - name: Show revert logs (main node) - # if: always() - # run: ci_run cat logs/revert/en/default/server.log || true + - name: Run integration tests + run: | + ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain era &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/rollup.log & + PID1=$! - # - name: Show revert logs (EN) - # if: always() - # run: ci_run cat logs/revert/en/default/external_node.log || true + ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain validium &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/validium.log & + PID2=$! - # - name: Show upgrade.log logs - # if: always() - # run: ci_run cat core/tests/upgrade-test/upgrade.log || true + ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain custom_token &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/custom_token.log & + PID3=$! 
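+          # Waiting on each saved PID (rather than a bare `wait`) propagates the
+          # exit status of every backgrounded test run, so the step fails if any
+          # of the three suites fails instead of silently succeeding.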
- # - name: Show sccache logs - # if: always() - # run: | - # ci_run sccache --show-stats || true - # ci_run cat /tmp/sccache_log.txt || true + wait $PID1 + wait $PID2 + wait $PID3 + - name: Upload logs + uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 + if: always() + with: + name: logs + path: logs diff --git a/.github/workflows/ci-docs-reusable.yml b/.github/workflows/ci-docs-reusable.yml index 2b8eea15a82..5b1d5a9bcdf 100644 --- a/.github/workflows/ci-docs-reusable.yml +++ b/.github/workflows/ci-docs-reusable.yml @@ -17,12 +17,16 @@ jobs: echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH echo IN_DOCKER=1 >> .env + echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env + echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env + echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env + echo "RUSTC_WRAPPER=sccache" >> .env - name: Start services run: | run_retried docker compose pull zk docker compose up -d zk - + - name: Build run: | ci_run ./bin/zkt diff --git a/.github/workflows/ci-prover-reusable.yml b/.github/workflows/ci-prover-reusable.yml index d1d4a9ab96b..6fa987b1cec 100644 --- a/.github/workflows/ci-prover-reusable.yml +++ b/.github/workflows/ci-prover-reusable.yml @@ -3,7 +3,7 @@ on: workflow_call: jobs: lint: - runs-on: [matterlabs-ci-runner-highmem-long] + runs-on: [ matterlabs-ci-runner-highmem-long ] env: RUNNER_COMPOSE_FILE: "docker-compose-runner-nightly.yml" @@ -17,24 +17,29 @@ jobs: echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH echo IN_DOCKER=1 >> .env + echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env + echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env + echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env + echo "RUSTC_WRAPPER=sccache" >> .env + echo "prover_url=postgres://postgres:notsecurepassword@localhost:5432/zksync_local_prover" >> $GITHUB_ENV + echo "core_url=postgres://postgres:notsecurepassword@localhost:5432/zksync_local" >> $GITHUB_ENV - name: Start services run: | run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull mkdir -p ./volumes/postgres docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d zk postgres - ci_run sccache --start-server - name: Init run: | - ci_run zk - ci_run zk db setup + ci_run zkt + ci_run zk_supervisor db setup --prover-url=${{ env.prover_url }} --core-url=${{ env.core_url }} - name: Formatting run: ci_run bash -c "cd prover && cargo fmt --check" unit-tests: - runs-on: [matterlabs-ci-runner-highmem-long] + runs-on: [ matterlabs-ci-runner-highmem-long ] env: RUNNER_COMPOSE_FILE: "docker-compose-runner-nightly.yml" @@ -48,21 +53,24 @@ jobs: echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH echo IN_DOCKER=1 >> .env + echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env + echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env + echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env + echo "RUSTC_WRAPPER=sccache" >> .env + echo "RUSTFLAGS=--cfg=no_cuda" >> .env - name: Start services run: | run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull mkdir -p ./volumes/postgres docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d zk postgres - ci_run sccache --start-server - name: Init run: | - ci_run zk + ci_run zkt ci_run run_retried rustup show - ci_run zk db setup - name: Prover unit tests run: | # Not all tests are enabled, since prover and setup_key_generator_and_server 
requires bellman-cuda to be present - ci_run zk test prover + ci_run zk_supervisor test prover diff --git a/.github/workflows/ci-zk-toolbox-reusable.yml b/.github/workflows/ci-zk-toolbox-reusable.yml deleted file mode 100644 index 2f69b2dfa12..00000000000 --- a/.github/workflows/ci-zk-toolbox-reusable.yml +++ /dev/null @@ -1,314 +0,0 @@ -name: Workflow template for CI jobs for Core Components -on: - workflow_call: - -env: - CLICOLOR: 1 - # We run multiple binaries in parallel, and by default they will try to utilize all the - # available CPUs. In tests, there is not much CPU-intensive work (rayon), but a lot of - # async work (tokio), so we prioritize tokio. - TOKIO_WORKER_THREADS: 4 - RAYON_NUM_THREADS: 2 - -jobs: - lint: - name: lint - uses: ./.github/workflows/ci-core-lint-reusable.yml - - tests: - runs-on: [ matterlabs-ci-runner-ultra-performance ] - steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 - with: - submodules: "recursive" - fetch-depth: 0 - - - - name: Setup environment - run: | - echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV - echo $(pwd)/bin >> $GITHUB_PATH - echo IN_DOCKER=1 >> .env - - - name: Start services - run: | - ci_localnet_up - ci_run sccache --start-server - - # - name: Build zk_toolbox - # run: ci_run bash -c "./bin/zkt" - - # - name: Create log directories - # run: | - # SERVER_LOGS_DIR=logs/server - # INTEGRATION_TESTS_LOGS_DIR=logs/integration_tests - # INTEGRATION_TESTS_EN_LOGS_DIR=logs/integration_tests/en - # SNAPSHOT_RECOVERY_LOGS_DIR=logs/integration_tests/en - # GENESIS_RECOVERY_LOGS_DIR=logs/integration_tests/en - # EXTERNAL_NODE_LOGS_DIR=logs/external_node - # REVERT_LOGS_DIR=logs/revert - - # mkdir -p $SERVER_LOGS_DIR - # mkdir -p $INTEGRATION_TESTS_LOGS_DIR - # mkdir -p $INTEGRATION_TESTS_EN_LOGS_DIR - # mkdir -p $SNAPSHOT_RECOVERY_LOGS_DIR - # mkdir -p $GENESIS_RECOVERY_LOGS_DIR - # mkdir -p $EXTERNAL_NODE_LOGS_DIR - # mkdir -p $REVERT_LOGS_DIR - - # echo "SERVER_LOGS_DIR=$SERVER_LOGS_DIR" >> $GITHUB_ENV - # echo "INTEGRATION_TESTS_LOGS_DIR=$INTEGRATION_TESTS_LOGS_DIR" >> $GITHUB_ENV - # echo "INTEGRATION_TESTS_EN_LOGS_DIR=$INTEGRATION_TESTS_EN_LOGS_DIR" >> $GITHUB_ENV - # echo "SNAPSHOT_RECOVERY_LOGS_DIR=$SNAPSHOT_RECOVERY_LOGS_DIR" >> $GITHUB_ENV - # echo "GENESIS_RECOVERY_LOGS_DIR=$GENESIS_RECOVERY_LOGS_DIR" >> $GITHUB_ENV - # echo "EXTERNAL_NODE_LOGS_DIR=$EXTERNAL_NODE_LOGS_DIR" >> $GITHUB_ENV - # echo "REVERT_LOGS_DIR=$REVERT_LOGS_DIR" >> $GITHUB_ENV - - # - name: Initialize ecosystem - # run: | - # ci_run git config --global --add safe.directory /usr/src/zksync - # ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts - # ci_run git config --global --add safe.directory /usr/src/zksync/contracts - - # ci_run zk_inception ecosystem init --deploy-paymaster --deploy-erc20 \ - # --deploy-ecosystem --l1-rpc-url=http://localhost:8545 \ - # --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - # --server-db-name=zksync_server_localhost_era \ - # --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - # --prover-db-name=zksync_prover_localhost_era \ - # --ignore-prerequisites --verbose \ - # --observability=false - - # - name: Read Custom Token address and set as environment variable - # run: | - # address=$(awk -F": " '/tokens:/ {found_tokens=1} found_tokens && /DAI:/ {found_dai=1} found_dai && /address:/ {print $2; exit}' ./configs/erc20.yaml) - # echo "address=$address" - # echo "address=$address" >> $GITHUB_ENV - - # - name: Create and initialize 
Validium chain - # run: | - # ci_run zk_inception chain create \ - # --chain-name validium \ - # --chain-id sequential \ - # --prover-mode no-proofs \ - # --wallet-creation localhost \ - # --l1-batch-commit-data-generator-mode validium \ - # --base-token-address 0x0000000000000000000000000000000000000001 \ - # --base-token-price-nominator 1 \ - # --base-token-price-denominator 1 \ - # --set-as-default false \ - # --ignore-prerequisites - - # ci_run zk_inception chain init \ - # --deploy-paymaster \ - # --l1-rpc-url=http://localhost:8545 \ - # --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - # --server-db-name=zksync_server_localhost_validium \ - # --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - # --prover-db-name=zksync_prover_localhost_validium \ - # --port-offset 2000 \ - # --chain validium - - # - name: Create and initialize chain with Custom Token - # run: | - # ci_run zk_inception chain create \ - # --chain-name custom_token \ - # --chain-id sequential \ - # --prover-mode no-proofs \ - # --wallet-creation localhost \ - # --l1-batch-commit-data-generator-mode rollup \ - # --base-token-address ${{ env.address }} \ - # --base-token-price-nominator 3 \ - # --base-token-price-denominator 2 \ - # --set-as-default false \ - # --ignore-prerequisites - - # ci_run zk_inception chain init \ - # --deploy-paymaster \ - # --l1-rpc-url=http://localhost:8545 \ - # --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - # --server-db-name=zksync_server_localhost_custom_token \ - # --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - # --prover-db-name=zksync_prover_localhost_custom_token \ - # --port-offset 3000 \ - # --chain custom_token - - # - name: Create and initialize Consensus chain - # run: | - # ci_run zk_inception chain create \ - # --chain-name consensus \ - # --chain-id sequential \ - # --prover-mode no-proofs \ - # --wallet-creation localhost \ - # --l1-batch-commit-data-generator-mode validium \ - # --base-token-address ${{ env.address }} \ - # --base-token-price-nominator 3 \ - # --base-token-price-denominator 2 \ - # --set-as-default false \ - # --ignore-prerequisites - - # ci_run zk_inception chain init \ - # --deploy-paymaster \ - # --l1-rpc-url=http://localhost:8545 \ - # --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - # --server-db-name=zksync_server_localhost_consensus \ - # --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - # --prover-db-name=zksync_prover_localhost_consensus \ - # --port-offset 4000 \ - # --chain consensus - - # - name: Build test dependencies - # run: | - # ci_run zk_supervisor test build - - # - name: Run servers - # run: | - # ci_run zk_inception server --ignore-prerequisites --chain era &> ${{ env.SERVER_LOGS_DIR }}/rollup.log & - # ci_run zk_inception server --ignore-prerequisites --chain validium &> ${{ env.SERVER_LOGS_DIR }}/validium.log & - # ci_run zk_inception server --ignore-prerequisites --chain custom_token &> ${{ env.SERVER_LOGS_DIR }}/custom_token.log & - # ci_run zk_inception server --ignore-prerequisites --chain consensus \ - # --components=api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip,vm_playground,da_dispatcher,consensus \ - # &> ${{ env.SERVER_LOGS_DIR }}/consensus.log & - - # ci_run sleep 5 - - # - name: Run integration tests - # run: | - # ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain era &> ${{ 
env.INTEGRATION_TESTS_LOGS_DIR }}/rollup.log & - # PID1=$! - - # ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain validium &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/validium.log & - # PID2=$! - - # ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain custom_token &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/custom_token.log & - # PID3=$! - - # ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain consensus &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/consensus.log & - # PID4=$! - - # wait $PID1 - # wait $PID2 - # wait $PID3 - # wait $PID4 - - # - name: Init external nodes - # run: | - # ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - # --db-name=zksync_en_localhost_era_rollup --l1-rpc-url=http://localhost:8545 --chain era - # ci_run zk_inception external-node init --ignore-prerequisites --chain era - - # ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - # --db-name=zksync_en_localhost_era_validium1 --l1-rpc-url=http://localhost:8545 --chain validium - # ci_run zk_inception external-node init --ignore-prerequisites --chain validium - - # ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - # --db-name=zksync_en_localhost_era_custom_token --l1-rpc-url=http://localhost:8545 --chain custom_token - # ci_run zk_inception external-node init --ignore-prerequisites --chain custom_token - - # ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - # --db-name=zksync_en_localhost_era_consensus --l1-rpc-url=http://localhost:8545 --chain consensus - # ci_run zk_inception external-node init --ignore-prerequisites --chain consensus - - # - name: Run recovery tests (from snapshot) - # run: | - - # # ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain era &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}/rollup.log & - # # PID1=$! - - # # ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain validium &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}//validium.log & - # # PID2=$! - - # # ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain custom_token &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}//custom_token.log & - # # PID3=$! - - # ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain consensus &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}//consensus.log & - # PID4=$! - - # wait $PID1 - # wait $PID2 - # wait $PID3 - # wait $PID4 - - # - name: Run recovery tests (from genesis) - # run: | - # ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain era &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/rollup.log & - # PID1=$! - - # ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain validium &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/validium.log & - # PID2=$! - - # ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain custom_token &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/custom_token.log & - # PID3=$! - - # ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain consensus &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/consensus.log & - # PID4=$! 
- - # wait $PID1 - # wait $PID2 - # wait $PID3 - # wait $PID4 - - # - name: Run external node server - # run: | - # ci_run zk_inception external-node run --ignore-prerequisites --chain era &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/rollup.log & - # ci_run zk_inception external-node run --ignore-prerequisites --chain validium &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/validium.log & - # ci_run zk_inception external-node run --ignore-prerequisites --chain custom_token &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/custom_token.log & - # ci_run zk_inception external-node run --ignore-prerequisites --chain consensus --enable-consensus &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/consensus.log & - # ci_run sleep 5 - - # - name: Run integration tests en - # run: | - # ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain era &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/rollup.log & - # PID1=$! - - # ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain validium &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/validium.log & - # PID2=$! - - # ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain custom_token &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/custom_token.log & - # PID3=$! - - # ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain consensus &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/consensus.log & - # PID4=$! - - # wait $PID1 - # wait $PID2 - # wait $PID3 - # wait $PID4 - - # - name: Run revert tests - # run: | - # ci_run killall -INT zksync_server || true - # ci_run killall -INT zksync_external_node || true - - # ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain era &> ${{ env.REVERT_LOGS_DIR }}/rollup.log & - # PID1=$! - - # ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain validium &> ${{ env.REVERT_LOGS_DIR }}/validium.log & - # PID2=$! - - # ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain custom_token &> ${{ env.REVERT_LOGS_DIR }}/custom_token.log & - # PID3=$! - - # ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain consensus &> ${{ env.REVERT_LOGS_DIR }}/consensus.log & - # PID4=$! 
- - # wait $PID1 - # wait $PID2 - # wait $PID3 - # wait $PID4 - - - # # Upgrade tests should run last, because as soon as they - # # finish the bootloader will be different - # # TODO make upgrade tests safe to run multiple times - # - name: Run upgrade test - # run: | - # ci_run zk_supervisor test upgrade --no-deps --chain era - - - # - name: Upload logs - # uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 - # if: always() - # with: - # name: logs - # path: logs diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index dafefdd71c1..9c8817cf5cc 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -59,14 +59,10 @@ jobs: - '.github/workflows/ci-core-lint-reusable.yml' - 'Cargo.toml' - 'Cargo.lock' - - '!**/*.md' - - '!**/*.MD' - - 'docker-compose.yml' - zk_toolbox: - - '.github/workflows/ci-zk-toolbox-reusable.yml' - 'zk_toolbox/**' - '!**/*.md' - '!**/*.MD' + - 'docker-compose.yml' docs: - '**/*.md' - '**/*.MD' @@ -90,7 +86,7 @@ jobs: ci-for-core: name: CI for Core Components needs: changed_files - if: ${{ (needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }} + if: ${{ (needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.zk_toolbox == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }} uses: ./.github/workflows/ci-core-reusable.yml ci-for-prover: @@ -99,12 +95,6 @@ jobs: name: CI for Prover Components uses: ./.github/workflows/ci-prover-reusable.yml - ci-for-zk-toolbox: - needs: changed_files - if: ${{ (needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.zk_toolbox == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }} - name: CI for zk_toolbox - uses: ./.github/workflows/ci-zk-toolbox-reusable.yml - ci-for-docs: needs: changed_files if: needs.changed_files.outputs.docs == 'true' @@ -122,7 +112,7 @@ jobs: # name: Build core images # needs: changed_files # if: ${{ (needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }} - # uses: ./.github/workflows/build-core-template.yml + # uses: ./.github/workflows/new-build-core-template.yml # with: # image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} # action: "build" @@ -147,7 +137,7 @@ jobs: name: Build contract verifier needs: changed_files if: ${{ (needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }} - uses: ./.github/workflows/build-contract-verifier-template.yml + uses: ./.github/workflows/new-build-contract-verifier-template.yml with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} action: "build" @@ -159,7 +149,7 @@ jobs: name: Build prover images needs: changed_files if: ${{ (needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }} - uses: ./.github/workflows/build-prover-template.yml + uses: ./.github/workflows/new-build-prover-template.yml with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} action: "build" @@ -173,12 +163,10 @@ jobs: name: Build prover images with avx512 instructions needs: changed_files if: ${{ (needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 
'true') && !contains(github.ref_name, 'release-please--branches') }} - uses: ./.github/workflows/build-witness-generator-template.yml + uses: ./.github/workflows/new-build-witness-generator-template.yml with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }}-avx512 action: "build" - ERA_BELLMAN_CUDA_RELEASE: ${{ vars.ERA_BELLMAN_CUDA_RELEASE }} - is_pr_from_fork: ${{ github.event.pull_request.head.repo.fork == true }} WITNESS_GENERATOR_RUST_FLAGS: "-Ctarget_feature=+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl" secrets: DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} @@ -188,8 +176,8 @@ jobs: name: Github Status Check runs-on: ubuntu-latest if: always() && !cancelled() - #TODO return build-core-images - needs: [ci-for-core-lint, ci-for-common, ci-for-core, ci-for-prover, ci-for-docs, build-contract-verifier, build-prover-images] + # TODO restore build-core-images + needs: [ ci-for-core-lint, ci-for-common, ci-for-core, ci-for-prover, ci-for-docs, build-contract-verifier, build-prover-images ] steps: - name: Status run: | diff --git a/.github/workflows/new-build-contract-verifier-template.yml b/.github/workflows/new-build-contract-verifier-template.yml new file mode 100644 index 00000000000..42791eab666 --- /dev/null +++ b/.github/workflows/new-build-contract-verifier-template.yml @@ -0,0 +1,271 @@ +name: Build contract verifier +on: + workflow_call: + secrets: + DOCKERHUB_USER: + description: "DOCKERHUB_USER" + required: true + DOCKERHUB_TOKEN: + description: "DOCKERHUB_TOKEN" + required: true + inputs: + image_tag_suffix: + description: "Optional suffix to override tag name generation" + type: string + required: false + compilers: + description: 'JSON of required compilers and their versions' + type: string + required: false + default: '[{ "zksolc": ["1.3.14", "1.3.16", "1.3.17", "1.3.1", "1.3.7", "1.3.18", "1.3.19", "1.3.21"] } , { "zkvyper": ["1.3.13"] }]' + action: + type: string + default: non-push + required: false + +jobs: + prepare-contracts: + name: Prepare contracts + runs-on: matterlabs-ci-runner-high-performance + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + submodules: "recursive" + + - name: Prepare ENV + shell: bash + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo CI=1 >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo CI=1 >> .env + echo IN_DOCKER=1 >> .env + + - name: Download contracts + shell: bash + run: | + commit_sha=$(git submodule status contracts | awk '{print $1}' | tr -d '-') + page=1 + filtered_tag="" + while [ true ]; do + echo "Page: $page" + tags=$(run_retried curl -s -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" -H "Accept: application/vnd.github+json" \ + "https://api.github.com/repos/matter-labs/era-contracts/tags?per_page=100&page=${page}" | jq .) + if [ $(jq length <<<"$tags") -eq 0 ]; then + echo "No tag found on all pages." + echo "BUILD_CONTRACTS=true" >> "$GITHUB_ENV" + exit 0 + fi + filtered_tag=$(jq -r --arg commit_sha "$commit_sha" 'map(select(.commit.sha == $commit_sha)) | .[].name' <<<"$tags") + if [[ ! 
-z "$filtered_tag" ]]; then + echo "BUILD_CONTRACTS=false" >> "$GITHUB_ENV" + break + fi + ((page++)) + done + echo "Contracts tag is: ${filtered_tag}" + mkdir -p ./contracts + run_retried curl -s -LO https://github.com/matter-labs/era-contracts/releases/download/${filtered_tag}/l1-contracts.tar.gz + run_retried curl -s -LO https://github.com/matter-labs/era-contracts/releases/download/${filtered_tag}/l2-contracts.tar.gz + run_retried curl -s -LO https://github.com/matter-labs/era-contracts/releases/download/${filtered_tag}/system-contracts.tar.gz + tar -C ./contracts -zxf l1-contracts.tar.gz + tar -C ./contracts -zxf l2-contracts.tar.gz + tar -C ./contracts -zxf system-contracts.tar.gz + + - name: Install Apt dependencies + shell: bash + run: | + sudo apt-get update && sudo apt-get install -y libssl-dev pkg-config + + - name: Install Node + uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3 + with: + node-version: 20 + cache: 'npm' + + - name: Install Yarn + run: npm install -g yarn + + - name: Setup rust + uses: actions-rust-lang/setup-rust-toolchain@1fbea72663f6d4c03efaab13560c8a24cfd2a7cc # v1.9.0 + with: + toolchain: nightly-2024-08-01 + + - name: Install cargo-nextest from crates.io + uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 + with: + crate: cargo-nextest + + - name: Install sqlx-cli from crates.io + uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 + with: + crate: sqlx-cli + tag: 0.8.1 + + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773 # v1.2.0 + + - name: Pre-download compilers + shell: bash + run: | + # Download needed versions of vyper compiler + # Not sanitized due to unconventional path and tags + mkdir -p ./hardhat-nodejs/compilers-v2/vyper/linux + wget -nv -O ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.10 https://github.com/vyperlang/vyper/releases/download/v0.3.10/vyper.0.3.10+commit.91361694.linux + wget -nv -O ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.3 https://github.com/vyperlang/vyper/releases/download/v0.3.3/vyper.0.3.3+commit.48e326f0.linux + chmod +x ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.10 + chmod +x ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.3 + + COMPILERS_JSON='${{ inputs.compilers }}' + echo "$COMPILERS_JSON" | jq -r '.[] | to_entries[] | .key as $compiler | .value[] | "\(.),\($compiler)"' | while IFS=, read -r version compiler; do + mkdir -p "./hardhat-nodejs/compilers-v2/$compiler" + wget -nv -O "./hardhat-nodejs/compilers-v2/$compiler/${compiler}-v${version}" "https://github.com/matter-labs/${compiler}-bin/releases/download/v${version}/${compiler}-linux-amd64-musl-v${version}" + chmod +x "./hardhat-nodejs/compilers-v2/$compiler/${compiler}-v${version}" + done + + - name: init + shell: bash + run: | + mkdir -p ./volumes/postgres + docker compose up -d postgres + zkt || true + + - name: build contracts + shell: bash + run: | + cp etc/tokens/{test,localhost}.json + zk_supervisor contracts + + - name: Upload contracts + uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 + with: + name: contacts-verifier + path: | + ./contracts + + build-images: + name: Build and Push Docker Images + needs: prepare-contracts + runs-on: ${{ fromJSON('["matterlabs-ci-runner-high-performance", "matterlabs-ci-runner-arm"]')[contains(matrix.platforms, 'arm')] }} + strategy: + matrix: + components: + - contract-verifier + - verified-sources-fetcher + platforms: + - linux/amd64 + + steps: + 
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + submodules: "recursive" + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + + - name: Setup env + shell: bash + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo CI=1 >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo CI=1 >> .env + echo IN_DOCKER=1 >> .env + + - name: Download setup key + shell: bash + run: | + run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key + + - name: Set env vars + shell: bash + run: | + echo PLATFORM=$(echo ${{ matrix.platforms }} | tr '/' '-') >> $GITHUB_ENV + echo IMAGE_TAG_SHA=$(git rev-parse --short HEAD) >> $GITHUB_ENV + # Support for custom tag suffix + if [ -n "${{ inputs.image_tag_suffix }}" ]; then + echo IMAGE_TAG_SHA_TS="${{ inputs.image_tag_suffix }}" >> $GITHUB_ENV + else + echo IMAGE_TAG_SHA_TS=$(git rev-parse --short HEAD)-$(date +%s) >> $GITHUB_ENV + fi + + - name: Download contracts + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + name: contacts-verifier + path: | + ./contracts + + - name: login to Docker registries + if: ${{ inputs.action == 'push' }} + shell: bash + run: | + docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} + gcloud auth configure-docker us-docker.pkg.dev -q + + - name: Build and push + uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 + with: + context: . + push: ${{ inputs.action == 'push' }} + file: docker/${{ matrix.components }}/Dockerfile + build-args: | + SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage + SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com + SCCACHE_GCS_RW_MODE=READ_WRITE + RUSTC_WRAPPER=sccache + tags: | + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest + matterlabs/${{ matrix.components }}:latest + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest2.0 + matterlabs/${{ matrix.components }}:latest2.0 + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }} + matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }} + matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} + matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} + matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} + matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} + matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} + + create_manifest: + name: Create release manifest + runs-on: matterlabs-ci-runner + needs: build-images + if: ${{ inputs.action == 'push' }} + strategy: + matrix: + component: + - name: contract-verifier + platform: linux/amd64 + - name: verified-sources-fetcher + platform: linux/amd64 + env: + IMAGE_TAG_SUFFIX:
${{ inputs.image_tag_suffix }} + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + + - name: login to Docker registries + run: | + docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} + gcloud auth configure-docker us-docker.pkg.dev -q + + - name: Create Docker manifest + run: | + docker_repositories=("matterlabs/${{ matrix.component.name }}" "us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component.name }}") + platforms=${{ matrix.component.platform }} + for repo in "${docker_repositories[@]}"; do + platform_tags="" + for platform in ${platforms//,/ }; do + platform=$(echo $platform | tr '/' '-') + platform_tags+=" --amend ${repo}:${IMAGE_TAG_SUFFIX}-${platform}" + done + for manifest in "${repo}:${IMAGE_TAG_SUFFIX}" "${repo}:2.0-${IMAGE_TAG_SUFFIX}" "${repo}:latest" "${repo}:latest2.0"; do + docker manifest create ${manifest} ${platform_tags} + docker manifest push ${manifest} + done + done diff --git a/.github/workflows/new-build-core-template.yml b/.github/workflows/new-build-core-template.yml new file mode 100644 index 00000000000..fba6a68b8ee --- /dev/null +++ b/.github/workflows/new-build-core-template.yml @@ -0,0 +1,287 @@ +name: Build Core images +on: + workflow_call: + secrets: + DOCKERHUB_USER: + description: "DOCKERHUB_USER" + required: true + DOCKERHUB_TOKEN: + description: "DOCKERHUB_TOKEN" + required: true + inputs: + image_tag_suffix: + description: "Optional suffix to override tag name generation" + type: string + required: false + compilers: + description: 'JSON of required compilers and their versions' + type: string + required: false + default: '[{ "zksolc": ["1.3.14", "1.3.16", "1.3.17", "1.3.1", "1.3.7", "1.3.18", "1.3.19", "1.3.21"] } , { "zkvyper": ["1.3.13"] }]' + en_alpha_release: + description: 'Flag that determines if EN release should be marked as alpha' + type: boolean + required: false + default: false + action: + type: string + required: false + default: "do nothing" + +jobs: + prepare-contracts: + name: Prepare contracts + runs-on: matterlabs-ci-runner-high-performance + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + submodules: "recursive" + + - name: Prepare ENV + shell: bash + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo CI=1 >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo CI=1 >> .env + echo IN_DOCKER=1 >> .env + + - name: Download contracts + shell: bash + run: | + commit_sha=$(git submodule status contracts | awk '{print $1}' | tr -d '-') + page=1 + filtered_tag="" + while [ true ]; do + echo "Page: $page" + tags=$(run_retried curl -s -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" -H "Accept: application/vnd.github+json" \ + "https://api.github.com/repos/matter-labs/era-contracts/tags?per_page=100&page=${page}" | jq .) + if [ $(jq length <<<"$tags") -eq 0 ]; then + echo "No tag found on all pages." + echo "BUILD_CONTRACTS=true" >> "$GITHUB_ENV" + exit 0 + fi + filtered_tag=$(jq -r --arg commit_sha "$commit_sha" 'map(select(.commit.sha == $commit_sha)) | .[].name' <<<"$tags") + if [[ !
-z "$filtered_tag" ]]; then + echo "BUILD_CONTRACTS=false" >> "$GITHUB_ENV" + break + fi + ((page++)) + done + echo "Contracts tag is: ${filtered_tag}" + mkdir -p ./contracts + run_retried curl -s -LO https://github.com/matter-labs/era-contracts/releases/download/${filtered_tag}/l1-contracts.tar.gz + run_retried curl -s -LO https://github.com/matter-labs/era-contracts/releases/download/${filtered_tag}/l2-contracts.tar.gz + run_retried curl -s -LO https://github.com/matter-labs/era-contracts/releases/download/${filtered_tag}/system-contracts.tar.gz + tar -C ./contracts -zxf l1-contracts.tar.gz + tar -C ./contracts -zxf l2-contracts.tar.gz + tar -C ./contracts -zxf system-contracts.tar.gz + + - name: Install Apt dependencies + shell: bash + run: | + sudo apt-get update && sudo apt-get install -y libssl-dev pkg-config + + - name: Install Node + uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3 + with: + node-version: 20 + cache: 'npm' + + - name: Install Yarn + run: npm install -g yarn + + - name: Setup rust + uses: actions-rust-lang/setup-rust-toolchain@1fbea72663f6d4c03efaab13560c8a24cfd2a7cc # v1.9.0 + with: + toolchain: nightly-2024-08-01 + + - name: Install cargo-nextest from crates.io + uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 + with: + crate: cargo-nextest + + - name: Install sqlx-cli from crates.io + uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 + with: + crate: sqlx-cli + tag: 0.8.1 + + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773 # v1.2.0 + + - name: Pre-download compilers + shell: bash + run: | + # Download needed versions of vyper compiler + # Not sanitized due to unconventional path and tags + mkdir -p ./hardhat-nodejs/compilers-v2/vyper/linux + wget -nv -O ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.10 https://github.com/vyperlang/vyper/releases/download/v0.3.10/vyper.0.3.10+commit.91361694.linux + wget -nv -O ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.3 https://github.com/vyperlang/vyper/releases/download/v0.3.3/vyper.0.3.3+commit.48e326f0.linux + chmod +x ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.10 + chmod +x ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.3 + + COMPILERS_JSON='${{ inputs.compilers }}' + echo "$COMPILERS_JSON" | jq -r '.[] | to_entries[] | .key as $compiler | .value[] | "\(.),\($compiler)"' | while IFS=, read -r version compiler; do + mkdir -p "./hardhat-nodejs/compilers-v2/$compiler" + wget -nv -O "./hardhat-nodejs/compilers-v2/$compiler/${compiler}-v${version}" "https://github.com/matter-labs/${compiler}-bin/releases/download/v${version}/${compiler}-linux-amd64-musl-v${version}" + chmod +x "./hardhat-nodejs/compilers-v2/$compiler/${compiler}-v${version}" + done + + - name: init + shell: bash + run: | + mkdir -p ./volumes/postgres + docker compose up -d postgres + zkt || true + + - name: build contracts + shell: bash + run: | + cp etc/tokens/{test,localhost}.json + zk_supervisor contracts + + - name: Upload contracts + uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 + with: + name: contacts + path: | + ./contracts + + build-images: + name: Build and Push Docker Images + needs: prepare-contracts + env: + IMAGE_TAG_SUFFIX: ${{ inputs.image_tag_suffix }}${{ (inputs.en_alpha_release && matrix.components == 'external-node') && '-alpha' || '' }} + runs-on: ${{ fromJSON('["matterlabs-ci-runner-high-performance", "matterlabs-ci-runner-arm"]')[contains(matrix.platforms, 
'arm')] }} + strategy: + matrix: + components: + - server-v2 + - external-node + - snapshots-creator + platforms: + - linux/amd64 + include: + - components: external-node + platforms: linux/arm64 + + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + submodules: "recursive" + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + + - name: Setup env + shell: bash + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo CI=1 >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo CI=1 >> .env + echo IN_DOCKER=1 >> .env + + - name: Download setup key + shell: bash + run: | + run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key + + - name: Set env vars + shell: bash + run: | + echo PLATFORM=$(echo ${{ matrix.platforms }} | tr '/' '-') >> $GITHUB_ENV + echo IMAGE_TAG_SHA=$(git rev-parse --short HEAD) >> $GITHUB_ENV + # Support for custom tag suffix + if [ -n "${{ inputs.image_tag_suffix }}" ]; then + echo IMAGE_TAG_SHA_TS="${{ inputs.image_tag_suffix }}" >> $GITHUB_ENV + else + echo IMAGE_TAG_SHA_TS=$(git rev-parse --short HEAD)-$(date +%s) >> $GITHUB_ENV + fi + + - name: Download contracts + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + name: contacts + path: | + ./contracts + + - name: login to Docker registries + if: ${{ inputs.action == 'push' }} + shell: bash + run: | + docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} + gcloud auth configure-docker us-docker.pkg.dev -q + + - name: Build and push + uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 + with: + context: . + push: ${{ inputs.action == 'push' }} + file: docker/${{ matrix.components }}/Dockerfile + build-args: | + SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage + SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com + SCCACHE_GCS_RW_MODE=READ_WRITE + RUSTC_WRAPPER=sccache + tags: | + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest + matterlabs/${{ matrix.components }}:latest + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest2.0 + matterlabs/${{ matrix.components }}:latest2.0 + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }} + matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }} + matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} + matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} + matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} + matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} + matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} + + create_manifest: + name: Create release manifest + runs-on: matterlabs-ci-runner + needs: build-images + if: ${{
inputs.action == 'push' }} + strategy: + matrix: + component: + - name: server-v2 + platform: linux/amd64 + - name: external-node + platform: linux/amd64,linux/arm64 + - name: snapshots-creator + platform: linux/amd64 + + env: + IMAGE_TAG_SUFFIX: ${{ inputs.image_tag_suffix }}${{ (inputs.en_alpha_release && matrix.component.name == 'external-node') && '-alpha' || '' }} + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + + - name: login to Docker registries + shell: bash + run: | + docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} + gcloud auth configure-docker us-docker.pkg.dev -q + + - name: Create Docker manifest + shell: bash + run: | + docker_repositories=("matterlabs/${{ matrix.component.name }}" "us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component.name }}") + platforms=${{ matrix.component.platform }} + for repo in "${docker_repositories[@]}"; do + platform_tags="" + for platform in ${platforms//,/ }; do + platform=$(echo $platform | tr '/' '-') + platform_tags+=" --amend ${repo}:${IMAGE_TAG_SUFFIX}-${platform}" + done + for manifest in "${repo}:${IMAGE_TAG_SUFFIX}" "${repo}:2.0-${IMAGE_TAG_SUFFIX}" "${repo}:latest" "${repo}:latest2.0"; do + docker manifest create ${manifest} ${platform_tags} + docker manifest push ${manifest} + done + done diff --git a/.github/workflows/new-build-prover-template.yml b/.github/workflows/new-build-prover-template.yml new file mode 100644 index 00000000000..60c152213e6 --- /dev/null +++ b/.github/workflows/new-build-prover-template.yml @@ -0,0 +1,198 @@ +name: Build Prover images +on: + workflow_call: + secrets: + DOCKERHUB_USER: + description: "DOCKERHUB_USER" + required: true + DOCKERHUB_TOKEN: + description: "DOCKERHUB_TOKEN" + required: true + inputs: + ERA_BELLMAN_CUDA_RELEASE: + description: "ERA_BELLMAN_CUDA_RELEASE" + type: string + required: true + image_tag_suffix: + description: "Optional suffix to override tag name generation" + type: string + required: false + action: + description: "Action with docker image" + type: string + default: "push" + required: false + is_pr_from_fork: + description: "Indicates whether the workflow is invoked from a PR created from fork" + type: boolean + default: false + required: false + CUDA_ARCH: + description: "CUDA Arch to build" + type: string + default: "89" + required: false + outputs: + protocol_version: + description: "Protocol version of the binary" + value: ${{ jobs.get-protocol-version.outputs.protocol_version }} + +jobs: + get-protocol-version: + name: Get protocol version + runs-on: [ matterlabs-ci-runner-high-performance ] + outputs: + protocol_version: ${{ steps.protocolversion.outputs.protocol_version }} + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + submodules: "recursive" + + - name: setup-env + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo CI=1 >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo CI=1 >> .env + echo IN_DOCKER=1 >> .env + + - name: setup rust + uses: actions-rust-lang/setup-rust-toolchain@1fbea72663f6d4c03efaab13560c8a24cfd2a7cc # v1.9.0 + with: + toolchain: nightly-2024-08-01 + + - name: Prepare sccache-cache env vars + shell: bash + run: | + echo SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage >> $GITHUB_ENV + echo SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com >> $GITHUB_ENV + echo SCCACHE_ERROR_LOG=/tmp/sccache_log.txt >> $GITHUB_ENV + echo SCCACHE_GCS_RW_MODE=READ_WRITE >> $GITHUB_ENV + 
echo RUSTC_WRAPPER=sccache >> $GITHUB_ENV + + - name: protocol-version + id: protocolversion + # TODO: use -C flag, when it will become stable. + shell: bash + run: | + cd prover + cargo build --release --bin prover_version + PPV=$(target/release/prover_version) + echo Protocol version is ${PPV} + echo "protocol_version=${PPV}" >> $GITHUB_OUTPUT + + build-images: + name: Build and Push Docker Images + needs: get-protocol-version + env: + PROTOCOL_VERSION: ${{ needs.get-protocol-version.outputs.protocol_version }} + runs-on: [ matterlabs-ci-runner-high-performance ] + strategy: + matrix: + components: + - witness-generator + - prover-gpu-fri + - witness-vector-generator + - prover-fri-gateway + - prover-job-monitor + - proof-fri-gpu-compressor + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + submodules: "recursive" + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + + - name: setup-env + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo CI=1 >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo CI=1 >> .env + echo IN_DOCKER=1 >> .env + + - name: Set env vars + shell: bash + run: | + # Support for custom tag suffix + if [ -n "${{ inputs.image_tag_suffix }}" ]; then + echo IMAGE_TAG_SHA_TS="${{ inputs.image_tag_suffix }}" >> $GITHUB_ENV + else + echo IMAGE_TAG_SHA_TS=$(git rev-parse --short HEAD)-$(date +%s) >> $GITHUB_ENV + fi + + - name: download CRS for GPU compressor + if: matrix.components == 'proof-fri-gpu-compressor' + run: | + run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^24.key + + # We need to run this only when ERA_BELLMAN_CUDA_RELEASE is not available + # In our case it happens only when PR is created from fork + - name: Wait for runner IP to be not rate-limited against GH API + if: ( inputs.is_pr_from_fork == true && matrix.components == 'proof-fri-gpu-compressor' ) + run: ./.github/scripts/rate_limit_check.sh + + - name: Hack to set env vars inside docker container + shell: bash + run: | + sed -i '/^FROM matterlabs\/zksync-build-base:latest as builder/a ENV SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage\nENV SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com\nENV SCCACHE_GCS_RW_MODE=READ_WRITE\nENV RUSTC_WRAPPER=sccache' ./docker/${{ matrix.components }}/Dockerfile + #TODO: remove AS version =) + sed -i '/^FROM matterlabs\/zksync-build-base:latest AS builder/a ENV SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage\nENV SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com\nENV SCCACHE_GCS_RW_MODE=READ_WRITE\nENV RUSTC_WRAPPER=sccache' ./docker/${{ matrix.components }}/Dockerfile + cat ./docker/${{ matrix.components }}/Dockerfile + + - name: login to Docker registries + if: github.event_name != 'pull_request' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/')) + shell: bash + run: | + docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} + gcloud auth configure-docker us-docker.pkg.dev -q + + - name: Build and push + uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 + with: + context: . 
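A note on the Hack to set env vars inside docker container step above: build args only reach stages that declare matching ARGs, so the workflow instead rewrites the Dockerfile in place, appending ENV lines right after the builder's FROM line. A reduced sketch of the same sed trick against a hypothetical Dockerfile:

#!/bin/bash
# Hypothetical single-stage Dockerfile, for illustration only.
cat > Dockerfile <<'EOF'
FROM rust:1.80 AS builder
RUN cargo --version
EOF

# GNU sed's `a` command appends text after each matching line;
# the \n inside the appended text yields additional ENV lines.
sed -i '/^FROM rust:1.80 AS builder/a ENV RUSTC_WRAPPER=sccache\nENV SCCACHE_GCS_RW_MODE=READ_WRITE' Dockerfile
cat Dockerfile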
+ push: ${{ inputs.action == 'push' }} + build-args: | + CUDA_ARCH=${{ inputs.CUDA_ARCH }} + SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage + SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com + SCCACHE_GCS_RW_MODE=READ_WRITE + RUSTC_WRAPPER=sccache + file: docker/${{ matrix.components }}/Dockerfile + tags: | + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ env.IMAGE_TAG_SHA_TS }} + matterlabs/${{ matrix.components }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ env.IMAGE_TAG_SHA_TS }} + + copy-images: + name: Copy images between docker registries + needs: [ build-images, get-protocol-version ] + env: + PROTOCOL_VERSION: ${{ needs.get-protocol-version.outputs.protocol_version }} + runs-on: matterlabs-ci-runner + if: ${{ inputs.action == 'push' }} + strategy: + matrix: + component: + - witness-vector-generator + steps: + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + + - name: Login to us-central1 GAR + run: | + gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://us-docker.pkg.dev + + - name: Login and push to Asia GAR + run: | + gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://asia-docker.pkg.dev + docker buildx imagetools create \ + --tag asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }} \ + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }} + + - name: Login and push to Europe GAR + run: | + gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://europe-docker.pkg.dev + docker buildx imagetools create \ + --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }} \ + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }} diff --git a/.github/workflows/new-build-witness-generator-template.yml b/.github/workflows/new-build-witness-generator-template.yml new file mode 100644 index 00000000000..2f1fc0b2dd8 --- /dev/null +++ b/.github/workflows/new-build-witness-generator-template.yml @@ -0,0 +1,133 @@ +name: Build witness generator image with custom compiler flags +on: + workflow_call: + secrets: + DOCKERHUB_USER: + description: "DOCKERHUB_USER" + required: true + DOCKERHUB_TOKEN: + description: "DOCKERHUB_TOKEN" + required: true + inputs: + image_tag_suffix: + description: "Optional suffix to override tag name generation" + type: string + required: false + action: + type: string + default: non-push + required: false + CUDA_ARCH: + description: "CUDA Arch to build" + type: string + default: "89" + required: false + WITNESS_GENERATOR_RUST_FLAGS: + description: "Rust flags for witness_generator compilation" + type: string + default: "" + required: false + outputs:
protocol_version: + description: "Protocol version of the binary" + value: ${{ jobs.get-protocol-version.outputs.protocol_version }} + +jobs: + get-protocol-version: + name: Get protocol version + runs-on: [ matterlabs-ci-runner-high-performance ] + outputs: + protocol_version: ${{ steps.protocolversion.outputs.protocol_version }} + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + submodules: "recursive" + + - name: setup-env + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo CI=1 >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo CI=1 >> .env + echo IN_DOCKER=1 >> .env + + - name: setup rust + uses: actions-rust-lang/setup-rust-toolchain@1fbea72663f6d4c03efaab13560c8a24cfd2a7cc # v1.9.0 + with: + toolchain: nightly-2024-08-01 + + - name: Prepare sccache-cache env vars + shell: bash + run: | + echo SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage >> $GITHUB_ENV + echo SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com >> $GITHUB_ENV + echo SCCACHE_ERROR_LOG=/tmp/sccache_log.txt >> $GITHUB_ENV + echo SCCACHE_GCS_RW_MODE=READ_WRITE >> $GITHUB_ENV + echo RUSTC_WRAPPER=sccache >> $GITHUB_ENV + + - name: protocol-version + id: protocolversion + # TODO: use -C flag, when it will become stable. + shell: bash + run: | + cd prover + cargo build --release --bin prover_version + PPV=$(target/release/prover_version) + echo Protocol version is ${PPV} + echo "protocol_version=${PPV}" >> $GITHUB_OUTPUT + + build-images: + name: Build and Push Docker Images + needs: get-protocol-version + env: + PROTOCOL_VERSION: ${{ needs.get-protocol-version.outputs.protocol_version }} + runs-on: [ matterlabs-ci-runner-c3d ] + strategy: + matrix: + components: + - witness-generator + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + submodules: "recursive" + + - name: setup-env + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo CI=1 >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo CI=1 >> .env + echo IN_DOCKER=1 >> .env + + - name: Set env vars + shell: bash + run: | + # Support for custom tag suffix + if [ -n "${{ inputs.image_tag_suffix }}" ]; then + echo IMAGE_TAG_SHA_TS="${{ inputs.image_tag_suffix }}" >> $GITHUB_ENV + else + echo IMAGE_TAG_SHA_TS=$(git rev-parse --short HEAD)-$(date +%s) >> $GITHUB_ENV + fi + + - name: login to Docker registries + if: github.event_name != 'pull_request' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/')) + shell: bash + run: | + docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} + gcloud auth configure-docker us-docker.pkg.dev -q + + - name: Build and push + uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 + with: + context: . 
+ push: ${{ inputs.action == 'push' }} + build-args: | + CUDA_ARCH=${{ inputs.CUDA_ARCH }} + SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage + SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com + SCCACHE_GCS_RW_MODE=READ_WRITE + RUSTC_WRAPPER=sccache + file: docker/${{ matrix.components }}/Dockerfile + tags: | + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ env.IMAGE_TAG_SHA_TS }} + matterlabs/${{ matrix.components }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ env.IMAGE_TAG_SHA_TS }} diff --git a/.github/workflows/release-test-stage.yml b/.github/workflows/release-test-stage.yml index 1da5aa9ac92..ce74b76a6b7 100644 --- a/.github/workflows/release-test-stage.yml +++ b/.github/workflows/release-test-stage.yml @@ -39,7 +39,7 @@ jobs: - '!prover/**' setup: name: Setup - runs-on: [matterlabs-deployer-stage] + runs-on: [ matterlabs-deployer-stage ] outputs: image_tag_suffix: ${{ steps.generate-tag-suffix.outputs.image_tag_suffix }} prover_fri_gpu_key_id: ${{ steps.extract-prover-fri-setup-key-ids.outputs.gpu_short_commit_sha }} @@ -61,7 +61,7 @@ jobs: build-push-core-images: name: Build and push images - needs: [setup, changed_files] + needs: [ setup, changed_files ] uses: ./.github/workflows/build-core-template.yml if: needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true' with: @@ -72,7 +72,7 @@ jobs: build-push-tee-prover-images: name: Build and push images - needs: [setup, changed_files] + needs: [ setup, changed_files ] uses: ./.github/workflows/build-tee-prover-template.yml if: needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true' with: @@ -84,7 +84,7 @@ jobs: build-push-contract-verifier: name: Build and push images - needs: [setup, changed_files] + needs: [ setup, changed_files ] uses: ./.github/workflows/build-contract-verifier-template.yml if: needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true' with: @@ -95,26 +95,26 @@ jobs: build-push-prover-images: name: Build and push images - needs: [setup, changed_files] + needs: [ setup, changed_files ] uses: ./.github/workflows/build-prover-template.yml if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true' with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} ERA_BELLMAN_CUDA_RELEASE: ${{ vars.ERA_BELLMAN_CUDA_RELEASE }} - CUDA_ARCH: "60;70;75;89" + CUDA_ARCH: "60;70;75;80;89" secrets: DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} build-push-witness-generator-image-avx512: name: Build and push prover images with avx512 instructions - needs: [setup, changed_files] + needs: [ setup, changed_files ] uses: ./.github/workflows/build-witness-generator-template.yml if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true' with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }}-avx512 ERA_BELLMAN_CUDA_RELEASE: ${{ vars.ERA_BELLMAN_CUDA_RELEASE }} - CUDA_ARCH: "60;70;75;89" + CUDA_ARCH: "60;70;75;80;89" WITNESS_GENERATOR_RUST_FLAGS: "-Ctarget_feature=+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl" secrets: DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} @@ -122,7 +122,7 @@ jobs: build-gar-prover-fri-gpu: name: Build GAR prover FRI GPU - needs: [setup, build-push-prover-images] + needs: [ setup, build-push-prover-images ] uses: ./.github/workflows/build-prover-fri-gpu-gar.yml if: 
needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true' with: diff --git a/.github/workflows/vm-perf-comparison.yml b/.github/workflows/vm-perf-comparison.yml index 0de36ece2f7..a57bed3006a 100644 --- a/.github/workflows/vm-perf-comparison.yml +++ b/.github/workflows/vm-perf-comparison.yml @@ -8,7 +8,7 @@ on: jobs: vm-benchmarks: name: Run VM benchmarks - runs-on: [matterlabs-ci-runner-highmem-long] + runs-on: [ matterlabs-ci-runner-highmem-long ] steps: - name: checkout base branch @@ -35,6 +35,10 @@ jobs: # touch .env # echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV # echo $(pwd)/bin >> $GITHUB_PATH + # echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env + # echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env + # echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env + # echo "RUSTC_WRAPPER=sccache" >> .env # - name: init # run: | @@ -44,20 +48,20 @@ jobs: # - name: run benchmarks on base branch # shell: bash # run: | - # ci_run zk - # ci_run zk compiler system-contracts + # ci_run zkt + # ci_run zk_supervisor contracts --system-contracts # ci_run cargo bench --package vm-benchmark --bench iai | tee base-iai # ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee base-opcodes || touch base-opcodes - # ci_run yarn workspace system-contracts clean # - name: checkout PR - # run: git checkout --force FETCH_HEAD --recurse-submodules + # run: | + # git checkout --force FETCH_HEAD --recurse-submodules # - name: run benchmarks on PR # shell: bash # run: | - # ci_run zk - # ci_run zk compiler system-contracts + # ci_run zkt + # ci_run zk_supervisor contracts --system-contracts # ci_run cargo bench --package vm-benchmark --bench iai | tee pr-iai # ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee pr-opcodes || touch pr-opcodes diff --git a/.github/workflows/vm-perf-to-prometheus.yml b/.github/workflows/vm-perf-to-prometheus.yml index 4d90b2a24eb..4c8c90a0d8f 100644 --- a/.github/workflows/vm-perf-to-prometheus.yml +++ b/.github/workflows/vm-perf-to-prometheus.yml @@ -12,7 +12,7 @@ concurrency: vm-benchmarks jobs: vm-benchmarks: name: Run VM benchmarks - runs-on: [matterlabs-ci-runner-highmem-long] + runs-on: [ matterlabs-ci-runner-highmem-long ] steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 @@ -22,7 +22,10 @@ jobs: - name: setup-env run: | echo BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL=${{ secrets.BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL }} >> .env - + echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env + echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env + echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env + echo "RUSTC_WRAPPER=sccache" >> .env echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH @@ -30,8 +33,8 @@ jobs: run: | run_retried docker compose pull zk docker compose up -d zk - ci_run zk - ci_run zk compiler all + ci_run zkt + ci_run zk_supervisor contracts - name: run benchmarks run: | diff --git a/.gitignore b/.gitignore index 71853a37b43..d60a93bba74 100644 --- a/.gitignore +++ b/.gitignore @@ -118,6 +118,8 @@ prover/data/keys/setup_* # Zk Toolbox chains/era/configs/* +chains/gateway/* configs/* era-observability/ core/tests/ts-integration/deployments-zk +transactions/ diff --git a/Cargo.lock b/Cargo.lock index e6bfe2b0eed..368f8a62a71 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -336,6 +336,17 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] 
+name = "async-recursion" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" +dependencies = [ + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 2.0.72", +] + [[package]] name = "async-signal" version = "0.2.10" @@ -6270,11 +6281,14 @@ dependencies = [ name = "selector_generator" version = "0.1.0" dependencies = [ + "anyhow", "clap 4.4.6", + "ethabi", "glob", + "hex", "serde", "serde_json", - "sha3 0.10.8", + "tokio", ] [[package]] @@ -6840,6 +6854,7 @@ dependencies = [ "futures 0.3.30", "rand 0.8.5", "structopt", + "test-casing", "tokio", "tracing", "vise", @@ -9824,6 +9839,7 @@ name = "zksync_eth_watch" version = "0.1.0" dependencies = [ "anyhow", + "async-recursion", "async-trait", "test-log", "thiserror", @@ -9842,7 +9858,7 @@ dependencies = [ [[package]] name = "zksync_external_node" -version = "24.24.0" +version = "24.25.0" dependencies = [ "anyhow", "assert_matches", @@ -10021,6 +10037,7 @@ dependencies = [ "zksync_kzg", "zksync_prover_interface", "zksync_solidity_vk_codegen", + "zksync_system_constants", "zksync_types", ] @@ -10135,7 +10152,6 @@ dependencies = [ "hex", "itertools 0.10.5", "once_cell", - "pretty_assertions", "thiserror", "tokio", "tracing", @@ -10892,6 +10908,7 @@ dependencies = [ "secp256k1", "serde", "serde_json", + "serde_with", "strum", "thiserror", "tokio", @@ -10959,7 +10976,7 @@ dependencies = [ [[package]] name = "zksync_vm2" version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=cd6136c42ec56856e0abcf2a98d1a9e120161482#cd6136c42ec56856e0abcf2a98d1a9e120161482" +source = "git+https://github.com/matter-labs/vm2.git?rev=74577d9be13b1bff9d1a712389731f669b179e47#74577d9be13b1bff9d1a712389731f669b179e47" dependencies = [ "enum_dispatch", "primitive-types", @@ -10971,7 +10988,7 @@ dependencies = [ [[package]] name = "zksync_vm2_interface" version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=cd6136c42ec56856e0abcf2a98d1a9e120161482#cd6136c42ec56856e0abcf2a98d1a9e120161482" +source = "git+https://github.com/matter-labs/vm2.git?rev=74577d9be13b1bff9d1a712389731f669b179e47#74577d9be13b1bff9d1a712389731f669b179e47" dependencies = [ "primitive-types", ] @@ -11001,6 +11018,7 @@ dependencies = [ "assert_matches", "async-trait", "hex", + "pretty_assertions", "serde", "serde_json", "thiserror", @@ -11023,6 +11041,7 @@ dependencies = [ "once_cell", "rand 0.8.5", "serde", + "serde_json", "tempfile", "test-casing", "tokio", diff --git a/Cargo.toml b/Cargo.toml index 5eb862f0bcb..5a8a507b034 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -105,6 +105,7 @@ categories = ["cryptography"] anyhow = "1" assert_matches = "1.5" async-trait = "0.1" +async-recursion = "1" axum = "0.7.5" backon = "0.4.4" bigdecimal = "0.4.5" @@ -226,7 +227,7 @@ zk_evm_1_4_1 = { package = "zk_evm", version = "0.141" } zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.5" } # New VM; pinned to a specific commit because of instability -zksync_vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "cd6136c42ec56856e0abcf2a98d1a9e120161482" } +zksync_vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "74577d9be13b1bff9d1a712389731f669b179e47" } # Consensus dependencies. 
zksync_concurrency = "=0.1.1" diff --git a/contracts b/contracts index 18167a27f76..90b517394e2 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 18167a27f76ccc8c59697911c120a47ca97d75ce +Subproject commit 90b517394e222c98ea99713e1ce9b5a1c4e0870e diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 7d4381b09be..f0792807103 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,32 @@ # Changelog +## [24.25.0](https://github.com/matter-labs/zksync-era/compare/core-v24.24.0...core-v24.25.0) (2024-09-19) + + +### Features + +* (DB migration) Rename recursion_scheduler_level_vk_hash to snark_wrapper_vk_hash ([#2809](https://github.com/matter-labs/zksync-era/issues/2809)) ([64f9551](https://github.com/matter-labs/zksync-era/commit/64f95514c99f95da2a19a97ff064c29a97efc22f)) +* add da clients ([#2743](https://github.com/matter-labs/zksync-era/issues/2743)) ([9218612](https://github.com/matter-labs/zksync-era/commit/9218612fdb2b63c20841e2e2e5a45bbd23c01fbc)) +* attester committees data extractor (BFT-434) ([#2684](https://github.com/matter-labs/zksync-era/issues/2684)) ([92dde03](https://github.com/matter-labs/zksync-era/commit/92dde039ee8a0bc08e2019b7fa6f243a34d9816f)) +* emit errors in prover API metrics ([#2890](https://github.com/matter-labs/zksync-era/issues/2890)) ([2ac7cc5](https://github.com/matter-labs/zksync-era/commit/2ac7cc5836e69fc82c98df2005fedee01c1084e1)) +* **en:** Resume incomplete snapshot in snapshot creator in more cases ([#2886](https://github.com/matter-labs/zksync-era/issues/2886)) ([f095b4a](https://github.com/matter-labs/zksync-era/commit/f095b4a3223222ac712de53592fe1e68f766600f)) +* make `to` address optional for transaction data ([#2852](https://github.com/matter-labs/zksync-era/issues/2852)) ([8363c1d](https://github.com/matter-labs/zksync-era/commit/8363c1d8697ad9bd2fe5d326218476bc3dad38af)) +* **prover:** Optimize setup keys loading ([#2847](https://github.com/matter-labs/zksync-era/issues/2847)) ([19887ef](https://github.com/matter-labs/zksync-era/commit/19887ef21a8bbd26977353f8ee277b711850dfd2)) +* Selector generator tool ([#2844](https://github.com/matter-labs/zksync-era/issues/2844)) ([b359b08](https://github.com/matter-labs/zksync-era/commit/b359b085895da6582f1d28722107bc5b25f1232c)) +* **tee:** use hex serialization for RPC responses ([#2887](https://github.com/matter-labs/zksync-era/issues/2887)) ([abe0440](https://github.com/matter-labs/zksync-era/commit/abe0440811ae4daf4a0f307922a282e9664308e0)) +* **utils:** Rework locate_workspace, introduce Workspace type ([#2830](https://github.com/matter-labs/zksync-era/issues/2830)) ([d256092](https://github.com/matter-labs/zksync-era/commit/d2560928cc67b40a97a5497ac8542915bf6f91a9)) +* **zk_toolbox:** Add external_node consensus support ([#2821](https://github.com/matter-labs/zksync-era/issues/2821)) ([4a10d7d](https://github.com/matter-labs/zksync-era/commit/4a10d7d9554d6c1aa2f4fc46557d40baaad8ff2f)) + + +### Bug Fixes + +* count SECP256 precompile to account validation gas limit as well ([#2859](https://github.com/matter-labs/zksync-era/issues/2859)) ([fee0c2a](https://github.com/matter-labs/zksync-era/commit/fee0c2ad08a5ab4a04252765b367eb9fbb1f3db7)) +* **en:** Fix connection starvation during snapshot recovery ([#2836](https://github.com/matter-labs/zksync-era/issues/2836)) ([52f4f76](https://github.com/matter-labs/zksync-era/commit/52f4f763674d25f8a5e7f3a111354a559f798d52)) +* **eth_watch:** fix `get_events_inner` ([#2882](https://github.com/matter-labs/zksync-era/issues/2882)) 
([c957dd8](https://github.com/matter-labs/zksync-era/commit/c957dd8011213e0e95fa5962e2310321b29a0d16)) +* handling of HTTP 403 thrown by proxyd ([#2835](https://github.com/matter-labs/zksync-era/issues/2835)) ([2d71c74](https://github.com/matter-labs/zksync-era/commit/2d71c7408a0eed3662fc51f70fa9f525d66e4c6f)) +* **state-keeper:** Restore processed tx metrics in state keeper ([#2815](https://github.com/matter-labs/zksync-era/issues/2815)) ([4d8862b](https://github.com/matter-labs/zksync-era/commit/4d8862b76a55ac78edd481694fefd2107736ffd9)) +* **tee-prover:** fix deserialization of `std::time::Duration` in `envy` config ([#2817](https://github.com/matter-labs/zksync-era/issues/2817)) ([df8641a](https://github.com/matter-labs/zksync-era/commit/df8641a912a8d480ceecff58b0bfaef05e04f0c8)) + ## [24.24.0](https://github.com/matter-labs/zksync-era/compare/core-v24.23.0...core-v24.24.0) (2024-09-05) diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml index a1d3951ff3d..c7a4476173f 100644 --- a/core/bin/external_node/Cargo.toml +++ b/core/bin/external_node/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "zksync_external_node" description = "Non-validator ZKsync node" -version = "24.24.0" # x-release-please-version +version = "24.25.0" # x-release-please-version edition.workspace = true authors.workspace = true homepage.workspace = true diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index 142b4b81330..821df0a06de 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -1402,6 +1402,9 @@ impl From<&ExternalNodeConfig> for InternalApiConfig { Self { l1_chain_id: config.required.l1_chain_id, l2_chain_id: config.required.l2_chain_id, + // TODO: EN not supported yet + sl_chain_id: SLChainId(config.required.l1_chain_id.0), + settlement_layer_url: None, max_tx_size: config.optional.max_tx_size_bytes, estimate_gas_scale_factor: config.optional.estimate_gas_scale_factor, estimate_gas_acceptable_overestimation: config diff --git a/core/bin/external_node/src/config/observability.rs b/core/bin/external_node/src/config/observability.rs index 0dd83f3bd35..91b721bf77c 100644 --- a/core/bin/external_node/src/config/observability.rs +++ b/core/bin/external_node/src/config/observability.rs @@ -95,11 +95,10 @@ impl ObservabilityENConfig { ) }) .transpose()?; - let guard = zksync_vlog::ObservabilityBuilder::new() + zksync_vlog::ObservabilityBuilder::new() .with_logs(Some(logs)) .with_sentry(sentry) - .build(); - Ok(guard) + .try_build() } pub(crate) fn from_configs(general_config: &GeneralConfig) -> anyhow::Result<Self> { diff --git a/core/bin/external_node/src/config/tests.rs b/core/bin/external_node/src/config/tests.rs index 43210a76572..a32be3eff72 100644 --- a/core/bin/external_node/src/config/tests.rs +++ b/core/bin/external_node/src/config/tests.rs @@ -63,7 +63,10 @@ fn parsing_observability_config() { fn using_unset_sentry_url() { let env_vars = MockEnvironment::new(&[("MISC_SENTRY_URL", "unset")]); let config = ObservabilityENConfig::new(&env_vars).unwrap(); - config.build_observability().unwrap(); + if let Err(err) = config.build_observability() { + // Global tracer may be installed by another test, but the logic shouldn't fail before that.
+ assert!(format!("{err:?}").contains("global tracer"), "{err:?}"); + } } #[test] diff --git a/core/bin/external_node/src/tests/mod.rs b/core/bin/external_node/src/tests/mod.rs index e58a9fd95f2..efd76d4fa42 100644 --- a/core/bin/external_node/src/tests/mod.rs +++ b/core/bin/external_node/src/tests/mod.rs @@ -21,7 +21,7 @@ const POLL_INTERVAL: Duration = Duration::from_millis(100); #[tokio::test] #[tracing::instrument] // Add args to the test logs async fn external_node_basics(components_str: &'static str) { - let _guard = zksync_vlog::ObservabilityBuilder::new().build(); // Enable logging to simplify debugging + let _guard = zksync_vlog::ObservabilityBuilder::new().try_build().ok(); // Enable logging to simplify debugging let (env, env_handles) = utils::TestEnvironment::with_genesis_block(components_str).await; @@ -92,7 +92,7 @@ async fn external_node_basics(components_str: &'static str) { #[tokio::test] async fn node_reacts_to_stop_signal_during_initial_reorg_detection() { - let _guard = zksync_vlog::ObservabilityBuilder::new().build(); // Enable logging to simplify debugging + let _guard = zksync_vlog::ObservabilityBuilder::new().try_build().ok(); // Enable logging to simplify debugging let (env, env_handles) = utils::TestEnvironment::with_genesis_block("core").await; let l2_client = utils::mock_l2_client_hanging(); @@ -128,7 +128,7 @@ async fn node_reacts_to_stop_signal_during_initial_reorg_detection() { #[tokio::test] async fn running_tree_without_core_is_not_allowed() { - let _guard = zksync_vlog::ObservabilityBuilder::new().build(); // Enable logging to simplify debugging + let _guard = zksync_vlog::ObservabilityBuilder::new().try_build().ok(); // Enable logging to simplify debugging let (env, _env_handles) = utils::TestEnvironment::with_genesis_block("tree").await; let l2_client = utils::mock_l2_client(&env); @@ -165,7 +165,7 @@ async fn running_tree_without_core_is_not_allowed() { #[tokio::test] async fn running_tree_api_without_tree_is_not_allowed() { - let _guard = zksync_vlog::ObservabilityBuilder::new().build(); // Enable logging to simplify debugging + let _guard = zksync_vlog::ObservabilityBuilder::new().try_build().ok(); // Enable logging to simplify debugging let (env, _env_handles) = utils::TestEnvironment::with_genesis_block("core,tree_api").await; let l2_client = utils::mock_l2_client(&env); diff --git a/core/bin/selector_generator/Cargo.toml b/core/bin/selector_generator/Cargo.toml index e0b0afe0ae2..b3425c11b4e 100644 --- a/core/bin/selector_generator/Cargo.toml +++ b/core/bin/selector_generator/Cargo.toml @@ -11,8 +11,11 @@ categories.workspace = true publish = false [dependencies] +anyhow.workspace = true serde = { workspace = true, features = ["derive"] } serde_json.workspace = true -sha3.workspace = true glob.workspace = true -clap = { workspace = true, features = ["derive"] } \ No newline at end of file +clap = { workspace = true, features = ["derive"] } +ethabi.workspace = true +hex.workspace = true +tokio = { workspace = true, features = ["full"] } diff --git a/core/bin/selector_generator/src/app.rs b/core/bin/selector_generator/src/app.rs new file mode 100644 index 00000000000..425bf9f4282 --- /dev/null +++ b/core/bin/selector_generator/src/app.rs @@ -0,0 +1,105 @@ +use std::path::PathBuf; + +use anyhow::Context; +use glob::glob; +use tokio::io::AsyncWriteExt as _; + +use crate::selectors::Selectors; + +#[derive(Debug, Default)] +pub(crate) struct App { + /// Selectors file. + file_path: PathBuf, + /// All the selectors. 
Initially, will be loaded from the file. + /// All the discovered selectors will be merged into it. + selectors: Selectors, + /// Number of selectors before processing the files. + /// Used for reporting. + selectors_before: usize, + /// Number of files analyzed. + /// Used for reporting. + analyzed_files: usize, +} + +impl App { + /// Loads the selectors from the file, or returns a new instance if the file doesn't exist. + pub async fn load(file_path: impl Into<PathBuf>) -> anyhow::Result<Self> { + let file_path = file_path.into(); + // If doesn't exist, return default. + if !file_path.exists() { + return Ok(Self::default()); + } + + let file = tokio::fs::read(&file_path) + .await + .context("Failed to read file")?; + let selectors: Selectors = + serde_json::from_slice(&file).context("Failed to deserialize file")?; + let selectors_before = selectors.len(); + Ok(Self { + file_path, + selectors, + selectors_before, + analyzed_files: 0, + }) + } + + /// Analyses all the JSON files, looking for 'abi' entries, and then computing the selectors for them. + pub async fn process_files(&mut self, directory: &str) -> anyhow::Result<()> { + for file_path in Self::load_file_paths(directory) { + let Ok(new_selectors) = Selectors::load(&file_path).await.inspect_err(|e| { + eprintln!("Error parsing file {file_path:?}: {e:?}"); + }) else { + continue; + }; + self.merge(new_selectors); + } + Ok(()) + } + + /// Saves the selectors to the file. + pub async fn save(self) -> anyhow::Result<()> { + let mut file = tokio::fs::OpenOptions::new() + .write(true) + .create(true) + .truncate(true) + .open(self.file_path) + .await + .context("Failed to open file")?; + let json = serde_json::to_string_pretty(&self.selectors)?; + file.write_all(json.as_bytes()) + .await + .context("Failed to save file")?; + Ok(()) + } + + /// Merges the new selectors into the current ones. + pub fn merge(&mut self, new: Selectors) { + self.selectors.merge(new); + self.analyzed_files += 1; + } + + /// Reports the number of analyzed files and the number of added selectors. + pub fn report(&self) { + println!( + "Analyzed {} files. Added {} selectors (before: {} after: {})", + self.analyzed_files, + self.selectors.len() - self.selectors_before, + self.selectors_before, + self.selectors.len() + ); + } + + fn load_file_paths(dir: &str) -> Vec<PathBuf> { + glob(&format!("{}/**/*.json", dir)) + .expect("Failed to read glob pattern") + .filter_map(|entry| match entry { + Ok(path) => Some(path), + Err(e) => { + eprintln!("Error reading file: {:?}", e); + None + } + }) + .collect() + } +} diff --git a/core/bin/selector_generator/src/main.rs b/core/bin/selector_generator/src/main.rs index ad6180413f1..f5ed2e01c58 100644 --- a/core/bin/selector_generator/src/main.rs +++ b/core/bin/selector_generator/src/main.rs @@ -1,105 +1,33 @@ -use std::{ - collections::HashMap, - fs::{File, OpenOptions}, - io::{self}, -}; - +use app::App; use clap::Parser; -use glob::glob; -use serde::{Deserialize, Serialize}; -use sha3::{Digest, Keccak256}; - -#[derive(Debug, Serialize, Deserialize)] -struct ABIEntry { - #[serde(rename = "type")] - entry_type: String, - name: Option<String>, - inputs: Option<Vec<ABIInput>>, -} -#[derive(Debug, Serialize, Deserialize)] -struct ABIInput { - #[serde(rename = "type")] - input_type: String, -} +pub(crate) mod app; +pub(crate) mod selectors; +/// Selector generator tool. +/// +/// Generates a mapping of short (4-byte) function selectors to their corresponding function names.
+/// Selector generator tool.
+///
+/// Generates a mapping of short (4-byte) function selectors to their corresponding function names.
+///
+/// The generated JSON can be used to look up function names by their selectors, when interacting
+/// with Ethereum contracts.
 #[derive(Debug, Parser)]
-#[command(author, version, about, long_about = None)]
+#[command(author, version, about, long_about)]
 struct Cli {
+    /// Path to the directory with JSON files containing ABI.
+    /// All JSON files in this directory will be processed.
     contracts_dir: String,
+    /// Path to the output file.
+    /// The file will contain the list of function selectors.
+    /// If the file already exists, new selectors will be appended to it.
     output_file: String,
 }
 
-/// Computes solidity selector for a given method and arguments.
-fn compute_selector(name: &str, inputs: &[ABIInput]) -> String {
-    let signature = format!(
-        "{}({})",
-        name,
-        inputs
-            .iter()
-            .map(|i| i.input_type.clone())
-            .collect::<Vec<String>>()
-            .join(",")
-    );
-    let mut hasher = Keccak256::new();
-    hasher.update(signature);
-    format!("{:x}", hasher.finalize())[..8].to_string()
-}
-
-/// Analyses all the JSON files, looking for 'abi' entries, and then computing the selectors for them.
-fn process_files(directory: &str, output_file: &str) -> io::Result<()> {
-    let mut selectors: HashMap<String, String> = match File::open(output_file) {
-        Ok(file) => serde_json::from_reader(file).unwrap_or_default(),
-        Err(_) => HashMap::new(),
-    };
-    let selectors_before = selectors.len();
-    let mut analyzed_files = 0;
-
-    for entry in glob(&format!("{}/**/*.json", directory)).expect("Failed to read glob pattern") {
-        match entry {
-            Ok(path) => {
-                let file_path = path.clone();
-                let file = File::open(path)?;
-                let json: Result<serde_json::Value, _> = serde_json::from_reader(file);
-
-                if let Ok(json) = json {
-                    if let Some(abi) = json.get("abi").and_then(|v| v.as_array()) {
-                        analyzed_files += 1;
-                        for item in abi {
-                            let entry: ABIEntry = serde_json::from_value(item.clone()).unwrap();
-                            if entry.entry_type == "function" {
-                                if let (Some(name), Some(inputs)) = (entry.name, entry.inputs) {
-                                    let selector = compute_selector(&name, &inputs);
-                                    selectors.entry(selector).or_insert(name);
-                                }
-                            }
-                        }
-                    }
-                } else {
-                    eprintln!("Error parsing file: {:?} - ignoring.", file_path)
-                }
-            }
-            Err(e) => eprintln!("Error reading file: {:?}", e),
-        }
-    }
-    println!(
-        "Analyzed {} files. Added {} selectors (before: {} after: {})",
-        analyzed_files,
-        selectors.len() - selectors_before,
-        selectors_before,
-        selectors.len()
-    );
-
-    let file = OpenOptions::new()
-        .write(true)
-        .create(true)
-        .truncate(true)
-        .open(output_file)?;
-    serde_json::to_writer_pretty(file, &selectors)?;
-    Ok(())
-}
-
-fn main() -> io::Result<()> {
+#[tokio::main]
+async fn main() -> anyhow::Result<()> {
     let args = Cli::parse();
-    process_files(&args.contracts_dir, &args.output_file)
+    let mut app = App::load(args.output_file).await?;
+    app.process_files(&args.contracts_dir).await?;
+    app.report();
+    app.save().await?;
+    Ok(())
 }
diff --git a/core/bin/selector_generator/src/selectors.rs b/core/bin/selector_generator/src/selectors.rs
new file mode 100644
index 00000000000..3b69854a947
--- /dev/null
+++ b/core/bin/selector_generator/src/selectors.rs
@@ -0,0 +1,118 @@
+use std::{collections::HashMap, path::PathBuf};
+
+use anyhow::Context;
+use serde::{Deserialize, Serialize};
+
+/// Short (4-byte) function selector.
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
+#[serde(transparent)]
+struct Selector(String);
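For intuition on what the `short_signature`-based code in this new module computes (and what the old `compute_selector` did by hand): a 4-byte selector is the first four bytes of the Keccak-256 hash of the canonical function signature. A standalone sketch, using the `sha3` crate the old implementation depended on and the `hex` crate the new one adds; `transfer(address,uint256)` is the ERC-20 example asserted in the test further below:

use sha3::{Digest, Keccak256};

fn main() {
    // Canonical signature: function name plus comma-separated parameter types, no spaces.
    let signature = "transfer(address,uint256)";
    let hash = Keccak256::digest(signature.as_bytes());
    // The short selector is the first 4 bytes of the hash, hex-encoded.
    let selector = hex::encode(&hash[..4]);
    assert_eq!(selector, "a9059cbb");
    println!("{signature} -> 0x{selector}");
}

The output file the tool writes is then just a flat JSON object mapping such selectors to names, e.g. {"a9059cbb": "transfer"}.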
+
+/// Function name without parameters.
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
+#[serde(transparent)]
+struct FunctionName(String);
+
+/// A set of function selectors and their corresponding function names.
+#[derive(Debug, Default, Serialize, Deserialize)]
+pub(crate) struct Selectors {
+    #[serde(flatten)]
+    selectors: HashMap<Selector, FunctionName>,
+}
+
+impl Selectors {
+    /// Loads the selectors from the file, or returns a new instance if the file is valid
+    /// JSON but doesn't contain an `abi` section.
+    ///
+    /// Will return an error if the file doesn't exist or cannot be deserialized.
+    pub async fn load(file_path: &PathBuf) -> anyhow::Result<Self> {
+        let file = tokio::fs::read(file_path)
+            .await
+            .context("Failed to read file")?;
+        let json: serde_json::Value =
+            serde_json::from_slice(&file).context("Failed to deserialize file")?;
+        let Some(abi) = json.get("abi").cloned() else {
+            return Ok(Selectors::default());
+        };
+
+        let contract: ethabi::Contract =
+            serde_json::from_value(abi).context("Failed to parse abi")?;
+        Ok(Self::new(contract))
+    }
+
+    /// Loads selectors from a given contract.
+    pub fn new(contract: ethabi::Contract) -> Self {
+        let selectors: HashMap<_, _> = contract
+            .functions
+            .into_values()
+            .flatten()
+            .map(|function| {
+                let selector = hex::encode(function.short_signature());
+                (Selector(selector), FunctionName(function.name))
+            })
+            .collect();
+        Self { selectors }
+    }
+
+    /// Merges new selectors into the existing set.
+    pub fn merge(&mut self, new: Self) {
+        for (selector, name) in new.selectors {
+            self.selectors
+                .entry(selector.clone())
+                .and_modify(|e| {
+                    assert_eq!(
+                        e, &name,
+                        "Function name mismatch for selector '{:?}'",
+                        selector
+                    )
+                })
+                .or_insert(name);
+        }
+    }
+
+    pub fn len(&self) -> usize {
+        self.selectors.len()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_selectors() {
+        let contract_json = r#"[
+            {
+                "type": "function",
+                "name": "transfer",
+                "inputs": [
+                    { "name": "to", "type": "address" },
+                    { "name": "value", "type": "uint256" }
+                ],
+                "outputs": [],
+                "stateMutability": "nonpayable"
+            },
+            {
+                "type": "function",
+                "name": "bar",
+                "inputs": [],
+                "outputs": [],
+                "stateMutability": "nonpayable"
+            }
+        ]
+        "#;
+
+        let contract: ethabi::Contract = serde_json::from_str(contract_json).unwrap();
+        let selectors = Selectors::new(contract);
+        assert_eq!(selectors.len(), 2);
+
+        // Check the generated selectors.
+ assert_eq!( + selectors + .selectors + .get(&Selector("a9059cbb".to_string())) + .expect("No selector for transfer found"), + &FunctionName("transfer".to_string()) + ); + } +} diff --git a/core/bin/snapshots_creator/Cargo.toml b/core/bin/snapshots_creator/Cargo.toml index 530b9635cd4..5a36c646e88 100644 --- a/core/bin/snapshots_creator/Cargo.toml +++ b/core/bin/snapshots_creator/Cargo.toml @@ -29,3 +29,4 @@ futures.workspace = true [dev-dependencies] rand.workspace = true +test-casing.workspace = true diff --git a/core/bin/snapshots_creator/src/creator.rs b/core/bin/snapshots_creator/src/creator.rs index 18212a7d205..29150cd6b69 100644 --- a/core/bin/snapshots_creator/src/creator.rs +++ b/core/bin/snapshots_creator/src/creator.rs @@ -291,25 +291,38 @@ impl SnapshotCreator { .get_sealed_l1_batch_number() .await?; let sealed_l1_batch_number = sealed_l1_batch_number.context("No L1 batches in Postgres")?; - let requested_l1_batch_number = if let Some(l1_batch_number) = config.l1_batch_number { + let (requested_l1_batch_number, existing_snapshot) = if let Some(l1_batch_number) = + config.l1_batch_number + { anyhow::ensure!( l1_batch_number <= sealed_l1_batch_number, "Requested a snapshot for L1 batch #{l1_batch_number} that doesn't exist in Postgres (latest L1 batch: {sealed_l1_batch_number})" ); - l1_batch_number + + let existing_snapshot = master_conn + .snapshots_dal() + .get_snapshot_metadata(l1_batch_number) + .await?; + (l1_batch_number, existing_snapshot) } else { // We subtract 1 so that after restore, EN node has at least one L1 batch to fetch. anyhow::ensure!( sealed_l1_batch_number != L1BatchNumber(0), "Cannot create snapshot when only the genesis L1 batch is present in Postgres" ); - sealed_l1_batch_number - 1 - }; + let requested_l1_batch_number = sealed_l1_batch_number - 1; - let existing_snapshot = master_conn - .snapshots_dal() - .get_snapshot_metadata(requested_l1_batch_number) - .await?; + // Continue creating a pending snapshot if it exists, even if it doesn't correspond to the latest L1 batch. + // OTOH, a completed snapshot does not matter, unless it corresponds to `requested_l1_batch_number` (in which case it doesn't need to be created again). + let existing_snapshot = master_conn + .snapshots_dal() + .get_newest_snapshot_metadata() + .await? + .filter(|snapshot| { + !snapshot.is_complete() || snapshot.l1_batch_number == requested_l1_batch_number + }); + (requested_l1_batch_number, existing_snapshot) + }; drop(master_conn); match existing_snapshot { @@ -317,18 +330,7 @@ impl SnapshotCreator { tracing::info!("Snapshot for the requested L1 batch is complete: {snapshot:?}"); Ok(None) } - Some(snapshot) if config.l1_batch_number.is_some() => { - Ok(Some(SnapshotProgress::from_existing_snapshot(&snapshot))) - } - Some(snapshot) => { - // Unless creating a snapshot for a specific L1 batch is requested, we never continue an existing snapshot, even if it's incomplete. - // This it to make running multiple snapshot creator instances in parallel easier to reason about. - tracing::warn!( - "Snapshot at expected L1 batch #{requested_l1_batch_number} exists, but is incomplete: {snapshot:?}. 
If you need to resume creating it, \
-                specify the L1 batch number in the snapshot creator config"
-            );
-            Ok(None)
-        }
+        Some(snapshot) => Ok(Some(SnapshotProgress::from_existing_snapshot(&snapshot))),
         None => {
             Self::initialize_snapshot_progress(
                 config,
diff --git a/core/bin/snapshots_creator/src/tests.rs b/core/bin/snapshots_creator/src/tests.rs
index 5b08689782d..f3c19138880 100644
--- a/core/bin/snapshots_creator/src/tests.rs
+++ b/core/bin/snapshots_creator/src/tests.rs
@@ -10,6 +10,7 @@ use std::{
 };
 
 use rand::{thread_rng, Rng};
+use test_casing::test_casing;
 use zksync_config::SnapshotsCreatorConfig;
 use zksync_dal::{Connection, CoreDal};
 use zksync_object_store::{MockObjectStore, ObjectStore};
@@ -64,6 +65,15 @@ impl HandleEvent for TestEventListener {
     }
 }
 
+#[derive(Debug)]
+struct UnreachableEventListener;
+
+impl HandleEvent for UnreachableEventListener {
+    fn on_chunk_started(&self) -> TestBehavior {
+        unreachable!("should not be reached");
+    }
+}
+
 impl SnapshotCreator {
     fn for_tests(blob_store: Arc<dyn ObjectStore>, pool: ConnectionPool<Core>) -> Self {
         Self {
@@ -80,6 +90,13 @@ impl SnapshotCreator {
             ..self
         }
     }
+
+    fn panic_on_chunk_start(self) -> Self {
+        Self {
+            event_listener: Box::new(UnreachableEventListener),
+            ..self
+        }
+    }
 }
 
 #[derive(Debug)]
@@ -432,8 +449,9 @@ async fn persisting_snapshot_logs_for_v0_snapshot() {
     assert_eq!(actual_logs, expected_outputs.storage_logs);
 }
 
+#[test_casing(2, [false, true])]
 #[tokio::test]
-async fn recovery_workflow() {
+async fn recovery_workflow(specify_batch_after_recovery: bool) {
     let pool = ConnectionPool::<Core>::test_pool().await;
     let mut rng = thread_rng();
     let object_store = MockObjectStore::arc();
@@ -463,29 +481,9 @@ async fn recovery_workflow() {
     let actual_deps: HashSet<_> = factory_deps.into_iter().collect();
     assert_eq!(actual_deps, expected_outputs.deps);
 
-    // Check that the creator does nothing unless it's requested to create a new snapshot.
-    SnapshotCreator::for_tests(object_store.clone(), pool.clone())
-        .stop_after_chunk_count(2)
-        .run(SEQUENTIAL_TEST_CONFIG, MIN_CHUNK_COUNT)
-        .await
-        .unwrap();
-    let snapshot_metadata = conn
-        .snapshots_dal()
-        .get_snapshot_metadata(snapshot_l1_batch_number)
-        .await
-        .unwrap()
-        .expect("No snapshot metadata");
-    assert!(
-        snapshot_metadata
-            .storage_logs_filepaths
-            .iter()
-            .all(Option::is_none),
-        "{snapshot_metadata:?}"
-    );
-
     // Process 2 storage log chunks, then stop.
     let recovery_config = SnapshotsCreatorConfig {
-        l1_batch_number: Some(snapshot_l1_batch_number),
+        l1_batch_number: specify_batch_after_recovery.then_some(snapshot_l1_batch_number),
         ..SEQUENTIAL_TEST_CONFIG
     };
     SnapshotCreator::for_tests(object_store.clone(), pool.clone())
@@ -511,11 +509,68 @@ async fn recovery_workflow() {
 
     // Process the remaining chunks.
     SnapshotCreator::for_tests(object_store.clone(), pool.clone())
+        .run(recovery_config.clone(), MIN_CHUNK_COUNT)
+        .await
+        .unwrap();
+
+    assert_storage_logs(&*object_store, snapshot_l1_batch_number, &expected_outputs).await;
+
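The resume rule these tests exercise (introduced in `creator.rs` above) is easiest to read as a standalone predicate. A minimal sketch with a simplified metadata type (the real metadata lives in the DAL; the names here are illustrative):

#[derive(Debug)]
struct Snapshot {
    l1_batch_number: u32,
    is_complete: bool,
}

/// Mirrors the `filter` in `creator.rs`: a pending snapshot is always resumed,
/// while a completed one only matters if it is exactly the requested batch.
fn reusable(snapshot: &Snapshot, requested_batch: u32) -> bool {
    !snapshot.is_complete || snapshot.l1_batch_number == requested_batch
}

fn main() {
    let pending = Snapshot { l1_batch_number: 8, is_complete: false };
    let complete = Snapshot { l1_batch_number: 8, is_complete: true };
    assert!(reusable(&pending, 9)); // resume the older pending snapshot first
    assert!(!reusable(&complete, 9)); // completed and stale: create a fresh snapshot
    assert!(reusable(&complete, 8)); // completed and requested: nothing left to do
}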
+    // Check that the snapshot is not created anew after it is completed.
+    SnapshotCreator::for_tests(object_store.clone(), pool.clone())
+        .panic_on_chunk_start()
         .run(recovery_config, MIN_CHUNK_COUNT)
         .await
         .unwrap();
+    let snapshot_metadata = conn
+        .snapshots_dal()
+        .get_snapshot_metadata(snapshot_l1_batch_number)
+        .await
+        .unwrap()
+        .expect("No snapshot metadata");
+    assert!(snapshot_metadata.is_complete(), "{snapshot_metadata:#?}");
+}
+
+#[tokio::test]
+async fn recovery_workflow_with_new_l1_batch() {
+    let pool = ConnectionPool::<Core>::test_pool().await;
+    let mut rng = thread_rng();
+    let object_store = MockObjectStore::arc();
+    let mut conn = pool.connection().await.unwrap();
+    let expected_outputs = prepare_postgres(&mut rng, &mut conn, 10).await;
+
+    SnapshotCreator::for_tests(object_store.clone(), pool.clone())
+        .stop_after_chunk_count(2)
+        .run(SEQUENTIAL_TEST_CONFIG, MIN_CHUNK_COUNT)
+        .await
+        .unwrap();
+
+    let snapshot_l1_batch_number = L1BatchNumber(8);
+    let snapshot_metadata = conn
+        .snapshots_dal()
+        .get_snapshot_metadata(snapshot_l1_batch_number)
+        .await
+        .unwrap()
+        .expect("No snapshot metadata");
+    assert!(!snapshot_metadata.is_complete(), "{snapshot_metadata:#?}");
+
+    let new_logs = gen_storage_logs(&mut thread_rng(), 50);
+    create_l1_batch(&mut conn, snapshot_l1_batch_number + 2, &new_logs).await;
+
+    // The old snapshot should be completed.
+    SnapshotCreator::for_tests(object_store.clone(), pool.clone())
+        .run(SEQUENTIAL_TEST_CONFIG, MIN_CHUNK_COUNT)
+        .await
+        .unwrap();
 
     assert_storage_logs(&*object_store, snapshot_l1_batch_number, &expected_outputs).await;
+
+    let snapshot_metadata = conn
+        .snapshots_dal()
+        .get_snapshot_metadata(snapshot_l1_batch_number)
+        .await
+        .unwrap()
+        .expect("No snapshot metadata");
+    assert!(snapshot_metadata.is_complete(), "{snapshot_metadata:#?}");
 }
 
 #[tokio::test]
diff --git a/core/bin/system-constants-generator/src/utils.rs b/core/bin/system-constants-generator/src/utils.rs
index 7c8eccd30ca..2a1c690fe38 100644
--- a/core/bin/system-constants-generator/src/utils.rs
+++ b/core/bin/system-constants-generator/src/utils.rs
@@ -2,7 +2,7 @@ use std::{cell::RefCell, rc::Rc};
 
 use once_cell::sync::Lazy;
 use zksync_contracts::{
-    load_sys_contract, read_bootloader_code, read_sys_contract_bytecode, read_zbin_bytecode,
+    load_sys_contract, read_bootloader_code, read_bytecode_from_path, read_sys_contract_bytecode,
     BaseSystemContracts, ContractLanguage, SystemContractCode,
 };
 use zksync_multivm::{
@@ -89,7 +89,7 @@ pub(super) fn get_l2_tx(
     pubdata_price: u32,
 ) -> L2Tx {
     L2Tx::new_signed(
-        contract_address,
+        Some(contract_address),
         vec![],
         Nonce(0),
         Fee {
@@ -134,7 +134,7 @@ pub(super) fn get_l1_tx(
 ) -> L1Tx {
     L1Tx {
         execute: Execute {
-            contract_address,
+            contract_address: Some(contract_address),
             calldata: custom_calldata.unwrap_or_default(),
             value: U256::from(0),
             factory_deps,
@@ -169,9 +169,8 @@ pub(super) fn get_l1_txs(number_of_txs: usize) -> (Vec<Transaction>, Vec<Transaction>) {
 
 pub fn read_bootloader_test_code(test: &str) -> Vec<u8> {
-    read_zbin_bytecode(format!(
-        "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin",
-        test
+    read_bytecode_from_path(format!(
+        "contracts/system-contracts/zkout/{test}.yul/contracts-preprocessed/bootloader/{test}.yul.json",
     ))
 }
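The switch above from `.yul.zbin` files to `zkout/...json` artifacts relies on `read_bytecode_from_path` (made `pub` further down in this diff), which extracts the bytecode from the artifact JSON. A rough sketch of that extraction, assuming a hex `"bytecode"` field with an optional `0x` prefix (the exact artifact schema is an assumption here):

// Sketch: pull the bytecode out of a JSON artifact. The "bytecode" field name
// matches what zksync_contracts reads; the 0x prefix handling is defensive.
fn bytecode_from_artifact(artifact: &serde_json::Value) -> Option<Vec<u8>> {
    let hex_str = artifact["bytecode"].as_str()?;
    hex::decode(hex_str.strip_prefix("0x").unwrap_or(hex_str)).ok()
}

fn main() {
    let artifact = serde_json::json!({ "bytecode": "0xdeadbeef" });
    assert_eq!(
        bytecode_from_artifact(&artifact),
        Some(vec![0xde, 0xad, 0xbe, 0xef])
    );
}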
diff --git a/core/bin/zksync_server/src/main.rs b/core/bin/zksync_server/src/main.rs
index f51b04baa35..db9f125b1c3 100644
--- a/core/bin/zksync_server/src/main.rs
+++ b/core/bin/zksync_server/src/main.rs
@@ -10,6 +10,7 @@ use zksync_config::{
         StateKeeperConfig,
     },
     fri_prover_group::FriProverGroupConfig,
+    gateway::GatewayChainConfig,
     house_keeper::HouseKeeperConfig,
     BasicWitnessInputProducerConfig, ContractsConfig, DatabaseSecrets, ExperimentalVmConfig,
     ExternalPriceApiClientConfig, FriProofCompressorConfig, FriProverConfig,
@@ -59,6 +60,9 @@ struct Cli {
     /// Path to the yaml with contracts. If set, it will be used instead of env vars.
     #[arg(long)]
     contracts_config_path: Option<std::path::PathBuf>,
+    /// Path to the yaml with gateway contracts. If set, it will be used instead of env vars.
+    #[arg(long)]
+    gateway_contracts_config_path: Option<std::path::PathBuf>,
     /// Path to the wallets config. If set, it will be used instead of env vars.
     #[arg(long)]
     wallets_path: Option<std::path::PathBuf>,
@@ -142,7 +146,22 @@ fn main() -> anyhow::Result<()> {
         }
     };
 
-    let gateway_contracts_config = ContractsConfig::from_env_variant("GATEWAY_".to_string()).ok();
+    let gateway_contracts_config: Option<GatewayChainConfig> =
+        match opt.gateway_contracts_config_path {
+            None => ContractsConfig::from_env_variant("GATEWAY_".to_string())
+                .ok()
+                .map(Into::into),
+            Some(path) => {
+                let yaml =
+                    std::fs::read_to_string(&path).with_context(|| path.display().to_string())?;
+                let result = decode_yaml_repr::<
+                    zksync_protobuf_config::proto::gateway::GatewayChainConfig,
+                >(&yaml)
+                .context("failed decoding gateway contracts YAML config")?;
+
+                Some(result)
+            }
+        };
 
     let genesis = match opt.genesis_path {
         None => GenesisConfig::from_env().context("Genesis config")?,
diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs
index 15ba1f73cb9..763fd600e6f 100644
--- a/core/bin/zksync_server/src/node_builder.rs
+++ b/core/bin/zksync_server/src/node_builder.rs
@@ -4,8 +4,8 @@ use anyhow::Context;
 use zksync_config::{
     configs::{
-        da_client::DAClient, eth_sender::PubdataSendingMode, wallets::Wallets, GeneralConfig,
-        Secrets,
+        da_client::DAClient, eth_sender::PubdataSendingMode, gateway::GatewayChainConfig,
+        wallets::Wallets, GeneralConfig, Secrets,
     },
     ContractsConfig, GenesisConfig,
 };
@@ -90,7 +90,7 @@ pub struct MainNodeBuilder {
     wallets: Wallets,
     genesis_config: GenesisConfig,
     contracts_config: ContractsConfig,
-    gateway_contracts_config: Option<ContractsConfig>,
+    gateway_contracts_config: Option<GatewayChainConfig>,
     secrets: Secrets,
 }
 
@@ -100,7 +100,7 @@ impl MainNodeBuilder {
         wallets: Wallets,
         genesis_config: GenesisConfig,
         contracts_config: ContractsConfig,
-        gateway_contracts_config: Option<ContractsConfig>,
+        gateway_contracts_config: Option<GatewayChainConfig>,
         secrets: Secrets,
     ) -> anyhow::Result<Self> {
         Ok(Self {
diff --git a/core/lib/basic_types/src/prover_dal.rs b/core/lib/basic_types/src/prover_dal.rs
index 7eb67144860..bec5a55ced1 100644
--- a/core/lib/basic_types/src/prover_dal.rs
+++ b/core/lib/basic_types/src/prover_dal.rs
@@ -2,13 +2,14 @@
 use std::{net::IpAddr, ops::Add, str::FromStr};
 
 use chrono::{DateTime, Duration, NaiveDateTime, NaiveTime, Utc};
+use serde::{Deserialize, Serialize};
 use strum::{Display, EnumString};
 
 use crate::{
     basic_fri_types::AggregationRound, protocol_version::ProtocolVersionId, L1BatchNumber,
 };
 
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, Copy)]
 pub struct FriProverJobMetadata {
     pub id: u32,
     pub block_number: L1BatchNumber,
@@ -27,7 +28,7 @@ pub struct ExtendedJobCountStatistics {
     pub successful: usize,
 }
 
-#[derive(Debug, Clone, Copy, Default)]
+#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize)]
 pub struct JobCountStatistics {
     pub queued: usize,
     pub in_progress: usize,
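The `FastVmMode` docs in the next hunk describe `Shadow` mode: run both VMs and treat any divergence as fatal. The pattern, reduced to a sketch (illustrative closures, not the multivm API):

/// Runs two implementations of the same computation and panics on divergence,
/// returning the agreed-upon result -- the essence of `FastVmMode::Shadow`.
fn run_shadowed<T: PartialEq + std::fmt::Debug>(
    old_vm: impl FnOnce() -> T,
    new_vm: impl FnOnce() -> T,
) -> T {
    let old_result = old_vm();
    let new_result = new_vm();
    assert_eq!(old_result, new_result, "VM output divergence");
    old_result
}

fn main() {
    let gas_used = run_shadowed(|| 21_000u64, || 21_000u64);
    println!("both VMs agree: gas_used = {gas_used}");
}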
diff --git a/core/lib/basic_types/src/vm.rs b/core/lib/basic_types/src/vm.rs
index 7ef0137e078..ac31cae64f0 100644
--- a/core/lib/basic_types/src/vm.rs
+++ b/core/lib/basic_types/src/vm.rs
@@ -33,8 +33,9 @@ pub enum FastVmMode {
     /// Run only the old VM.
     #[default]
     Old,
-    /// Run only the new Vm.
+    /// Run only the new VM.
     New,
     /// Run both the new and old VM and compare their outputs for each transaction execution.
+    /// The VM will panic on divergence.
     Shadow,
 }
diff --git a/core/lib/config/src/configs/api.rs b/core/lib/config/src/configs/api.rs
index ca42cd5e5f8..2e783f7a645 100644
--- a/core/lib/config/src/configs/api.rs
+++ b/core/lib/config/src/configs/api.rs
@@ -216,6 +216,9 @@ pub struct Web3JsonRpcConfig {
     /// (hundreds or thousands RPS).
     #[serde(default)]
     pub extended_api_tracing: bool,
+
+    #[serde(default)]
+    pub settlement_layer_url: Option<String>,
 }
 
 impl Web3JsonRpcConfig {
@@ -254,6 +257,7 @@ impl Web3JsonRpcConfig {
             whitelisted_tokens_for_aa: Default::default(),
             api_namespaces: None,
             extended_api_tracing: false,
+            settlement_layer_url: None,
         }
     }
diff --git a/core/lib/config/src/configs/consensus.rs b/core/lib/config/src/configs/consensus.rs
index 759e1312833..2e277341b07 100644
--- a/core/lib/config/src/configs/consensus.rs
+++ b/core/lib/config/src/configs/consensus.rs
@@ -92,6 +92,8 @@ pub struct GenesisSpec {
     pub leader: ValidatorPublicKey,
     /// Address of the registry contract.
     pub registry_address: Option<ethabi::Address>,
+    /// Recommended list of peers to connect to.
+    pub seed_peers: BTreeMap<NodePublicKey, Host>,
 }
 
 #[derive(Clone, Debug, PartialEq, Default)]
diff --git a/core/lib/config/src/configs/gateway.rs b/core/lib/config/src/configs/gateway.rs
new file mode 100644
index 00000000000..bf2f362ae26
--- /dev/null
+++ b/core/lib/config/src/configs/gateway.rs
@@ -0,0 +1,69 @@
+use zksync_basic_types::{web3::Bytes, Address};
+
+use super::ContractsConfig;
+
+/// Config that is only stored for the gateway chain.
+#[derive(serde::Serialize, serde::Deserialize, Debug, Clone, PartialEq)]
+pub struct GatewayConfig {
+    pub state_transition_proxy_addr: Address,
+    pub state_transition_implementation_addr: Address,
+    pub verifier_addr: Address,
+    pub validator_timelock_addr: Address,
+    pub admin_facet_addr: Address,
+    pub mailbox_facet_addr: Address,
+    pub executor_facet_addr: Address,
+    pub getters_facet_addr: Address,
+    pub diamond_init_addr: Address,
+    pub genesis_upgrade_addr: Address,
+    pub default_upgrade_addr: Address,
+    pub multicall3_addr: Address,
+    pub relayed_sl_da_validator: Address,
+    pub validium_da_validator: Address,
+    pub diamond_cut_data: Bytes,
+}
+
+#[derive(serde::Serialize, serde::Deserialize, Debug, Clone, PartialEq)]
+pub struct GatewayChainConfig {
+    pub state_transition_proxy_addr: Address,
+    pub validator_timelock_addr: Address,
+    pub multicall3_addr: Address,
+    pub diamond_proxy_addr: Address,
+    pub chain_admin_addr: Option<Address>,
+    pub governance_addr: Address,
+}
+
+impl GatewayChainConfig {
+    pub fn from_gateway_and_chain_data(
+        gateway_config: &GatewayConfig,
+        diamond_proxy_addr: Address,
+        chain_admin_addr: Address,
+    ) -> Self {
+        // FIXME: there is no "governance" for a chain, only an admin, so we
+        // need to figure out what we mean here
+
+        Self {
+            state_transition_proxy_addr: gateway_config.state_transition_proxy_addr,
+            validator_timelock_addr: gateway_config.validator_timelock_addr,
+            multicall3_addr: gateway_config.multicall3_addr,
+            diamond_proxy_addr,
+            chain_admin_addr: Some(chain_admin_addr),
+            governance_addr: chain_admin_addr,
+        }
+    }
+}
+
+impl From<ContractsConfig> for GatewayChainConfig {
+    fn from(value: ContractsConfig) -> Self {
+        Self {
+            state_transition_proxy_addr: value
+                .ecosystem_contracts
+                .unwrap()
+                .state_transition_proxy_addr,
+            validator_timelock_addr: value.validator_timelock_addr,
+            multicall3_addr: value.l1_multicall3_addr,
+            diamond_proxy_addr: value.diamond_proxy_addr,
+            chain_admin_addr: value.chain_admin_addr,
+            governance_addr: value.governance_addr,
+        }
+    }
+}
diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs
index 1ad503e0687..9ece81dc7cd 100644
--- a/core/lib/config/src/configs/mod.rs
+++ b/core/lib/config/src/configs/mod.rs
@@ -18,6 +18,7 @@ pub use self::{
     fri_prover_gateway::FriProverGatewayConfig,
     fri_witness_generator::FriWitnessGeneratorConfig,
     fri_witness_vector_generator::FriWitnessVectorGeneratorConfig,
+    gateway::{GatewayChainConfig, GatewayConfig},
     general::GeneralConfig,
     genesis::GenesisConfig,
     object_store::ObjectStoreConfig,
@@ -54,6 +55,7 @@ pub mod fri_prover_gateway;
 pub mod fri_prover_group;
 pub mod fri_witness_generator;
 pub mod fri_witness_vector_generator;
+pub mod gateway;
 mod general;
 pub mod genesis;
 pub mod house_keeper;
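One sharp edge worth noting in the `From<ContractsConfig>` conversion above: it calls `.unwrap()` on `ecosystem_contracts`, so converting a config without that section panics rather than returning an error. A reduced sketch of the failure mode (simplified stand-in types, not the real config structs):

#[derive(Debug, Clone)]
struct Ecosystem {
    state_transition_proxy_addr: [u8; 20],
}

#[derive(Debug, Clone, Default)]
struct Contracts {
    ecosystem_contracts: Option<Ecosystem>,
}

fn main() {
    let incomplete = Contracts::default();
    // Mirrors `value.ecosystem_contracts.unwrap()` in the conversion above;
    // with no ecosystem section this is a panic, not a recoverable error.
    let outcome = std::panic::catch_unwind(|| {
        incomplete.ecosystem_contracts.clone().unwrap().state_transition_proxy_addr
    });
    assert!(outcome.is_err(), "conversion requires ecosystem_contracts to be present");
}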
diff --git a/core/lib/config/src/configs/prover_job_monitor.rs b/core/lib/config/src/configs/prover_job_monitor.rs
index c16b1db81b7..d60a0e90c20 100644
--- a/core/lib/config/src/configs/prover_job_monitor.rs
+++ b/core/lib/config/src/configs/prover_job_monitor.rs
@@ -61,6 +61,8 @@ pub struct ProverJobMonitorConfig {
     /// The interval between runs for Witness Job Queuer.
     #[serde(default = "ProverJobMonitorConfig::default_witness_job_queuer_run_interval_ms")]
     pub witness_job_queuer_run_interval_ms: u64,
+    /// HTTP port of the ProverJobMonitor to send requests to.
+    pub http_port: u16,
 }
 
 impl ProverJobMonitorConfig {
diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs
index 348c2a95848..f8a1105fe70 100644
--- a/core/lib/config/src/testonly.rs
+++ b/core/lib/config/src/testonly.rs
@@ -106,6 +106,7 @@ impl Distribution<configs::api::Web3JsonRpcConfig> for EncodeDist {
             api_namespaces: self
                 .sample_opt(|| self.sample_range(rng).map(|_| self.sample(rng)).collect()),
             extended_api_tracing: self.sample(rng),
+            settlement_layer_url: self.sample(rng),
         }
     }
 }
@@ -781,7 +782,9 @@ impl Distribution<configs::consensus::GenesisSpec> for EncodeDist {
     fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::consensus::GenesisSpec {
-        use configs::consensus::{GenesisSpec, ProtocolVersion, ValidatorPublicKey};
+        use configs::consensus::{
+            GenesisSpec, Host, NodePublicKey, ProtocolVersion, ValidatorPublicKey,
+        };
         GenesisSpec {
             chain_id: L2ChainId::default(),
             protocol_version: ProtocolVersion(self.sample(rng)),
@@ -789,6 +792,10 @@
             attesters: self.sample_collect(rng),
             leader: ValidatorPublicKey(self.sample(rng)),
             registry_address: self.sample_opt(|| rng.gen()),
+            seed_peers: self
+                .sample_range(rng)
+                .map(|_| (NodePublicKey(self.sample(rng)), Host(self.sample(rng))))
+                .collect(),
         }
     }
 }
@@ -1122,6 +1129,7 @@ impl Distribution<configs::prover_job_monitor::ProverJobMonitorConfig> for EncodeDist {
             prover_queue_reporter_run_interval_ms: self.sample(rng),
             witness_generator_queue_reporter_run_interval_ms: self.sample(rng),
             witness_job_queuer_run_interval_ms: self.sample(rng),
+            http_port: self.sample(rng),
         }
     }
 }
diff --git a/core/lib/constants/src/system_logs.rs b/core/lib/constants/src/system_logs.rs
index bd4167b3d02..e2cc58444f3 100644
--- a/core/lib/constants/src/system_logs.rs
+++ b/core/lib/constants/src/system_logs.rs
@@ -1,8 +1,8 @@
 /// The key of the system log with value of the L2->L1 logs tree root hash
 pub const L2_TO_L1_LOGS_TREE_ROOT_KEY: u32 = 0;
 
-/// The key of the system log with value of the state diff hash
-pub const STATE_DIFF_HASH_KEY: u32 = 2;
+/// The key of the system log with value of the state diff hash for pre-gateway protocol versions
+pub const STATE_DIFF_HASH_KEY_PRE_GATEWAY: u64 = 2;
 
 /// The key of the system log with value of the first blob linear hash
 pub const BLOB1_LINEAR_HASH_KEY: u32 = 7;
diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs
index aaa890f5cdd..5f98d65fcf2 100644
--- a/core/lib/contracts/src/lib.rs
+++ b/core/lib/contracts/src/lib.rs
@@ -237,7 +237,7 @@ pub fn known_codes_contract() -> Contract {
 }
 
 /// Reads bytecode from a given path.
-fn read_bytecode_from_path(artifact_path: impl AsRef + std::fmt::Debug) -> Vec { +pub fn read_bytecode_from_path(artifact_path: impl AsRef + std::fmt::Debug) -> Vec { let artifact = read_file_to_json_value(&artifact_path); let bytecode = artifact["bytecode"] diff --git a/core/lib/dal/.sqlx/query-3a3a5250bd4d7cd1a33d05ab5f36513433f7b08622b87c9da839d90fba551254.json b/core/lib/dal/.sqlx/query-0ae56114d62c82dc02d41612e6138fea4287992ffe2bfb1930025fae7c95c12c.json similarity index 80% rename from core/lib/dal/.sqlx/query-3a3a5250bd4d7cd1a33d05ab5f36513433f7b08622b87c9da839d90fba551254.json rename to core/lib/dal/.sqlx/query-0ae56114d62c82dc02d41612e6138fea4287992ffe2bfb1930025fae7c95c12c.json index 9ed7796fb90..8ec2e65b007 100644 --- a/core/lib/dal/.sqlx/query-3a3a5250bd4d7cd1a33d05ab5f36513433f7b08622b87c9da839d90fba551254.json +++ b/core/lib/dal/.sqlx/query-0ae56114d62c82dc02d41612e6138fea4287992ffe2bfb1930025fae7c95c12c.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", "describe": { "columns": [ { @@ -147,6 +147,11 @@ "ordinal": 28, "name": "state_diff_hash", "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": 
"inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -186,8 +191,9 @@ true, true, true, + true, true ] }, - "hash": "3a3a5250bd4d7cd1a33d05ab5f36513433f7b08622b87c9da839d90fba551254" + "hash": "0ae56114d62c82dc02d41612e6138fea4287992ffe2bfb1930025fae7c95c12c" } diff --git a/core/lib/dal/.sqlx/query-e446b94b844dc4f2b53dc36c979e57cd16ac6082da7f301216d175456da69131.json b/core/lib/dal/.sqlx/query-153bbcc608edd2c67dd33cdbc69ebf33597db483e1e675a312e6b670abc3e38f.json similarity index 85% rename from core/lib/dal/.sqlx/query-e446b94b844dc4f2b53dc36c979e57cd16ac6082da7f301216d175456da69131.json rename to core/lib/dal/.sqlx/query-153bbcc608edd2c67dd33cdbc69ebf33597db483e1e675a312e6b670abc3e38f.json index cdba8295930..c50a13f6636 100644 --- a/core/lib/dal/.sqlx/query-e446b94b844dc4f2b53dc36c979e57cd16ac6082da7f301216d175456da69131.json +++ b/core/lib/dal/.sqlx/query-153bbcc608edd2c67dd33cdbc69ebf33597db483e1e675a312e6b670abc3e38f.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -147,6 +147,11 @@ "ordinal": 28, "name": "state_diff_hash", "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -181,8 +186,9 @@ true, true, true, + true, true ] }, - "hash": "e446b94b844dc4f2b53dc36c979e57cd16ac6082da7f301216d175456da69131" + "hash": "153bbcc608edd2c67dd33cdbc69ebf33597db483e1e675a312e6b670abc3e38f" } diff --git a/core/lib/dal/.sqlx/query-46ffbcf8675ae90bceb4dd8c9d4dc08d8a8da10da5b2c0fd8b445e8c5a71202c.json b/core/lib/dal/.sqlx/query-46ffbcf8675ae90bceb4dd8c9d4dc08d8a8da10da5b2c0fd8b445e8c5a71202c.json new file mode 100644 index 00000000000..1b4c5b816da --- /dev/null +++ 
b/core/lib/dal/.sqlx/query-46ffbcf8675ae90bceb4dd8c9d4dc08d8a8da10da5b2c0fd8b445e8c5a71202c.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n number AS batch_number,\n eth_txs.chain_id AS settlement_layer_id,\n eth_txs_history.tx_hash AS settlement_layer_tx_hash\n FROM\n l1_batches\n JOIN eth_txs ON l1_batches.eth_execute_tx_id = eth_txs.id\n JOIN eth_txs_history ON (\n eth_txs.id = eth_txs_history.eth_tx_id\n AND eth_txs_history.confirmed_at IS NOT NULL\n )\n WHERE\n l1_batches.number = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "batch_number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "settlement_layer_id", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "settlement_layer_tx_hash", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false, + true, + false + ] + }, + "hash": "46ffbcf8675ae90bceb4dd8c9d4dc08d8a8da10da5b2c0fd8b445e8c5a71202c" +} diff --git a/core/lib/dal/.sqlx/query-49bcff5472dab81164bfbdf461c574098a589144d15fef47f297fa1a3dd4ef1d.json b/core/lib/dal/.sqlx/query-4b00685601a7d7feaa6fdabd59589a7b9dcd9e9f61555a48926a85cad25a42ec.json similarity index 79% rename from core/lib/dal/.sqlx/query-49bcff5472dab81164bfbdf461c574098a589144d15fef47f297fa1a3dd4ef1d.json rename to core/lib/dal/.sqlx/query-4b00685601a7d7feaa6fdabd59589a7b9dcd9e9f61555a48926a85cad25a42ec.json index 0ba5af81a4f..c0c32d642ce 100644 --- a/core/lib/dal/.sqlx/query-49bcff5472dab81164bfbdf461c574098a589144d15fef47f297fa1a3dd4ef1d.json +++ b/core/lib/dal/.sqlx/query-4b00685601a7d7feaa6fdabd59589a7b9dcd9e9f61555a48926a85cad25a42ec.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS ROW_NUMBER\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n WHERE\n number - ROW_NUMBER = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS ROW_NUMBER\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND 
l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = inn.number\n WHERE\n number - ROW_NUMBER = $1\n ", "describe": { "columns": [ { @@ -147,6 +147,11 @@ "ordinal": 28, "name": "state_diff_hash", "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -184,8 +189,9 @@ true, true, true, + true, true ] }, - "hash": "49bcff5472dab81164bfbdf461c574098a589144d15fef47f297fa1a3dd4ef1d" + "hash": "4b00685601a7d7feaa6fdabd59589a7b9dcd9e9f61555a48926a85cad25a42ec" } diff --git a/core/lib/dal/.sqlx/query-0913a46bc87a0be3b48444ad42f8d3e46ebbf30c6ea69d040182c70b314868ea.json b/core/lib/dal/.sqlx/query-5d0b99341c8d41c7048fb1fd26793d59a98cb744920f0748aa35f4953e3b1cef.json similarity index 77% rename from core/lib/dal/.sqlx/query-0913a46bc87a0be3b48444ad42f8d3e46ebbf30c6ea69d040182c70b314868ea.json rename to core/lib/dal/.sqlx/query-5d0b99341c8d41c7048fb1fd26793d59a98cb744920f0748aa35f4953e3b1cef.json index 712f02af2df..292567121b9 100644 --- a/core/lib/dal/.sqlx/query-0913a46bc87a0be3b48444ad42f8d3e46ebbf30c6ea69d040182c70b314868ea.json +++ b/core/lib/dal/.sqlx/query-5d0b99341c8d41c7048fb1fd26793d59a98cb744920f0748aa35f4953e3b1cef.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON 
commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", "describe": { "columns": [ { @@ -147,6 +147,11 @@ "ordinal": 28, "name": "state_diff_hash", "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -187,8 +192,9 @@ true, true, true, + true, true ] }, - "hash": "0913a46bc87a0be3b48444ad42f8d3e46ebbf30c6ea69d040182c70b314868ea" + "hash": "5d0b99341c8d41c7048fb1fd26793d59a98cb744920f0748aa35f4953e3b1cef" } diff --git a/core/lib/dal/.sqlx/query-3a04e3759262b43f18653f60d084102986af953b92f104f54b5ea7ed824146d6.json b/core/lib/dal/.sqlx/query-85b8ac8999ceb655d7e216552992834e5ab749bc7822a87394c5eb716072a995.json similarity index 88% rename from core/lib/dal/.sqlx/query-3a04e3759262b43f18653f60d084102986af953b92f104f54b5ea7ed824146d6.json rename to core/lib/dal/.sqlx/query-85b8ac8999ceb655d7e216552992834e5ab749bc7822a87394c5eb716072a995.json index 3b9dc4b0a74..0e05a0d84e9 100644 --- a/core/lib/dal/.sqlx/query-3a04e3759262b43f18653f60d084102986af953b92f104f54b5ea7ed824146d6.json +++ b/core/lib/dal/.sqlx/query-85b8ac8999ceb655d7e216552992834e5ab749bc7822a87394c5eb716072a995.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number = $1\n ", "describe": { "columns": [ { @@ -147,6 +147,11 @@ "ordinal": 28, "name": "state_diff_hash", 
"type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -183,8 +188,9 @@ true, true, true, + true, true ] }, - "hash": "3a04e3759262b43f18653f60d084102986af953b92f104f54b5ea7ed824146d6" + "hash": "85b8ac8999ceb655d7e216552992834e5ab749bc7822a87394c5eb716072a995" } diff --git a/core/lib/dal/.sqlx/query-a94062da88511445408caae77900e49be5a9824e1428765086afcb1da76799de.json b/core/lib/dal/.sqlx/query-92c795370e83b3a3efb590e68c123160039fdf27223dabac51af1a7853ce3b53.json similarity index 85% rename from core/lib/dal/.sqlx/query-a94062da88511445408caae77900e49be5a9824e1428765086afcb1da76799de.json rename to core/lib/dal/.sqlx/query-92c795370e83b3a3efb590e68c123160039fdf27223dabac51af1a7853ce3b53.json index f73c1330b23..6f8e738c5eb 100644 --- a/core/lib/dal/.sqlx/query-a94062da88511445408caae77900e49be5a9824e1428765086afcb1da76799de.json +++ b/core/lib/dal/.sqlx/query-92c795370e83b3a3efb590e68c123160039fdf27223dabac51af1a7853ce3b53.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -147,6 +147,11 @@ "ordinal": 28, "name": "state_diff_hash", "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -183,8 +188,9 @@ true, true, true, + true, true ] }, - "hash": "a94062da88511445408caae77900e49be5a9824e1428765086afcb1da76799de" + "hash": "92c795370e83b3a3efb590e68c123160039fdf27223dabac51af1a7853ce3b53" } diff --git a/core/lib/dal/.sqlx/query-adbb1d0cc1ddf99bb987c5432143f3676ef0f8e75b37818a3c56270f67341224.json b/core/lib/dal/.sqlx/query-b3c0456b90f13fac4e599d5484cb7198f6336e1d6f293c858875bad30e335abf.json similarity index 86% rename from 
core/lib/dal/.sqlx/query-adbb1d0cc1ddf99bb987c5432143f3676ef0f8e75b37818a3c56270f67341224.json rename to core/lib/dal/.sqlx/query-b3c0456b90f13fac4e599d5484cb7198f6336e1d6f293c858875bad30e335abf.json index bff081f16bc..b7cac955415 100644 --- a/core/lib/dal/.sqlx/query-adbb1d0cc1ddf99bb987c5432143f3676ef0f8e75b37818a3c56270f67341224.json +++ b/core/lib/dal/.sqlx/query-b3c0456b90f13fac4e599d5484cb7198f6336e1d6f293c858875bad30e335abf.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -147,6 +147,11 @@ "ordinal": 28, "name": "state_diff_hash", "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -183,8 +188,9 @@ true, true, true, + true, true ] }, - "hash": "adbb1d0cc1ddf99bb987c5432143f3676ef0f8e75b37818a3c56270f67341224" + "hash": "b3c0456b90f13fac4e599d5484cb7198f6336e1d6f293c858875bad30e335abf" } diff --git a/core/lib/dal/.sqlx/query-ae0690b0e128a0f302cfbd584c25044b3bf9d54afe9af6b4a305a45773e61c71.json b/core/lib/dal/.sqlx/query-cd52d81a05c32340d0d27fe1a498b9ef7764efc182b97cae721f63edff42e270.json similarity index 86% rename from core/lib/dal/.sqlx/query-ae0690b0e128a0f302cfbd584c25044b3bf9d54afe9af6b4a305a45773e61c71.json rename to core/lib/dal/.sqlx/query-cd52d81a05c32340d0d27fe1a498b9ef7764efc182b97cae721f63edff42e270.json index 071e69c1c2e..b699502db51 100644 --- a/core/lib/dal/.sqlx/query-ae0690b0e128a0f302cfbd584c25044b3bf9d54afe9af6b4a305a45773e61c71.json +++ b/core/lib/dal/.sqlx/query-cd52d81a05c32340d0d27fe1a498b9ef7764efc182b97cae721f63edff42e270.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n 
l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", "describe": { "columns": [ { @@ -147,6 +147,11 @@ "ordinal": 28, "name": "state_diff_hash", "type_info": "Bytea" + }, + { + "ordinal": 29, + "name": "inclusion_data", + "type_info": "Bytea" } ], "parameters": { @@ -185,8 +190,9 @@ true, true, true, + true, true ] }, - "hash": "ae0690b0e128a0f302cfbd584c25044b3bf9d54afe9af6b4a305a45773e61c71" + "hash": "cd52d81a05c32340d0d27fe1a498b9ef7764efc182b97cae721f63edff42e270" } diff --git a/core/lib/dal/Cargo.toml b/core/lib/dal/Cargo.toml index 9c13eeb3014..ccca49525e4 100644 --- a/core/lib/dal/Cargo.toml +++ b/core/lib/dal/Cargo.toml @@ -19,6 +19,7 @@ zksync_utils.workspace = true zksync_system_constants.workspace = true zksync_contracts.workspace = true zksync_types.workspace = true +zksync_concurrency.workspace = true zksync_consensus_roles.workspace = true zksync_consensus_storage.workspace = true zksync_protobuf.workspace = true diff --git a/core/lib/dal/migrations/20240619090013_state-diff-hash-for-batch.down.sql b/core/lib/dal/migrations/20240619090013_state-diff-hash-for-batch.down.sql deleted file mode 100644 index e1040a2b6f6..00000000000 --- a/core/lib/dal/migrations/20240619090013_state-diff-hash-for-batch.down.sql +++ /dev/null @@ -1,2 +0,0 @@ - -ALTER TABLE l1_batches DROP COLUMN state_diff_hash BYTEA; diff --git a/core/lib/dal/migrations/20240619090013_state-diff-hash-for-batch.up.sql b/core/lib/dal/migrations/20240619090013_state-diff-hash-for-batch.up.sql deleted file mode 100644 index 99af4746e69..00000000000 --- a/core/lib/dal/migrations/20240619090013_state-diff-hash-for-batch.up.sql +++ /dev/null @@ -1,2 +0,0 @@ - -ALTER TABLE l1_batches ADD COLUMN state_diff_hash BYTEA; diff --git a/core/lib/dal/migrations/20240626101657_aggregated-root-in-batch.down.sql b/core/lib/dal/migrations/20240626101657_aggregated-root-in-batch.down.sql deleted file mode 100644 index ee3a710a7bb..00000000000 --- a/core/lib/dal/migrations/20240626101657_aggregated-root-in-batch.down.sql +++ /dev/null @@ -1,2 +0,0 
@@
-ALTER TABLE l1_batches DROP COLUMN aggregation_root;
-ALTER TABLE l1_batches DROP COLUMN local_root;
diff --git a/core/lib/dal/migrations/20240626101657_aggregated-root-in-batch.up.sql b/core/lib/dal/migrations/20240626101657_aggregated-root-in-batch.up.sql
deleted file mode 100644
index ecb9094d886..00000000000
--- a/core/lib/dal/migrations/20240626101657_aggregated-root-in-batch.up.sql
+++ /dev/null
@@ -1,2 +0,0 @@
-ALTER TABLE l1_batches ADD COLUMN aggregation_root BYTEA;
-ALTER TABLE l1_batches ADD COLUMN local_root BYTEA;
diff --git a/core/lib/dal/migrations/20240807102710_l2_da_validator_in_batch.down.sql b/core/lib/dal/migrations/20240807102710_l2_da_validator_in_batch.down.sql
deleted file mode 100644
index 2b6bc08cab2..00000000000
--- a/core/lib/dal/migrations/20240807102710_l2_da_validator_in_batch.down.sql
+++ /dev/null
@@ -1,5 +0,0 @@
--- ALTER TABLE l1_batches
--- ALTER COLUMN fee_account_address DROP DEFAULT,
--- ALTER COLUMN is_finished DROP DEFAULT;
-ALTER TABLE miniblocks
-    DROP COLUMN IF EXISTS fee_account_address;
diff --git a/core/lib/dal/migrations/20240807102710_l2_da_validator_in_batch.up.sql b/core/lib/dal/migrations/20240807102710_l2_da_validator_in_batch.up.sql
deleted file mode 100644
index cdfc86248e4..00000000000
--- a/core/lib/dal/migrations/20240807102710_l2_da_validator_in_batch.up.sql
+++ /dev/null
@@ -1,12 +0,0 @@
-ALTER TABLE miniblocks
-    ADD COLUMN l2_da_validator_address BYTEA NOT NULL DEFAULT '\x0000000000000000000000000000000000000000'::bytea,
-    -- There are miniblocks that used the `Rollup' type, but were actually used on a Validium chain.
-    -- This is okay, since this field represents how the VM works with the DA, rather what is committed on L1.
-    ADD COLUMN pubdata_type TEXT NOT NULL DEFAULT 'Rollup';
--- ^ Add a default value so that DB queries don't fail even if the DB migration is not completed.
-
--- Set default values for columns in `l1_batches` that will be removed, so that INSERTs can work
--- w/o setting these columns.
--- ALTER TABLE l1_batches
--- ALTER COLUMN l2_da_validator_address SET DEFAULT '\x0000000000000000000000000000000000000000'::bytea,
--- ALTER COLUMN is_finished SET DEFAULT true;
diff --git a/core/lib/dal/migrations/20240925103531_gateway_upgrade.down.sql b/core/lib/dal/migrations/20240925103531_gateway_upgrade.down.sql
new file mode 100644
index 00000000000..9af34d7dc8e
--- /dev/null
+++ b/core/lib/dal/migrations/20240925103531_gateway_upgrade.down.sql
@@ -0,0 +1,8 @@
+ALTER TABLE l1_batches DROP COLUMN IF EXISTS state_diff_hash;
+
+ALTER TABLE l1_batches DROP COLUMN IF EXISTS aggregation_root;
+ALTER TABLE l1_batches DROP COLUMN IF EXISTS local_root;
+
+ALTER TABLE miniblocks
+    DROP COLUMN IF EXISTS l2_da_validator_address,
+    DROP COLUMN IF EXISTS pubdata_type;
diff --git a/core/lib/dal/migrations/20240925103531_gateway_upgrade.up.sql b/core/lib/dal/migrations/20240925103531_gateway_upgrade.up.sql
new file mode 100644
index 00000000000..a58464f6ebb
--- /dev/null
+++ b/core/lib/dal/migrations/20240925103531_gateway_upgrade.up.sql
@@ -0,0 +1,11 @@
+ALTER TABLE l1_batches ADD COLUMN IF NOT EXISTS state_diff_hash BYTEA;
+
+ALTER TABLE l1_batches ADD COLUMN IF NOT EXISTS aggregation_root BYTEA;
+ALTER TABLE l1_batches ADD COLUMN IF NOT EXISTS local_root BYTEA;
+
+ALTER TABLE miniblocks
+    ADD COLUMN IF NOT EXISTS l2_da_validator_address BYTEA NOT NULL DEFAULT '\x0000000000000000000000000000000000000000'::bytea,
+    -- There are miniblocks that used the `Rollup' type, but were actually used on a Validium chain.
+ -- This is okay, since this field represents how the VM works with the DA, rather what is committed on L1. + ADD COLUMN IF NOT EXISTS pubdata_type TEXT NOT NULL DEFAULT 'Rollup'; +-- ^ Add a default value so that DB queries don't fail even if the DB migration is not completed. diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 113e186dbbe..e347947d296 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -336,10 +336,12 @@ impl BlocksDal<'_, '_> { pubdata_input, aggregation_root, local_root, - state_diff_hash + state_diff_hash, + data_availability.inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number + LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number WHERE number = $1 "#, @@ -1064,10 +1066,12 @@ impl BlocksDal<'_, '_> { pubdata_input, aggregation_root, local_root, - state_diff_hash + state_diff_hash, + data_availability.inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number + LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number WHERE number = 0 OR eth_commit_tx_id IS NOT NULL @@ -1247,10 +1251,12 @@ impl BlocksDal<'_, '_> { pubdata_input, aggregation_root, local_root, - state_diff_hash + state_diff_hash, + data_availability.inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number + LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number WHERE eth_commit_tx_id IS NOT NULL AND eth_prove_tx_id IS NULL @@ -1330,7 +1336,8 @@ impl BlocksDal<'_, '_> { pubdata_input, aggregation_root, local_root, - state_diff_hash + state_diff_hash, + data_availability.inclusion_data FROM ( SELECT @@ -1351,6 +1358,7 @@ impl BlocksDal<'_, '_> { $2 ) inn LEFT JOIN commitments ON commitments.l1_batch_number = inn.number + LEFT JOIN data_availability ON data_availability.l1_batch_number = inn.number WHERE number - ROW_NUMBER = $1 "#, @@ -1406,10 +1414,12 @@ impl BlocksDal<'_, '_> { pubdata_input, aggregation_root, local_root, - state_diff_hash + state_diff_hash, + data_availability.inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number + LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number WHERE eth_prove_tx_id IS NOT NULL AND eth_execute_tx_id IS NULL @@ -1558,10 +1568,12 @@ impl BlocksDal<'_, '_> { pubdata_input, aggregation_root, local_root, - state_diff_hash + state_diff_hash, + data_availability.inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number + LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number WHERE number BETWEEN $1 AND $2 ORDER BY @@ -1625,11 +1637,13 @@ impl BlocksDal<'_, '_> { pubdata_input, aggregation_root, local_root, - state_diff_hash + state_diff_hash, + data_availability.inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version + LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number WHERE eth_commit_tx_id IS NULL AND number != 0 @@ -1706,7 +1720,8 @@ impl BlocksDal<'_, '_> { pubdata_input, aggregation_root, local_root, - state_diff_hash + state_diff_hash, + data_availability.inclusion_data FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number diff 
--git a/core/lib/dal/src/consensus/mod.rs b/core/lib/dal/src/consensus/mod.rs index e5476e00c5c..763c13cc387 100644 --- a/core/lib/dal/src/consensus/mod.rs +++ b/core/lib/dal/src/consensus/mod.rs @@ -5,8 +5,11 @@ mod testonly; #[cfg(test)] mod tests; +use std::collections::BTreeMap; + use anyhow::{anyhow, Context as _}; -use zksync_consensus_roles::{attester, validator}; +use zksync_concurrency::net; +use zksync_consensus_roles::{attester, node, validator}; use zksync_protobuf::{read_required, required, ProtoFmt, ProtoRepr}; use zksync_types::{ abi, @@ -29,6 +32,23 @@ use crate::models::{parse_h160, parse_h256}; pub struct GlobalConfig { pub genesis: validator::Genesis, pub registry_address: Option, + pub seed_peers: BTreeMap, +} + +impl ProtoRepr for proto::NodeAddr { + type Type = (node::PublicKey, net::Host); + fn read(&self) -> anyhow::Result { + Ok(( + read_required(&self.key).context("key")?, + net::Host(required(&self.addr).context("addr")?.clone()), + )) + } + fn build(this: &Self::Type) -> Self { + Self { + key: Some(this.0.build()), + addr: Some(this.1 .0.clone()), + } + } } impl ProtoFmt for GlobalConfig { @@ -43,6 +63,13 @@ impl ProtoFmt for GlobalConfig { .map(|a| parse_h160(a)) .transpose() .context("registry_address")?, + seed_peers: r + .seed_peers + .iter() + .enumerate() + .map(|(i, e)| e.read().context(i)) + .collect::>() + .context("seed_peers")?, }) } @@ -50,6 +77,11 @@ impl ProtoFmt for GlobalConfig { Self::Proto { genesis: Some(self.genesis.build()), registry_address: self.registry_address.map(|a| a.as_bytes().to_vec()), + seed_peers: self + .seed_peers + .iter() + .map(|(k, v)| ProtoRepr::build(&(k.clone(), v.clone()))) + .collect(), } } } @@ -444,9 +476,10 @@ impl ProtoRepr for proto::Transaction { } }, execute: Execute { - contract_address: required(&execute.contract_address) - .and_then(|x| parse_h160(x)) - .context("execute.contract_address")?, + contract_address: execute + .contract_address + .as_ref() + .and_then(|x| parse_h160(x).ok()), calldata: required(&execute.calldata).context("calldata")?.clone(), value: required(&execute.value) .and_then(|x| parse_h256(x)) @@ -530,7 +563,7 @@ impl ProtoRepr for proto::Transaction { } }; let execute = proto::Execute { - contract_address: Some(this.execute.contract_address.as_bytes().into()), + contract_address: this.execute.contract_address.map(|x| x.as_bytes().into()), calldata: Some(this.execute.calldata.clone()), value: Some(u256_to_h256(this.execute.value).as_bytes().into()), factory_deps: this.execute.factory_deps.clone(), diff --git a/core/lib/dal/src/consensus/proto/mod.proto b/core/lib/dal/src/consensus/proto/mod.proto index e573a7a2a4d..24a8297031d 100644 --- a/core/lib/dal/src/consensus/proto/mod.proto +++ b/core/lib/dal/src/consensus/proto/mod.proto @@ -4,6 +4,7 @@ package zksync.dal; import "zksync/roles/validator.proto"; import "zksync/roles/attester.proto"; +import "zksync/roles/node.proto"; enum L1BatchCommitDataGeneratorMode { Rollup = 0; @@ -113,7 +114,7 @@ message ProtocolUpgradeTxCommonData { } message Execute { - optional bytes contract_address = 1; // required; H160 + optional bytes contract_address = 1; // optional; H160 optional bytes calldata = 2; // required optional bytes value = 3; // required; U256 repeated bytes factory_deps = 4; @@ -133,9 +134,15 @@ message AttesterCommittee { repeated roles.attester.WeightedAttester members = 1; // required } +message NodeAddr { + optional roles.node.PublicKey key = 1; // required + optional string addr = 2; // required; Host +} + message GlobalConfig { 
optional roles.validator.Genesis genesis = 1; // required optional bytes registry_address = 2; // optional; H160 + repeated NodeAddr seed_peers = 3; } message AttestationStatus { diff --git a/core/lib/dal/src/consensus/tests.rs b/core/lib/dal/src/consensus/tests.rs index d9ab9310a2b..c99f8a37f7c 100644 --- a/core/lib/dal/src/consensus/tests.rs +++ b/core/lib/dal/src/consensus/tests.rs @@ -19,7 +19,7 @@ use crate::tests::mock_protocol_upgrade_transaction; fn execute(rng: &mut impl Rng) -> Execute { Execute { - contract_address: rng.gen(), + contract_address: Some(rng.gen()), value: rng.gen::().into(), calldata: (0..10 * 32).map(|_| rng.gen()).collect(), // TODO: find a way to generate valid random bytecode. diff --git a/core/lib/dal/src/consensus_dal.rs b/core/lib/dal/src/consensus_dal.rs index 2dca58e2a6a..711ce3ddf39 100644 --- a/core/lib/dal/src/consensus_dal.rs +++ b/core/lib/dal/src/consensus_dal.rs @@ -66,6 +66,7 @@ impl ConsensusDal<'_, '_> { return Ok(Some(GlobalConfig { genesis, registry_address: None, + seed_peers: [].into(), })); } Ok(None) @@ -184,6 +185,7 @@ impl ConsensusDal<'_, '_> { } .with_hash(), registry_address: old.registry_address, + seed_peers: old.seed_peers, }; txn.consensus_dal().try_update_global_config(&new).await?; txn.commit().await?; @@ -681,6 +683,7 @@ mod tests { let cfg = GlobalConfig { genesis: genesis.with_hash(), registry_address: Some(rng.gen()), + seed_peers: [].into(), // TODO: rng.gen() for Host }; conn.consensus_dal() .try_update_global_config(&cfg) @@ -715,6 +718,7 @@ mod tests { let cfg = GlobalConfig { genesis: setup.genesis.clone(), registry_address: Some(rng.gen()), + seed_peers: [].into(), }; conn.consensus_dal() .try_update_global_config(&cfg) diff --git a/core/lib/dal/src/eth_sender_dal.rs b/core/lib/dal/src/eth_sender_dal.rs index 2266d6fb60f..d4176210b1e 100644 --- a/core/lib/dal/src/eth_sender_dal.rs +++ b/core/lib/dal/src/eth_sender_dal.rs @@ -2,16 +2,20 @@ use std::{convert::TryFrom, str::FromStr}; use anyhow::Context as _; use sqlx::types::chrono::{DateTime, Utc}; -use zksync_db_connection::{connection::Connection, interpolate_query, match_query_as}; +use zksync_db_connection::{ + connection::Connection, error::DalResult, instrument::InstrumentExt, interpolate_query, + match_query_as, +}; use zksync_types::{ aggregated_operations::AggregatedActionType, - eth_sender::{EthTx, EthTxBlobSidecar, TxHistory, TxHistoryToSend}, + eth_sender::{BatchSettlementInfo, EthTx, EthTxBlobSidecar, TxHistory, TxHistoryToSend}, Address, L1BatchNumber, H256, U256, }; use crate::{ models::storage_eth_tx::{ L1BatchEthSenderStats, StorageEthTx, StorageTxHistory, StorageTxHistoryToSend, + StoredBatchSettlementInfo, }, Core, }; @@ -746,6 +750,43 @@ impl EthSenderDal<'_, '_> { Ok(()) } + + pub async fn get_batch_finalization_info( + &mut self, + batch_number: L1BatchNumber, + ) -> DalResult> { + let mut info = sqlx::query_as!( + StoredBatchSettlementInfo, + r#" + SELECT + number AS batch_number, + eth_txs.chain_id AS settlement_layer_id, + eth_txs_history.tx_hash AS settlement_layer_tx_hash + FROM + l1_batches + JOIN eth_txs ON l1_batches.eth_execute_tx_id = eth_txs.id + JOIN eth_txs_history ON ( + eth_txs.id = eth_txs_history.eth_tx_id + AND eth_txs_history.confirmed_at IS NOT NULL + ) + WHERE + l1_batches.number = $1 + "#, + i64::from(batch_number.0) + ) + .instrument("get_batch_finalization_info") + .with_arg("batch_number", &batch_number) + .fetch_all(self.storage) + .await?; + + assert!( + info.len() <= 1, + "Batch number must be unique in the database 
{:#?}", + info + ); + + Ok(info.pop().and_then(Into::into)) + } } /// These methods should only be used for tests. diff --git a/core/lib/dal/src/models/storage_block.rs b/core/lib/dal/src/models/storage_block.rs index 3e96b816024..3d9264ddd9e 100644 --- a/core/lib/dal/src/models/storage_block.rs +++ b/core/lib/dal/src/models/storage_block.rs @@ -151,6 +151,7 @@ pub(crate) struct StorageL1Batch { pub bootloader_initial_content_commitment: Option>, pub pubdata_input: Option>, pub state_diff_hash: Option>, + pub inclusion_data: Option>, } impl StorageL1Batch { @@ -253,21 +254,10 @@ impl TryFrom for L1BatchMetadata { bootloader_initial_content_commitment: batch .bootloader_initial_content_commitment .map(|v| H256::from_slice(&v)), - state_diff_hash: H256::from_slice( - &batch - .state_diff_hash - .ok_or(L1BatchMetadataError::Incomplete("state_diff_hash"))?, - ), - local_root: H256::from_slice( - &batch - .local_root - .ok_or(L1BatchMetadataError::Incomplete("local_root"))?, - ), - aggregation_root: H256::from_slice( - &batch - .aggregation_root - .ok_or(L1BatchMetadataError::Incomplete("aggregation_root"))?, - ), + state_diff_hash: batch.state_diff_hash.map(|v| H256::from_slice(&v)), + local_root: batch.local_root.map(|v| H256::from_slice(&v)), + aggregation_root: batch.aggregation_root.map(|v| H256::from_slice(&v)), + da_inclusion_data: batch.inclusion_data, }) } } diff --git a/core/lib/dal/src/models/storage_eth_tx.rs b/core/lib/dal/src/models/storage_eth_tx.rs index a47f6acfff4..df76f8cea4e 100644 --- a/core/lib/dal/src/models/storage_eth_tx.rs +++ b/core/lib/dal/src/models/storage_eth_tx.rs @@ -3,7 +3,7 @@ use std::str::FromStr; use sqlx::types::chrono::NaiveDateTime; use zksync_types::{ aggregated_operations::AggregatedActionType, - eth_sender::{EthTx, TxHistory, TxHistoryToSend}, + eth_sender::{BatchSettlementInfo, EthTx, TxHistory, TxHistoryToSend}, Address, L1BatchNumber, Nonce, SLChainId, H256, }; @@ -126,3 +126,24 @@ impl From for TxHistoryToSend { } } } + +#[derive(Debug)] +pub struct StoredBatchSettlementInfo { + pub batch_number: i64, + pub settlement_layer_id: Option, + pub settlement_layer_tx_hash: Option, +} + +impl From for Option { + fn from(info: StoredBatchSettlementInfo) -> Option { + let settlement_layer_id = info.settlement_layer_id?; + let settlement_layer_tx_hash = info.settlement_layer_tx_hash?; + + Some(BatchSettlementInfo { + batch_number: info.batch_number as u32, + settlement_layer_id: SLChainId(settlement_layer_id as u64), + settlement_layer_tx_hash: H256::from_str(&settlement_layer_tx_hash) + .expect("Incorrect hash"), + }) + } +} diff --git a/core/lib/dal/src/models/tests.rs b/core/lib/dal/src/models/tests.rs index 34cfde108f1..b4949dc101d 100644 --- a/core/lib/dal/src/models/tests.rs +++ b/core/lib/dal/src/models/tests.rs @@ -13,7 +13,7 @@ use crate::{models::storage_transaction::StorageTransaction, BigDecimal}; fn default_execute() -> Execute { Execute { - contract_address: H160::random(), + contract_address: Some(H160::random()), value: U256::from(10i32), calldata: hex::decode( "a9059cbb00000000000000000000000058d595f318167d5af45d9e44ade4348dd4e\ diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index 0c56dd227df..f628638eeb6 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -76,7 +76,7 @@ pub(crate) fn mock_l2_transaction() -> L2Tx { gas_per_pubdata_limit: U256::from(DEFAULT_GAS_PER_PUBDATA), }; let mut l2_tx = L2Tx::new_signed( - Address::random(), + Some(Address::random()), vec![], 
zksync_types::Nonce(0), fee, @@ -112,7 +112,7 @@ pub(crate) fn mock_l1_execute() -> L1Tx { }; let execute = Execute { - contract_address: H160::random(), + contract_address: Some(H160::random()), value: Default::default(), calldata: vec![], factory_deps: vec![], @@ -140,7 +140,7 @@ pub(crate) fn mock_protocol_upgrade_transaction() -> ProtocolUpgradeTx { }; let execute = Execute { - contract_address: H160::random(), + contract_address: Some(H160::random()), value: Default::default(), calldata: vec![], factory_deps: vec![], diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index 72129bb500a..a1c2c946118 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -90,7 +90,8 @@ impl TransactionsDal<'_, '_> { tx: &L1Tx, l1_block_number: L1BlockNumber, ) -> DalResult { - let contract_address = tx.execute.contract_address.as_bytes(); + let contract_address = tx.execute.contract_address; + let contract_address_as_bytes = contract_address.map(|addr| addr.as_bytes().to_vec()); let tx_hash = tx.hash(); let tx_hash_bytes = tx_hash.as_bytes(); let json_data = serde_json::to_value(&tx.execute) @@ -175,7 +176,7 @@ impl TransactionsDal<'_, '_> { serial_id, full_fee, layer_2_tip_fee, - contract_address, + contract_address_as_bytes, l1_block_number.0 as i32, value, empty_address.as_bytes(), @@ -220,7 +221,8 @@ impl TransactionsDal<'_, '_> { } pub async fn insert_system_transaction(&mut self, tx: &ProtocolUpgradeTx) -> DalResult<()> { - let contract_address = tx.execute.contract_address.as_bytes().to_vec(); + let contract_address = tx.execute.contract_address; + let contract_address_as_bytes = contract_address.map(|addr| addr.as_bytes().to_vec()); let tx_hash = tx.common_data.hash().0.to_vec(); let json_data = serde_json::to_value(&tx.execute) .unwrap_or_else(|_| panic!("cannot serialize tx {:?} to json", tx.common_data.hash())); @@ -297,7 +299,7 @@ impl TransactionsDal<'_, '_> { gas_per_pubdata_limit, json_data, upgrade_id, - contract_address, + contract_address_as_bytes, l1_block_number, value, &Address::default().0.to_vec(), @@ -343,7 +345,8 @@ impl TransactionsDal<'_, '_> { } let initiator_address = tx.initiator_account(); - let contract_address = tx.execute.contract_address.as_bytes(); + let contract_address = tx.execute.contract_address; + let contract_address_as_bytes = contract_address.map(|addr| addr.as_bytes().to_vec()); let json_data = serde_json::to_value(&tx.execute) .unwrap_or_else(|_| panic!("cannot serialize tx {:?} to json", tx.hash())); let gas_limit = u256_to_big_decimal(tx.common_data.fee.gas_limit); @@ -472,7 +475,7 @@ impl TransactionsDal<'_, '_> { input_data, &json_data, tx_format, - contract_address, + contract_address_as_bytes, value, &paymaster, &paymaster_input, @@ -756,8 +759,10 @@ impl TransactionsDal<'_, '_> { .arg_error(&format!("transactions[{index_in_block}].refunded_gas"), err) })?; + let contract_address = transaction.execute.contract_address; + let contract_address_as_bytes = contract_address.map(|addr| addr.as_bytes().to_vec()); l2_values.push(u256_to_big_decimal(transaction.execute.value)); - l2_contract_addresses.push(transaction.execute.contract_address.as_bytes()); + l2_contract_addresses.push(contract_address_as_bytes); l2_paymaster_input.push(&common_data.paymaster_params.paymaster_input[..]); l2_paymaster.push(common_data.paymaster_params.paymaster.as_bytes()); l2_hashes.push(tx_res.hash.as_bytes()); @@ -877,7 +882,7 @@ impl TransactionsDal<'_, '_> { &l2_inputs as &[&[u8]], &l2_datas, 
&l2_tx_formats, - &l2_contract_addresses as &[&[u8]], + &l2_contract_addresses as &[Option>], &l2_values, &l2_paymaster as &[&[u8]], &l2_paymaster_input as &[&[u8]], @@ -960,8 +965,10 @@ impl TransactionsDal<'_, '_> { .arg_error(&format!("transactions[{index_in_block}].refunded_gas"), err) })?; + let contract_address = transaction.execute.contract_address; + let contract_address_as_bytes = contract_address.map(|addr| addr.as_bytes().to_vec()); l2_values.push(u256_to_big_decimal(transaction.execute.value)); - l2_contract_addresses.push(transaction.execute.contract_address.as_bytes()); + l2_contract_addresses.push(contract_address_as_bytes); l2_paymaster_input.push(&common_data.paymaster_params.paymaster_input[..]); l2_paymaster.push(common_data.paymaster_params.paymaster.as_bytes()); l2_hashes.push(tx_res.hash.as_bytes()); @@ -1072,7 +1079,7 @@ impl TransactionsDal<'_, '_> { &l2_datas, &l2_refunded_gas, &l2_values, - &l2_contract_addresses as &[&[u8]], + &l2_contract_addresses as &[Option>], &l2_paymaster as &[&[u8]], &l2_paymaster_input as &[&[u8]], l2_block_number.0 as i32, @@ -1142,6 +1149,8 @@ impl TransactionsDal<'_, '_> { .arg_error(&format!("transactions[{index_in_block}].refunded_gas"), err) })?; + let contract_address = transaction.execute.contract_address; + let contract_address_as_bytes = contract_address.map(|addr| addr.as_bytes().to_vec()); let tx = &tx_res.transaction; l1_hashes.push(tx_res.hash.as_bytes()); l1_initiator_address.push(common_data.sender.as_bytes()); @@ -1155,7 +1164,7 @@ impl TransactionsDal<'_, '_> { l1_priority_op_id.push(common_data.serial_id.0 as i64); l1_full_fee.push(u256_to_big_decimal(common_data.full_fee)); l1_layer_2_tip_fee.push(u256_to_big_decimal(common_data.layer_2_tip_fee)); - l1_contract_address.push(tx.execute.contract_address.as_bytes()); + l1_contract_address.push(contract_address_as_bytes); l1_l1_block_number.push(common_data.eth_block as i32); l1_value.push(u256_to_big_decimal(tx.execute.value)); l1_tx_format.push(common_data.tx_format() as i32); @@ -1262,7 +1271,7 @@ impl TransactionsDal<'_, '_> { &l1_priority_op_id, &l1_full_fee, &l1_layer_2_tip_fee, - &l1_contract_address as &[&[u8]], + &l1_contract_address as &[Option>], &l1_l1_block_number, &l1_value, &l1_tx_format, @@ -1432,6 +1441,8 @@ impl TransactionsDal<'_, '_> { .arg_error(&format!("transactions[{index_in_block}].refunded_gas"), err) })?; + let contract_address = transaction.execute.contract_address; + let contract_address_as_bytes = contract_address.map(|addr| addr.as_bytes().to_vec()); let tx = &tx_res.transaction; upgrade_hashes.push(tx_res.hash.as_bytes()); upgrade_initiator_address.push(common_data.sender.as_bytes()); @@ -1444,7 +1455,7 @@ impl TransactionsDal<'_, '_> { .unwrap_or_else(|_| panic!("cannot serialize tx {:?} to json", tx.hash())), ); upgrade_upgrade_id.push(common_data.upgrade_id as i32); - upgrade_contract_address.push(tx.execute.contract_address.as_bytes()); + upgrade_contract_address.push(contract_address_as_bytes); upgrade_l1_block_number.push(common_data.eth_block as i32); upgrade_value.push(u256_to_big_decimal(tx.execute.value)); upgrade_tx_format.push(common_data.tx_format() as i32); @@ -1543,7 +1554,7 @@ impl TransactionsDal<'_, '_> { &upgrade_gas_per_pubdata_limit, &upgrade_data, &upgrade_upgrade_id, - &upgrade_contract_address as &[&[u8]], + &upgrade_contract_address as &[Option>], &upgrade_l1_block_number, &upgrade_value, &upgrade_tx_format, diff --git a/core/lib/env_config/src/api.rs b/core/lib/env_config/src/api.rs index c6485d54d6b..f8bdf491819 
100644 --- a/core/lib/env_config/src/api.rs +++ b/core/lib/env_config/src/api.rs @@ -95,6 +95,7 @@ mod tests { ], api_namespaces: Some(vec!["debug".to_string()]), extended_api_tracing: true, + settlement_layer_url: Some("http://127.0.0.1:9011".into()), }, prometheus: PrometheusConfig { listener_port: 3312, @@ -140,6 +141,7 @@ mod tests { API_WEB3_JSON_RPC_WEBSOCKET_REQUESTS_PER_MINUTE_LIMIT=10 API_WEB3_JSON_RPC_MEMPOOL_CACHE_SIZE=10000 API_WEB3_JSON_RPC_MEMPOOL_CACHE_UPDATE_INTERVAL=50 + API_WEB3_JSON_RPC_SETTLEMENT_LAYER_URL="http://127.0.0.1:9011" API_CONTRACT_VERIFICATION_PORT="3070" API_CONTRACT_VERIFICATION_URL="http://127.0.0.1:3070" API_WEB3_JSON_RPC_MAX_RESPONSE_BODY_SIZE_MB=10 diff --git a/core/lib/env_config/src/prover_job_monitor.rs b/core/lib/env_config/src/prover_job_monitor.rs index 3a8f80473eb..884ebecacbb 100644 --- a/core/lib/env_config/src/prover_job_monitor.rs +++ b/core/lib/env_config/src/prover_job_monitor.rs @@ -31,6 +31,7 @@ mod tests { prover_queue_reporter_run_interval_ms: 10000, witness_generator_queue_reporter_run_interval_ms: 10000, witness_job_queuer_run_interval_ms: 10000, + http_port: 3074, } } @@ -55,6 +56,7 @@ mod tests { fn from_env_with_default() { let config = r#" PROVER_JOB_MONITOR_PROMETHEUS_PORT=3317 + PROVER_JOB_MONITOR_HTTP_PORT=3074 PROVER_JOB_MONITOR_MAX_DB_CONNECTIONS=9 "#; let mut lock = MUTEX.lock(); @@ -80,6 +82,7 @@ mod tests { PROVER_JOB_MONITOR_PROVER_QUEUE_REPORTER_RUN_INTERVAL_MS=10001 PROVER_JOB_MONITOR_WITNESS_GENERATOR_QUEUE_REPORTER_RUN_INTERVAL_MS=10001 PROVER_JOB_MONITOR_WITNESS_JOB_QUEUER_RUN_INTERVAL_MS=10001 + PROVER_JOB_MONITOR_HTTP_PORT=3074 "#; let mut lock = MUTEX.lock(); lock.set_env(config); diff --git a/core/lib/l1_contract_interface/Cargo.toml b/core/lib/l1_contract_interface/Cargo.toml index 8b68df854e7..4a190f82efc 100644 --- a/core/lib/l1_contract_interface/Cargo.toml +++ b/core/lib/l1_contract_interface/Cargo.toml @@ -13,6 +13,7 @@ categories.workspace = true [dependencies] zksync_types.workspace = true zksync_prover_interface.workspace = true +zksync_system_constants.workspace = true # Used to serialize proof data crypto_codegen.workspace = true diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs index f51f2244527..e39681d2e04 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs @@ -8,7 +8,7 @@ use zksync_types::{ ethabi::{ParamType, Token}, pubdata_da::PubdataDA, web3::{contract::Error as ContractError, keccak256}, - ProtocolVersionId, H256, U256, + ProtocolVersionId, H256, STATE_DIFF_HASH_KEY_PRE_GATEWAY, U256, }; use crate::{ @@ -19,7 +19,6 @@ use crate::{ /// These are used by the L1 Contracts to indicate what DA layer is used for pubdata const PUBDATA_SOURCE_CALLDATA: u8 = 0; const PUBDATA_SOURCE_BLOBS: u8 = 1; -const PUBDATA_SOURCE_CUSTOM: u8 = 2; /// Encoding for `CommitBatchInfo` from `IExecutor.sol` for a contract running in rollup mode. 
#[derive(Debug)] @@ -217,45 +216,54 @@ impl Tokenizable for CommitBatchInfo<'_> { L1BatchCommitmentMode::Validium => vec![], })); } else { + let state_diff_hash = if protocol_version.is_pre_gateway() { + self.l1_batch_with_metadata + .header + .system_logs + .iter() + .find_map(|log| { + (log.0.key == H256::from_low_u64_be(STATE_DIFF_HASH_KEY_PRE_GATEWAY)) + .then_some(log.0.value) + }) + .expect("Failed to get state_diff_hash from system logs") + } else { + self.l1_batch_with_metadata + .metadata + .state_diff_hash + .expect("Failed to get state_diff_hash from metadata") + }; tokens.push(Token::Bytes(match (self.mode, self.pubdata_da) { + // Validiums with custom DA need the inclusion data to be part of operator_da_input + (L1BatchCommitmentMode::Validium, PubdataDA::Custom) => { + let mut operator_da_input: Vec = state_diff_hash.0.into(); + + operator_da_input.extend( + &self + .l1_batch_with_metadata + .metadata + .da_inclusion_data + .clone() + .unwrap_or_default(), + ); + + operator_da_input + } // Here we're not pushing any pubdata on purpose; no pubdata is sent in Validium mode. ( L1BatchCommitmentMode::Validium, - PubdataDA::Calldata | PubdataDA::RelayedL2Calldata, - ) => self - .l1_batch_with_metadata - .metadata - .state_diff_hash - .0 - .into(), - (L1BatchCommitmentMode::Validium, PubdataDA::Blobs) => self - .l1_batch_with_metadata - .metadata - .state_diff_hash - .0 - .into(), - + PubdataDA::Calldata | PubdataDA::RelayedL2Calldata | PubdataDA::Blobs, + ) => state_diff_hash.0.into(), (L1BatchCommitmentMode::Rollup, PubdataDA::Custom) => { panic!("Custom pubdata DA is incompatible with Rollup mode") } - (L1BatchCommitmentMode::Validium, PubdataDA::Custom) => { - vec![PUBDATA_SOURCE_CUSTOM] - } - ( L1BatchCommitmentMode::Rollup, PubdataDA::Calldata | PubdataDA::RelayedL2Calldata, ) => { let pubdata = self.pubdata_input(); - let header = compose_header_for_l1_commit_rollup( - self.l1_batch_with_metadata - .metadata - .state_diff_hash - .0 - .into(), - pubdata.clone(), - ); + let header = + compose_header_for_l1_commit_rollup(state_diff_hash, pubdata.clone()); // We compute and add the blob commitment to the pubdata payload so that we can verify the proof // even if we are not using blobs. 
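Aside on the hunk above: for a Validium batch settling via a custom DA layer, the `operator_da_input` it builds is simply the 32-byte state diff hash followed by whatever inclusion data the DA layer returned (empty while inclusion is still pending). A minimal sketch of that layout, with an illustrative free function rather than the crate's actual API:

```rust
// Sketch only: `operator_da_input` layout for Validium + custom DA, per the
// hunk above. Function and parameter names are illustrative assumptions.
fn operator_da_input_for_custom_da(
    state_diff_hash: [u8; 32],
    da_inclusion_data: Option<Vec<u8>>, // `None` until the DA layer confirms inclusion
) -> Vec<u8> {
    let mut input: Vec<u8> = state_diff_hash.to_vec();
    // Inclusion data is appended verbatim; an absent value degrades to just
    // the 32-byte state diff hash, mirroring `unwrap_or_default()` above.
    input.extend(da_inclusion_data.unwrap_or_default());
    input
}

fn main() {
    let input = operator_da_input_for_custom_da([0xab; 32], Some(vec![1, 2, 3]));
    assert_eq!(input.len(), 35);
    assert_eq!(&input[..32], [0xab; 32].as_slice());
}
```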
@@ -270,14 +278,8 @@ impl Tokenizable for CommitBatchInfo<'_> { (L1BatchCommitmentMode::Rollup, PubdataDA::Blobs) => { let pubdata = self.pubdata_input(); - let header = compose_header_for_l1_commit_rollup( - self.l1_batch_with_metadata - .metadata - .state_diff_hash - .0 - .into(), - pubdata.clone(), - ); + let header = + compose_header_for_l1_commit_rollup(state_diff_hash, pubdata.clone()); let pubdata_commitments: Vec = pubdata .chunks(ZK_SYNC_BYTES_PER_BLOB) diff --git a/core/lib/mempool/src/tests.rs b/core/lib/mempool/src/tests.rs index 6ea1be3b514..96ef600984f 100644 --- a/core/lib/mempool/src/tests.rs +++ b/core/lib/mempool/src/tests.rs @@ -371,7 +371,7 @@ fn gen_l2_tx(address: Address, nonce: Nonce) -> Transaction { fn gen_l2_tx_with_timestamp(address: Address, nonce: Nonce, received_at_ms: u64) -> Transaction { let mut txn = L2Tx::new( - Address::default(), + Some(Address::default()), Vec::new(), nonce, Fee::default(), @@ -386,7 +386,7 @@ fn gen_l2_tx_with_timestamp(address: Address, nonce: Nonce, received_at_ms: u64) fn gen_l1_tx(priority_id: PriorityOpId) -> Transaction { let execute = Execute { - contract_address: Address::repeat_byte(0x11), + contract_address: Some(Address::repeat_byte(0x11)), calldata: vec![1, 2, 3], factory_deps: vec![], value: U256::zero(), diff --git a/core/lib/multivm/Cargo.toml b/core/lib/multivm/Cargo.toml index f48522a76f0..67a00d064ad 100644 --- a/core/lib/multivm/Cargo.toml +++ b/core/lib/multivm/Cargo.toml @@ -35,7 +35,6 @@ anyhow.workspace = true hex.workspace = true itertools.workspace = true once_cell.workspace = true -pretty_assertions.workspace = true thiserror.workspace = true tracing.workspace = true vise.workspace = true diff --git a/core/lib/multivm/src/lib.rs b/core/lib/multivm/src/lib.rs index 77851a1df00..be740d6b378 100644 --- a/core/lib/multivm/src/lib.rs +++ b/core/lib/multivm/src/lib.rs @@ -22,5 +22,5 @@ pub use crate::{ mod glue; pub mod tracers; pub mod utils; -pub mod versions; +mod versions; mod vm_instance; diff --git a/core/lib/multivm/src/versions/mod.rs b/core/lib/multivm/src/versions/mod.rs index 81358a482f1..bcb246cece4 100644 --- a/core/lib/multivm/src/versions/mod.rs +++ b/core/lib/multivm/src/versions/mod.rs @@ -1,5 +1,8 @@ -pub mod shadow; mod shared; +#[cfg(test)] +mod testonly; +#[cfg(test)] +mod tests; pub mod vm_1_3_2; pub mod vm_1_4_1; pub mod vm_1_4_2; diff --git a/core/lib/multivm/src/versions/shadow.rs b/core/lib/multivm/src/versions/shadow.rs deleted file mode 100644 index 871258f43b8..00000000000 --- a/core/lib/multivm/src/versions/shadow.rs +++ /dev/null @@ -1,305 +0,0 @@ -use std::{ - collections::{BTreeMap, HashSet}, - fmt, -}; - -use anyhow::Context as _; -use zksync_types::{StorageKey, StorageLog, StorageLogWithPreviousValue, Transaction}; - -use crate::{ - interface::{ - storage::{ImmutableStorageView, ReadStorage, StoragePtr, StorageView}, - BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, - SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, - VmInterfaceHistoryEnabled, VmMemoryMetrics, - }, - vm_fast, -}; - -#[derive(Debug)] -pub struct ShadowVm { - main: T, - shadow: vm_fast::Vm>, -} - -impl VmFactory> for ShadowVm -where - S: ReadStorage, - T: VmFactory>, -{ - fn new( - batch_env: L1BatchEnv, - system_env: SystemEnv, - storage: StoragePtr>, - ) -> Self { - Self { - main: T::new(batch_env.clone(), system_env.clone(), storage.clone()), - shadow: vm_fast::Vm::new(batch_env, system_env, ImmutableStorageView::new(storage)), - } - } -} - 
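The `shadow.rs` being deleted here implemented the shadow-execution pattern that now lives in the `vm_interface` crate (exercised by the new `versions/tests.rs` later in this diff): every operation is applied to both a main and a shadow VM, and their observable results are compared. For readers unfamiliar with the idea, a condensed, self-contained sketch follows; all types are simplified stand-ins, not the real traits:

```rust
// Stand-alone sketch of shadow execution: run two implementations in
// lockstep, compare results, and surface divergences as errors.
#[derive(Debug, Default)]
struct DivergenceErrors(Vec<String>);

impl DivergenceErrors {
    // Record a mismatch instead of panicking, so all divergences within one
    // batch are reported together.
    fn check_match<T: PartialEq + std::fmt::Debug>(&mut self, context: &str, main: &T, shadow: &T) {
        if main != shadow {
            self.0
                .push(format!("`{context}` mismatch: {main:?} vs {shadow:?}"));
        }
    }

    fn into_result(self) -> Result<(), String> {
        if self.0.is_empty() {
            Ok(())
        } else {
            Err(format!(
                "divergence between main and shadow execution: {:?}",
                self.0
            ))
        }
    }
}

fn main() {
    let (main_gas, shadow_gas) = (21_000_u64, 21_000_u64);
    let (main_refund, shadow_refund) = (100_u64, 120_u64);

    let mut errors = DivergenceErrors::default();
    errors.check_match("gas_remaining", &main_gas, &shadow_gas);
    errors.check_match("refunds", &main_refund, &shadow_refund);
    assert!(errors.into_result().is_err()); // the refund mismatch is reported
}
```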
-impl VmInterface for ShadowVm -where - S: ReadStorage, - T: VmInterface, -{ - type TracerDispatcher = T::TracerDispatcher; - - fn push_transaction(&mut self, tx: Transaction) { - self.shadow.push_transaction(tx.clone()); - self.main.push_transaction(tx); - } - - fn inspect( - &mut self, - dispatcher: Self::TracerDispatcher, - execution_mode: VmExecutionMode, - ) -> VmExecutionResultAndLogs { - let shadow_result = self.shadow.inspect((), execution_mode); - let main_result = self.main.inspect(dispatcher, execution_mode); - let mut errors = DivergenceErrors::default(); - errors.check_results_match(&main_result, &shadow_result); - errors - .into_result() - .with_context(|| format!("executing VM with mode {execution_mode:?}")) - .unwrap(); - main_result - } - - fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { - self.shadow.start_new_l2_block(l2_block_env); - self.main.start_new_l2_block(l2_block_env); - } - - fn inspect_transaction_with_bytecode_compression( - &mut self, - tracer: Self::TracerDispatcher, - tx: Transaction, - with_compression: bool, - ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { - let tx_hash = tx.hash(); - let main_result = self.main.inspect_transaction_with_bytecode_compression( - tracer, - tx.clone(), - with_compression, - ); - let shadow_result = - self.shadow - .inspect_transaction_with_bytecode_compression((), tx, with_compression); - let mut errors = DivergenceErrors::default(); - errors.check_results_match(&main_result.1, &shadow_result.1); - errors - .into_result() - .with_context(|| { - format!("inspecting transaction {tx_hash:?}, with_compression={with_compression:?}") - }) - .unwrap(); - main_result - } - - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.main.record_vm_memory_metrics() - } - - fn finish_batch(&mut self) -> FinishedL1Batch { - let main_batch = self.main.finish_batch(); - let shadow_batch = self.shadow.finish_batch(); - - let mut errors = DivergenceErrors::default(); - errors.check_results_match( - &main_batch.block_tip_execution_result, - &shadow_batch.block_tip_execution_result, - ); - errors.check_final_states_match( - &main_batch.final_execution_state, - &shadow_batch.final_execution_state, - ); - errors.check_match( - "final_bootloader_memory", - &main_batch.final_bootloader_memory, - &shadow_batch.final_bootloader_memory, - ); - errors.check_match( - "pubdata_input", - &main_batch.pubdata_input, - &shadow_batch.pubdata_input, - ); - errors.check_match( - "state_diffs", - &main_batch.state_diffs, - &shadow_batch.state_diffs, - ); - errors.into_result().unwrap(); - main_batch - } -} - -#[must_use = "Should be converted to a `Result`"] -#[derive(Debug, Default)] -pub struct DivergenceErrors(Vec); - -impl DivergenceErrors { - fn check_results_match( - &mut self, - main_result: &VmExecutionResultAndLogs, - shadow_result: &VmExecutionResultAndLogs, - ) { - self.check_match("result", &main_result.result, &shadow_result.result); - self.check_match( - "logs.events", - &main_result.logs.events, - &shadow_result.logs.events, - ); - self.check_match( - "logs.system_l2_to_l1_logs", - &main_result.logs.system_l2_to_l1_logs, - &shadow_result.logs.system_l2_to_l1_logs, - ); - self.check_match( - "logs.user_l2_to_l1_logs", - &main_result.logs.user_l2_to_l1_logs, - &shadow_result.logs.user_l2_to_l1_logs, - ); - let main_logs = UniqueStorageLogs::new(&main_result.logs.storage_logs); - let shadow_logs = UniqueStorageLogs::new(&shadow_result.logs.storage_logs); - self.check_match("logs.storage_logs", &main_logs, 
&shadow_logs); - self.check_match("refunds", &main_result.refunds, &shadow_result.refunds); - self.check_match( - "statistics.circuit_statistic", - &main_result.statistics.circuit_statistic, - &shadow_result.statistics.circuit_statistic, - ); - self.check_match( - "gas_remaining", - &main_result.statistics.gas_remaining, - &shadow_result.statistics.gas_remaining, - ); - } - - fn check_match(&mut self, context: &str, main: &T, shadow: &T) { - if main != shadow { - let comparison = pretty_assertions::Comparison::new(main, shadow); - let err = anyhow::anyhow!("`{context}` mismatch: {comparison}"); - self.0.push(err); - } - } - - fn check_final_states_match( - &mut self, - main: &CurrentExecutionState, - shadow: &CurrentExecutionState, - ) { - self.check_match("final_state.events", &main.events, &shadow.events); - self.check_match( - "final_state.user_l2_to_l1_logs", - &main.user_l2_to_l1_logs, - &shadow.user_l2_to_l1_logs, - ); - self.check_match( - "final_state.system_logs", - &main.system_logs, - &shadow.system_logs, - ); - self.check_match( - "final_state.storage_refunds", - &main.storage_refunds, - &shadow.storage_refunds, - ); - self.check_match( - "final_state.pubdata_costs", - &main.pubdata_costs, - &shadow.pubdata_costs, - ); - self.check_match( - "final_state.used_contract_hashes", - &main.used_contract_hashes.iter().collect::>(), - &shadow.used_contract_hashes.iter().collect::>(), - ); - - let main_deduplicated_logs = Self::gather_logs(&main.deduplicated_storage_logs); - let shadow_deduplicated_logs = Self::gather_logs(&shadow.deduplicated_storage_logs); - self.check_match( - "deduplicated_storage_logs", - &main_deduplicated_logs, - &shadow_deduplicated_logs, - ); - } - - fn gather_logs(logs: &[StorageLog]) -> BTreeMap { - logs.iter() - .filter(|log| log.is_write()) - .map(|log| (log.key, log)) - .collect() - } - - fn into_result(self) -> anyhow::Result<()> { - if self.0.is_empty() { - Ok(()) - } else { - Err(anyhow::anyhow!( - "divergence between old VM and new VM execution: [{:?}]", - self.0 - )) - } - } -} - -// The new VM doesn't support read logs yet, doesn't order logs by access and deduplicates them -// inside the VM, hence this auxiliary struct. -#[derive(PartialEq)] -struct UniqueStorageLogs(BTreeMap); - -impl fmt::Debug for UniqueStorageLogs { - fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut map = formatter.debug_map(); - for log in self.0.values() { - map.entry( - &format!("{:?}:{:?}", log.log.key.address(), log.log.key.key()), - &format!("{:?} -> {:?}", log.previous_value, log.log.value), - ); - } - map.finish() - } -} - -impl UniqueStorageLogs { - fn new(logs: &[StorageLogWithPreviousValue]) -> Self { - let mut unique_logs = BTreeMap::::new(); - for log in logs { - if !log.log.is_write() { - continue; - } - if let Some(existing_log) = unique_logs.get_mut(&log.log.key) { - existing_log.log.value = log.log.value; - } else { - unique_logs.insert(log.log.key, *log); - } - } - - // Remove no-op write logs (i.e., X -> X writes) produced by the old VM. 
- unique_logs.retain(|_, log| log.previous_value != log.log.value); - Self(unique_logs) - } -} - -impl VmInterfaceHistoryEnabled for ShadowVm -where - S: ReadStorage, - T: VmInterfaceHistoryEnabled, -{ - fn make_snapshot(&mut self) { - self.shadow.make_snapshot(); - self.main.make_snapshot(); - } - - fn rollback_to_the_latest_snapshot(&mut self) { - self.shadow.rollback_to_the_latest_snapshot(); - self.main.rollback_to_the_latest_snapshot(); - } - - fn pop_snapshot_no_rollback(&mut self) { - self.shadow.pop_snapshot_no_rollback(); - self.main.pop_snapshot_no_rollback(); - } -} diff --git a/core/lib/multivm/src/versions/testonly.rs b/core/lib/multivm/src/versions/testonly.rs new file mode 100644 index 00000000000..adfdbd0b327 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly.rs @@ -0,0 +1,96 @@ +use zksync_contracts::BaseSystemContracts; +use zksync_test_account::Account; +use zksync_types::{ + block::L2BlockHasher, fee_model::BatchFeeInput, get_code_key, get_is_account_key, + helpers::unix_timestamp_ms, utils::storage_key_for_eth_balance, Address, L1BatchNumber, + L2BlockNumber, L2ChainId, ProtocolVersionId, U256, +}; +use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; + +use crate::{ + interface::{storage::InMemoryStorage, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}, + vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, +}; + +pub(super) fn default_system_env() -> SystemEnv { + SystemEnv { + zk_porter_available: false, + version: ProtocolVersionId::latest(), + base_system_smart_contracts: BaseSystemContracts::playground(), + bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, + execution_mode: TxExecutionMode::VerifyExecute, + default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, + chain_id: L2ChainId::from(270), + pubdata_params: Default::default(), + } +} + +pub(super) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { + let timestamp = unix_timestamp_ms(); + L1BatchEnv { + previous_batch_hash: None, + number, + timestamp, + fee_input: BatchFeeInput::l1_pegged( + 50_000_000_000, // 50 gwei + 250_000_000, // 0.25 gwei + ), + fee_account: Address::random(), + enforced_base_fee: None, + first_l2_block: L2BlockEnv { + number: 1, + timestamp, + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), + max_virtual_blocks_to_create: 100, + }, + } +} + +pub(super) fn make_account_rich(storage: &mut InMemoryStorage, account: &Account) { + let key = storage_key_for_eth_balance(&account.address); + storage.set_value(key, u256_to_h256(U256::from(10_u64.pow(19)))); +} + +#[derive(Debug, Clone)] +pub(super) struct ContractToDeploy { + bytecode: Vec, + address: Address, + is_account: bool, +} + +impl ContractToDeploy { + pub fn new(bytecode: Vec, address: Address) -> Self { + Self { + bytecode, + address, + is_account: false, + } + } + + // FIXME: restore this method if needed in the main branch + // pub fn account(bytecode: Vec, address: Address) -> Self { + // Self { + // bytecode, + // address, + // is_account: true, + // } + // } + + pub fn insert(&self, storage: &mut InMemoryStorage) { + let deployer_code_key = get_code_key(&self.address); + storage.set_value(deployer_code_key, hash_bytecode(&self.bytecode)); + if self.is_account { + let is_account_key = get_is_account_key(&self.address); + storage.set_value(is_account_key, u256_to_h256(1_u32.into())); + } + storage.store_factory_dep(hash_bytecode(&self.bytecode), self.bytecode.clone()); + } + + // FIXME: restore this method if needed in the main branch + // /// Inserts the contracts 
into the test environment, bypassing the deployer system contract. + // pub fn insert_all(contracts: &[Self], storage: &mut InMemoryStorage) { + // for contract in contracts { + // contract.insert(storage); + // } + // } +} diff --git a/core/lib/multivm/src/versions/tests.rs b/core/lib/multivm/src/versions/tests.rs new file mode 100644 index 00000000000..2af32322519 --- /dev/null +++ b/core/lib/multivm/src/versions/tests.rs @@ -0,0 +1,276 @@ +//! Shadow VM tests. Since there are no real VM implementations in the `vm_interface` crate where `ShadowVm` is defined, +//! these tests are placed here. + +use assert_matches::assert_matches; +use ethabi::Contract; +use zksync_contracts::{ + get_loadnext_contract, load_contract, read_bytecode, + test_contracts::LoadnextContractExecutionParams, +}; +use zksync_test_account::{Account, TxType}; +use zksync_types::{ + block::L2BlockHasher, fee::Fee, AccountTreeId, Address, Execute, L1BatchNumber, L2BlockNumber, + ProtocolVersionId, StorageKey, H256, U256, +}; +use zksync_utils::bytecode::hash_bytecode; + +use crate::{ + interface::{ + storage::{InMemoryStorage, StorageView}, + utils::ShadowVm, + ExecutionResult, L1BatchEnv, L2BlockEnv, VmFactory, VmInterface, VmInterfaceExt, + }, + utils::get_max_gas_per_pubdata_byte, + versions::testonly::{ + default_l1_batch, default_system_env, make_account_rich, ContractToDeploy, + }, + vm_fast, + vm_latest::{self, HistoryEnabled}, +}; + +type ReferenceVm = vm_latest::Vm, HistoryEnabled>; + +fn hash_block(block_env: L2BlockEnv, tx_hashes: &[H256]) -> H256 { + let mut hasher = L2BlockHasher::new( + L2BlockNumber(block_env.number), + block_env.timestamp, + block_env.prev_block_hash, + ); + for &tx_hash in tx_hashes { + hasher.push_tx_hash(tx_hash); + } + hasher.finalize(ProtocolVersionId::latest()) +} + +fn tx_fee(gas_limit: u32) -> Fee { + Fee { + gas_limit: U256::from(gas_limit), + max_fee_per_gas: U256::from(250_000_000), + max_priority_fee_per_gas: U256::from(0), + gas_per_pubdata_limit: U256::from(get_max_gas_per_pubdata_byte( + ProtocolVersionId::latest().into(), + )), + } +} + +#[derive(Debug)] +struct Harness { + alice: Account, + bob: Account, + storage_contract: ContractToDeploy, + storage_contract_abi: Contract, + current_block: L2BlockEnv, +} + +impl Harness { + const STORAGE_CONTRACT_PATH: &'static str = + "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json"; + const STORAGE_CONTRACT_ADDRESS: Address = Address::repeat_byte(23); + + fn new(l1_batch_env: &L1BatchEnv) -> Self { + Self { + alice: Account::random(), + bob: Account::random(), + storage_contract: ContractToDeploy::new( + read_bytecode(Self::STORAGE_CONTRACT_PATH), + Self::STORAGE_CONTRACT_ADDRESS, + ), + storage_contract_abi: load_contract(Self::STORAGE_CONTRACT_PATH), + current_block: l1_batch_env.first_l2_block, + } + } + + fn setup_storage(&self, storage: &mut InMemoryStorage) { + make_account_rich(storage, &self.alice); + make_account_rich(storage, &self.bob); + + self.storage_contract.insert(storage); + let storage_contract_key = StorageKey::new( + AccountTreeId::new(Self::STORAGE_CONTRACT_ADDRESS), + H256::zero(), + ); + storage.set_value_hashed_enum( + storage_contract_key.hashed_key(), + 999, + H256::from_low_u64_be(42), + ); + } + + // fn assert_dump(dump: &mut VmDump) { + // assert_eq!(dump.l1_batch_number(), L1BatchNumber(1)); + // let tx_counts_per_block: Vec<_> = + // dump.l2_blocks.iter().map(|block| block.txs.len()).collect(); + // assert_eq!(tx_counts_per_block, [1, 2, 2, 0]); + + // let 
storage_contract_key = StorageKey::new( + // AccountTreeId::new(Self::STORAGE_CONTRACT_ADDRESS), + // H256::zero(), + // ); + // let value = dump.storage.read_value(&storage_contract_key); + // assert_eq!(value, H256::from_low_u64_be(42)); + // let enum_index = dump.storage.get_enumeration_index(&storage_contract_key); + // assert_eq!(enum_index, Some(999)); + // } + + fn new_block(&mut self, vm: &mut impl VmInterface, tx_hashes: &[H256]) { + self.current_block = L2BlockEnv { + number: self.current_block.number + 1, + timestamp: self.current_block.timestamp + 1, + prev_block_hash: hash_block(self.current_block, tx_hashes), + max_virtual_blocks_to_create: self.current_block.max_virtual_blocks_to_create, + }; + vm.start_new_l2_block(self.current_block); + } + + fn execute_on_vm(&mut self, vm: &mut impl VmInterface) { + let transfer_exec = Execute { + contract_address: Some(self.bob.address()), + calldata: vec![], + value: 1_000_000_000.into(), + factory_deps: vec![], + }; + let transfer_to_bob = self + .alice + .get_l2_tx_for_execute(transfer_exec.clone(), None); + let (compression_result, exec_result) = + vm.execute_transaction_with_bytecode_compression(transfer_to_bob.clone(), true); + compression_result.unwrap(); + assert!(!exec_result.result.is_failed(), "{:#?}", exec_result); + + self.new_block(vm, &[transfer_to_bob.hash()]); + + let out_of_gas_transfer = self.bob.get_l2_tx_for_execute( + transfer_exec.clone(), + Some(tx_fee(200_000)), // high enough to pass validation + ); + let (compression_result, exec_result) = + vm.execute_transaction_with_bytecode_compression(out_of_gas_transfer.clone(), true); + compression_result.unwrap(); + assert_matches!(exec_result.result, ExecutionResult::Revert { .. }); + + let write_fn = self.storage_contract_abi.function("simpleWrite").unwrap(); + let simple_write_tx = self.alice.get_l2_tx_for_execute( + Execute { + contract_address: Some(Self::STORAGE_CONTRACT_ADDRESS), + calldata: write_fn.encode_input(&[]).unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (compression_result, exec_result) = + vm.execute_transaction_with_bytecode_compression(simple_write_tx.clone(), true); + compression_result.unwrap(); + assert!(!exec_result.result.is_failed(), "{:#?}", exec_result); + + let storage_contract_key = StorageKey::new( + AccountTreeId::new(Self::STORAGE_CONTRACT_ADDRESS), + H256::zero(), + ); + let storage_logs = &exec_result.logs.storage_logs; + assert!(storage_logs.iter().any(|log| { + log.log.key == storage_contract_key && log.previous_value == H256::from_low_u64_be(42) + })); + + self.new_block(vm, &[out_of_gas_transfer.hash(), simple_write_tx.hash()]); + + let deploy_tx = self.alice.get_deploy_tx( + &get_loadnext_contract().bytecode, + Some(&[ethabi::Token::Uint(100.into())]), + TxType::L2, + ); + let (compression_result, exec_result) = + vm.execute_transaction_with_bytecode_compression(deploy_tx.tx.clone(), true); + compression_result.unwrap(); + assert!(!exec_result.result.is_failed(), "{:#?}", exec_result); + + let load_test_tx = self.bob.get_loadnext_transaction( + deploy_tx.address, + LoadnextContractExecutionParams::default(), + TxType::L2, + ); + let (compression_result, exec_result) = + vm.execute_transaction_with_bytecode_compression(load_test_tx.clone(), true); + compression_result.unwrap(); + assert!(!exec_result.result.is_failed(), "{:#?}", exec_result); + + self.new_block(vm, &[deploy_tx.tx.hash(), load_test_tx.hash()]); + vm.finish_batch(); + } +} + +fn sanity_check_vm() -> (Vm, Harness) +where + Vm: 
VmFactory>, +{ + let system_env = default_system_env(); + let l1_batch_env = default_l1_batch(L1BatchNumber(1)); + let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode); + let mut harness = Harness::new(&l1_batch_env); + harness.setup_storage(&mut storage); + + let storage = StorageView::new(storage).to_rc_ptr(); + let mut vm = Vm::new(l1_batch_env, system_env, storage); + harness.execute_on_vm(&mut vm); + (vm, harness) +} + +#[test] +fn sanity_check_harness() { + sanity_check_vm::(); +} + +#[test] +fn sanity_check_harness_on_new_vm() { + sanity_check_vm::>(); +} + +#[test] +fn sanity_check_shadow_vm() { + let system_env = default_system_env(); + let l1_batch_env = default_l1_batch(L1BatchNumber(1)); + let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode); + let mut harness = Harness::new(&l1_batch_env); + harness.setup_storage(&mut storage); + + // We need separate storage views since they are mutated by the VM execution + let main_storage = StorageView::new(&storage).to_rc_ptr(); + let shadow_storage = StorageView::new(&storage).to_rc_ptr(); + let mut vm = ShadowVm::<_, ReferenceVm<_>, ReferenceVm<_>>::with_custom_shadow( + l1_batch_env, + system_env, + main_storage, + shadow_storage, + ); + harness.execute_on_vm(&mut vm); +} + +// FIXME: gateway is not supported in fast vm +// #[test] +// fn shadow_vm_basics() { +// let (vm, harness) = sanity_check_vm::(); +// let mut dump = vm.dump_state(); +// Harness::assert_dump(&mut dump); + +// // Test standard playback functionality. +// let replayed_dump = dump.clone().play_back::>().dump_state(); +// pretty_assertions::assert_eq!(replayed_dump, dump); + +// // Check that the VM executes identically when reading from the original storage and one restored from the dump. +// let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode); +// harness.setup_storage(&mut storage); +// let storage = StorageView::new(storage).to_rc_ptr(); + +// let vm = dump +// .clone() +// .play_back_custom(|l1_batch_env, system_env, dump_storage| { +// ShadowVm::<_, ReferenceVm, ReferenceVm<_>>::with_custom_shadow( +// l1_batch_env, +// system_env, +// storage, +// dump_storage, +// ) +// }); +// let new_dump = vm.dump_state(); +// pretty_assertions::assert_eq!(new_dump, dump); +// } diff --git a/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs b/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs index a29e1101d52..34c70e0f9c4 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs @@ -153,7 +153,7 @@ pub fn get_create_execute(code: &[u8], calldata: &[u8]) -> Execute { .expect("failed to encode parameters"); Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata, factory_deps: vec![code.to_vec()], value: U256::zero(), diff --git a/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs b/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs index 788a52206e8..0285320daa3 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs @@ -22,7 +22,7 @@ use crate::vm_1_3_2::vm_with_bootloader::{ pub struct TransactionData { pub tx_type: u8, pub from: Address, - pub to: Address, + pub to: Option
<Address>, pub gas_limit: U256, pub pubdata_price_limit: U256, pub max_fee_per_gas: U256, @@ -170,7 +170,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -593,7 +593,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/gas_limit.rs index 0ec921450da..9dfda9e1a68 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/gas_limit.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/tests/gas_limit.rs @@ -21,10 +21,8 @@ fn test_tx_gas_limit_offset() { let gas_limit = 9999.into(); let tx = vm.rich_accounts[0].get_l2_tx_for_execute( Execute { - contract_address: Default::default(), - calldata: vec![], - value: Default::default(), - factory_deps: None, + contract_address: Some(Default::default()), + ..Default::default() }, Some(Fee { gas_limit, diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/l1_tx_execution.rs index e98fc23c6eb..83e0f1715b8 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/tests/l1_tx_execution.rs @@ -166,7 +166,7 @@ fn test_l1_tx_execution_high_gas_limit() { let mut tx = account.get_l1_tx( Execute { - contract_address: L1_MESSENGER_ADDRESS, + contract_address: Some(L1_MESSENGER_ADDRESS), value: 0.into(), factory_deps: None, calldata, diff --git a/core/lib/multivm/src/versions/vm_1_4_1/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_1_4_1/types/internals/transaction_data.rs index 1379b853a54..f7384da76d0 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/types/internals/transaction_data.rs @@ -22,7 +22,7 @@ pub(crate) struct TransactionData { pub(crate) tx_type: u8, pub(crate) from: Address, - pub(crate) to: Address, + pub(crate) to: Option<Address>
, pub(crate) gas_limit: U256, pub(crate) pubdata_price_limit: U256, pub(crate) max_fee_per_gas: U256, @@ -169,7 +169,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -311,7 +311,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/gas_limit.rs index 6a57fd07ae7..b84e9d32126 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/gas_limit.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/tests/gas_limit.rs @@ -20,10 +20,8 @@ fn test_tx_gas_limit_offset() { let gas_limit = 9999.into(); let tx = vm.rich_accounts[0].get_l2_tx_for_execute( Execute { - contract_address: Default::default(), - calldata: vec![], - value: Default::default(), - factory_deps: None, + contract_address: Some(Default::default()), + ..Default::default() }, Some(Fee { gas_limit, diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/l1_tx_execution.rs index 021f5554873..40915cf931c 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/tests/l1_tx_execution.rs @@ -165,7 +165,7 @@ fn test_l1_tx_execution_high_gas_limit() { let mut tx = account.get_l1_tx( Execute { - contract_address: L1_MESSENGER_ADDRESS, + contract_address: Some(L1_MESSENGER_ADDRESS), value: 0.into(), factory_deps: None, calldata, diff --git a/core/lib/multivm/src/versions/vm_1_4_2/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_1_4_2/types/internals/transaction_data.rs index 3498e51ec30..38280aa8051 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/types/internals/transaction_data.rs @@ -22,7 +22,7 @@ use crate::vm_1_4_2::{ pub(crate) struct TransactionData { pub(crate) tx_type: u8, pub(crate) from: Address, - pub(crate) to: Address, + pub(crate) to: Option
, pub(crate) gas_limit: U256, pub(crate) pubdata_price_limit: U256, pub(crate) max_fee_per_gas: U256, @@ -169,7 +169,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -311,7 +311,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/gas_limit.rs index 30a65097111..637fd94c1c8 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/gas_limit.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/tests/gas_limit.rs @@ -21,10 +21,8 @@ fn test_tx_gas_limit_offset() { let gas_limit = 9999.into(); let tx = vm.rich_accounts[0].get_l2_tx_for_execute( Execute { - contract_address: Default::default(), - calldata: vec![], - value: Default::default(), - factory_deps: None, + contract_address: Some(Default::default()), + ..Default::default() }, Some(Fee { gas_limit, diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/transaction_data.rs index ad740a279dc..8bf575effe0 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/transaction_data.rs @@ -22,7 +22,7 @@ use crate::vm_boojum_integration::{ pub(crate) struct TransactionData { pub(crate) tx_type: u8, pub(crate) from: Address, - pub(crate) to: Address, + pub(crate) to: Option
, pub(crate) gas_limit: U256, pub(crate) pubdata_price_limit: U256, pub(crate) max_fee_per_gas: U256, @@ -169,7 +169,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -325,7 +325,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs b/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs index de6ead71e65..e6b1a53e9d0 100644 --- a/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs +++ b/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs @@ -1,5 +1,5 @@ use circuit_sequencer_api_1_5_0::{geometry_config::get_geometry_config, toolset::GeometryConfig}; -use zksync_vm2::{CycleStats, Opcode, OpcodeType, StateInterface, Tracer}; +use zksync_vm2::interface::{CycleStats, Opcode, OpcodeType, StateInterface, Tracer}; use zksync_vm_interface::CircuitStatistic; use crate::vm_latest::tracers::circuits_capacity::*; @@ -17,7 +17,7 @@ pub(crate) struct CircuitsTracer { keccak256_cycles: u32, ecrecover_cycles: u32, sha256_cycles: u32, - secp256k1_verify_cycles: u32, + secp256r1_verify_cycles: u32, transient_storage_checker_cycles: u32, } @@ -115,7 +115,7 @@ impl Tracer for CircuitsTracer { CycleStats::Keccak256(cycles) => self.keccak256_cycles += cycles, CycleStats::Sha256(cycles) => self.sha256_cycles += cycles, CycleStats::EcRecover(cycles) => self.ecrecover_cycles += cycles, - CycleStats::Secp256k1Verify(cycles) => self.secp256k1_verify_cycles += cycles, + CycleStats::Secp256r1Verify(cycles) => self.secp256r1_verify_cycles += cycles, CycleStats::Decommit(cycles) => self.code_decommitter_cycles += cycles, CycleStats::StorageRead => self.storage_application_cycles += 1, CycleStats::StorageWrite => self.storage_application_cycles += 2, @@ -146,7 +146,7 @@ impl CircuitsTracer { ecrecover: self.ecrecover_cycles as f32 / GEOMETRY_CONFIG.cycles_per_ecrecover_circuit as f32, sha256: self.sha256_cycles as f32 / GEOMETRY_CONFIG.cycles_per_sha256_circuit as f32, - secp256k1_verify: self.secp256k1_verify_cycles as f32 + secp256k1_verify: self.secp256r1_verify_cycles as f32 / GEOMETRY_CONFIG.cycles_per_secp256r1_verify_circuit as f32, transient_storage_checker: self.transient_storage_checker_cycles as f32 / GEOMETRY_CONFIG.cycles_per_transient_storage_sorter as f32, diff --git a/core/lib/multivm/src/versions/vm_fast/events.rs b/core/lib/multivm/src/versions/vm_fast/events.rs index 2312c3d97b4..294e8adce32 100644 --- a/core/lib/multivm/src/versions/vm_fast/events.rs +++ b/core/lib/multivm/src/versions/vm_fast/events.rs @@ -1,6 +1,6 @@ use zksync_types::{L1BatchNumber, H256}; use zksync_utils::h256_to_account_address; -use zksync_vm2::Event; +use zksync_vm2::interface::Event; use crate::interface::VmEvent; @@ -23,18 +23,21 @@ impl EventAccumulator { } } -pub(crate) fn merge_events(events: &[Event], block_number: L1BatchNumber) -> Vec { +pub(crate) fn merge_events( + events: impl Iterator, + block_number: L1BatchNumber, +) -> Vec { let mut result = vec![]; let mut current: Option<(usize, u32, EventAccumulator)> = None; - for message in events.iter() { + for event in 
events { let Event { shard_id, is_first, tx_number, key, value, - } = message.clone(); + } = event; if !is_first { if let Some((mut remaining_data_length, mut remaining_topics, mut event)) = diff --git a/core/lib/multivm/src/versions/vm_fast/glue.rs b/core/lib/multivm/src/versions/vm_fast/glue.rs index f24c82af11e..c2d38f351c0 100644 --- a/core/lib/multivm/src/versions/vm_fast/glue.rs +++ b/core/lib/multivm/src/versions/vm_fast/glue.rs @@ -1,18 +1,19 @@ use zksync_types::l2_to_l1_log::{L2ToL1Log, SystemL2ToL1Log}; use zksync_utils::u256_to_h256; +use zksync_vm2::interface; use crate::glue::GlueFrom; -impl GlueFrom<&zksync_vm2::L2ToL1Log> for SystemL2ToL1Log { - fn glue_from(value: &zksync_vm2::L2ToL1Log) -> Self { - let zksync_vm2::L2ToL1Log { +impl GlueFrom for SystemL2ToL1Log { + fn glue_from(value: interface::L2ToL1Log) -> Self { + let interface::L2ToL1Log { key, value, is_service, address, shard_id, tx_number, - } = *value; + } = value; Self(L2ToL1Log { shard_id, diff --git a/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs index 15af9d868ad..dd407c61668 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs @@ -13,11 +13,12 @@ use zksync_types::{ use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; use super::{ - tester::{default_l1_batch, get_empty_storage, VmTesterBuilder}, + tester::{get_empty_storage, VmTesterBuilder}, utils::{get_complex_upgrade_abi, read_complex_upgrade}, }; use crate::{ interface::{L1BatchEnv, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, + versions::testonly::default_l1_batch, vm_latest::constants::{ BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD, BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD, BOOTLOADER_BATCH_TIP_OVERHEAD, @@ -147,7 +148,7 @@ fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { for (i, data) in txs_data.into_iter().enumerate() { let tx = account.get_l2_tx_for_execute( Execute { - contract_address: CONTRACT_FORCE_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_FORCE_DEPLOYER_ADDRESS), calldata: data, value: U256::zero(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs index 5c1158a5909..48e1b10de44 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs @@ -1,6 +1,6 @@ use assert_matches::assert_matches; use zksync_types::U256; -use zksync_vm2::HeapId; +use zksync_vm2::interface::HeapId; use crate::{ interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterfaceExt}, diff --git a/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs b/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs index 0270ac35475..a119a31618e 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs @@ -21,7 +21,7 @@ fn test_circuits() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: Address::random(), + contract_address: Some(Address::random()), calldata: Vec::new(), value: U256::from(1u8), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs index caea07617dd..156af43dcf2 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs +++ 
b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs @@ -6,6 +6,7 @@ use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; use crate::{ interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, + versions::testonly::ContractToDeploy, vm_fast::{ circuits_tracer::CircuitsTracer, tests::{ @@ -41,10 +42,9 @@ fn test_code_oracle() { .with_empty_in_memory_storage() .with_execution_mode(TxExecutionMode::VerifyExecute) .with_random_rich_accounts(1) - .with_custom_contracts(vec![( + .with_custom_contracts(vec![ContractToDeploy::new( precompile_contract_bytecode, precompiles_contract_address, - false, )]) .with_storage(storage) .build(); @@ -58,7 +58,7 @@ fn test_code_oracle() { // Firstly, let's ensure that the contract works. let tx1 = account.get_l2_tx_for_execute( Execute { - contract_address: precompiles_contract_address, + contract_address: Some(precompiles_contract_address), calldata: call_code_oracle_function .encode_input(&[ Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), @@ -82,7 +82,7 @@ fn test_code_oracle() { // the decommitted bytecode gets erased (it shouldn't). let tx2 = account.get_l2_tx_for_execute( Execute { - contract_address: precompiles_contract_address, + contract_address: Some(precompiles_contract_address), calldata: call_code_oracle_function .encode_input(&[ Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), @@ -134,10 +134,9 @@ fn test_code_oracle_big_bytecode() { .with_empty_in_memory_storage() .with_execution_mode(TxExecutionMode::VerifyExecute) .with_random_rich_accounts(1) - .with_custom_contracts(vec![( + .with_custom_contracts(vec![ContractToDeploy::new( precompile_contract_bytecode, precompiles_contract_address, - false, )]) .with_storage(storage) .build(); @@ -152,7 +151,7 @@ fn test_code_oracle_big_bytecode() { // Firstly, let's ensure that the contract works. 
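// Aside: a minimal sketch of the `ContractToDeploy` helper that replaces the old
// `(Vec<u8>, Address, bool)` tuples in the test builders throughout this diff.
// The constructor names and the meaning of the former bool flag come from the
// call sites below and from the removed `insert_contracts` helper; the field
// names and bodies are assumptions, not the actual implementation.
pub(crate) struct ContractToDeploy {
    bytecode: Vec<u8>,
    address: Address,
    is_account: bool,
}

impl ContractToDeploy {
    // Replaces `(bytecode, address, false)`: a plain contract.
    pub(crate) fn new(bytecode: Vec<u8>, address: Address) -> Self {
        Self { bytecode, address, is_account: false }
    }

    // Replaces `(bytecode, address, true)`: the contract is also an account.
    pub(crate) fn account(bytecode: Vec<u8>, address: Address) -> Self {
        Self { bytecode, address, is_account: true }
    }
}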
let tx1 = account.get_l2_tx_for_execute( Execute { - contract_address: precompiles_contract_address, + contract_address: Some(precompiles_contract_address), calldata: call_code_oracle_function .encode_input(&[ Token::FixedBytes(big_zkevm_bytecode_hash.0.to_vec()), @@ -198,10 +197,9 @@ fn refunds_in_code_oracle() { let mut vm = VmTesterBuilder::new() .with_execution_mode(TxExecutionMode::VerifyExecute) .with_random_rich_accounts(1) - .with_custom_contracts(vec![( + .with_custom_contracts(vec![ContractToDeploy::new( precompile_contract_bytecode.clone(), precompiles_contract_address, - false, )]) .with_storage(storage.clone()) .build(); @@ -220,7 +218,7 @@ fn refunds_in_code_oracle() { let tx = account.get_l2_tx_for_execute( Execute { - contract_address: precompiles_contract_address, + contract_address: Some(precompiles_contract_address), calldata: call_code_oracle_function .encode_input(&[ Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), diff --git a/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs index b7a2154bdc7..3f0a47b980e 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs @@ -18,7 +18,10 @@ fn test_tx_gas_limit_offset() { let gas_limit = 9999.into(); let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute::default(), + Execute { + contract_address: Some(Default::default()), + ..Default::default() + }, Some(Fee { gas_limit, ..Account::default_fee() diff --git a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs index 3fcef71add0..b8942dcbb6a 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs @@ -14,6 +14,7 @@ use crate::{ storage::ReadStorage, ExecutionResult, TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, VmInterfaceExt, }, + versions::testonly::ContractToDeploy, vm_fast::{ tests::{ tester::{TxType, VmTester, VmTesterBuilder}, @@ -65,7 +66,7 @@ fn test_get_used_contracts() { let account2 = Account::random(); let tx2 = account2.get_l1_tx( Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata: big_calldata, value: Default::default(), factory_deps: vec![vec![1; 32]], @@ -123,7 +124,10 @@ fn execute_proxy_counter(gas: u32) -> (VmTester, ProxyCounterData, VmExecutionRe let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() - .with_custom_contracts(vec![(counter_bytecode, counter_address, false)]) + .with_custom_contracts(vec![ContractToDeploy::new( + counter_bytecode, + counter_address, + )]) .with_execution_mode(TxExecutionMode::VerifyExecute) .with_random_rich_accounts(1) .build(); @@ -150,7 +154,7 @@ fn execute_proxy_counter(gas: u32) -> (VmTester, ProxyCounterData, VmExecutionRe let increment = proxy_counter_abi.function("increment").unwrap(); let increment_tx = account.get_l2_tx_for_execute( Execute { - contract_address: deploy_tx.address, + contract_address: Some(deploy_tx.address), calldata: increment .encode_input(&[Token::Uint(1.into()), Token::Uint(gas.into())]) .unwrap(), @@ -197,7 +201,7 @@ fn get_used_contracts_with_out_of_gas_far_call() { let increment = proxy_counter_abi.function("increment").unwrap(); let increment_tx = account.get_l2_tx_for_execute( Execute { - contract_address: data.proxy_counter_address, + contract_address: 
Some(data.proxy_counter_address), calldata: increment .encode_input(&[Token::Uint(1.into()), Token::Uint(u64::MAX.into())]) .unwrap(), diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs index b87a38bf733..5897ec5f266 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs @@ -175,7 +175,7 @@ fn test_l1_tx_execution_high_gas_limit() { let mut tx = account.get_l1_tx( Execute { - contract_address: L1_MESSENGER_ADDRESS, + contract_address: Some(L1_MESSENGER_ADDRESS), value: 0.into(), factory_deps: vec![], calldata, diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs index a374f63608b..fde94d9da6c 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs @@ -18,10 +18,8 @@ use crate::{ storage::ReadStorage, ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, }, - vm_fast::{ - tests::tester::{default_l1_batch, VmTesterBuilder}, - vm::Vm, - }, + versions::testonly::default_l1_batch, + vm_fast::{tests::tester::VmTesterBuilder, vm::Vm}, vm_latest::{ constants::{TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO}, utils::l2_blocks::get_l2_block_hash_key, @@ -37,7 +35,7 @@ fn get_l1_noop() -> Transaction { ..Default::default() }), execute: Execute { - contract_address: H160::zero(), + contract_address: Some(H160::zero()), calldata: vec![], value: U256::zero(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs index 43cceb8b0a0..6d1e0f016e9 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs @@ -5,6 +5,7 @@ use crate::{ ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterfaceExt, VmRevertReason, }, + versions::testonly::ContractToDeploy, vm_fast::tests::{ tester::{Account, VmTesterBuilder}, utils::read_nonce_holder_tester, @@ -42,10 +43,9 @@ fn test_nonce_holder() { .with_empty_in_memory_storage() .with_execution_mode(TxExecutionMode::VerifyExecute) .with_deployer() - .with_custom_contracts(vec![( + .with_custom_contracts(vec![ContractToDeploy::account( read_nonce_holder_tester().to_vec(), account.address, - true, )]) .with_rich_accounts(vec![account.clone()]) .build(); @@ -60,7 +60,7 @@ fn test_nonce_holder() { vm.reset_state(true); let mut transaction = account.get_l2_tx_for_execute_with_nonce( Execute { - contract_address: account.address, + contract_address: Some(account.address), calldata: vec![12], value: Default::default(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs index f77eeb4f126..5bc3f614d61 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs @@ -4,6 +4,7 @@ use zksync_types::{Address, Execute}; use super::{tester::VmTesterBuilder, utils::read_precompiles_contract}; use crate::{ interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + versions::testonly::ContractToDeploy, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; @@ -18,7 +19,7 @@ fn test_keccak() { .with_deployer() 
.with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) + .with_custom_contracts(vec![ContractToDeploy::account(contract, address)]) .build(); // calldata for `doKeccak(1000)`. @@ -28,7 +29,7 @@ fn test_keccak() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: hex::decode(keccak1000_calldata).unwrap(), value: 0.into(), factory_deps: vec![], @@ -55,7 +56,7 @@ fn test_sha256() { .with_deployer() .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) + .with_custom_contracts(vec![ContractToDeploy::account(contract, address)]) .build(); // calldata for `doSha256(1000)`. @@ -65,7 +66,7 @@ fn test_sha256() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: hex::decode(sha1000_calldata).unwrap(), value: 0.into(), factory_deps: vec![], @@ -95,7 +96,7 @@ fn test_ecrecover() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: account.address, + contract_address: Some(account.address), calldata: vec![], value: 0.into(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs b/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs index 5ad6e3fa4f3..1856995149a 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs @@ -3,6 +3,7 @@ use zksync_types::{Address, Execute, U256}; use crate::{ interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, + versions::testonly::ContractToDeploy, vm_fast::tests::{ tester::{DeployContractsTx, TxType, VmTesterBuilder}, utils::{read_expensive_contract, read_test_contract}, @@ -172,16 +173,15 @@ fn negative_pubdata_for_transaction() { .with_empty_in_memory_storage() .with_execution_mode(TxExecutionMode::VerifyExecute) .with_random_rich_accounts(1) - .with_custom_contracts(vec![( + .with_custom_contracts(vec![ContractToDeploy::new( expensive_contract_bytecode, expensive_contract_address, - false, )]) .build(); let expensive_tx = vm.rich_accounts[0].get_l2_tx_for_execute( Execute { - contract_address: expensive_contract_address, + contract_address: Some(expensive_contract_address), calldata: expensive_function .encode_input(&[Token::Uint(10.into())]) .unwrap(), @@ -200,7 +200,7 @@ fn negative_pubdata_for_transaction() { // This transaction cleans all initial writes in the contract, thus having negative `pubdata` impact. 
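// (Pubdata is charged on state diffs, so a transaction that restores storage
// slots to their original values removes them from the diff and earns a refund;
// the batch's pubdata counter can legitimately decrease here.)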
let clean_up_tx = vm.rich_accounts[0].get_l2_tx_for_execute( Execute { - contract_address: expensive_contract_address, + contract_address: Some(expensive_contract_address), calldata: cleanup_function.encode_input(&[]).unwrap(), value: U256::zero(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs index 68e49b202a9..1fd2ebd523b 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs @@ -12,6 +12,7 @@ use crate::{ interface::{ storage::ReadStorage, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, }, + versions::testonly::ContractToDeploy, vm_fast::tests::{ tester::{Account, VmTester, VmTesterBuilder}, utils::read_many_owners_custom_account_contract, @@ -50,7 +51,10 @@ async fn test_require_eip712() { let (bytecode, contract) = read_many_owners_custom_account_contract(); let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() - .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)]) + .with_custom_contracts(vec![ContractToDeploy::account( + bytecode, + account_abstraction.address, + )]) .with_execution_mode(TxExecutionMode::VerifyExecute) .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) .build(); @@ -68,7 +72,7 @@ async fn test_require_eip712() { let tx = private_account.get_l2_tx_for_execute( Execute { - contract_address: account_abstraction.address, + contract_address: Some(account_abstraction.address), calldata: encoded_input, value: Default::default(), factory_deps: vec![], @@ -125,7 +129,7 @@ async fn test_require_eip712() { // // Now send the 'classic' EIP712 transaction let tx_712 = L2Tx::new( - beneficiary.address, + Some(beneficiary.address), vec![], Nonce(1), Fee { diff --git a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs index f912203737f..548bf8daadf 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs @@ -101,7 +101,7 @@ fn test_vm_loadnext_rollbacks() { let loadnext_tx_1 = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: LoadnextContractExecutionParams { reads: 100, writes: 100, @@ -119,7 +119,7 @@ fn test_vm_loadnext_rollbacks() { let loadnext_tx_2 = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: LoadnextContractExecutionParams { reads: 100, writes: 100, diff --git a/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs b/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs index a61a0a2bd91..55ca372c4a9 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs @@ -48,7 +48,7 @@ fn test_sekp256r1() { let tx = account.get_l2_tx_for_execute( Execute { - contract_address: P256VERIFY_PRECOMPILE_ADDRESS, + contract_address: Some(P256VERIFY_PRECOMPILE_ADDRESS), calldata: [digest, encoded_r, encoded_s, x, y].concat(), value: U256::zero(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/storage.rs b/core/lib/multivm/src/versions/vm_fast/tests/storage.rs index 7fe15ca7bcd..2cfadb640e7 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/storage.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/storage.rs @@ -6,6 +6,7 
@@ use crate::{ interface::{ TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, }, + versions::testonly::ContractToDeploy, vm_fast::tests::tester::VmTesterBuilder, }; @@ -23,14 +24,14 @@ fn test_storage(first_tx_calldata: Vec, second_tx_calldata: Vec) -> u32 .with_execution_mode(TxExecutionMode::VerifyExecute) .with_deployer() .with_random_rich_accounts(1) - .with_custom_contracts(vec![(bytecode, test_contract_address, false)]) + .with_custom_contracts(vec![ContractToDeploy::new(bytecode, test_contract_address)]) .build(); let account = &mut vm.rich_accounts[0]; let tx1 = account.get_l2_tx_for_execute( Execute { - contract_address: test_contract_address, + contract_address: Some(test_contract_address), calldata: first_tx_calldata, value: 0.into(), factory_deps: vec![], @@ -40,7 +41,7 @@ fn test_storage(first_tx_calldata: Vec, second_tx_calldata: Vec) -> u32 let tx2 = account.get_l2_tx_for_execute( Execute { - contract_address: test_contract_address, + contract_address: Some(test_contract_address), calldata: second_tx_calldata, value: 0.into(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs index 781069ddf49..212e569d510 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs @@ -1,5 +1,5 @@ pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{default_l1_batch, get_empty_storage, VmTester, VmTesterBuilder}; +pub(crate) use vm_tester::{get_empty_storage, VmTester, VmTesterBuilder}; pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; mod transaction_test_info; diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs index 38eaa31bac0..7e76c86b480 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs @@ -1,4 +1,7 @@ -use zksync_types::{ExecuteTransactionCommon, Nonce, Transaction, H160, U256}; +use std::fmt; + +use zksync_types::{ExecuteTransactionCommon, Transaction, H160, U256}; +use zksync_vm2::interface::{Event, StateInterface}; use super::VmTester; use crate::{ @@ -179,7 +182,7 @@ impl TransactionTestInfo { struct VmStateDump { state: S, storage_writes: Vec<((H160, U256), U256)>, - events: Box<[zksync_vm2::Event]>, + events: Box<[Event]>, } impl PartialEq for VmStateDump { @@ -194,14 +197,8 @@ impl Vm { fn dump_state(&self) -> VmStateDump { VmStateDump { state: self.inner.dump_state(), - storage_writes: self - .inner - .world_diff() - .get_storage_state() - .iter() - .map(|(k, v)| (*k, *v)) - .collect(), - events: self.inner.world_diff().events().into(), + storage_writes: self.inner.get_storage_state().collect(), + events: self.inner.events().collect(), } } } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs index 259db0a11b5..dd82b73839b 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs @@ -3,13 +3,8 @@ use std::{cell::RefCell, rc::Rc}; use zksync_contracts::BaseSystemContracts; use zksync_test_account::{Account, TxType}; use zksync_types::{ - block::L2BlockHasher, - 
fee_model::BatchFeeInput, - get_code_key, get_is_account_key, - helpers::unix_timestamp_ms, - utils::{deployed_address_create, storage_key_for_eth_balance}, - AccountTreeId, Address, L1BatchNumber, L2BlockNumber, L2ChainId, Nonce, ProtocolVersionId, - StorageKey, U256, + block::L2BlockHasher, utils::deployed_address_create, AccountTreeId, Address, L1BatchNumber, + L2BlockNumber, Nonce, StorageKey, }; use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; use zksync_vm2::WorldDiff; @@ -20,8 +15,11 @@ use crate::{ L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, }, - versions::vm_fast::{tests::utils::read_test_contract, vm::Vm}, - vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, utils::l2_blocks::load_last_l2_block}, + versions::{ + testonly::{default_l1_batch, default_system_env, make_account_rich, ContractToDeploy}, + vm_fast::{tests::utils::read_test_contract, vm::Vm}, + }, + vm_latest::utils::l2_blocks::load_last_l2_block, }; pub(crate) struct VmTester { @@ -31,7 +29,7 @@ pub(crate) struct VmTester { pub(crate) test_contract: Option
<Address>
, pub(crate) fee_account: Address, pub(crate) rich_accounts: Vec, - pub(crate) custom_contracts: Vec, + pub(crate) custom_contracts: Vec, } impl VmTester { @@ -63,10 +61,10 @@ impl VmTester { pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { for account in self.rich_accounts.iter_mut() { account.nonce = Nonce(0); - make_account_rich(self.storage.clone(), account); + make_account_rich(&mut self.storage.borrow_mut(), account); } if let Some(deployer) = &self.deployer { - make_account_rich(self.storage.clone(), deployer); + make_account_rich(&mut self.storage.borrow_mut(), deployer); } if !self.custom_contracts.is_empty() { @@ -99,7 +97,7 @@ impl VmTester { }; } - let vm = Vm::new(l1_batch, self.vm.system_env.clone(), storage); + let vm = Vm::custom(l1_batch, self.vm.system_env.clone(), storage); if self.test_contract.is_some() { self.deploy_test_contract(); @@ -108,15 +106,13 @@ impl VmTester { } } -pub(crate) type ContractsToDeploy = (Vec, Address, bool); - pub(crate) struct VmTesterBuilder { storage: Option, l1_batch_env: Option, system_env: SystemEnv, deployer: Option, rich_accounts: Vec, - custom_contracts: Vec, + custom_contracts: Vec, } impl Clone for VmTesterBuilder { @@ -132,22 +128,12 @@ impl Clone for VmTesterBuilder { } } -#[allow(dead_code)] impl VmTesterBuilder { pub(crate) fn new() -> Self { Self { storage: None, l1_batch_env: None, - system_env: SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, - chain_id: L2ChainId::from(270), - pubdata_params: Default::default(), - }, + system_env: default_system_env(), deployer: None, rich_accounts: vec![], custom_contracts: vec![], @@ -159,11 +145,6 @@ impl VmTesterBuilder { self } - pub(crate) fn with_system_env(mut self, system_env: SystemEnv) -> Self { - self.system_env = system_env; - self - } - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { self.storage = Some(storage); self @@ -211,7 +192,7 @@ impl VmTesterBuilder { self } - pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { + pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { self.custom_contracts = contracts; self } @@ -222,17 +203,17 @@ impl VmTesterBuilder { .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - insert_contracts(&mut raw_storage, &self.custom_contracts); + ContractToDeploy::insert_all(&self.custom_contracts, &mut raw_storage); let storage_ptr = Rc::new(RefCell::new(raw_storage)); for account in self.rich_accounts.iter() { - make_account_rich(storage_ptr.clone(), account); + make_account_rich(&mut storage_ptr.borrow_mut(), account); } if let Some(deployer) = &self.deployer { - make_account_rich(storage_ptr.clone(), deployer); + make_account_rich(&mut storage_ptr.borrow_mut(), deployer); } let fee_account = l1_batch_env.fee_account; - let vm = Vm::new(l1_batch_env, self.system_env, storage_ptr.clone()); + let vm = Vm::custom(l1_batch_env, self.system_env, storage_ptr.clone()); VmTester { vm, @@ -246,53 +227,6 @@ impl VmTesterBuilder { } } -pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - fee_input: 
BatchFeeInput::l1_pegged( - 50_000_000_000, // 50 gwei - 250_000_000, // 0.25 gwei - ), - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(crate) fn make_account_rich(storage: StoragePtr, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage - .as_ref() - .borrow_mut() - .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); -} - pub(crate) fn get_empty_storage() -> InMemoryStorage { InMemoryStorage::with_system_contracts(hash_bytecode) } - -// Inserts the contracts into the test environment, bypassing the -// deployer system contract. Besides the reference to storage -// it accepts a `contracts` tuple of information about the contract -// and whether or not it is an account. -fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { - for (contract, address, is_account) in contracts { - let deployer_code_key = get_code_key(address); - raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); - - if *is_account { - let is_account_key = get_is_account_key(address); - raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - - raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); - } -} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs index 75144839006..89f0fa23620 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs @@ -2,6 +2,7 @@ use zksync_types::{Execute, H160}; use crate::{ interface::{TxExecutionMode, TxRevertReason, VmRevertReason}, + versions::testonly::ContractToDeploy, vm_fast::tests::{ tester::{ExpectedError, TransactionTestInfo, VmTesterBuilder}, utils::{get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS}, @@ -14,7 +15,10 @@ fn test_tracing_of_execution_errors() { let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) + .with_custom_contracts(vec![ContractToDeploy::new( + read_error_contract(), + contract_address, + )]) .with_execution_mode(TxExecutionMode::VerifyExecute) .with_deployer() .with_random_rich_accounts(1) @@ -24,7 +28,7 @@ fn test_tracing_of_execution_errors() { let tx = account.get_l2_tx_for_execute( Execute { - contract_address, + contract_address: Some(contract_address), calldata: get_execute_error_calldata(), value: Default::default(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs b/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs index 57877854031..ef510546f11 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs @@ -6,6 +6,7 @@ use zksync_utils::u256_to_h256; use crate::{ interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, + versions::testonly::ContractToDeploy, vm_fast::tests::{ tester::{get_empty_storage, VmTesterBuilder}, utils::get_balance, @@ -21,7 +22,7 @@ fn test_send_or_transfer(test_option: TestOptions) { let test_bytecode = read_bytecode( 
"etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", ); - let recipeint_bytecode = read_bytecode( + let recipient_bytecode = read_bytecode( "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/Recipient.json", ); let test_abi = load_contract( @@ -62,15 +63,15 @@ fn test_send_or_transfer(test_option: TestOptions) { .with_deployer() .with_random_rich_accounts(1) .with_custom_contracts(vec![ - (test_bytecode, test_contract_address, false), - (recipeint_bytecode, recipient_address, false), + ContractToDeploy::new(test_bytecode, test_contract_address), + ContractToDeploy::new(recipient_bytecode, recipient_address), ]) .build(); let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: test_contract_address, + contract_address: Some(test_contract_address), calldata, value: U256::zero(), factory_deps: vec![], @@ -110,7 +111,7 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { let test_bytecode = read_bytecode( "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", ); - let reentrant_recipeint_bytecode = read_bytecode( + let reentrant_recipient_bytecode = read_bytecode( "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/ReentrantRecipient.json", ); let test_abi = load_contract( @@ -121,7 +122,7 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { ); let test_contract_address = Address::random(); - let reentrant_recipeint_address = Address::random(); + let reentrant_recipient_address = Address::random(); let (value, calldata) = match test_option { TestOptions::Send(value) => ( @@ -130,7 +131,7 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { .function("send") .unwrap() .encode_input(&[ - Token::Address(reentrant_recipeint_address), + Token::Address(reentrant_recipient_address), Token::Uint(value), ]) .unwrap(), @@ -141,7 +142,7 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { .function("transfer") .unwrap() .encode_input(&[ - Token::Address(reentrant_recipeint_address), + Token::Address(reentrant_recipient_address), Token::Uint(value), ]) .unwrap(), @@ -154,12 +155,8 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { .with_deployer() .with_random_rich_accounts(1) .with_custom_contracts(vec![ - (test_bytecode, test_contract_address, false), - ( - reentrant_recipeint_bytecode, - reentrant_recipeint_address, - false, - ), + ContractToDeploy::new(test_bytecode, test_contract_address), + ContractToDeploy::new(reentrant_recipient_bytecode, reentrant_recipient_address), ]) .build(); @@ -167,7 +164,7 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { let account = &mut vm.rich_accounts[0]; let tx1 = account.get_l2_tx_for_execute( Execute { - contract_address: reentrant_recipeint_address, + contract_address: Some(reentrant_recipient_address), calldata: reentrant_recipient_abi .function("setX") .unwrap() @@ -188,7 +185,7 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { let tx2 = account.get_l2_tx_for_execute( Execute { - contract_address: test_contract_address, + contract_address: Some(test_contract_address), calldata, value, factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs index dd25c209740..ba4863f7c45 100644 --- 
a/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs @@ -265,7 +265,7 @@ fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { .expect("failed to encode parameters"); let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata, factory_deps: vec![], value: U256::zero(), @@ -315,7 +315,7 @@ fn get_complex_upgrade_tx( .unwrap(); let execute = Execute { - contract_address: COMPLEX_UPGRADER_ADDRESS, + contract_address: Some(COMPLEX_UPGRADER_ADDRESS), calldata: complex_upgrader_calldata, factory_deps: vec![], value: U256::zero(), diff --git a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs index d91e1307651..5ab5aa0dec9 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs @@ -10,7 +10,7 @@ use zksync_types::{ U256, }; use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; -use zksync_vm2::{HeapId, StateInterface}; +use zksync_vm2::interface::{HeapId, StateInterface}; use crate::interface::storage::ReadStorage; diff --git a/core/lib/multivm/src/versions/vm_fast/transaction_data.rs b/core/lib/multivm/src/versions/vm_fast/transaction_data.rs index 502be0dc22c..2ec86eb3cea 100644 --- a/core/lib/multivm/src/versions/vm_fast/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_fast/transaction_data.rs @@ -22,7 +22,7 @@ use crate::vm_latest::{ pub(crate) struct TransactionData { pub(crate) tx_type: u8, pub(crate) from: Address, - pub(crate) to: Address, + pub(crate) to: Option
<Address>
, pub(crate) gas_limit: U256, pub(crate) pubdata_price_limit: U256, pub(crate) max_fee_per_gas: U256, @@ -169,7 +169,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -305,7 +305,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/versions/vm_fast/vm.rs b/core/lib/multivm/src/versions/vm_fast/vm.rs index 1c1da44c9e6..778662f6558 100644 --- a/core/lib/multivm/src/versions/vm_fast/vm.rs +++ b/core/lib/multivm/src/versions/vm_fast/vm.rs @@ -11,13 +11,13 @@ use zksync_types::{ BYTES_PER_ENUMERATION_INDEX, }, AccountTreeId, StorageKey, StorageLog, StorageLogKind, StorageLogWithPreviousValue, - BOOTLOADER_ADDRESS, H160, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, + BOOTLOADER_ADDRESS, H160, H256, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, L2_BASE_TOKEN_ADDRESS, U256, }; use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; use zksync_vm2::{ - decode::decode_program, CallframeInterface, ExecutionEnd, FatPointer, HeapId, Program, - Settings, StateInterface, Tracer, VirtualMachine, + interface::{CallframeInterface, HeapId, StateInterface, Tracer}, + ExecutionEnd, FatPointer, Program, Settings, VirtualMachine, }; use super::{ @@ -31,11 +31,12 @@ use super::{ use crate::{ glue::GlueInto, interface::{ - storage::ReadStorage, BytecodeCompressionError, BytecodeCompressionResult, - CurrentExecutionState, ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, - Refunds, SystemEnv, TxRevertReason, VmEvent, VmExecutionLogs, VmExecutionMode, - VmExecutionResultAndLogs, VmExecutionStatistics, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, VmRevertReason, + storage::{ImmutableStorageView, ReadStorage, StoragePtr, StorageView}, + BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, + ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, Refunds, SystemEnv, + TxRevertReason, VmEvent, VmExecutionLogs, VmExecutionMode, VmExecutionResultAndLogs, + VmExecutionStatistics, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, + VmRevertReason, VmTrackingContracts, }, utils::events::extract_l2tol1logs_from_l1_messenger, vm_fast::{ @@ -68,6 +69,65 @@ pub struct Vm { } impl Vm { + pub fn custom(batch_env: L1BatchEnv, system_env: SystemEnv, storage: S) -> Self { + let default_aa_code_hash = system_env + .base_system_smart_contracts + .default_aa + .hash + .into(); + + let program_cache = HashMap::from([World::convert_system_contract_code( + &system_env.base_system_smart_contracts.default_aa, + false, + )]); + + let (_, bootloader) = World::convert_system_contract_code( + &system_env.base_system_smart_contracts.bootloader, + true, + ); + let bootloader_memory = bootloader_initial_memory(&batch_env); + + let mut inner = VirtualMachine::new( + BOOTLOADER_ADDRESS, + bootloader, + H160::zero(), + &[], + system_env.bootloader_gas_limit, + Settings { + default_aa_code_hash, + // this will change after 1.5 + evm_interpreter_code_hash: default_aa_code_hash, + hook_address: get_vm_hook_position(VM_VERSION) * 32, + }, + ); + + 
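// Notes on the `Settings` above (the body of `custom` is the old `Vm::new`,
// moved verbatim further down in this diff): the EVM interpreter slot reuses the
// default AA code hash until 1.5 ships a real interpreter, and
// `get_vm_hook_position(VM_VERSION) * 32` apparently converts the hook slot from
// a word index into a byte offset (the VM uses 32-byte words).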
inner.current_frame().set_stack_pointer(0); + // The bootloader writes results to high addresses in its heap, so it makes sense to preallocate it. + inner.current_frame().set_heap_bound(u32::MAX); + inner.current_frame().set_aux_heap_bound(u32::MAX); + inner + .current_frame() + .set_exception_handler(INITIAL_FRAME_FORMAL_EH_LOCATION); + + let mut this = Self { + world: World::new(storage, program_cache), + inner, + gas_for_account_validation: system_env.default_validation_computational_gas_limit, + bootloader_state: BootloaderState::new( + system_env.execution_mode, + bootloader_memory.clone(), + batch_env.first_l2_block, + ), + system_env, + batch_env, + snapshot: None, + #[cfg(test)] + enforced_state_diffs: None, + }; + this.write_to_bootloader_heap(bootloader_memory); + this + } + fn run( &mut self, execution_mode: VmExecutionMode, @@ -79,7 +139,7 @@ impl Vm { operator_suggested_refund: 0, }; let mut last_tx_result = None; - let mut pubdata_before = self.inner.world_diff().pubdata() as u32; + let mut pubdata_before = self.inner.pubdata() as u32; let result = loop { let hook = match self.inner.run(&mut self.world, tracer) { @@ -125,7 +185,7 @@ impl Vm { ) .as_u64(); - let pubdata_published = self.inner.world_diff().pubdata() as u32; + let pubdata_published = self.inner.pubdata() as u32; refunds.operator_suggested_refund = compute_refund( &self.batch_env, @@ -186,8 +246,7 @@ impl Vm { unreachable!("We do not provide the pubdata when executing the block tip or a single transaction"); } - let events = - merge_events(self.inner.world_diff().events(), self.batch_env.number); + let events = merge_events(self.inner.events(), self.batch_env.number); let published_bytecodes = events .iter() @@ -395,74 +454,12 @@ impl Vm { pub(super) fn gas_remaining(&mut self) -> u32 { self.inner.current_frame().gas() } -} - -// We don't implement `VmFactory` trait because, unlike old VMs, the new VM doesn't require storage to be writable; -// it maintains its own storage cache and a write buffer. -impl Vm { - pub fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: S) -> Self { - let default_aa_code_hash = system_env - .base_system_smart_contracts - .default_aa - .hash - .into(); - - let program_cache = HashMap::from([World::convert_system_contract_code( - &system_env.base_system_smart_contracts.default_aa, - false, - )]); - - let (_, bootloader) = World::convert_system_contract_code( - &system_env.base_system_smart_contracts.bootloader, - true, - ); - let bootloader_memory = bootloader_initial_memory(&batch_env); - - let mut inner = VirtualMachine::new( - BOOTLOADER_ADDRESS, - bootloader, - H160::zero(), - vec![], - system_env.bootloader_gas_limit, - Settings { - default_aa_code_hash, - // this will change after 1.5 - evm_interpreter_code_hash: default_aa_code_hash, - hook_address: get_vm_hook_position(VM_VERSION) * 32, - }, - ); - - inner.current_frame().set_stack_pointer(0); - // The bootloader writes results to high addresses in its heap, so it makes sense to preallocate it. 
- inner.current_frame().set_heap_bound(u32::MAX); - inner.current_frame().set_aux_heap_bound(u32::MAX); - inner - .current_frame() - .set_exception_handler(INITIAL_FRAME_FORMAL_EH_LOCATION); - - let mut this = Self { - world: World::new(storage, program_cache), - inner, - gas_for_account_validation: system_env.default_validation_computational_gas_limit, - bootloader_state: BootloaderState::new( - system_env.execution_mode, - bootloader_memory.clone(), - batch_env.first_l2_block, - ), - system_env, - batch_env, - snapshot: None, - #[cfg(test)] - enforced_state_diffs: None, - }; - this.write_to_bootloader_heap(bootloader_memory); - this - } // visible for testing pub(super) fn get_current_execution_state(&self) -> CurrentExecutionState { let world_diff = self.inner.world_diff(); - let events = merge_events(world_diff.events(), self.batch_env.number); + let vm = &self.inner; + let events = merge_events(vm.events(), self.batch_env.number); let user_l2_to_l1_logs = extract_l2tol1logs_from_l1_messenger(&events) .into_iter() @@ -481,21 +478,22 @@ impl Vm { }) .collect(), used_contract_hashes: self.decommitted_hashes().collect(), - system_logs: world_diff - .l2_to_l1_logs() - .iter() - .map(|x| x.glue_into()) - .collect(), + system_logs: vm.l2_to_l1_logs().map(GlueInto::glue_into).collect(), user_l2_to_l1_logs, storage_refunds: world_diff.storage_refunds().to_vec(), pubdata_costs: world_diff.pubdata_costs().to_vec(), } } +} - fn delete_history_if_appropriate(&mut self) { - if self.snapshot.is_none() && !self.has_previous_far_calls() { - self.inner.delete_history(); - } +impl VmFactory> for Vm> { + fn new( + batch_env: L1BatchEnv, + system_env: SystemEnv, + storage: StoragePtr>, + ) -> Self { + let storage = ImmutableStorageView::new(storage); + Self::custom(batch_env, system_env, storage) } } @@ -520,7 +518,7 @@ impl VmInterface for Vm { let mut tracer = CircuitsTracer::default(); let start = self.inner.world_diff().snapshot(); - let pubdata_before = self.inner.world_diff().pubdata(); + let pubdata_before = self.inner.pubdata(); let gas_before = self.gas_remaining(); let (result, refunds) = self.run(execution_mode, &mut tracer, track_refunds); @@ -550,7 +548,7 @@ impl VmInterface for Vm { }) .collect(); let events = merge_events( - self.inner.world_diff().events_after(&start), + self.inner.world_diff().events_after(&start).iter().copied(), self.batch_env.number, ); let user_l2_to_l1_logs = extract_l2tol1logs_from_l1_messenger(&events) @@ -563,7 +561,7 @@ impl VmInterface for Vm { .world_diff() .l2_to_l1_logs_after(&start) .iter() - .map(|x| x.glue_into()) + .map(|&log| log.glue_into()) .collect(); VmExecutionLogs { storage_logs, @@ -574,7 +572,7 @@ impl VmInterface for Vm { } }; - let pubdata_after = self.inner.world_diff().pubdata(); + let pubdata_after = self.inner.pubdata(); let circuit_statistic = tracer.circuit_statistic(); let gas_remaining = self.gas_remaining(); VmExecutionResultAndLogs { @@ -649,7 +647,6 @@ impl VmInterface for Vm { #[derive(Debug)] struct VmSnapshot { - vm_snapshot: zksync_vm2::Snapshot, bootloader_snapshot: BootloaderStateSnapshot, gas_for_account_validation: u32, } @@ -661,9 +658,8 @@ impl VmInterfaceHistoryEnabled for Vm { "cannot create a VM snapshot until a previous snapshot is rolled back to or popped" ); - self.delete_history_if_appropriate(); + self.inner.make_snapshot(); self.snapshot = Some(VmSnapshot { - vm_snapshot: self.inner.snapshot(), bootloader_snapshot: self.bootloader_state.get_snapshot(), gas_for_account_validation: self.gas_for_account_validation, 
}); @@ -671,21 +667,24 @@ impl VmInterfaceHistoryEnabled for Vm { fn rollback_to_the_latest_snapshot(&mut self) { let VmSnapshot { - vm_snapshot, bootloader_snapshot, gas_for_account_validation, } = self.snapshot.take().expect("no snapshots to rollback to"); - self.inner.rollback(vm_snapshot); + self.inner.rollback(); self.bootloader_state.apply_snapshot(bootloader_snapshot); self.gas_for_account_validation = gas_for_account_validation; - - self.delete_history_if_appropriate(); } fn pop_snapshot_no_rollback(&mut self) { + self.inner.pop_snapshot(); self.snapshot = None; - self.delete_history_if_appropriate(); + } +} + +impl VmTrackingContracts for Vm { + fn used_contract_hashes(&self) -> Vec { + self.decommitted_hashes().map(u256_to_h256).collect() } } @@ -722,39 +721,13 @@ impl World { } } - fn bytecode_to_program(bytecode: &[u8]) -> Program { - Program::new( - decode_program( - &bytecode - .chunks_exact(8) - .map(|chunk| u64::from_be_bytes(chunk.try_into().unwrap())) - .collect::>(), - false, - ), - bytecode - .chunks_exact(32) - .map(U256::from_big_endian) - .collect::>(), - ) - } - fn convert_system_contract_code( code: &SystemContractCode, is_bootloader: bool, ) -> (U256, Program) { ( h256_to_u256(code.hash), - Program::new( - decode_program( - &code - .code - .iter() - .flat_map(|x| x.0.into_iter().rev()) - .collect::>(), - is_bootloader, - ), - code.code.clone(), - ), + Program::from_words(code.code.clone(), is_bootloader), ) } } @@ -809,11 +782,12 @@ impl zksync_vm2::World for World { self.program_cache .entry(hash) .or_insert_with(|| { - Self::bytecode_to_program(self.bytecode_cache.entry(hash).or_insert_with(|| { + let bytecode = self.bytecode_cache.entry(hash).or_insert_with(|| { self.storage .load_factory_dep(u256_to_h256(hash)) .expect("vm tried to decommit nonexistent bytecode") - })) + }); + Program::new(bytecode, false) }) .clone() } diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs b/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs index 66fc1a8bfd7..1c62a3962ca 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs @@ -62,6 +62,7 @@ impl Vm { self.batch_env.clone(), execution_mode, self.subversion, + self.system_env.version, )) }), self.subversion, diff --git a/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs index bed348afd2d..f11c22133e3 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs @@ -9,7 +9,8 @@ use zksync_system_constants::{ }; use zksync_types::{ commitment::SerializeCommitment, fee_model::BatchFeeInput, get_code_key, - l2_to_l1_log::L2ToL1Log, writes::StateDiffRecord, Address, Execute, H256, U256, + l2_to_l1_log::L2ToL1Log, writes::StateDiffRecord, Address, Execute, ProtocolVersionId, H256, + U256, }; use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; @@ -164,7 +165,7 @@ fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { for (i, data) in txs_data.into_iter().enumerate() { let tx = account.get_l2_tx_for_execute( Execute { - contract_address: CONTRACT_FORCE_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_FORCE_DEPLOYER_ADDRESS), calldata: data, value: U256::zero(), factory_deps: vec![], @@ -193,6 +194,7 @@ fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { VmExecutionMode::Batch, 
test_data.state_diffs.clone(), crate::vm_latest::MultiVMSubversion::latest(), + ProtocolVersionId::Version25, ); let result = vm.vm.inspect_inner( diff --git a/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs index a4d0eb2d17e..df7a7885542 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs @@ -34,7 +34,7 @@ fn test_max_depth() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: vec![], value: Default::default(), factory_deps: vec![], @@ -69,7 +69,7 @@ fn test_basic_behavior() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: hex::decode(increment_by_6_calldata).unwrap(), value: Default::default(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs b/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs index 02ec2dc58aa..35412ee4d1b 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs @@ -22,7 +22,7 @@ fn test_circuits() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: Address::random(), + contract_address: Some(Address::random()), calldata: Vec::new(), value: U256::from(1u8), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs b/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs index 0708d67e27a..b15ef7fde2b 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs @@ -69,7 +69,7 @@ fn test_code_oracle() { // Firstly, let's ensure that the contract works. let tx1 = account.get_l2_tx_for_execute( Execute { - contract_address: precompiles_contract_address, + contract_address: Some(precompiles_contract_address), calldata: call_code_oracle_function .encode_input(&[ Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), @@ -93,7 +93,7 @@ fn test_code_oracle() { // the decommitted bytecode gets erased (it shouldn't). let tx2 = account.get_l2_tx_for_execute( Execute { - contract_address: precompiles_contract_address, + contract_address: Some(precompiles_contract_address), calldata: call_code_oracle_function .encode_input(&[ Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), @@ -169,7 +169,7 @@ fn test_code_oracle_big_bytecode() { // Firstly, let's ensure that the contract works. 
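// Aside: wherever `to`/`contract_address` became `Option<Address>` in this diff,
// the ABI encoding falls back to the zero address. A minimal standalone
// illustration of the pattern (hypothetical helper, mirroring
// `Token::Address(self.to.unwrap_or_default())` from the encoding hunks above):
fn encode_to_field(to: Option<Address>) -> Token {
    // `None` — a transaction with no recipient — encodes as `Address::default()`,
    // i.e. the all-zero address.
    Token::Address(to.unwrap_or_default())
}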
let tx1 = account.get_l2_tx_for_execute( Execute { - contract_address: precompiles_contract_address, + contract_address: Some(precompiles_contract_address), calldata: call_code_oracle_function .encode_input(&[ Token::FixedBytes(big_zkevm_bytecode_hash.0.to_vec()), @@ -251,7 +251,7 @@ fn refunds_in_code_oracle() { let tx = account.get_l2_tx_for_execute( Execute { - contract_address: precompiles_contract_address, + contract_address: Some(precompiles_contract_address), calldata: call_code_oracle_function .encode_input(&[ Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), diff --git a/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs index 34e1e2d25f3..cc9aac5bb91 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs @@ -21,7 +21,10 @@ fn test_tx_gas_limit_offset() { let gas_limit = 9999.into(); let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute::default(), + Execute { + contract_address: Some(Default::default()), + ..Default::default() + }, Some(Fee { gas_limit, ..Account::default_fee() diff --git a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs index a42037a7f5b..ef19717a627 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs @@ -82,7 +82,7 @@ fn test_get_used_contracts() { let account2 = Account::random(); let tx2 = account2.get_l1_tx( Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata: big_calldata, value: Default::default(), factory_deps: vec![vec![1; 32]], @@ -208,7 +208,7 @@ fn execute_proxy_counter(gas: u32) -> (VmTester, U256, VmExecut let increment = proxy_counter_abi.function("increment").unwrap(); let increment_tx = account.get_l2_tx_for_execute( Execute { - contract_address: deploy_tx.address, + contract_address: Some(deploy_tx.address), calldata: increment .encode_input(&[Token::Uint(1.into()), Token::Uint(gas.into())]) .unwrap(), diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l1_messenger.rs b/core/lib/multivm/src/versions/vm_latest/tests/l1_messenger.rs index 7f79f46dbe8..f8e4934050b 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l1_messenger.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l1_messenger.rs @@ -99,7 +99,7 @@ fn test_publish_and_clear_state() { let tx = account.get_l2_tx_for_execute( Execute { - contract_address: L1_MESSENGER_ADDRESS, + contract_address: Some(L1_MESSENGER_ADDRESS), calldata: encoded_data, value: U256::zero(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs index 2852bebb6d3..b424567aab0 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs @@ -172,7 +172,7 @@ fn test_l1_tx_execution_high_gas_limit() { let mut tx = account.get_l1_tx( Execute { - contract_address: L1_MESSENGER_ADDRESS, + contract_address: Some(L1_MESSENGER_ADDRESS), value: 0.into(), factory_deps: vec![], calldata, diff --git a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs index 97fa8599bbb..15c678ba953 100644 --- 
a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs @@ -66,7 +66,7 @@ fn test_nonce_holder() { let mut transaction_data: TransactionData = account .get_l2_tx_for_execute_with_nonce( Execute { - contract_address: account.address, + contract_address: Some(account.address), calldata: vec![12], value: Default::default(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs index 2ab40faf22c..9388d016184 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs @@ -31,7 +31,7 @@ fn test_keccak() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: hex::decode(keccak1000_calldata).unwrap(), value: Default::default(), factory_deps: vec![], @@ -75,7 +75,7 @@ fn test_sha256() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: hex::decode(sha1000_calldata).unwrap(), value: Default::default(), factory_deps: vec![], @@ -112,7 +112,7 @@ fn test_ecrecover() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: account.address, + contract_address: Some(account.address), calldata: Vec::new(), value: Default::default(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs index eb3104fd637..8bf5e991988 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs @@ -88,7 +88,7 @@ fn test_prestate_tracer_diff_mode() { //enter ether to contract to see difference in the balance post execution let tx0 = Execute { - contract_address: vm.test_contract.unwrap(), + contract_address: Some(vm.test_contract.unwrap()), calldata: Default::default(), value: U256::from(100000), factory_deps: vec![], @@ -98,7 +98,7 @@ fn test_prestate_tracer_diff_mode() { .push_transaction(account.get_l2_tx_for_execute(tx0.clone(), None)); let tx1 = Execute { - contract_address: deployed_address2, + contract_address: Some(deployed_address2), calldata: Default::default(), value: U256::from(200000), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs b/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs index 1cc950b3704..e494a45f35b 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs @@ -194,7 +194,7 @@ fn negative_pubdata_for_transaction() { let expensive_tx = vm.rich_accounts[0].get_l2_tx_for_execute( Execute { - contract_address: expensive_contract_address, + contract_address: Some(expensive_contract_address), calldata: expensive_function .encode_input(&[Token::Uint(10.into())]) .unwrap(), @@ -213,7 +213,7 @@ fn negative_pubdata_for_transaction() { // This transaction cleans all initial writes in the contract, thus having negative `pubdata` impact. 
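// With those initial writes reverted, fewer state diffs remain to be published, so the transaction is charged less for pubdata than was reserved up front, which shows up as a larger refund.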
let clean_up_tx = vm.rich_accounts[0].get_l2_tx_for_execute( Execute { - contract_address: expensive_contract_address, + contract_address: Some(expensive_contract_address), calldata: cleanup_function.encode_input(&[]).unwrap(), value: U256::zero(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs index 779e9b5c629..cdd71354c8d 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs @@ -63,7 +63,7 @@ async fn test_require_eip712() { let tx = private_account.get_l2_tx_for_execute( Execute { - contract_address: account_abstraction.address, + contract_address: Some(account_abstraction.address), calldata: encoded_input, value: Default::default(), factory_deps: vec![], @@ -120,7 +120,7 @@ async fn test_require_eip712() { // // Now send the 'classic' EIP712 transaction let tx_712 = L2Tx::new( - beneficiary.address, + Some(beneficiary.address), vec![], Nonce(1), Fee { diff --git a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs index 5346a8c91b0..5b150366ed2 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs @@ -105,41 +105,41 @@ use crate::{ // TxType::L2, // ); -// let loadnext_tx_1 = account.get_l2_tx_for_execute( -// Execute { -// contract_address: address, -// calldata: LoadnextContractExecutionParams { -// reads: 100, -// writes: 100, -// events: 100, -// hashes: 500, -// recursive_calls: 10, -// deploys: 60, -// } -// .to_bytes(), -// value: Default::default(), -// factory_deps: vec![], -// }, -// None, -// ); - -// let loadnext_tx_2 = account.get_l2_tx_for_execute( -// Execute { -// contract_address: address, -// calldata: LoadnextContractExecutionParams { -// reads: 100, -// writes: 100, -// events: 100, -// hashes: 500, -// recursive_calls: 10, -// deploys: 60, -// } -// .to_bytes(), -// value: Default::default(), -// factory_deps: vec![], -// }, -// None, -// ); +// let loadnext_tx_1 = account.get_l2_tx_for_execute( +// Execute { +// contract_address: Some(address), +// calldata: LoadnextContractExecutionParams { +// reads: 100, +// writes: 100, +// events: 100, +// hashes: 500, +// recursive_calls: 10, +// deploys: 60, +// } +// .to_bytes(), +// value: Default::default(), +// factory_deps: vec![], +// }, +// None, +// ); + +// let loadnext_tx_2 = account.get_l2_tx_for_execute( +// Execute { +// contract_address: Some(address), +// calldata: LoadnextContractExecutionParams { +// reads: 100, +// writes: 100, +// events: 100, +// hashes: 500, +// recursive_calls: 10, +// deploys: 60, +// } +// .to_bytes(), +// value: Default::default(), +// factory_deps: vec![], +// }, +// None, +// ); // // let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ // // TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), diff --git a/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs b/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs index 6cc731a1387..93be9506a3b 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs @@ -48,7 +48,7 @@ fn test_sekp256r1() { let tx = account.get_l2_tx_for_execute( Execute { - contract_address: P256VERIFY_PRECOMPILE_ADDRESS, + contract_address: Some(P256VERIFY_PRECOMPILE_ADDRESS), calldata: 
[digest, encoded_r, encoded_s, x, y].concat(), value: U256::zero(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/storage.rs b/core/lib/multivm/src/versions/vm_latest/tests/storage.rs index 0fe0b0220fa..126d174a646 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/storage.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/storage.rs @@ -50,7 +50,7 @@ fn test_storage(txs: Vec) -> u32 { let tx = account.get_l2_tx_for_execute( Execute { - contract_address: test_contract_address, + contract_address: Some(test_contract_address), calldata, value: 0.into(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs index 58c5ef77dc4..2db37881352 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs @@ -27,7 +27,7 @@ fn test_tracing_of_execution_errors() { let tx = account.get_l2_tx_for_execute( Execute { - contract_address, + contract_address: Some(contract_address), calldata: get_execute_error_calldata(), value: Default::default(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs b/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs index 31f6c3291ef..2c380623636 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs @@ -73,7 +73,7 @@ fn test_send_or_transfer(test_option: TestOptions) { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: test_contract_address, + contract_address: Some(test_contract_address), calldata, value: U256::zero(), factory_deps: vec![], @@ -169,7 +169,7 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { let account = &mut vm.rich_accounts[0]; let tx1 = account.get_l2_tx_for_execute( Execute { - contract_address: reentrant_recipeint_address, + contract_address: Some(reentrant_recipeint_address), calldata: reentrant_recipient_abi .function("setX") .unwrap() @@ -190,7 +190,7 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { let tx2 = account.get_l2_tx_for_execute( Execute { - contract_address: test_contract_address, + contract_address: Some(test_contract_address), calldata, value, factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs index 7c3ebff4a77..d85a504de40 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs @@ -276,7 +276,7 @@ fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { .expect("failed to encode parameters"); let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata, factory_deps: vec![], value: U256::zero(), @@ -326,7 +326,7 @@ fn get_complex_upgrade_tx( .unwrap(); let execute = Execute { - contract_address: COMPLEX_UPGRADER_ADDRESS, + contract_address: Some(COMPLEX_UPGRADER_ADDRESS), calldata: complex_upgrader_calldata, factory_deps: vec![], value: U256::zero(), diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs index 030c4362615..1701c501cd3 100644 --- 
a/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs @@ -6,7 +6,8 @@ use zk_evm_1_5_0::{ tracing::{BeforeExecutionData, VmLocalStateData}, }; use zksync_types::{ - writes::StateDiffRecord, AccountTreeId, StorageKey, L1_MESSENGER_ADDRESS, U256, + l2_to_l1_log::l2_to_l1_logs_tree_size, writes::StateDiffRecord, AccountTreeId, + ProtocolVersionId, StorageKey, L1_MESSENGER_ADDRESS, }; use zksync_utils::{h256_to_u256, u256_to_bytes_be, u256_to_h256}; @@ -43,6 +44,7 @@ pub(crate) struct PubdataTracer { // to the L1Messenger. enforced_state_diffs: Option>, subversion: MultiVMSubversion, + protocol_version: ProtocolVersionId, _phantom_data: PhantomData, } @@ -51,6 +53,7 @@ impl PubdataTracer { l1_batch_env: L1BatchEnv, execution_mode: VmExecutionMode, subversion: MultiVMSubversion, + protocol_version: ProtocolVersionId, ) -> Self { Self { l1_batch_env, @@ -58,6 +61,7 @@ impl PubdataTracer { execution_mode, enforced_state_diffs: None, subversion, + protocol_version, _phantom_data: Default::default(), } } @@ -70,6 +74,7 @@ impl PubdataTracer { execution_mode: VmExecutionMode, forced_state_diffs: Vec, subversion: MultiVMSubversion, + protocol_version: ProtocolVersionId, ) -> Self { Self { l1_batch_env, @@ -77,6 +82,7 @@ impl PubdataTracer { execution_mode, enforced_state_diffs: Some(forced_state_diffs), subversion, + protocol_version, _phantom_data: Default::default(), } } @@ -160,7 +166,7 @@ impl PubdataTracer { .into_iter() .filter(|log| log.rw_flag) .filter(|log| log.read_value != log.written_value) - .filter(|log| log.address != L1_MESSENGER_ADDRESS || log.key == U256::from(4u32)) + .filter(|log| log.address != L1_MESSENGER_ADDRESS) .map(|log| StateDiffRecord { address: log.address, key: log.key, @@ -186,6 +192,7 @@ impl PubdataTracer { l2_to_l1_messages: self.get_total_l1_messenger_messages(state), published_bytecodes: self.get_total_published_bytecodes(state), state_diffs: self.get_state_diffs(&state.storage), + l2_to_l1_logs_tree_size: l2_to_l1_logs_tree_size(self.protocol_version), } } } diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/pubdata.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/pubdata.rs index 42f1f7af309..fbd01f2b7a7 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/pubdata.rs +++ b/core/lib/multivm/src/versions/vm_latest/types/internals/pubdata.rs @@ -3,10 +3,8 @@ use zksync_contracts::load_sys_contract_interface; use zksync_mini_merkle_tree::MiniMerkleTree; use zksync_types::{ ethabi, - l2_to_l1_log::l2_to_l1_logs_tree_size, web3::keccak256, writes::{compress_state_diffs, StateDiffRecord}, - ProtocolVersionId, }; use zksync_utils::bytecode::hash_bytecode; @@ -18,6 +16,7 @@ pub(crate) struct PubdataInput { pub(crate) l2_to_l1_messages: Vec>, pub(crate) published_bytecodes: Vec>, pub(crate) state_diffs: Vec, + pub(crate) l2_to_l1_logs_tree_size: usize, } pub trait PubdataBuilder { @@ -59,11 +58,12 @@ impl PubdataBuilder for RollupPubdataBuilder { l2_to_l1_messages, published_bytecodes, state_diffs, + l2_to_l1_logs_tree_size, } = input; if l2_version { let chained_log_hash = build_chained_log_hash(user_logs.clone()); - let log_root_hash = build_logs_root(user_logs.clone()); + let log_root_hash = build_logs_root(user_logs.clone(), l2_to_l1_logs_tree_size); let chained_msg_hash = build_chained_message_hash(l2_to_l1_messages.clone()); let chained_bytecodes_hash = build_chained_bytecode_hash(published_bytecodes.clone()); @@ -128,8 +128,52 @@ impl 
ValidiumPubdataBuilder { } impl PubdataBuilder for ValidiumPubdataBuilder { - fn build_pubdata(&self, _: PubdataInput, _: bool) -> Vec<u8> { - todo!() + fn build_pubdata(&self, input: PubdataInput, l2_version: bool) -> Vec<u8> { + let mut l1_messenger_pubdata = vec![]; + let mut l2_da_header = vec![]; + + let PubdataInput { + user_logs, + l2_to_l1_messages, + published_bytecodes, + state_diffs, + l2_to_l1_logs_tree_size, + } = input; + + if l2_version { + let chained_log_hash = build_chained_log_hash(user_logs.clone()); + let log_root_hash = build_logs_root(user_logs.clone(), l2_to_l1_logs_tree_size); + let chained_msg_hash = build_chained_message_hash(l2_to_l1_messages.clone()); + let chained_bytecodes_hash = build_chained_bytecode_hash(published_bytecodes.clone()); + + l2_da_header.push(Token::FixedBytes(chained_log_hash)); + l2_da_header.push(Token::FixedBytes(log_root_hash)); + l2_da_header.push(Token::FixedBytes(chained_msg_hash)); + l2_da_header.push(Token::FixedBytes(chained_bytecodes_hash)); + } + + l1_messenger_pubdata.extend(encode_user_logs(user_logs)); + + if l2_version { + let func_selector = load_sys_contract_interface("IL2DAValidator") + .function("validatePubdata") + .expect("validatePubdata Function does not exist on IL2DAValidator") + .short_signature() + .to_vec(); + + l2_da_header.push(ethabi::Token::Bytes(l1_messenger_pubdata)); + + [func_selector, ethabi::encode(&l2_da_header)] + .concat() + .to_vec() + } else { + let state_diffs_packed = state_diffs + .into_iter() + .flat_map(|diff| diff.encode_padded()) + .collect::<Vec<u8>>(); + + keccak256(&state_diffs_packed).to_vec() + } } } @@ -146,20 +190,20 @@ fn build_chained_log_hash(user_logs: Vec<L1MessengerL2ToL1Log>) -> Vec<u8> { chained_log_hash } -fn build_logs_root(user_logs: Vec<L1MessengerL2ToL1Log>) -> Vec<u8> { +fn build_logs_root( + user_logs: Vec<L1MessengerL2ToL1Log>, + l2_to_l1_logs_tree_size: usize, +) -> Vec<u8> { let logs = user_logs.iter().map(|log| { let encoded = log.packed_encoding(); let mut slice = [0u8; 88]; slice.copy_from_slice(&encoded); slice }); - MiniMerkleTree::new( - logs, - Some(l2_to_l1_logs_tree_size(ProtocolVersionId::latest())), - ) - .merkle_root() - .as_bytes() - .to_vec() + MiniMerkleTree::new(logs, Some(l2_to_l1_logs_tree_size)) + .merkle_root() + .as_bytes() + .to_vec() } fn build_chained_message_hash(l2_to_l1_messages: Vec<Vec<u8>>) -> Vec<u8> { diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs index 502be0dc22c..2ec86eb3cea 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs @@ -22,7 +22,7 @@ pub(crate) struct TransactionData { pub(crate) tx_type: u8, pub(crate) from: Address, - pub(crate) to: Address, + pub(crate) to: Option<Address>
, pub(crate) gas_limit: U256, pub(crate) pubdata_price_limit: U256, pub(crate) max_fee_per_gas: U256, @@ -169,7 +169,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -305,7 +305,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/versions/vm_latest/vm.rs b/core/lib/multivm/src/versions/vm_latest/vm.rs index 8e9bfd9b13c..91af2f4b0b6 100644 --- a/core/lib/multivm/src/versions/vm_latest/vm.rs +++ b/core/lib/multivm/src/versions/vm_latest/vm.rs @@ -2,8 +2,9 @@ use circuit_sequencer_api_1_5_0::sort_storage_access::sort_storage_access_querie use zksync_types::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, vm::VmVersion, - Transaction, + Transaction, H256, }; +use zksync_utils::u256_to_h256; use crate::{ glue::GlueInto, @@ -12,7 +13,7 @@ use crate::{ BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, + VmMemoryMetrics, VmTrackingContracts, }, utils::events::extract_l2tol1logs_from_l1_messenger, vm_latest::{ @@ -235,3 +236,12 @@ impl VmInterfaceHistoryEnabled for Vm { self.snapshots.pop(); } } + +impl VmTrackingContracts for Vm { + fn used_contract_hashes(&self) -> Vec { + self.get_used_contracts() + .into_iter() + .map(u256_to_h256) + .collect() + } +} diff --git a/core/lib/multivm/src/versions/vm_m5/test_utils.rs b/core/lib/multivm/src/versions/vm_m5/test_utils.rs index 785eb49835f..d7c0dfb9f6d 100644 --- a/core/lib/multivm/src/versions/vm_m5/test_utils.rs +++ b/core/lib/multivm/src/versions/vm_m5/test_utils.rs @@ -151,7 +151,7 @@ pub fn get_create_execute(code: &[u8], calldata: &[u8]) -> Execute { .expect("failed to encode parameters"); Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata, factory_deps: vec![code.to_vec()], value: U256::zero(), diff --git a/core/lib/multivm/src/versions/vm_m5/transaction_data.rs b/core/lib/multivm/src/versions/vm_m5/transaction_data.rs index 7ef739fd5bf..b64e3f77018 100644 --- a/core/lib/multivm/src/versions/vm_m5/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_m5/transaction_data.rs @@ -22,7 +22,7 @@ const L1_TX_TYPE: u8 = 255; pub struct TransactionData { pub tx_type: u8, pub from: Address, - pub to: Address, + pub to: Option
<Address>
, pub gas_limit: U256, pub pubdata_price_limit: U256, pub max_fee_per_gas: U256, @@ -144,7 +144,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -479,7 +479,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/versions/vm_m6/test_utils.rs b/core/lib/multivm/src/versions/vm_m6/test_utils.rs index ecad7d911b4..4bd39bc56dd 100644 --- a/core/lib/multivm/src/versions/vm_m6/test_utils.rs +++ b/core/lib/multivm/src/versions/vm_m6/test_utils.rs @@ -151,7 +151,7 @@ pub fn get_create_execute(code: &[u8], calldata: &[u8]) -> Execute { .expect("failed to encode parameters"); Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata, factory_deps: vec![code.to_vec()], value: U256::zero(), diff --git a/core/lib/multivm/src/versions/vm_m6/transaction_data.rs b/core/lib/multivm/src/versions/vm_m6/transaction_data.rs index 99ce4671c29..a8f80ea3255 100644 --- a/core/lib/multivm/src/versions/vm_m6/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_m6/transaction_data.rs @@ -23,7 +23,7 @@ pub(crate) const L1_TX_TYPE: u8 = 255; pub struct TransactionData { pub tx_type: u8, pub from: Address, - pub to: Address, + pub to: Option
<Address>
, pub gas_limit: U256, pub pubdata_price_limit: U256, pub max_fee_per_gas: U256, @@ -171,7 +171,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -592,7 +592,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/gas_limit.rs index 0ea1669cf21..1ff6ce12557 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/gas_limit.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/gas_limit.rs @@ -21,10 +21,8 @@ fn test_tx_gas_limit_offset() { let gas_limit = 9999.into(); let tx = vm.rich_accounts[0].get_l2_tx_for_execute( Execute { - contract_address: Default::default(), - calldata: vec![], - value: Default::default(), - factory_deps: None, + contract_address: Some(Default::default()), + ..Default::default() }, Some(Fee { gas_limit, diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/transaction_data.rs index 205090ba633..22ab09296c9 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/transaction_data.rs @@ -22,7 +22,7 @@ use crate::vm_refunds_enhancement::{ pub(crate) struct TransactionData { pub(crate) tx_type: u8, pub(crate) from: Address, - pub(crate) to: Address, + pub(crate) to: Option
<Address>
, pub(crate) gas_limit: U256, pub(crate) pubdata_price_limit: U256, pub(crate) max_fee_per_gas: U256, @@ -169,7 +169,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -325,7 +325,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/gas_limit.rs index 01ebe4c0d22..e51b8cab570 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/gas_limit.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/gas_limit.rs @@ -21,10 +21,8 @@ fn test_tx_gas_limit_offset() { let gas_limit = 9999.into(); let tx = vm.rich_accounts[0].get_l2_tx_for_execute( Execute { - contract_address: Default::default(), - calldata: vec![], - value: Default::default(), - factory_deps: None, + contract_address: Some(Default::default()), + ..Default::default() }, Some(Fee { gas_limit, diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/transaction_data.rs index b42950399f6..c96004163a6 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/transaction_data.rs @@ -22,7 +22,7 @@ use crate::vm_virtual_blocks::{ pub(crate) struct TransactionData { pub(crate) tx_type: u8, pub(crate) from: Address, - pub(crate) to: Address, + pub(crate) to: Option
<Address>
, pub(crate) gas_limit: U256, pub(crate) pubdata_price_limit: U256, pub(crate) max_fee_per_gas: U256, @@ -169,7 +169,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -325,7 +325,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/vm_instance.rs b/core/lib/multivm/src/vm_instance.rs index ca591b98005..79dd42ca629 100644 --- a/core/lib/multivm/src/vm_instance.rs +++ b/core/lib/multivm/src/vm_instance.rs @@ -4,15 +4,19 @@ use crate::{ glue::history_mode::HistoryMode, interface::{ storage::{ImmutableStorageView, ReadStorage, StoragePtr, StorageView}, + utils::ShadowVm, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, tracers::TracerDispatcher, - versions::shadow::ShadowVm, }; -pub type ShadowedFastVm = ShadowVm, H>>; +pub(crate) type ShadowedVmFast = ShadowVm< + S, + crate::vm_latest::Vm, H>, + crate::vm_fast::Vm>, +>; #[derive(Debug)] pub enum VmInstance { @@ -26,7 +30,7 @@ pub enum VmInstance { Vm1_4_2(crate::vm_1_4_2::Vm, H>), Vm1_5_0(crate::vm_latest::Vm, H>), VmFast(crate::vm_fast::Vm>), - ShadowedVmFast(ShadowedFastVm), + ShadowedVmFast(ShadowedVmFast), } macro_rules! dispatch_vm { @@ -234,10 +238,15 @@ impl VmInstance { FastVmMode::Old => Self::new(l1_batch_env, system_env, storage_view), FastVmMode::New => { let storage = ImmutableStorageView::new(storage_view); - Self::VmFast(crate::vm_fast::Vm::new(l1_batch_env, system_env, storage)) + Self::VmFast(crate::vm_fast::Vm::custom( + l1_batch_env, + system_env, + storage, + )) } FastVmMode::Shadow => { - Self::ShadowedVmFast(ShadowVm::new(l1_batch_env, system_env, storage_view)) + let vm = ShadowVm::new(l1_batch_env, system_env, storage_view); + Self::ShadowedVmFast(vm) } }, _ => Self::new(l1_batch_env, system_env, storage_view), diff --git a/core/lib/object_store/src/file.rs b/core/lib/object_store/src/file.rs index decba534d23..308cd65427f 100644 --- a/core/lib/object_store/src/file.rs +++ b/core/lib/object_store/src/file.rs @@ -43,6 +43,7 @@ impl FileBackedObjectStore { Bucket::ProofsFri, Bucket::StorageSnapshot, Bucket::TeeVerifierInput, + Bucket::VmDumps, ] { let bucket_path = format!("{base_dir}/{bucket}"); fs::create_dir_all(&bucket_path).await?; diff --git a/core/lib/object_store/src/objects.rs b/core/lib/object_store/src/objects.rs index f5bb3706d9d..ff5fae2a81f 100644 --- a/core/lib/object_store/src/objects.rs +++ b/core/lib/object_store/src/objects.rs @@ -95,7 +95,6 @@ where type Key<'a> = SnapshotStorageLogsStorageKey; fn encode_key(key: Self::Key<'_>) -> String { - // FIXME: should keys be separated by version? 
format!( "snapshot_l1_batch_{}_storage_logs_part_{:0>4}.proto.gzip", key.l1_batch_number, key.chunk_id diff --git a/core/lib/object_store/src/raw.rs b/core/lib/object_store/src/raw.rs index 3c5a89f160a..740e8d76e24 100644 --- a/core/lib/object_store/src/raw.rs +++ b/core/lib/object_store/src/raw.rs @@ -20,6 +20,7 @@ pub enum Bucket { StorageSnapshot, DataAvailability, TeeVerifierInput, + VmDumps, } impl Bucket { @@ -39,6 +40,7 @@ impl Bucket { Self::StorageSnapshot => "storage_logs_snapshots", Self::DataAvailability => "data_availability", Self::TeeVerifierInput => "tee_verifier_inputs", + Self::VmDumps => "vm_dumps", } } } diff --git a/core/lib/protobuf_config/src/api.rs b/core/lib/protobuf_config/src/api.rs index f4718c9f796..dab6b9b15b7 100644 --- a/core/lib/protobuf_config/src/api.rs +++ b/core/lib/protobuf_config/src/api.rs @@ -145,6 +145,7 @@ impl ProtoRepr for proto::Web3JsonRpc { .context("whitelisted_tokens_for_aa")?, extended_api_tracing: self.extended_api_tracing.unwrap_or_default(), api_namespaces, + settlement_layer_url: self.settlement_layer_url.clone(), }) } @@ -209,6 +210,7 @@ impl ProtoRepr for proto::Web3JsonRpc { .collect(), extended_api_tracing: Some(this.extended_api_tracing), api_namespaces: this.api_namespaces.clone().unwrap_or_default(), + settlement_layer_url: this.settlement_layer_url.clone(), } } } diff --git a/core/lib/protobuf_config/src/consensus.rs b/core/lib/protobuf_config/src/consensus.rs index f5eb5c5b2f1..a5b552dffc4 100644 --- a/core/lib/protobuf_config/src/consensus.rs +++ b/core/lib/protobuf_config/src/consensus.rs @@ -71,6 +71,13 @@ impl ProtoRepr for proto::GenesisSpec { .map(|x| parse_h160(x)) .transpose() .context("registry_address")?, + seed_peers: self + .seed_peers + .iter() + .enumerate() + .map(|(i, e)| e.read().context(i)) + .collect::>() + .context("seed_peers")?, }) } fn build(this: &Self::Type) -> Self { @@ -81,6 +88,11 @@ impl ProtoRepr for proto::GenesisSpec { attesters: this.attesters.iter().map(ProtoRepr::build).collect(), leader: Some(this.leader.0.clone()), registry_address: this.registry_address.map(|a| format!("{:?}", a)), + seed_peers: this + .seed_peers + .iter() + .map(|(k, v)| proto::NodeAddr::build(&(k.clone(), v.clone()))) + .collect(), } } } @@ -99,15 +111,25 @@ impl ProtoRepr for proto::RpcConfig { } } +impl ProtoRepr for proto::NodeAddr { + type Type = (NodePublicKey, Host); + fn read(&self) -> anyhow::Result { + Ok(( + NodePublicKey(required(&self.key).context("key")?.clone()), + Host(required(&self.addr).context("addr")?.clone()), + )) + } + fn build(this: &Self::Type) -> Self { + Self { + key: Some(this.0 .0.clone()), + addr: Some(this.1 .0.clone()), + } + } +} + impl ProtoRepr for proto::Config { type Type = ConsensusConfig; fn read(&self) -> anyhow::Result { - let read_addr = |e: &proto::NodeAddr| { - let key = NodePublicKey(required(&e.key).context("key")?.clone()); - let addr = Host(required(&e.addr).context("addr")?.clone()); - anyhow::Ok((key, addr)) - }; - let max_payload_size = required(&self.max_payload_size) .and_then(|x| Ok((*x).try_into()?)) .context("max_payload_size")?; @@ -144,8 +166,9 @@ impl ProtoRepr for proto::Config { .gossip_static_outbound .iter() .enumerate() - .map(|(i, e)| read_addr(e).context(i)) - .collect::>()?, + .map(|(i, e)| e.read().context(i)) + .collect::>() + .context("gossip_static_outbound")?, genesis_spec: read_optional_repr(&self.genesis_spec), rpc: read_optional_repr(&self.rpc_config), }) @@ -168,10 +191,7 @@ impl ProtoRepr for proto::Config { gossip_static_outbound: this 
.gossip_static_outbound .iter() - .map(|x| proto::NodeAddr { - key: Some(x.0 .0.clone()), - addr: Some(x.1 .0.clone()), - }) + .map(|(k, v)| proto::NodeAddr::build(&(k.clone(), v.clone()))) .collect(), genesis_spec: this.genesis_spec.as_ref().map(ProtoRepr::build), rpc_config: this.rpc.as_ref().map(ProtoRepr::build), diff --git a/core/lib/protobuf_config/src/eth.rs b/core/lib/protobuf_config/src/eth.rs index 219d2344bf2..4d41ee5036d 100644 --- a/core/lib/protobuf_config/src/eth.rs +++ b/core/lib/protobuf_config/src/eth.rs @@ -1,6 +1,7 @@ use anyhow::Context as _; use zksync_config::configs::{self}; use zksync_protobuf::{required, ProtoRepr}; +use zksync_types::settlement::SettlementMode; use crate::{proto::eth as proto, read_optional_repr}; @@ -46,6 +47,24 @@ impl proto::PubdataSendingMode { } } +impl proto::SettlementMode { + fn new(x: &SettlementMode) -> Self { + use SettlementMode as From; + match x { + From::SettlesToL1 => Self::SettlesToL1, + From::Gateway => Self::Gateway, + } + } + + fn parse(&self) -> SettlementMode { + use SettlementMode as To; + match self { + Self::SettlesToL1 => To::SettlesToL1, + Self::Gateway => To::Gateway, + } + } +} + impl ProtoRepr for proto::Eth { type Type = configs::eth_sender::EthConfig; @@ -183,8 +202,12 @@ impl ProtoRepr for proto::GasAdjuster { ) .context("internal_pubdata_pricing_multiplier")?, max_blob_base_fee: self.max_blob_base_fee, - // TODO(EVM-676): support this field - settlement_mode: Default::default(), + settlement_mode: self + .settlement_mode + .map(proto::SettlementMode::try_from) + .transpose()? + .map(|x| x.parse()) + .unwrap_or_default(), }) } @@ -206,6 +229,7 @@ impl ProtoRepr for proto::GasAdjuster { ), internal_pubdata_pricing_multiplier: Some(this.internal_pubdata_pricing_multiplier), max_blob_base_fee: this.max_blob_base_fee, + settlement_mode: Some(proto::SettlementMode::new(&this.settlement_mode).into()), } } } diff --git a/core/lib/protobuf_config/src/gateway.rs b/core/lib/protobuf_config/src/gateway.rs new file mode 100644 index 00000000000..3e536a9edb6 --- /dev/null +++ b/core/lib/protobuf_config/src/gateway.rs @@ -0,0 +1,50 @@ +use anyhow::Context as _; +use zksync_config::configs::gateway::GatewayChainConfig; +use zksync_protobuf::{repr::ProtoRepr, required}; + +use crate::{parse_h160, proto::gateway as proto}; + +impl ProtoRepr for proto::GatewayChainConfig { + type Type = GatewayChainConfig; + + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + state_transition_proxy_addr: required(&self.state_transition_proxy_addr) + .and_then(|x| parse_h160(x)) + .context("state_transition_proxy_addr")?, + + validator_timelock_addr: required(&self.validator_timelock_addr) + .and_then(|x| parse_h160(x)) + .context("validator_timelock_addr")?, + + multicall3_addr: required(&self.multicall3_addr) + .and_then(|x| parse_h160(x)) + .context("multicall3_addr")?, + + diamond_proxy_addr: required(&self.diamond_proxy_addr) + .and_then(|x| parse_h160(x)) + .context("diamond_proxy_addr")?, + + chain_admin_addr: self + .chain_admin_addr + .as_ref() + .map(|x| parse_h160(x)) + .transpose()?, + + governance_addr: required(&self.governance_addr) + .and_then(|x| parse_h160(x)) + .context("governance_addr")?, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + state_transition_proxy_addr: Some(format!("{:?}", this.state_transition_proxy_addr)), + validator_timelock_addr: Some(format!("{:?}", this.validator_timelock_addr)), + multicall3_addr: Some(format!("{:?}", this.multicall3_addr)), + diamond_proxy_addr: Some(format!("{:?}", 
this.diamond_proxy_addr)), + chain_admin_addr: this.chain_admin_addr.map(|x| format!("{:?}", x)), + governance_addr: Some(format!("{:?}", this.governance_addr)), + } + } +} diff --git a/core/lib/protobuf_config/src/genesis.rs b/core/lib/protobuf_config/src/genesis.rs index 59896aa244d..58d0448d83e 100644 --- a/core/lib/protobuf_config/src/genesis.rs +++ b/core/lib/protobuf_config/src/genesis.rs @@ -7,6 +7,7 @@ use zksync_basic_types::{ }; use zksync_config::configs; use zksync_protobuf::{repr::ProtoRepr, required}; +use zksync_types::SLChainId; use crate::{parse_h160, parse_h256, proto::genesis as proto}; @@ -78,7 +79,7 @@ impl ProtoRepr for proto::Genesis { l1_chain_id: required(&self.l1_chain_id) .map(|x| L1ChainId(*x)) .context("l1_chain_id")?, - sl_chain_id: None, + sl_chain_id: self.sl_chain_id.map(SLChainId), l2_chain_id: required(&self.l2_chain_id) .and_then(|x| L2ChainId::try_from(*x).map_err(|a| anyhow::anyhow!(a))) .context("l2_chain_id")?, @@ -108,6 +109,7 @@ impl ProtoRepr for proto::Genesis { fee_account: Some(format!("{:?}", this.fee_account)), l1_chain_id: Some(this.l1_chain_id.0), l2_chain_id: Some(this.l2_chain_id.as_u64()), + sl_chain_id: this.sl_chain_id.map(|x| x.0), prover: Some(proto::Prover { recursion_scheduler_level_vk_hash: None, // Deprecated field. dummy_verifier: Some(this.dummy_verifier), diff --git a/core/lib/protobuf_config/src/lib.rs b/core/lib/protobuf_config/src/lib.rs index a4822edbe8e..26a02bf6140 100644 --- a/core/lib/protobuf_config/src/lib.rs +++ b/core/lib/protobuf_config/src/lib.rs @@ -17,6 +17,7 @@ mod database; mod en; mod eth; mod experimental; +mod gateway; mod general; mod genesis; mod house_keeper; diff --git a/core/lib/protobuf_config/src/proto/config/api.proto b/core/lib/protobuf_config/src/proto/config/api.proto index e08677adc44..019912b0bdf 100644 --- a/core/lib/protobuf_config/src/proto/config/api.proto +++ b/core/lib/protobuf_config/src/proto/config/api.proto @@ -40,6 +40,7 @@ message Web3JsonRpc { repeated MaxResponseSizeOverride max_response_body_size_overrides = 31; repeated string api_namespaces = 32; // Optional, if empty all namespaces are available optional bool extended_api_tracing = 33; // optional, default false + optional string settlement_layer_url = 34; // optional reserved 15; reserved "l1_to_l2_transactions_compatibility_mode"; reserved 11; reserved "request_timeout"; reserved 12; reserved "account_pks"; diff --git a/core/lib/protobuf_config/src/proto/config/eth_sender.proto b/core/lib/protobuf_config/src/proto/config/eth_sender.proto index ce965510e45..33270efd1f2 100644 --- a/core/lib/protobuf_config/src/proto/config/eth_sender.proto +++ b/core/lib/protobuf_config/src/proto/config/eth_sender.proto @@ -27,6 +27,11 @@ enum PubdataSendingMode { RELAYED_L2_CALLDATA = 3; } +enum SettlementMode { + SettlesToL1 = 0; + Gateway = 1; +} + message Sender { repeated uint64 aggregated_proof_sizes = 1; // ? 
optional uint64 wait_confirmations = 2; // optional @@ -64,6 +69,7 @@ message GasAdjuster { optional uint64 num_samples_for_blob_base_fee_estimate = 9; // required; optional double internal_pubdata_pricing_multiplier = 10; // required; optional uint64 max_blob_base_fee = 11; // optional; wei + optional SettlementMode settlement_mode = 13; // optional } message ETHWatch { diff --git a/core/lib/protobuf_config/src/proto/config/gateway.proto b/core/lib/protobuf_config/src/proto/config/gateway.proto new file mode 100644 index 00000000000..f39e0ab506e --- /dev/null +++ b/core/lib/protobuf_config/src/proto/config/gateway.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; + +package zksync.config.gateway; + +message GatewayChainConfig { + optional string state_transition_proxy_addr = 1; + optional string validator_timelock_addr = 2; + optional string multicall3_addr = 3; + optional string diamond_proxy_addr = 4; + optional string chain_admin_addr = 5; + optional string governance_addr = 6; +} diff --git a/core/lib/protobuf_config/src/proto/config/genesis.proto b/core/lib/protobuf_config/src/proto/config/genesis.proto index 08cbb954fcb..54af7a459f9 100644 --- a/core/lib/protobuf_config/src/proto/config/genesis.proto +++ b/core/lib/protobuf_config/src/proto/config/genesis.proto @@ -28,5 +28,6 @@ message Genesis { optional Prover prover = 10; optional L1BatchCommitDataGeneratorMode l1_batch_commit_data_generator_mode = 29; // optional, default to rollup optional string genesis_protocol_semantic_version = 12; // optional; + optional uint64 sl_chain_id = 13; // required; reserved 11; reserved "shared_bridge"; } diff --git a/core/lib/protobuf_config/src/proto/config/prover_job_monitor.proto b/core/lib/protobuf_config/src/proto/config/prover_job_monitor.proto index 7b505aa3bcf..9aabf6e3483 100644 --- a/core/lib/protobuf_config/src/proto/config/prover_job_monitor.proto +++ b/core/lib/protobuf_config/src/proto/config/prover_job_monitor.proto @@ -17,4 +17,5 @@ message ProverJobMonitor { optional uint64 prover_queue_reporter_run_interval_ms = 12; // optional; ms optional uint64 witness_generator_queue_reporter_run_interval_ms = 13; // optional; ms optional uint64 witness_job_queuer_run_interval_ms = 14; // optional; ms + optional uint32 http_port = 15; // required; u32 } diff --git a/core/lib/protobuf_config/src/proto/core/consensus.proto b/core/lib/protobuf_config/src/proto/core/consensus.proto index 835ead1ab65..6cabc45fc12 100644 --- a/core/lib/protobuf_config/src/proto/core/consensus.proto +++ b/core/lib/protobuf_config/src/proto/core/consensus.proto @@ -31,10 +31,10 @@ package zksync.core.consensus; import "zksync/std.proto"; -// (public key, ip address) of a gossip network node. +// (public key, host address) of a gossip network node. message NodeAddr { optional string key = 1; // required; NodePublicKey - optional string addr = 2; // required; IpAddr + optional string addr = 2; // required; Host } // Weighted member of a validator committee. @@ -58,6 +58,7 @@ message GenesisSpec { repeated WeightedAttester attesters = 5; // can be empty; attester committee. // Currently not in consensus genesis, but still a part of the global configuration. optional string registry_address = 6; // optional; H160 + repeated NodeAddr seed_peers = 7; } // Per peer connection RPC rate limits. 
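Note on the new `seed_peers` wiring above: a seed peer is just a `(NodePublicKey, Host)` pair, and the `ProtoRepr` impl for `proto::NodeAddr` makes both fields required. A minimal sketch of that parsing step, with the string newtypes declared inline so it is self-contained (the real definitions live in `zksync_config::configs::consensus`; `read_node_addr` is a hypothetical helper, not part of this diff):

use anyhow::Context as _;

// String newtypes mirroring the consensus config types used above.
pub struct NodePublicKey(pub String);
pub struct Host(pub String);

// Reads one `NodeAddr`-like entry: a missing `key` or `addr` surfaces as a
// labeled error, matching the `required(...).context(...)` pattern in the diff.
fn read_node_addr(
    key: Option<&str>,
    addr: Option<&str>,
) -> anyhow::Result<(NodePublicKey, Host)> {
    Ok((
        NodePublicKey(key.context("key")?.to_owned()),
        Host(addr.context("addr")?.to_owned()),
    ))
}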
diff --git a/core/lib/protobuf_config/src/prover_job_monitor.rs b/core/lib/protobuf_config/src/prover_job_monitor.rs index a1c5a7c0599..a174d088240 100644 --- a/core/lib/protobuf_config/src/prover_job_monitor.rs +++ b/core/lib/protobuf_config/src/prover_job_monitor.rs @@ -95,6 +95,9 @@ impl ProtoRepr for proto::ProverJobMonitor { .or_else(|| Some(Self::Type::default_witness_job_queuer_run_interval_ms())), ) .context("witness_job_queuer_run_interval_ms")?, + http_port: required(&self.http_port) + .and_then(|x| Ok((*x).try_into()?)) + .context("http_port")?, }) } @@ -126,6 +129,7 @@ impl ProtoRepr for proto::ProverJobMonitor { this.witness_generator_queue_reporter_run_interval_ms, ), witness_job_queuer_run_interval_ms: Some(this.witness_job_queuer_run_interval_ms), + http_port: Some(this.http_port.into()), } } } diff --git a/core/lib/prover_interface/Cargo.toml b/core/lib/prover_interface/Cargo.toml index 8c73c2c6ac3..889b80b4fbe 100644 --- a/core/lib/prover_interface/Cargo.toml +++ b/core/lib/prover_interface/Cargo.toml @@ -20,7 +20,7 @@ circuit_sequencer_api_1_5_0.workspace = true serde.workspace = true strum = { workspace = true, features = ["derive"] } -serde_with = { workspace = true, features = ["base64"] } +serde_with = { workspace = true, features = ["base64", "hex"] } chrono = { workspace = true, features = ["serde"] } [dev-dependencies] diff --git a/core/lib/prover_interface/src/api.rs b/core/lib/prover_interface/src/api.rs index bc95345bbba..776cd3141cb 100644 --- a/core/lib/prover_interface/src/api.rs +++ b/core/lib/prover_interface/src/api.rs @@ -2,6 +2,7 @@ //! This module defines the types used in the API. use serde::{Deserialize, Serialize}; +use serde_with::{hex::Hex, serde_as}; use zksync_types::{ protocol_version::{L1VerifierConfig, ProtocolSemanticVersion}, tee_types::TeeType, @@ -71,8 +72,11 @@ pub struct VerifyProofRequest(pub Box); #[derive(Debug, PartialEq, Serialize, Deserialize)] pub struct SubmitTeeProofRequest(pub Box); +#[serde_as] #[derive(Debug, PartialEq, Serialize, Deserialize)] pub struct RegisterTeeAttestationRequest { + #[serde_as(as = "Hex")] pub attestation: Vec, + #[serde_as(as = "Hex")] pub pubkey: Vec, } diff --git a/core/lib/prover_interface/src/outputs.rs b/core/lib/prover_interface/src/outputs.rs index 9672bfb2142..60a9eaba760 100644 --- a/core/lib/prover_interface/src/outputs.rs +++ b/core/lib/prover_interface/src/outputs.rs @@ -2,6 +2,7 @@ use core::fmt; use circuit_sequencer_api_1_5_0::proof::FinalProof; use serde::{Deserialize, Serialize}; +use serde_with::{hex::Hex, serde_as}; use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject}; use zksync_types::{protocol_version::ProtocolSemanticVersion, tee_types::TeeType, L1BatchNumber}; @@ -14,14 +15,18 @@ pub struct L1BatchProofForL1 { } /// A "final" TEE proof that can be sent to the L1 contract. 
+#[serde_as] #[derive(Clone, PartialEq, Serialize, Deserialize)] pub struct L1BatchTeeProofForL1 { // signature generated within the TEE enclave, using the privkey corresponding to the pubkey + #[serde_as(as = "Hex")] pub signature: Vec, // pubkey used for signature verification; each key pair is attested by the TEE attestation // stored in the db + #[serde_as(as = "Hex")] pub pubkey: Vec, // data that was signed + #[serde_as(as = "Hex")] pub proof: Vec, // type of TEE used for attestation pub tee_type: TeeType, diff --git a/core/lib/prover_interface/tests/job_serialization.rs b/core/lib/prover_interface/tests/job_serialization.rs index a2aee0c2733..ead59749abe 100644 --- a/core/lib/prover_interface/tests/job_serialization.rs +++ b/core/lib/prover_interface/tests/job_serialization.rs @@ -167,9 +167,9 @@ fn test_proof_request_serialization() { #[test] fn test_tee_proof_request_serialization() { let tee_proof_str = r#"{ - "signature": [ 0, 1, 2, 3, 4 ], - "pubkey": [ 5, 6, 7, 8, 9 ], - "proof": [ 10, 11, 12, 13, 14 ], + "signature": "0001020304", + "pubkey": "0506070809", + "proof": "0A0B0C0D0E", "tee_type": "sgx" }"#; let tee_proof_result = serde_json::from_str::(tee_proof_str).unwrap(); diff --git a/core/lib/state/src/lib.rs b/core/lib/state/src/lib.rs index 205579552a3..fa06599357c 100644 --- a/core/lib/state/src/lib.rs +++ b/core/lib/state/src/lib.rs @@ -21,6 +21,7 @@ pub use self::{ shadow_storage::ShadowStorage, storage_factory::{ BatchDiff, CommonStorage, OwnedStorage, ReadStorageFactory, RocksdbWithMemory, + SnapshotStorage, }, }; diff --git a/core/lib/state/src/storage_factory/metrics.rs b/core/lib/state/src/storage_factory/metrics.rs new file mode 100644 index 00000000000..822db90820c --- /dev/null +++ b/core/lib/state/src/storage_factory/metrics.rs @@ -0,0 +1,37 @@ +use std::time::Duration; + +use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics, Unit}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] +#[metrics(label = "stage", rename_all = "snake_case")] +pub(super) enum SnapshotStage { + BatchHeader, + ProtectiveReads, + TouchedSlots, + PreviousValues, + InitialWrites, + Bytecodes, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] +#[metrics(label = "kind", rename_all = "snake_case")] +pub(super) enum AccessKind { + ReadValue, + IsWriteInitial, + LoadFactoryDep, + GetEnumerationIndex, +} + +#[derive(Debug, Metrics)] +#[metrics(prefix = "state_snapshot")] +pub(super) struct SnapshotMetrics { + /// Latency of loading a batch snapshot split by stage. + #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] + pub load_latency: Family>, + /// Latency of accessing the fallback storage for a batch snapshot. 
+ #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] + pub fallback_access_latency: Family>, +} + +#[vise::register] +pub(super) static SNAPSHOT_METRICS: vise::Global = vise::Global::new(); diff --git a/core/lib/state/src/storage_factory.rs b/core/lib/state/src/storage_factory/mod.rs similarity index 80% rename from core/lib/state/src/storage_factory.rs rename to core/lib/state/src/storage_factory/mod.rs index 2ef9b249af2..0b514f8f964 100644 --- a/core/lib/state/src/storage_factory.rs +++ b/core/lib/state/src/storage_factory/mod.rs @@ -1,7 +1,4 @@ -use std::{ - collections::{HashMap, HashSet}, - fmt::Debug, -}; +use std::{collections::HashSet, fmt}; use anyhow::Context as _; use async_trait::async_trait; @@ -10,64 +7,18 @@ use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_storage::RocksDB; use zksync_types::{L1BatchNumber, StorageKey, StorageValue, H256}; use zksync_utils::u256_to_h256; -use zksync_vm_interface::storage::{ReadStorage, StorageSnapshot, StorageWithSnapshot}; +use zksync_vm_interface::storage::{ReadStorage, StorageSnapshot}; +use self::metrics::{SnapshotStage, SNAPSHOT_METRICS}; +pub use self::{ + rocksdb_with_memory::{BatchDiff, RocksdbWithMemory}, + snapshot::SnapshotStorage, +}; use crate::{PostgresStorage, RocksdbStorage, RocksdbStorageBuilder, StateKeeperColumnFamily}; -/// Storage with a static lifetime that can be sent to Tokio tasks etc. -pub type OwnedStorage = CommonStorage<'static>; - -/// Factory that can produce storage instances on demand. The storage type is encapsulated as a type param -/// (mostly for testing purposes); the default is [`OwnedStorage`]. -#[async_trait] -pub trait ReadStorageFactory: Debug + Send + Sync + 'static { - /// Creates a storage instance, e.g. over a Postgres connection or a RocksDB instance. - /// The specific criteria on which one are left up to the implementation. - /// - /// Implementations may be cancel-aware and return `Ok(None)` iff `stop_receiver` receives - /// a stop signal; this is the only case in which `Ok(None)` should be returned. - async fn access_storage( - &self, - stop_receiver: &watch::Receiver, - l1_batch_number: L1BatchNumber, - ) -> anyhow::Result>; -} - -/// [`ReadStorageFactory`] producing Postgres-backed storage instances. Hence, it is slower than more advanced -/// alternatives with RocksDB caches and should be used sparingly (e.g., for testing). -#[async_trait] -impl ReadStorageFactory for ConnectionPool { - async fn access_storage( - &self, - _stop_receiver: &watch::Receiver, - l1_batch_number: L1BatchNumber, - ) -> anyhow::Result> { - let connection = self.connection().await?; - let storage = OwnedStorage::postgres(connection, l1_batch_number).await?; - Ok(Some(storage.into())) - } -} - -/// DB difference introduced by one batch. -#[derive(Debug, Clone)] -pub struct BatchDiff { - /// Storage slots touched by this batch along with new values there. - pub state_diff: HashMap, - /// Initial write indices introduced by this batch. - pub enum_index_diff: HashMap, - /// Factory dependencies introduced by this batch. - pub factory_dep_diff: HashMap>, -} - -/// A RocksDB cache instance with in-memory DB diffs that gives access to DB state at batches `N` to -/// `N + K`, where `K` is the number of diffs. -#[derive(Debug)] -pub struct RocksdbWithMemory { - /// RocksDB cache instance caught up to batch `N`. - pub rocksdb: RocksdbStorage, - /// Diffs for batches `N + 1` to `N + K`. 
-    pub batch_diffs: Vec<BatchDiff>,
-}
+mod metrics;
+mod rocksdb_with_memory;
+mod snapshot;
 
 /// Union of all [`ReadStorage`] implementations that are returned by [`ReadStorageFactory`], such as
 /// Postgres- and RocksDB-backed storages.
@@ -83,7 +34,7 @@ pub enum CommonStorage<'a> {
     /// Implementation over a RocksDB cache instance with in-memory DB diffs.
     RocksdbWithMemory(RocksdbWithMemory),
     /// In-memory storage snapshot with the Postgres storage fallback.
-    Snapshot(StorageWithSnapshot<PostgresStorage<'a>>),
+    Snapshot(SnapshotStorage<'a>),
     /// Generic implementation. Should be used for testing purposes only since it has performance penalty because
     /// of the dynamic dispatch.
     Boxed(Box<dyn ReadStorage + Send + 'a>),
@@ -176,6 +127,7 @@ impl CommonStorage<'static> {
         connection: &mut Connection<'static, Core>,
         l1_batch_number: L1BatchNumber,
     ) -> anyhow::Result<Option<StorageSnapshot>> {
+        let latency = SNAPSHOT_METRICS.load_latency[&SnapshotStage::BatchHeader].start();
         let Some(header) = connection
             .blocks_dal()
             .get_l1_batch_header(l1_batch_number)
@@ -188,8 +140,10 @@ impl CommonStorage<'static> {
             .into_iter()
             .map(u256_to_h256)
             .collect();
+        latency.observe();
 
         // Check protective reads early on.
+        let latency = SNAPSHOT_METRICS.load_latency[&SnapshotStage::ProtectiveReads].start();
         let protective_reads = connection
             .storage_logs_dedup_dal()
             .get_protective_reads_for_l1_batch(l1_batch_number)
@@ -199,14 +153,18 @@ impl CommonStorage<'static> {
             return Ok(None);
         }
         let protective_reads_len = protective_reads.len();
-        tracing::debug!("Loaded {protective_reads_len} protective reads");
+        let latency = latency.observe();
+        tracing::debug!("Loaded {protective_reads_len} protective reads in {latency:?}");
 
+        let latency = SNAPSHOT_METRICS.load_latency[&SnapshotStage::TouchedSlots].start();
         let touched_slots = connection
             .storage_logs_dal()
             .get_touched_slots_for_l1_batch(l1_batch_number)
             .await?;
-        tracing::debug!("Loaded {} touched keys", touched_slots.len());
+        let latency = latency.observe();
+        tracing::debug!("Loaded {} touched keys in {latency:?}", touched_slots.len());
 
+        let latency = SNAPSHOT_METRICS.load_latency[&SnapshotStage::PreviousValues].start();
         let all_accessed_keys: Vec<_> = protective_reads
             .into_iter()
             .map(|key| key.hashed_key())
@@ -216,21 +174,31 @@ impl CommonStorage<'static> {
             .storage_logs_dal()
             .get_previous_storage_values(&all_accessed_keys, l1_batch_number)
             .await?;
+        let latency = latency.observe();
         tracing::debug!(
-            "Obtained {} previous values for accessed keys",
+            "Obtained {} previous values for accessed keys in {latency:?}",
             previous_values.len()
         );
+
+        let latency = SNAPSHOT_METRICS.load_latency[&SnapshotStage::InitialWrites].start();
         let initial_write_info = connection
             .storage_logs_dal()
             .get_l1_batches_and_indices_for_initial_writes(&all_accessed_keys)
             .await?;
-        tracing::debug!("Obtained initial write info for accessed keys");
+        let latency = latency.observe();
+        tracing::debug!("Obtained initial write info for accessed keys in {latency:?}");
 
+        let latency = SNAPSHOT_METRICS.load_latency[&SnapshotStage::Bytecodes].start();
         let bytecodes = connection
             .factory_deps_dal()
             .get_factory_deps(&bytecode_hashes)
             .await;
-        tracing::debug!("Loaded {} bytecodes used in the batch", bytecodes.len());
+        let latency = latency.observe();
+        tracing::debug!(
+            "Loaded {} bytecodes used in the batch in {latency:?}",
+            bytecodes.len()
+        );
+
         let factory_deps = bytecodes
             .into_iter()
             .map(|(hash_u256, words)| {
@@ -256,54 +224,6 @@ impl CommonStorage<'static> {
     }
 }
 
-impl ReadStorage for RocksdbWithMemory {
-    fn read_value(&mut self, key: &StorageKey) -> StorageValue {
-        let hashed_key = key.hashed_key();
-        match self
-            .batch_diffs
-            .iter()
-            .rev()
-            .find_map(|b| b.state_diff.get(&hashed_key))
-        {
-            None => self.rocksdb.read_value(key),
-            Some(value) => *value,
-        }
-    }
-
-    fn is_write_initial(&mut self, key: &StorageKey) -> bool {
-        match self
-            .batch_diffs
-            .iter()
-            .find_map(|b| b.enum_index_diff.get(&key.hashed_key()))
-        {
-            None => self.rocksdb.is_write_initial(key),
-            Some(_) => false,
-        }
-    }
-
-    fn load_factory_dep(&mut self, hash: H256) -> Option<Vec<u8>> {
-        match self
-            .batch_diffs
-            .iter()
-            .find_map(|b| b.factory_dep_diff.get(&hash))
-        {
-            None => self.rocksdb.load_factory_dep(hash),
-            Some(value) => Some(value.clone()),
-        }
-    }
-
-    fn get_enumeration_index(&mut self, key: &StorageKey) -> Option<u64> {
-        match self
-            .batch_diffs
-            .iter()
-            .find_map(|b| b.enum_index_diff.get(&key.hashed_key()))
-        {
-            None => self.rocksdb.get_enumeration_index(key),
-            Some(value) => Some(*value),
-        }
-    }
-}
-
 impl ReadStorage for CommonStorage<'_> {
     fn read_value(&mut self, key: &StorageKey) -> StorageValue {
         match self {
@@ -358,8 +278,42 @@ impl From<RocksdbWithMemory> for CommonStorage<'_> {
     }
 }
 
-impl<'a> From<StorageWithSnapshot<PostgresStorage<'a>>> for CommonStorage<'a> {
-    fn from(value: StorageWithSnapshot<PostgresStorage<'a>>) -> Self {
+impl<'a> From<SnapshotStorage<'a>> for CommonStorage<'a> {
+    fn from(value: SnapshotStorage<'a>) -> Self {
         Self::Snapshot(value)
     }
 }
+
+/// Storage with a static lifetime that can be sent to Tokio tasks etc.
+pub type OwnedStorage = CommonStorage<'static>;
+
+/// Factory that can produce storage instances on demand. The storage type is encapsulated as a type param
+/// (mostly for testing purposes); the default is [`OwnedStorage`].
+#[async_trait]
+pub trait ReadStorageFactory<S = OwnedStorage>: fmt::Debug + Send + Sync + 'static {
+    /// Creates a storage instance, e.g. over a Postgres connection or a RocksDB instance.
+    /// The specific criteria on which one are left up to the implementation.
+    ///
+    /// Implementations may be cancel-aware and return `Ok(None)` iff `stop_receiver` receives
+    /// a stop signal; this is the only case in which `Ok(None)` should be returned.
+    async fn access_storage(
+        &self,
+        stop_receiver: &watch::Receiver<bool>,
+        l1_batch_number: L1BatchNumber,
+    ) -> anyhow::Result<Option<S>>;
+}
+
+/// [`ReadStorageFactory`] producing Postgres-backed storage instances. Hence, it is slower than more advanced
+/// alternatives with RocksDB caches and should be used sparingly (e.g., for testing).
+#[async_trait]
+impl ReadStorageFactory for ConnectionPool<Core> {
+    async fn access_storage(
+        &self,
+        _stop_receiver: &watch::Receiver<bool>,
+        l1_batch_number: L1BatchNumber,
+    ) -> anyhow::Result<Option<OwnedStorage>> {
+        let connection = self.connection().await?;
+        let storage = OwnedStorage::postgres(connection, l1_batch_number).await?;
+        Ok(Some(storage.into()))
+    }
+}
diff --git a/core/lib/state/src/storage_factory/rocksdb_with_memory.rs b/core/lib/state/src/storage_factory/rocksdb_with_memory.rs
new file mode 100644
index 00000000000..411460dad18
--- /dev/null
+++ b/core/lib/state/src/storage_factory/rocksdb_with_memory.rs
@@ -0,0 +1,75 @@
+use std::collections::HashMap;
+
+use zksync_types::{StorageKey, StorageValue, H256};
+use zksync_vm_interface::storage::ReadStorage;
+
+use crate::RocksdbStorage;
+
+/// DB difference introduced by one batch.
+#[derive(Debug, Clone)]
+pub struct BatchDiff {
+    /// Storage slots touched by this batch along with new values there.
+    pub state_diff: HashMap<H256, StorageValue>,
+    /// Initial write indices introduced by this batch.
+    pub enum_index_diff: HashMap<H256, u64>,
+    /// Factory dependencies introduced by this batch.
+    pub factory_dep_diff: HashMap<H256, Vec<u8>>,
+}
+
+/// A RocksDB cache instance with in-memory DB diffs that gives access to DB state at batches `N` to
+/// `N + K`, where `K` is the number of diffs.
+#[derive(Debug)]
+pub struct RocksdbWithMemory {
+    /// RocksDB cache instance caught up to batch `N`.
+    pub rocksdb: RocksdbStorage,
+    /// Diffs for batches `N + 1` to `N + K`.
+    pub batch_diffs: Vec<BatchDiff>,
+}
+
+impl ReadStorage for RocksdbWithMemory {
+    fn read_value(&mut self, key: &StorageKey) -> StorageValue {
+        let hashed_key = key.hashed_key();
+        match self
+            .batch_diffs
+            .iter()
+            .rev()
+            .find_map(|b| b.state_diff.get(&hashed_key))
+        {
+            None => self.rocksdb.read_value(key),
+            Some(value) => *value,
+        }
+    }
+
+    fn is_write_initial(&mut self, key: &StorageKey) -> bool {
+        match self
+            .batch_diffs
+            .iter()
+            .find_map(|b| b.enum_index_diff.get(&key.hashed_key()))
+        {
+            None => self.rocksdb.is_write_initial(key),
+            Some(_) => false,
+        }
+    }
+
+    fn load_factory_dep(&mut self, hash: H256) -> Option<Vec<u8>> {
+        match self
+            .batch_diffs
+            .iter()
+            .find_map(|b| b.factory_dep_diff.get(&hash))
+        {
+            None => self.rocksdb.load_factory_dep(hash),
+            Some(value) => Some(value.clone()),
+        }
+    }
+
+    fn get_enumeration_index(&mut self, key: &StorageKey) -> Option<u64> {
+        match self
+            .batch_diffs
+            .iter()
+            .find_map(|b| b.enum_index_diff.get(&key.hashed_key()))
+        {
+            None => self.rocksdb.get_enumeration_index(key),
+            Some(value) => Some(*value),
+        }
+    }
+}
diff --git a/core/lib/state/src/storage_factory/snapshot.rs b/core/lib/state/src/storage_factory/snapshot.rs
new file mode 100644
index 00000000000..05a79125dd3
--- /dev/null
+++ b/core/lib/state/src/storage_factory/snapshot.rs
@@ -0,0 +1,49 @@
+use zksync_types::{StorageKey, StorageValue, H256};
+use zksync_vm_interface::storage::StorageWithSnapshot;
+
+use super::metrics::{AccessKind, SNAPSHOT_METRICS};
+use crate::{interface::ReadStorage, PostgresStorage};
+
+/// Wrapper around [`PostgresStorage`] used to track frequency of fallback access.
+#[derive(Debug)]
+pub struct FallbackStorage<'a>(PostgresStorage<'a>);
+
+impl<'a> From<PostgresStorage<'a>> for FallbackStorage<'a> {
+    fn from(storage: PostgresStorage<'a>) -> Self {
+        Self(storage)
+    }
+}
+
+impl ReadStorage for FallbackStorage<'_> {
+    fn read_value(&mut self, key: &StorageKey) -> StorageValue {
+        let latency = SNAPSHOT_METRICS.fallback_access_latency[&AccessKind::ReadValue].start();
+        let output = self.0.read_value(key);
+        latency.observe();
+        output
+    }
+
+    fn is_write_initial(&mut self, key: &StorageKey) -> bool {
+        let latency = SNAPSHOT_METRICS.fallback_access_latency[&AccessKind::IsWriteInitial].start();
+        let output = self.0.is_write_initial(key);
+        latency.observe();
+        output
+    }
+
+    fn load_factory_dep(&mut self, hash: H256) -> Option<Vec<u8>> {
+        let latency = SNAPSHOT_METRICS.fallback_access_latency[&AccessKind::LoadFactoryDep].start();
+        let output = self.0.load_factory_dep(hash);
+        latency.observe();
+        output
+    }
+
+    fn get_enumeration_index(&mut self, key: &StorageKey) -> Option<u64> {
+        let latency =
+            SNAPSHOT_METRICS.fallback_access_latency[&AccessKind::GetEnumerationIndex].start();
+        let output = self.0.get_enumeration_index(key);
+        latency.observe();
+        output
+    }
+}
+
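// Illustrative sketch (not part of the diff): how the fallback wrapper above and a
// batch snapshot might be combined. `with_fallback()` exists on `StorageSnapshot` in
// `vm_interface`, but its exact signature is an assumption here, as are the imports
// (`anyhow::Context`, `Connection`, `Core`, `L1BatchNumber`).
async fn example_snapshot_storage(
    connection: &mut Connection<'static, Core>,
    postgres: PostgresStorage<'static>,
    l1_batch_number: L1BatchNumber,
) -> anyhow::Result<SnapshotStorage<'static>> {
    // `Ok(None)` from `snapshot()` means the batch isn't fully persisted yet
    // (e.g., its protective reads are missing).
    let snapshot = CommonStorage::snapshot(connection, l1_batch_number)
        .await?
        .context("batch cannot be loaded as a snapshot yet")?;
    // Accesses missing from the snapshot fall through to Postgres and are timed
    // via `FallbackStorage`.
    Ok(snapshot.with_fallback(FallbackStorage::from(postgres)))
}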
+/// Snapshot-backed storage used for batch processing.
+pub type SnapshotStorage<'a> = StorageWithSnapshot<FallbackStorage<'a>>;
diff --git a/core/lib/types/Cargo.toml b/core/lib/types/Cargo.toml
index 92950017cba..84ad10b5bbb 100644
--- a/core/lib/types/Cargo.toml
+++ b/core/lib/types/Cargo.toml
@@ -28,6 +28,7 @@ once_cell.workspace = true
 rlp.workspace = true
 serde.workspace = true
 serde_json.workspace = true
+serde_with = { workspace = true, features = ["hex"] }
 bigdecimal.workspace = true
 strum = { workspace = true, features = ["derive"] }
 thiserror.workspace = true
diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs
index eb86c4f0664..65249f9bab6 100644
--- a/core/lib/types/src/api/mod.rs
+++ b/core/lib/types/src/api/mod.rs
@@ -1,6 +1,7 @@
 use chrono::{DateTime, Utc};
 use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
 use serde_json::Value;
+use serde_with::{hex::Hex, serde_as};
 use strum::Display;
 use zksync_basic_types::{
     tee_types::TeeType,
@@ -830,6 +831,17 @@ pub struct L1BatchDetails {
     pub base: BlockDetailsBase,
 }
 
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct L1ProcessingDetails {
+    pub commit_tx_hash: Option<H256>,
+    pub committed_at: Option<DateTime<Utc>>,
+    pub prove_tx_hash: Option<H256>,
+    pub proven_at: Option<DateTime<Utc>>,
+    pub execute_tx_hash: Option<H256>,
+    pub executed_at: Option<DateTime<Utc>>,
+}
+
 #[derive(Debug, Clone, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct StorageProof {
@@ -846,15 +858,20 @@ pub struct Proof {
     pub storage_proof: Vec<StorageProof>,
 }
 
+#[serde_as]
 #[derive(Debug, Clone, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct TeeProof {
     pub l1_batch_number: L1BatchNumber,
     pub tee_type: Option<TeeType>,
+    #[serde_as(as = "Option<Hex>")]
     pub pubkey: Option<Vec<u8>>,
+    #[serde_as(as = "Option<Hex>")]
     pub signature: Option<Vec<u8>>,
+    #[serde_as(as = "Option<Hex>")]
     pub proof: Option<Vec<u8>>,
     pub proved_at: DateTime<Utc>,
+    #[serde_as(as = "Option<Hex>")]
     pub attestation: Option<Vec<u8>>,
 }
diff --git a/core/lib/types/src/commitment/mod.rs b/core/lib/types/src/commitment/mod.rs
index f9f28eeb35c..aad81c56918 100644
--- a/core/lib/types/src/commitment/mod.rs
+++ b/core/lib/types/src/commitment/mod.rs
@@ -84,9 +84,6 @@ pub struct L1BatchMetadata {
     pub meta_parameters_hash: H256,
     pub pass_through_data_hash: H256,
 
-    // FIXME: it may not be present for old batches
-    pub state_diff_hash: H256,
-
     /// The commitment to the final events queue state after the batch is committed.
     /// Practically, it is a commitment to all events that happened on L2 during the batch execution.
     pub events_queue_commitment: Option<H256>,
@@ -94,9 +91,16 @@
     /// commitment to the transactions in the batch.
     pub bootloader_initial_content_commitment: Option<H256>,
     pub state_diffs_compressed: Vec<u8>,
-
-    pub aggregation_root: H256,
-    pub local_root: H256,
+    /// Hash of packed state diffs. It's present only for post-gateway batches.
+    pub state_diff_hash: Option<H256>,
+    /// Root hash of the local logs tree. Tree contains logs that were produced on this chain.
+    /// It's present only for post-gateway batches.
+    pub local_root: Option<H256>,
+    /// Root hash of the aggregated logs tree. Tree aggregates `local_root`s of chains that settle on this chain.
+    /// It's present only for post-gateway batches.
+    pub aggregation_root: Option<H256>,
+    /// Data Availability inclusion proof, that has to be verified on the settlement layer.
+ pub da_inclusion_data: Option>, } #[derive(Debug, Clone, PartialEq, Default, Serialize, Deserialize)] diff --git a/core/lib/types/src/eth_sender.rs b/core/lib/types/src/eth_sender.rs index 12a5a5a8fb1..5bda7e3ce6c 100644 --- a/core/lib/types/src/eth_sender.rs +++ b/core/lib/types/src/eth_sender.rs @@ -93,3 +93,10 @@ pub struct TxHistoryToSend { pub signed_raw_tx: Vec, pub nonce: Nonce, } + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct BatchSettlementInfo { + pub batch_number: u32, + pub settlement_layer_id: SLChainId, + pub settlement_layer_tx_hash: H256, +} diff --git a/core/lib/types/src/l1/mod.rs b/core/lib/types/src/l1/mod.rs index ac88c9e58e9..90bb28c600a 100644 --- a/core/lib/types/src/l1/mod.rs +++ b/core/lib/types/src/l1/mod.rs @@ -282,7 +282,9 @@ impl From for abi::NewPriorityRequest { transaction: abi::L2CanonicalTransaction { tx_type: PRIORITY_OPERATION_L2_TX_TYPE.into(), from: address_to_u256(&t.common_data.sender), - to: address_to_u256(&t.execute.contract_address), + // Unwrap used here because the contract address should always be present for L1 transactions. + // TODO: Consider restricting the contract address to not be optional in L1Tx. + to: address_to_u256(&t.execute.contract_address.unwrap()), gas_limit: t.common_data.gas_limit, gas_per_pubdata_byte_limit: t.common_data.gas_per_pubdata_limit, max_fee_per_gas: t.common_data.max_fee_per_gas, @@ -353,7 +355,7 @@ impl TryFrom for L1Tx { }; let execute = Execute { - contract_address: u256_to_account_address(&req.transaction.to), + contract_address: Some(u256_to_account_address(&req.transaction.to)), calldata: req.transaction.data, factory_deps: req.factory_deps, value: req.transaction.value, diff --git a/core/lib/types/src/l2/mod.rs b/core/lib/types/src/l2/mod.rs index 5a527640752..036d2a7a036 100644 --- a/core/lib/types/src/l2/mod.rs +++ b/core/lib/types/src/l2/mod.rs @@ -153,7 +153,7 @@ pub struct L2Tx { impl L2Tx { #[allow(clippy::too_many_arguments)] pub fn new( - contract_address: Address, + contract_address: Option
, calldata: Vec, nonce: Nonce, fee: Fee, @@ -185,7 +185,7 @@ impl L2Tx { #[allow(clippy::too_many_arguments)] pub fn new_signed( - contract_address: Address, + contract_address: Option
, calldata: Vec, nonce: Nonce, fee: Fee, @@ -232,7 +232,7 @@ impl L2Tx { } /// Returns recipient account of the transaction. - pub fn recipient_account(&self) -> Address { + pub fn recipient_account(&self) -> Option
{ self.execute.contract_address } @@ -324,7 +324,7 @@ impl From for TransactionRequest { let mut base_tx_req = TransactionRequest { nonce: U256::from(tx.common_data.nonce.0), from: Some(tx.common_data.initiator_address), - to: Some(tx.recipient_account()), + to: tx.recipient_account(), value: tx.execute.value, gas_price: tx.common_data.fee.max_fee_per_gas, max_priority_fee_per_gas: None, @@ -400,7 +400,7 @@ impl From for api::Transaction { chain_id: U256::from(tx.common_data.extract_chain_id().unwrap_or_default()), nonce: U256::from(tx.common_data.nonce.0), from: Some(tx.common_data.initiator_address), - to: Some(tx.recipient_account()), + to: tx.recipient_account(), value: tx.execute.value, gas_price: Some(tx.common_data.fee.max_fee_per_gas), max_priority_fee_per_gas: Some(tx.common_data.fee.max_priority_fee_per_gas), diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs index 402e16afd43..86b2e3f03d5 100644 --- a/core/lib/types/src/lib.rs +++ b/core/lib/types/src/lib.rs @@ -104,7 +104,7 @@ impl Eq for Transaction {} impl Transaction { /// Returns recipient account of the transaction. - pub fn recipient_account(&self) -> Address { + pub fn recipient_account(&self) -> Option
{ self.execute.contract_address } @@ -253,7 +253,7 @@ impl TryFrom for abi::Transaction { tx: abi::L2CanonicalTransaction { tx_type: PRIORITY_OPERATION_L2_TX_TYPE.into(), from: address_to_u256(&data.sender), - to: address_to_u256(&tx.execute.contract_address), + to: address_to_u256(&tx.execute.contract_address.unwrap_or_default()), gas_limit: data.gas_limit, gas_per_pubdata_byte_limit: data.gas_per_pubdata_limit, max_fee_per_gas: data.max_fee_per_gas, @@ -284,7 +284,7 @@ impl TryFrom for abi::Transaction { tx: abi::L2CanonicalTransaction { tx_type: PROTOCOL_UPGRADE_TX_TYPE.into(), from: address_to_u256(&data.sender), - to: address_to_u256(&tx.execute.contract_address), + to: address_to_u256(&tx.execute.contract_address.unwrap_or_default()), gas_limit: data.gas_limit, gas_per_pubdata_byte_limit: data.gas_per_pubdata_limit, max_fee_per_gas: data.max_fee_per_gas, @@ -377,7 +377,7 @@ impl TryFrom for Transaction { unknown_type => anyhow::bail!("unknown tx type {unknown_type}"), }, execute: Execute { - contract_address: u256_to_account_address(&tx.to), + contract_address: Some(u256_to_account_address(&tx.to)), calldata: tx.data, factory_deps, value: tx.value, diff --git a/core/lib/types/src/transaction_request.rs b/core/lib/types/src/transaction_request.rs index c71e6e4206c..5f26b1d6a6a 100644 --- a/core/lib/types/src/transaction_request.rs +++ b/core/lib/types/src/transaction_request.rs @@ -108,8 +108,8 @@ impl CallRequestBuilder { } /// Set to address (None allowed for eth_estimateGas) - pub fn to(mut self, to: Address) -> Self { - self.call_request.to = Some(to); + pub fn to(mut self, to: Option
) -> Self { + self.call_request.to = to; self } @@ -817,10 +817,13 @@ impl L2Tx { let meta = value.eip712_meta.take().unwrap_or_default(); validate_factory_deps(&meta.factory_deps)?; + // TODO: Remove this check when evm equivalence gets enabled + if value.to.is_none() { + return Err(SerializationTransactionError::ToAddressIsNull); + } + let mut tx = L2Tx::new( - value - .to - .ok_or(SerializationTransactionError::ToAddressIsNull)?, + value.to, value.input.0.clone(), nonce, fee, diff --git a/core/lib/types/src/tx/execute.rs b/core/lib/types/src/tx/execute.rs index 03762040a6b..c133261bc23 100644 --- a/core/lib/types/src/tx/execute.rs +++ b/core/lib/types/src/tx/execute.rs @@ -15,7 +15,7 @@ use crate::{ethabi, Address, EIP712TypedStructure, StructBuilder, H256, U256}; #[derive(Serialize, Deserialize)] #[serde(rename_all = "camelCase")] struct ExecuteSerde { - contract_address: Address, + contract_address: Option
, #[serde(with = "ZeroPrefixHexSerde")] calldata: Vec, value: U256, @@ -25,7 +25,7 @@ struct ExecuteSerde { /// `Execute` transaction executes a previously deployed smart contract in the L2 rollup. #[derive(Clone, Default, PartialEq)] pub struct Execute { - pub contract_address: Address, + pub contract_address: Option
, pub calldata: Vec, pub value: U256, /// Factory dependencies: list of contract bytecodes associated with the deploy transaction. @@ -72,7 +72,9 @@ impl EIP712TypedStructure for Execute { const TYPE_NAME: &'static str = "Transaction"; fn build_structure(&self, builder: &mut BUILDER) { - builder.add_member("to", &U256::from(self.contract_address.as_bytes())); + if let Some(contract_address) = self.contract_address { + builder.add_member("to", &contract_address); + } builder.add_member("value", &self.value); builder.add_member("data", &self.calldata.as_slice()); // Factory deps are not included into the transaction signature, since they are parsed from the diff --git a/core/lib/vlog/src/lib.rs b/core/lib/vlog/src/lib.rs index 268fbd0b39e..598d17879b8 100644 --- a/core/lib/vlog/src/lib.rs +++ b/core/lib/vlog/src/lib.rs @@ -4,6 +4,7 @@ use std::time::Duration; use ::sentry::ClientInitGuard; +use anyhow::Context as _; use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; pub use crate::{logs::Logs, opentelemetry::OpenTelemetry, sentry::Sentry}; @@ -126,8 +127,9 @@ impl ObservabilityBuilder { self } - /// Initializes the observability subsystem. - pub fn build(self) -> ObservabilityGuard { + /// Tries to initialize the observability subsystem. Returns an error if it's already initialized. + /// This is mostly useful in tests. + pub fn try_build(self) -> anyhow::Result { let logs = self.logs.unwrap_or_default(); logs.install_panic_hook(); @@ -151,14 +153,20 @@ impl ObservabilityBuilder { .with(logs_layer) .with(otlp_tracing_layer) .with(otlp_logging_layer) - .init(); + .try_init() + .context("failed installing global tracer / logger")?; let sentry_guard = self.sentry.map(|sentry| sentry.install()); - ObservabilityGuard { + Ok(ObservabilityGuard { otlp_tracing_provider, otlp_logging_provider, sentry_guard, - } + }) + } + + /// Initializes the observability subsystem. + pub fn build(self) -> ObservabilityGuard { + self.try_build().unwrap() } } diff --git a/core/lib/vm_executor/src/batch/factory.rs b/core/lib/vm_executor/src/batch/factory.rs index 68a3769ee62..62bab29fea8 100644 --- a/core/lib/vm_executor/src/batch/factory.rs +++ b/core/lib/vm_executor/src/batch/factory.rs @@ -1,4 +1,4 @@ -use std::{marker::PhantomData, rc::Rc, sync::Arc}; +use std::{marker::PhantomData, rc::Rc, sync::Arc, time::Duration}; use anyhow::Context as _; use once_cell::sync::OnceCell; @@ -6,7 +6,8 @@ use tokio::sync::mpsc; use zksync_multivm::{ interface::{ executor::{BatchExecutor, BatchExecutorFactory}, - storage::{ReadStorage, StorageView}, + storage::{ReadStorage, StorageView, StorageViewStats}, + utils::DivergenceHandler, BatchTransactionExecutionResult, ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, SystemEnv, VmInterface, VmInterfaceHistoryEnabled, }, @@ -20,7 +21,7 @@ use super::{ executor::{Command, MainBatchExecutor}, metrics::{TxExecutionStage, BATCH_TIP_METRICS, EXECUTOR_METRICS, KEEPER_METRICS}, }; -use crate::shared::InteractionType; +use crate::shared::{InteractionType, STORAGE_METRICS}; /// The default implementation of [`BatchExecutorFactory`]. /// Creates real batch executors which maintain the VM (as opposed to the test factories which don't use the VM). @@ -35,6 +36,8 @@ pub struct MainBatchExecutorFactory { /// regardless of its configuration, this flag should be set to `true`. 
optional_bytecode_compression: bool, fast_vm_mode: FastVmMode, + observe_storage_metrics: bool, + divergence_handler: Option, } impl MainBatchExecutorFactory { @@ -43,9 +46,12 @@ impl MainBatchExecutorFactory { save_call_traces, optional_bytecode_compression, fast_vm_mode: FastVmMode::Old, + observe_storage_metrics: false, + divergence_handler: None, } } + /// Sets the fast VM mode used by this executor. pub fn set_fast_vm_mode(&mut self, fast_vm_mode: FastVmMode) { if !matches!(fast_vm_mode, FastVmMode::Old) { tracing::warn!( @@ -54,6 +60,18 @@ impl MainBatchExecutorFactory { } self.fast_vm_mode = fast_vm_mode; } + + /// Enables storage metrics reporting for this executor. Storage metrics will be reported for each transaction. + // The reason this isn't on by default is that storage metrics don't distinguish between "batch-executed" and "oneshot-executed" transactions; + // this optimally needs some improvements in `vise` (ability to add labels for groups of metrics). + pub fn observe_storage_metrics(&mut self) { + self.observe_storage_metrics = true; + } + + pub fn set_divergence_handler(&mut self, handler: DivergenceHandler) { + tracing::info!("Set VM divergence handler"); + self.divergence_handler = Some(handler); + } } impl BatchExecutorFactory for MainBatchExecutorFactory { @@ -70,6 +88,8 @@ impl BatchExecutorFactory for MainBatchExecu save_call_traces: self.save_call_traces, optional_bytecode_compression: self.optional_bytecode_compression, fast_vm_mode: self.fast_vm_mode, + observe_storage_metrics: self.observe_storage_metrics, + divergence_handler: self.divergence_handler.clone(), commands: commands_receiver, _storage: PhantomData, }; @@ -91,6 +111,8 @@ struct CommandReceiver { save_call_traces: bool, optional_bytecode_compression: bool, fast_vm_mode: FastVmMode, + observe_storage_metrics: bool, + divergence_handler: Option, commands: mpsc::Receiver, _storage: PhantomData, } @@ -112,14 +134,28 @@ impl CommandReceiver { self.fast_vm_mode, ); let mut batch_finished = false; + let mut prev_storage_stats = StorageViewStats::default(); + + if let VmInstance::ShadowedVmFast(vm) = &mut vm { + if let Some(handler) = self.divergence_handler.take() { + vm.set_divergence_handler(handler); + } + } while let Some(cmd) = self.commands.blocking_recv() { match cmd { Command::ExecuteTx(tx, resp) => { let tx_hash = tx.hash(); - let result = self.execute_tx(*tx, &mut vm).with_context(|| { + let (result, latency) = self.execute_tx(*tx, &mut vm).with_context(|| { format!("fatal error executing transaction {tx_hash:?}") })?; + + if self.observe_storage_metrics { + let storage_stats = storage_view.borrow().stats(); + let stats_diff = storage_stats.saturating_sub(&prev_storage_stats); + STORAGE_METRICS.observe(&format!("Tx {tx_hash:?}"), latency, &stats_diff); + prev_storage_stats = storage_stats; + } if resp.send(result).is_err() { break; } @@ -152,11 +188,11 @@ impl CommandReceiver { .context("storage view leaked")? .into_inner(); if batch_finished { - let metrics = storage_view.metrics(); + let stats = storage_view.stats(); EXECUTOR_METRICS.batch_storage_interaction_duration[&InteractionType::GetValue] - .observe(metrics.time_spent_on_get_value); + .observe(stats.time_spent_on_get_value); EXECUTOR_METRICS.batch_storage_interaction_duration[&InteractionType::SetValue] - .observe(metrics.time_spent_on_set_value); + .observe(stats.time_spent_on_set_value); } else { // State keeper can exit because of stop signal, so it's OK to exit mid-batch. 
tracing::info!("State keeper exited with an unfinished L1 batch"); @@ -168,7 +204,7 @@ impl CommandReceiver { &self, transaction: Transaction, vm: &mut VmInstance, - ) -> anyhow::Result { + ) -> anyhow::Result<(BatchTransactionExecutionResult, Duration)> { // Executing a next transaction means that a previous transaction was either rolled back (in which case its snapshot // was already removed), or that we build on top of it (in which case, it can be removed now). vm.pop_snapshot_no_rollback(); @@ -182,9 +218,8 @@ impl CommandReceiver { } else { self.execute_tx_in_vm(&transaction, vm)? }; - latency.observe(); - Ok(result) + Ok((result, latency.observe())) } fn rollback_last_tx(&self, vm: &mut VmInstance) { diff --git a/core/lib/vm_executor/src/oneshot/metrics.rs b/core/lib/vm_executor/src/oneshot/metrics.rs index 8a89ce0a9a4..475463300f1 100644 --- a/core/lib/vm_executor/src/oneshot/metrics.rs +++ b/core/lib/vm_executor/src/oneshot/metrics.rs @@ -1,9 +1,9 @@ use std::time::Duration; use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; -use zksync_multivm::interface::{storage::StorageViewMetrics, VmMemoryMetrics}; +use zksync_multivm::interface::{storage::StorageViewStats, VmMemoryMetrics}; -use crate::shared::InteractionType; +use crate::shared::STORAGE_METRICS; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] #[metrics(label = "type", rename_all = "snake_case")] @@ -46,29 +46,11 @@ struct RuntimeContextMemoryMetrics { #[vise::register] static MEMORY_METRICS: vise::Global = vise::Global::new(); -const INTERACTION_AMOUNT_BUCKETS: Buckets = Buckets::exponential(10.0..=10_000_000.0, 10.0); - -#[derive(Debug, Metrics)] -#[metrics(prefix = "runtime_context_storage_interaction")] -struct RuntimeContextStorageMetrics { - #[metrics(buckets = INTERACTION_AMOUNT_BUCKETS)] - amount: Family>, - #[metrics(buckets = Buckets::LATENCIES)] - duration: Family>, - #[metrics(buckets = Buckets::LATENCIES)] - duration_per_unit: Family>, - #[metrics(buckets = Buckets::ZERO_TO_ONE)] - ratio: Histogram, -} - -#[vise::register] -static STORAGE_METRICS: vise::Global = vise::Global::new(); - pub(super) fn report_vm_memory_metrics( tx_id: &str, memory_metrics: &VmMemoryMetrics, vm_execution_took: Duration, - storage_metrics: StorageViewMetrics, + storage_metrics: &StorageViewStats, ) { MEMORY_METRICS.event_sink_size[&SizeType::Inner].observe(memory_metrics.event_sink_inner); MEMORY_METRICS.event_sink_size[&SizeType::History].observe(memory_metrics.event_sink_history); @@ -88,56 +70,5 @@ pub(super) fn report_vm_memory_metrics( .full .observe(memory_metrics.full_size() + storage_metrics.cache_size); - let total_storage_invocations = storage_metrics.get_value_storage_invocations - + storage_metrics.set_value_storage_invocations; - let total_time_spent_in_storage = - storage_metrics.time_spent_on_get_value + storage_metrics.time_spent_on_set_value; - - STORAGE_METRICS.amount[&InteractionType::Missed] - .observe(storage_metrics.storage_invocations_missed); - STORAGE_METRICS.amount[&InteractionType::GetValue] - .observe(storage_metrics.get_value_storage_invocations); - STORAGE_METRICS.amount[&InteractionType::SetValue] - .observe(storage_metrics.set_value_storage_invocations); - STORAGE_METRICS.amount[&InteractionType::Total].observe(total_storage_invocations); - - STORAGE_METRICS.duration[&InteractionType::Missed] - .observe(storage_metrics.time_spent_on_storage_missed); - STORAGE_METRICS.duration[&InteractionType::GetValue] - 
.observe(storage_metrics.time_spent_on_get_value); - STORAGE_METRICS.duration[&InteractionType::SetValue] - .observe(storage_metrics.time_spent_on_set_value); - STORAGE_METRICS.duration[&InteractionType::Total].observe(total_time_spent_in_storage); - - if total_storage_invocations > 0 { - STORAGE_METRICS.duration_per_unit[&InteractionType::Total] - .observe(total_time_spent_in_storage.div_f64(total_storage_invocations as f64)); - } - if storage_metrics.storage_invocations_missed > 0 { - let duration_per_unit = storage_metrics - .time_spent_on_storage_missed - .div_f64(storage_metrics.storage_invocations_missed as f64); - STORAGE_METRICS.duration_per_unit[&InteractionType::Missed].observe(duration_per_unit); - } - - STORAGE_METRICS - .ratio - .observe(total_time_spent_in_storage.as_secs_f64() / vm_execution_took.as_secs_f64()); - - const STORAGE_INVOCATIONS_DEBUG_THRESHOLD: usize = 1_000; - - if total_storage_invocations > STORAGE_INVOCATIONS_DEBUG_THRESHOLD { - tracing::info!( - "Tx {tx_id} resulted in {total_storage_invocations} storage_invocations, {} new_storage_invocations, \ - {} get_value_storage_invocations, {} set_value_storage_invocations, \ - vm execution took {vm_execution_took:?}, storage interaction took {total_time_spent_in_storage:?} \ - (missed: {:?} get: {:?} set: {:?})", - storage_metrics.storage_invocations_missed, - storage_metrics.get_value_storage_invocations, - storage_metrics.set_value_storage_invocations, - storage_metrics.time_spent_on_storage_missed, - storage_metrics.time_spent_on_get_value, - storage_metrics.time_spent_on_set_value, - ); - } + STORAGE_METRICS.observe(&format!("Tx {tx_id}"), vm_execution_took, storage_metrics); } diff --git a/core/lib/vm_executor/src/oneshot/mod.rs b/core/lib/vm_executor/src/oneshot/mod.rs index cac8edfdfdf..1838381d2a0 100644 --- a/core/lib/vm_executor/src/oneshot/mod.rs +++ b/core/lib/vm_executor/src/oneshot/mod.rs @@ -284,7 +284,7 @@ impl VmSandbox { &tx_id, &memory_metrics, vm_execution_took, - self.storage_view.as_ref().borrow_mut().metrics(), + &self.storage_view.borrow().stats(), ); result } diff --git a/core/lib/vm_executor/src/shared.rs b/core/lib/vm_executor/src/shared.rs index 420005be05d..8ac4dce2e01 100644 --- a/core/lib/vm_executor/src/shared.rs +++ b/core/lib/vm_executor/src/shared.rs @@ -1,6 +1,9 @@ //! Functionality shared among different types of executors. 
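// Illustrative usage of the metrics defined below (a sketch, not part of the diff;
// it assumes crate-internal visibility, since `STORAGE_METRICS` is `pub(crate)`, and
// that `Duration` and `StorageViewStats` are imported as in this module):
use zksync_multivm::interface::storage::{ReadStorage, StorageView};

fn example_report_storage_stats<S: ReadStorage>(
    view: &StorageView<S>,
    prev_stats: &mut StorageViewStats,
    vm_latency: Duration,
) {
    let stats = view.stats();
    // Report only the increment since the previous observation so that repeated
    // calls on a long-lived storage view don't double-count earlier work.
    STORAGE_METRICS.observe("example op", vm_latency, &stats.saturating_sub(prev_stats));
    *prev_stats = stats;
}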
-use vise::{EncodeLabelSet, EncodeLabelValue}; +use std::time::Duration; + +use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; +use zksync_multivm::interface::storage::StorageViewStats; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] #[metrics(label = "interaction", rename_all = "snake_case")] @@ -10,3 +13,79 @@ pub(crate) enum InteractionType { SetValue, Total, } + +const INTERACTION_AMOUNT_BUCKETS: Buckets = Buckets::exponential(10.0..=10_000_000.0, 10.0); + +#[derive(Debug, Metrics)] +#[metrics(prefix = "runtime_context_storage_interaction")] +pub(crate) struct RuntimeContextStorageMetrics { + #[metrics(buckets = INTERACTION_AMOUNT_BUCKETS)] + amount: Family>, + #[metrics(buckets = Buckets::LATENCIES)] + duration: Family>, + #[metrics(buckets = Buckets::LATENCIES)] + duration_per_unit: Family>, + #[metrics(buckets = Buckets::ZERO_TO_ONE)] + ratio: Histogram, +} + +impl RuntimeContextStorageMetrics { + pub fn observe( + &self, + op: &str, + total_vm_latency: Duration, + storage_metrics: &StorageViewStats, + ) { + const STORAGE_INVOCATIONS_DEBUG_THRESHOLD: usize = 1_000; + + let total_storage_invocations = storage_metrics.get_value_storage_invocations + + storage_metrics.set_value_storage_invocations; + let total_time_spent_in_storage = + storage_metrics.time_spent_on_get_value + storage_metrics.time_spent_on_set_value; + + self.amount[&InteractionType::Missed].observe(storage_metrics.storage_invocations_missed); + self.amount[&InteractionType::GetValue] + .observe(storage_metrics.get_value_storage_invocations); + self.amount[&InteractionType::SetValue] + .observe(storage_metrics.set_value_storage_invocations); + self.amount[&InteractionType::Total].observe(total_storage_invocations); + + self.duration[&InteractionType::Missed] + .observe(storage_metrics.time_spent_on_storage_missed); + self.duration[&InteractionType::GetValue].observe(storage_metrics.time_spent_on_get_value); + self.duration[&InteractionType::SetValue].observe(storage_metrics.time_spent_on_set_value); + self.duration[&InteractionType::Total].observe(total_time_spent_in_storage); + + if total_storage_invocations > 0 { + self.duration_per_unit[&InteractionType::Total] + .observe(total_time_spent_in_storage.div_f64(total_storage_invocations as f64)); + } + if storage_metrics.storage_invocations_missed > 0 { + let duration_per_unit = storage_metrics + .time_spent_on_storage_missed + .div_f64(storage_metrics.storage_invocations_missed as f64); + self.duration_per_unit[&InteractionType::Missed].observe(duration_per_unit); + } + + self.ratio + .observe(total_time_spent_in_storage.as_secs_f64() / total_vm_latency.as_secs_f64()); + + if total_storage_invocations > STORAGE_INVOCATIONS_DEBUG_THRESHOLD { + tracing::info!( + "{op} resulted in {total_storage_invocations} storage_invocations, {} new_storage_invocations, \ + {} get_value_storage_invocations, {} set_value_storage_invocations, \ + vm execution took {total_vm_latency:?}, storage interaction took {total_time_spent_in_storage:?} \ + (missed: {:?} get: {:?} set: {:?})", + storage_metrics.storage_invocations_missed, + storage_metrics.get_value_storage_invocations, + storage_metrics.set_value_storage_invocations, + storage_metrics.time_spent_on_storage_missed, + storage_metrics.time_spent_on_get_value, + storage_metrics.time_spent_on_set_value, + ); + } + } +} + +#[vise::register] +pub(crate) static STORAGE_METRICS: vise::Global = vise::Global::new(); diff --git a/core/lib/vm_executor/src/storage.rs 
b/core/lib/vm_executor/src/storage.rs index 5be9016e8fb..861ee0649b3 100644 --- a/core/lib/vm_executor/src/storage.rs +++ b/core/lib/vm_executor/src/storage.rs @@ -92,7 +92,15 @@ pub struct L1BatchParamsProvider { } impl L1BatchParamsProvider { - pub fn new() -> Self { + /// Creates a new provider. + pub async fn new(storage: &mut Connection<'_, Core>) -> anyhow::Result { + let mut this = Self::uninitialized(); + this.initialize(storage).await?; + Ok(this) + } + + /// Creates an uninitialized provider. Before use, it must be [`initialize`](Self::initialize())d. + pub fn uninitialized() -> Self { Self { snapshot: None } } @@ -333,4 +341,34 @@ impl L1BatchParamsProvider { }, )) } + + /// Combines [`Self::load_first_l2_block_in_batch()`] and [Self::load_l1_batch_params()`]. Returns `Ok(None)` + /// iff the requested batch doesn't have any persisted blocks. + /// + /// Prefer using this method unless you need to manipulate / inspect the first block in the batch. + pub async fn load_l1_batch_env( + &self, + storage: &mut Connection<'_, Core>, + number: L1BatchNumber, + validation_computational_gas_limit: u32, + chain_id: L2ChainId, + ) -> anyhow::Result> { + let first_l2_block = self + .load_first_l2_block_in_batch(storage, number) + .await + .with_context(|| format!("failed loading first L2 block for L1 batch #{number}"))?; + let Some(first_l2_block) = first_l2_block else { + return Ok(None); + }; + + self.load_l1_batch_params( + storage, + &first_l2_block, + validation_computational_gas_limit, + chain_id, + ) + .await + .with_context(|| format!("failed loading params for L1 batch #{number}")) + .map(Some) + } } diff --git a/core/lib/vm_interface/Cargo.toml b/core/lib/vm_interface/Cargo.toml index 694576dca3b..8bff19ddc47 100644 --- a/core/lib/vm_interface/Cargo.toml +++ b/core/lib/vm_interface/Cargo.toml @@ -18,6 +18,7 @@ zksync_types.workspace = true anyhow.workspace = true async-trait.workspace = true hex.workspace = true +pretty_assertions.workspace = true serde.workspace = true thiserror.workspace = true tracing.workspace = true diff --git a/core/lib/vm_interface/src/lib.rs b/core/lib/vm_interface/src/lib.rs index 2b30f82e0ce..645e3e7c856 100644 --- a/core/lib/vm_interface/src/lib.rs +++ b/core/lib/vm_interface/src/lib.rs @@ -37,10 +37,11 @@ pub use crate::{ }, tracer, }, - vm::{VmFactory, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled}, + vm::{VmFactory, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, VmTrackingContracts}, }; pub mod executor; pub mod storage; mod types; +pub mod utils; mod vm; diff --git a/core/lib/vm_interface/src/storage/mod.rs b/core/lib/vm_interface/src/storage/mod.rs index 9b92ef8b770..6cdcd33db68 100644 --- a/core/lib/vm_interface/src/storage/mod.rs +++ b/core/lib/vm_interface/src/storage/mod.rs @@ -6,7 +6,7 @@ pub use self::{ // Note, that `test_infra` of the bootloader tests relies on this value to be exposed in_memory::{InMemoryStorage, IN_MEMORY_STORAGE_DEFAULT_NETWORK_ID}, snapshot::{StorageSnapshot, StorageWithSnapshot}, - view::{ImmutableStorageView, StorageView, StorageViewCache, StorageViewMetrics}, + view::{ImmutableStorageView, StorageView, StorageViewCache, StorageViewStats}, }; mod in_memory; diff --git a/core/lib/vm_interface/src/storage/snapshot.rs b/core/lib/vm_interface/src/storage/snapshot.rs index a0175ff478a..78b57a31f13 100644 --- a/core/lib/vm_interface/src/storage/snapshot.rs +++ b/core/lib/vm_interface/src/storage/snapshot.rs @@ -12,7 +12,7 @@ use super::ReadStorage; /// In contrast, `StorageSnapshot` cannot be modified once 
created and is intended to represent a complete or almost complete snapshot /// for a particular VM execution. It can serve as a preloaded cache for a certain [`ReadStorage`] implementation /// that significantly reduces the number of storage accesses. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct StorageSnapshot { // `Option` encompasses entire map value for more efficient serialization storage: HashMap>, @@ -60,6 +60,36 @@ impl StorageSnapshot { } } +/// When used as a storage, a snapshot is assumed to be *complete*; [`ReadStorage`] methods will panic when called +/// with storage slots not present in the snapshot. +impl ReadStorage for StorageSnapshot { + fn read_value(&mut self, key: &StorageKey) -> StorageValue { + let entry = self + .storage + .get(&key.hashed_key()) + .unwrap_or_else(|| panic!("attempted to read from unknown storage slot: {key:?}")); + entry.unwrap_or_default().0 + } + + fn is_write_initial(&mut self, key: &StorageKey) -> bool { + let entry = self.storage.get(&key.hashed_key()).unwrap_or_else(|| { + panic!("attempted to check initialness for unknown storage slot: {key:?}") + }); + entry.is_none() + } + + fn load_factory_dep(&mut self, hash: H256) -> Option> { + self.factory_deps.get(&hash).map(|bytes| bytes.0.clone()) + } + + fn get_enumeration_index(&mut self, key: &StorageKey) -> Option { + let entry = self.storage.get(&key.hashed_key()).unwrap_or_else(|| { + panic!("attempted to get enum index for unknown storage slot: {key:?}") + }); + entry.map(|(_, idx)| idx) + } +} + /// [`StorageSnapshot`] wrapper implementing [`ReadStorage`] trait. Created using [`with_fallback()`](StorageSnapshot::with_fallback()). /// /// # Why fallback? diff --git a/core/lib/vm_interface/src/storage/view.rs b/core/lib/vm_interface/src/storage/view.rs index 101f5c82f49..ec9267609e2 100644 --- a/core/lib/vm_interface/src/storage/view.rs +++ b/core/lib/vm_interface/src/storage/view.rs @@ -10,9 +10,9 @@ use zksync_types::{StorageKey, StorageValue, H256}; use super::{ReadStorage, StoragePtr, WriteStorage}; -/// Metrics for [`StorageView`]. +/// Statistics for [`StorageView`]. #[derive(Debug, Default, Clone, Copy)] -pub struct StorageViewMetrics { +pub struct StorageViewStats { /// Estimated byte size of the cache used by the `StorageView`. pub cache_size: usize, /// Number of read / write ops for which the value was read from the underlying storage. @@ -29,6 +29,33 @@ pub struct StorageViewMetrics { pub time_spent_on_set_value: Duration, } +impl StorageViewStats { + /// Subtracts two sets of statistics. This can be used to measure increment between these stats and older stats for the same VM. 
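    /// E.g., if `older` recorded 100 `get_value` invocations and `self` records 140,
    /// the diff reports 40; saturation merely guards against underflow when stats from
    /// unrelated sources are compared.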
+ pub fn saturating_sub(&self, older: &Self) -> Self { + Self { + cache_size: self.cache_size.saturating_sub(older.cache_size), + storage_invocations_missed: self + .storage_invocations_missed + .saturating_sub(older.storage_invocations_missed), + get_value_storage_invocations: self + .get_value_storage_invocations + .saturating_sub(older.get_value_storage_invocations), + set_value_storage_invocations: self + .set_value_storage_invocations + .saturating_sub(older.set_value_storage_invocations), + time_spent_on_storage_missed: self + .time_spent_on_storage_missed + .saturating_sub(older.time_spent_on_storage_missed), + time_spent_on_get_value: self + .time_spent_on_get_value + .saturating_sub(older.time_spent_on_get_value), + time_spent_on_set_value: self + .time_spent_on_set_value + .saturating_sub(older.time_spent_on_set_value), + } + } +} + /// `StorageView` is a buffer for `StorageLog`s between storage and transaction execution code. /// In order to commit transactions logs should be submitted to the underlying storage /// after a transaction is executed. @@ -46,7 +73,7 @@ pub struct StorageView { // Used for caching and to get the list/count of modified keys modified_storage_keys: HashMap, cache: StorageViewCache, - metrics: StorageViewMetrics, + stats: StorageViewStats, } /// `StorageViewCache` is a struct for caching storage reads and `contains_key()` checks. @@ -112,7 +139,7 @@ impl StorageView { read_storage_keys: HashMap::new(), initial_writes: HashMap::new(), }, - metrics: StorageViewMetrics::default(), + stats: StorageViewStats::default(), } } @@ -126,8 +153,8 @@ impl StorageView { cached_value.copied().unwrap_or_else(|| { let value = self.storage_handle.read_value(key); self.cache.read_storage_keys.insert(*key, value); - self.metrics.time_spent_on_storage_missed += started_at.elapsed(); - self.metrics.storage_invocations_missed += 1; + self.stats.time_spent_on_storage_missed += started_at.elapsed(); + self.stats.storage_invocations_missed += 1; value }) } @@ -138,11 +165,11 @@ impl StorageView { + self.cache.read_storage_keys.len() * mem::size_of::<(StorageKey, StorageValue)>() } - /// Returns the current metrics. - pub fn metrics(&self) -> StorageViewMetrics { - StorageViewMetrics { + /// Returns the current storage access stats. 
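    /// Note that `cache_size` is recomputed on demand here, while the remaining
    /// counters are copied from the tally accumulated during reads and writes.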
+ pub fn stats(&self) -> StorageViewStats { + StorageViewStats { cache_size: self.cache_size(), - ..self.metrics + ..self.stats } } @@ -155,7 +182,7 @@ impl StorageView { impl ReadStorage for StorageView { fn read_value(&mut self, key: &StorageKey) -> StorageValue { let started_at = Instant::now(); - self.metrics.get_value_storage_invocations += 1; + self.stats.get_value_storage_invocations += 1; let value = self.get_value_no_log(key); tracing::trace!( @@ -166,7 +193,7 @@ impl ReadStorage for StorageView { key.key() ); - self.metrics.time_spent_on_get_value += started_at.elapsed(); + self.stats.time_spent_on_get_value += started_at.elapsed(); value } @@ -198,7 +225,7 @@ impl WriteStorage for StorageView { fn set_value(&mut self, key: StorageKey, value: StorageValue) -> StorageValue { let started_at = Instant::now(); - self.metrics.set_value_storage_invocations += 1; + self.stats.set_value_storage_invocations += 1; let original = self.get_value_no_log(&key); tracing::trace!( @@ -210,7 +237,7 @@ impl WriteStorage for StorageView { key.key() ); self.modified_storage_keys.insert(key, value); - self.metrics.time_spent_on_set_value += started_at.elapsed(); + self.stats.time_spent_on_set_value += started_at.elapsed(); original } @@ -220,7 +247,7 @@ impl WriteStorage for StorageView { } fn missed_storage_invocations(&self) -> usize { - self.metrics.storage_invocations_missed + self.stats.storage_invocations_missed } } @@ -245,8 +272,8 @@ impl ReadStorage for ImmutableStorageView { cached_value.copied().unwrap_or_else(|| { let value = this.storage_handle.read_value(key); this.cache.read_storage_keys.insert(*key, value); - this.metrics.time_spent_on_storage_missed += started_at.elapsed(); - this.metrics.storage_invocations_missed += 1; + this.stats.time_spent_on_storage_missed += started_at.elapsed(); + this.stats.storage_invocations_missed += 1; value }) } @@ -289,7 +316,7 @@ mod test { assert_eq!(storage_view.read_value(&key), value); assert!(storage_view.is_write_initial(&key)); // key was inserted during the view lifetime - assert_eq!(storage_view.metrics().storage_invocations_missed, 1); + assert_eq!(storage_view.stats().storage_invocations_missed, 1); // ^ We should only read a value at `key` once, and then used the cached value. 
raw_storage.set_value(key, value); @@ -307,10 +334,10 @@ mod test { assert_eq!(storage_view.read_value(&new_key), new_value); assert!(storage_view.is_write_initial(&new_key)); - let metrics = storage_view.metrics(); - assert_eq!(metrics.storage_invocations_missed, 2); - assert_eq!(metrics.get_value_storage_invocations, 3); - assert_eq!(metrics.set_value_storage_invocations, 2); + let stats = storage_view.stats(); + assert_eq!(stats.storage_invocations_missed, 2); + assert_eq!(stats.get_value_storage_invocations, 3); + assert_eq!(stats.set_value_storage_invocations, 2); } #[test] diff --git a/core/lib/vm_interface/src/utils/dump.rs b/core/lib/vm_interface/src/utils/dump.rs new file mode 100644 index 00000000000..f7dce38ee89 --- /dev/null +++ b/core/lib/vm_interface/src/utils/dump.rs @@ -0,0 +1,249 @@ +use std::collections::HashMap; + +use serde::{Deserialize, Serialize}; +use zksync_types::{block::L2BlockExecutionData, L1BatchNumber, L2BlockNumber, Transaction, H256}; + +use crate::{ + storage::{ReadStorage, StoragePtr, StorageSnapshot, StorageView}, + BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, + VmMemoryMetrics, VmTrackingContracts, +}; + +fn create_storage_snapshot( + storage: &StoragePtr>, + used_contract_hashes: Vec, +) -> StorageSnapshot { + let mut storage = storage.borrow_mut(); + let storage_cache = storage.cache(); + let mut storage_slots: HashMap<_, _> = storage_cache + .read_storage_keys() + .into_iter() + .map(|(key, value)| { + let enum_index = storage.get_enumeration_index(&key); + let value_and_index = enum_index.map(|idx| (value, idx)); + (key.hashed_key(), value_and_index) + }) + .collect(); + + // Normally, all writes are internally read in order to calculate their gas costs, so the code below + // is defensive programming. + for (key, _) in storage_cache.initial_writes() { + let hashed_key = key.hashed_key(); + if storage_slots.contains_key(&hashed_key) { + continue; + } + + let enum_index = storage.get_enumeration_index(&key); + let value_and_index = enum_index.map(|idx| (storage.read_value(&key), idx)); + storage_slots.insert(hashed_key, value_and_index); + } + + let factory_deps = used_contract_hashes + .into_iter() + .filter_map(|hash| Some((hash, storage.load_factory_dep(hash)?))) + .collect(); + + StorageSnapshot::new(storage_slots, factory_deps) +} + +/// VM dump allowing to re-run the VM on the same inputs. Can be (de)serialized. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct VmDump { + pub l1_batch_env: L1BatchEnv, + pub system_env: SystemEnv, + pub l2_blocks: Vec, + pub storage: StorageSnapshot, +} + +impl VmDump { + pub fn l1_batch_number(&self) -> L1BatchNumber { + self.l1_batch_env.number + } + + /// Plays back this dump on the specified VM. + pub fn play_back>>(self) -> Vm { + self.play_back_custom(Vm::new) + } + + /// Plays back this dump on a VM created using the provided closure. + #[doc(hidden)] // too low-level + pub fn play_back_custom( + self, + create_vm: impl FnOnce(L1BatchEnv, SystemEnv, StoragePtr>) -> Vm, + ) -> Vm { + let storage = StorageView::new(self.storage).to_rc_ptr(); + let mut vm = create_vm(self.l1_batch_env, self.system_env, storage); + + for (i, l2_block) in self.l2_blocks.into_iter().enumerate() { + if i > 0 { + // First block is already set. 
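                // (The VM constructor already seeds the first L2 block from
                // `l1_batch_env.first_l2_block`; compare `VmFactory::new()` for
                // `DumpingVm` below, which records it the same way.)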
+ vm.start_new_l2_block(L2BlockEnv { + number: l2_block.number.0, + timestamp: l2_block.timestamp, + prev_block_hash: l2_block.prev_block_hash, + max_virtual_blocks_to_create: l2_block.virtual_blocks, + }); + } + + for tx in l2_block.txs { + let tx_hash = tx.hash(); + let (compression_result, _) = + vm.execute_transaction_with_bytecode_compression(tx, true); + if let Err(err) = compression_result { + panic!("Failed compressing bytecodes for transaction {tx_hash:?}: {err}"); + } + } + } + vm.finish_batch(); + vm + } +} + +#[derive(Debug, Clone, Copy)] +struct L2BlocksSnapshot { + block_count: usize, + tx_count_in_last_block: usize, +} + +/// VM wrapper that can create [`VmDump`]s during execution. +#[derive(Debug)] +pub(super) struct DumpingVm { + storage: StoragePtr>, + inner: Vm, + l1_batch_env: L1BatchEnv, + system_env: SystemEnv, + l2_blocks: Vec, + l2_blocks_snapshot: Option, +} + +impl DumpingVm { + fn last_block_mut(&mut self) -> &mut L2BlockExecutionData { + self.l2_blocks.last_mut().unwrap() + } + + fn record_transaction(&mut self, tx: Transaction) { + self.last_block_mut().txs.push(tx); + } + + pub fn dump_state(&self) -> VmDump { + VmDump { + l1_batch_env: self.l1_batch_env.clone(), + system_env: self.system_env.clone(), + l2_blocks: self.l2_blocks.clone(), + storage: create_storage_snapshot(&self.storage, self.inner.used_contract_hashes()), + } + } +} + +impl VmInterface for DumpingVm { + type TracerDispatcher = Vm::TracerDispatcher; + + fn push_transaction(&mut self, tx: Transaction) { + self.record_transaction(tx.clone()); + self.inner.push_transaction(tx); + } + + fn inspect( + &mut self, + dispatcher: Self::TracerDispatcher, + execution_mode: VmExecutionMode, + ) -> VmExecutionResultAndLogs { + self.inner.inspect(dispatcher, execution_mode) + } + + fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { + self.l2_blocks.push(L2BlockExecutionData { + number: L2BlockNumber(l2_block_env.number), + timestamp: l2_block_env.timestamp, + prev_block_hash: l2_block_env.prev_block_hash, + virtual_blocks: l2_block_env.max_virtual_blocks_to_create, + txs: vec![], + }); + self.inner.start_new_l2_block(l2_block_env); + } + + fn inspect_transaction_with_bytecode_compression( + &mut self, + tracer: Self::TracerDispatcher, + tx: Transaction, + with_compression: bool, + ) -> (BytecodeCompressionResult, VmExecutionResultAndLogs) { + self.record_transaction(tx.clone()); + self.inner + .inspect_transaction_with_bytecode_compression(tracer, tx, with_compression) + } + + fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { + self.inner.record_vm_memory_metrics() + } + + fn finish_batch(&mut self) -> FinishedL1Batch { + self.inner.finish_batch() + } +} + +impl VmInterfaceHistoryEnabled for DumpingVm +where + S: ReadStorage, + Vm: VmInterfaceHistoryEnabled + VmTrackingContracts, +{ + fn make_snapshot(&mut self) { + self.l2_blocks_snapshot = Some(L2BlocksSnapshot { + block_count: self.l2_blocks.len(), + tx_count_in_last_block: self.last_block_mut().txs.len(), + }); + self.inner.make_snapshot(); + } + + fn rollback_to_the_latest_snapshot(&mut self) { + self.inner.rollback_to_the_latest_snapshot(); + let snapshot = self + .l2_blocks_snapshot + .take() + .expect("rollback w/o snapshot"); + self.l2_blocks.truncate(snapshot.block_count); + assert_eq!( + self.l2_blocks.len(), + snapshot.block_count, + "L2 blocks were removed after creating a snapshot" + ); + self.last_block_mut() + .txs + .truncate(snapshot.tx_count_in_last_block); + } + + fn pop_snapshot_no_rollback(&mut self) { + 
self.inner.pop_snapshot_no_rollback(); + self.l2_blocks_snapshot = None; + } +} + +impl VmFactory> for DumpingVm +where + S: ReadStorage, + Vm: VmFactory> + VmTrackingContracts, +{ + fn new( + l1_batch_env: L1BatchEnv, + system_env: SystemEnv, + storage: StoragePtr>, + ) -> Self { + let inner = Vm::new(l1_batch_env.clone(), system_env.clone(), storage.clone()); + let first_block = L2BlockExecutionData { + number: L2BlockNumber(l1_batch_env.first_l2_block.number), + timestamp: l1_batch_env.first_l2_block.timestamp, + prev_block_hash: l1_batch_env.first_l2_block.prev_block_hash, + virtual_blocks: l1_batch_env.first_l2_block.max_virtual_blocks_to_create, + txs: vec![], + }; + Self { + l1_batch_env, + system_env, + l2_blocks: vec![first_block], + l2_blocks_snapshot: None, + storage, + inner, + } + } +} diff --git a/core/lib/vm_interface/src/utils/mod.rs b/core/lib/vm_interface/src/utils/mod.rs new file mode 100644 index 00000000000..80a51c7b144 --- /dev/null +++ b/core/lib/vm_interface/src/utils/mod.rs @@ -0,0 +1,9 @@ +//! Miscellaneous VM utils. + +pub use self::{ + dump::VmDump, + shadow::{DivergenceErrors, DivergenceHandler, ShadowVm}, +}; + +mod dump; +mod shadow; diff --git a/core/lib/vm_interface/src/utils/shadow.rs b/core/lib/vm_interface/src/utils/shadow.rs new file mode 100644 index 00000000000..7dfe31f6b68 --- /dev/null +++ b/core/lib/vm_interface/src/utils/shadow.rs @@ -0,0 +1,475 @@ +use std::{ + cell::RefCell, + collections::{BTreeMap, BTreeSet}, + fmt, + sync::Arc, +}; + +use zksync_types::{StorageKey, StorageLog, StorageLogWithPreviousValue, Transaction}; + +use super::dump::{DumpingVm, VmDump}; +use crate::{ + storage::{ReadStorage, StoragePtr, StorageView}, + BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, + SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, + VmInterfaceHistoryEnabled, VmMemoryMetrics, VmTrackingContracts, +}; + +/// Handler for VM divergences. +#[derive(Clone)] +pub struct DivergenceHandler(Arc); + +impl fmt::Debug for DivergenceHandler { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter + .debug_tuple("DivergenceHandler") + .field(&"_") + .finish() + } +} + +/// Default handler that panics. +impl Default for DivergenceHandler { + fn default() -> Self { + Self(Arc::new(|err, _| { + // There's no easy way to output the VM dump; it's too large to be logged. + panic!("{err}"); + })) + } +} + +impl DivergenceHandler { + /// Creates a new handler from the provided closure. + pub fn new(f: impl Fn(DivergenceErrors, VmDump) + Send + Sync + 'static) -> Self { + Self(Arc::new(f)) + } + + fn handle(&self, err: DivergenceErrors, dump: VmDump) { + self.0(err, dump); + } +} + +#[derive(Debug)] +struct VmWithReporting { + vm: Shadow, + divergence_handler: DivergenceHandler, +} + +impl VmWithReporting { + fn report(self, err: DivergenceErrors, dump: VmDump) { + tracing::error!("{err}"); + self.divergence_handler.handle(err, dump); + tracing::warn!( + "New VM is dropped; following VM actions will be executed only on the main VM" + ); + } +} + +/// Shadowed VM that executes 2 VMs for each operation and compares their outputs. +/// +/// If a divergence is detected, the VM state is dumped using [a pluggable handler](Self::set_dump_handler()), +/// after which the VM drops the shadowed VM (since it's assumed that its state can contain arbitrary garbage at this point). 
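// Illustrative sketch (not part of the diff): a handler that persists the dump so a
// divergence can be replayed offline via `VmDump::play_back()`. Using `serde_json`
// and writing to the current directory are assumptions of this example; `VmDump`
// supports any serde format.
fn example_divergence_handler() -> DivergenceHandler {
    DivergenceHandler::new(|err, dump| {
        tracing::error!("VM divergence detected: {err}");
        let path = format!("vm_dump_batch_{}.json", dump.l1_batch_number());
        match serde_json::to_string(&dump) {
            // In this sketch, I/O errors are logged and otherwise ignored.
            Ok(json) => {
                if let Err(io_err) = std::fs::write(&path, json) {
                    tracing::error!("failed writing VM dump to {path}: {io_err}");
                }
            }
            Err(err) => tracing::error!("failed serializing VM dump: {err}"),
        }
    })
}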
+#[derive(Debug)] +pub struct ShadowVm { + main: DumpingVm, + shadow: RefCell>>, +} + +impl ShadowVm +where + S: ReadStorage, + Main: VmTrackingContracts, + Shadow: VmInterface, +{ + /// Sets the divergence handler to be used by this VM. + pub fn set_divergence_handler(&mut self, handler: DivergenceHandler) { + if let Some(shadow) = self.shadow.get_mut() { + shadow.divergence_handler = handler; + } + } + + /// Mutable ref is not necessary, but it automatically drops potential borrows. + fn report(&mut self, err: DivergenceErrors) { + self.report_shared(err); + } + + /// The caller is responsible for dropping any `shadow` borrows beforehand. + fn report_shared(&self, err: DivergenceErrors) { + self.shadow + .take() + .unwrap() + .report(err, self.main.dump_state()); + } + + /// Dumps the current VM state. + pub fn dump_state(&self) -> VmDump { + self.main.dump_state() + } +} + +impl ShadowVm +where + S: ReadStorage, + Main: VmFactory> + VmTrackingContracts, + Shadow: VmInterface, +{ + /// Creates a VM with a custom shadow storage. + pub fn with_custom_shadow( + batch_env: L1BatchEnv, + system_env: SystemEnv, + storage: StoragePtr>, + shadow_storage: StoragePtr, + ) -> Self + where + Shadow: VmFactory, + { + let main = DumpingVm::new(batch_env.clone(), system_env.clone(), storage.clone()); + let shadow = Shadow::new(batch_env.clone(), system_env.clone(), shadow_storage); + let shadow = VmWithReporting { + vm: shadow, + divergence_handler: DivergenceHandler::default(), + }; + Self { + main, + shadow: RefCell::new(Some(shadow)), + } + } +} + +impl VmFactory> for ShadowVm +where + S: ReadStorage, + Main: VmFactory> + VmTrackingContracts, + Shadow: VmFactory>, +{ + fn new( + batch_env: L1BatchEnv, + system_env: SystemEnv, + storage: StoragePtr>, + ) -> Self { + Self::with_custom_shadow(batch_env, system_env, storage.clone(), storage) + } +} + +/// **Important.** This doesn't properly handle tracers; they are not passed to the shadow VM! +impl VmInterface for ShadowVm +where + S: ReadStorage, + Main: VmTrackingContracts, + Shadow: VmInterface, +{ + type TracerDispatcher =
::TracerDispatcher; + + fn push_transaction(&mut self, tx: Transaction) { + if let Some(shadow) = self.shadow.get_mut() { + shadow.vm.push_transaction(tx.clone()); + } + self.main.push_transaction(tx); + } + + fn inspect( + &mut self, + dispatcher: Self::TracerDispatcher, + execution_mode: VmExecutionMode, + ) -> VmExecutionResultAndLogs { + let main_result = self.main.inspect(dispatcher, execution_mode); + if let Some(shadow) = self.shadow.get_mut() { + let shadow_result = shadow + .vm + .inspect(Shadow::TracerDispatcher::default(), execution_mode); + let mut errors = DivergenceErrors::new(); + errors.check_results_match(&main_result, &shadow_result); + + if let Err(err) = errors.into_result() { + let ctx = format!("executing VM with mode {execution_mode:?}"); + self.report(err.context(ctx)); + } + } + main_result + } + + fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { + self.main.start_new_l2_block(l2_block_env); + if let Some(shadow) = self.shadow.get_mut() { + shadow.vm.start_new_l2_block(l2_block_env); + } + } + + fn inspect_transaction_with_bytecode_compression( + &mut self, + tracer: Self::TracerDispatcher, + tx: Transaction, + with_compression: bool, + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { + let tx_hash = tx.hash(); + let (main_bytecodes_result, main_tx_result) = self + .main + .inspect_transaction_with_bytecode_compression(tracer, tx.clone(), with_compression); + // Extend lifetime to `'static` so that the result isn't mutably borrowed from the main VM. + // Unfortunately, there's no way to express that this borrow is actually immutable, which would allow not extending the lifetime unless there's a divergence. + let main_bytecodes_result = + main_bytecodes_result.map(|bytecodes| bytecodes.into_owned().into()); + + if let Some(shadow) = self.shadow.get_mut() { + let shadow_result = shadow.vm.inspect_transaction_with_bytecode_compression( + Shadow::TracerDispatcher::default(), + tx, + with_compression, + ); + let mut errors = DivergenceErrors::new(); + errors.check_results_match(&main_tx_result, &shadow_result.1); + if let Err(err) = errors.into_result() { + let ctx = format!( + "inspecting transaction {tx_hash:?}, with_compression={with_compression:?}" + ); + self.report(err.context(ctx)); + } + } + (main_bytecodes_result, main_tx_result) + } + + fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { + self.main.record_vm_memory_metrics() + } + + fn finish_batch(&mut self) -> FinishedL1Batch { + let main_batch = self.main.finish_batch(); + if let Some(shadow) = self.shadow.get_mut() { + let shadow_batch = shadow.vm.finish_batch(); + let mut errors = DivergenceErrors::new(); + errors.check_results_match( + &main_batch.block_tip_execution_result, + &shadow_batch.block_tip_execution_result, + ); + errors.check_final_states_match( + &main_batch.final_execution_state, + &shadow_batch.final_execution_state, + ); + errors.check_match( + "final_bootloader_memory", + &main_batch.final_bootloader_memory, + &shadow_batch.final_bootloader_memory, + ); + errors.check_match( + "pubdata_input", + &main_batch.pubdata_input, + &shadow_batch.pubdata_input, + ); + errors.check_match( + "state_diffs", + &main_batch.state_diffs, + &shadow_batch.state_diffs, + ); + + if let Err(err) = errors.into_result() { + self.report(err); + } + } + main_batch + } +} + +#[derive(Debug)] +pub struct DivergenceErrors { + divergences: Vec, + context: Option, +} + +impl fmt::Display for DivergenceErrors { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + if 
let Some(context) = &self.context { + write!( + formatter, + "VM execution diverged: {context}: [{}]", + self.divergences.join(", ") + ) + } else { + write!( + formatter, + "VM execution diverged: [{}]", + self.divergences.join(", ") + ) + } + } +} + +impl DivergenceErrors { + fn new() -> Self { + Self { + divergences: vec![], + context: None, + } + } + + fn context(mut self, context: String) -> Self { + self.context = Some(context); + self + } + + fn check_results_match( + &mut self, + main_result: &VmExecutionResultAndLogs, + shadow_result: &VmExecutionResultAndLogs, + ) { + self.check_match("result", &main_result.result, &shadow_result.result); + self.check_match( + "logs.events", + &main_result.logs.events, + &shadow_result.logs.events, + ); + self.check_match( + "logs.system_l2_to_l1_logs", + &main_result.logs.system_l2_to_l1_logs, + &shadow_result.logs.system_l2_to_l1_logs, + ); + self.check_match( + "logs.user_l2_to_l1_logs", + &main_result.logs.user_l2_to_l1_logs, + &shadow_result.logs.user_l2_to_l1_logs, + ); + let main_logs = UniqueStorageLogs::new(&main_result.logs.storage_logs); + let shadow_logs = UniqueStorageLogs::new(&shadow_result.logs.storage_logs); + self.check_match("logs.storage_logs", &main_logs, &shadow_logs); + self.check_match("refunds", &main_result.refunds, &shadow_result.refunds); + self.check_match( + "statistics.circuit_statistic", + &main_result.statistics.circuit_statistic, + &shadow_result.statistics.circuit_statistic, + ); + self.check_match( + "gas_remaining", + &main_result.statistics.gas_remaining, + &shadow_result.statistics.gas_remaining, + ); + } + + fn check_match(&mut self, context: &str, main: &T, shadow: &T) { + if main != shadow { + let comparison = pretty_assertions::Comparison::new(main, shadow); + let err = format!("`{context}` mismatch: {comparison}"); + self.divergences.push(err); + } + } + + fn check_final_states_match( + &mut self, + main: &CurrentExecutionState, + shadow: &CurrentExecutionState, + ) { + self.check_match("final_state.events", &main.events, &shadow.events); + self.check_match( + "final_state.user_l2_to_l1_logs", + &main.user_l2_to_l1_logs, + &shadow.user_l2_to_l1_logs, + ); + self.check_match( + "final_state.system_logs", + &main.system_logs, + &shadow.system_logs, + ); + self.check_match( + "final_state.storage_refunds", + &main.storage_refunds, + &shadow.storage_refunds, + ); + self.check_match( + "final_state.pubdata_costs", + &main.pubdata_costs, + &shadow.pubdata_costs, + ); + self.check_match( + "final_state.used_contract_hashes", + &main.used_contract_hashes.iter().collect::>(), + &shadow.used_contract_hashes.iter().collect::>(), + ); + + let main_deduplicated_logs = Self::gather_logs(&main.deduplicated_storage_logs); + let shadow_deduplicated_logs = Self::gather_logs(&shadow.deduplicated_storage_logs); + self.check_match( + "deduplicated_storage_logs", + &main_deduplicated_logs, + &shadow_deduplicated_logs, + ); + } + + fn gather_logs(logs: &[StorageLog]) -> BTreeMap { + logs.iter() + .filter(|log| log.is_write()) + .map(|log| (log.key, log)) + .collect() + } + + fn into_result(self) -> Result<(), Self> { + if self.divergences.is_empty() { + Ok(()) + } else { + Err(self) + } + } +} + +// The new VM doesn't support read logs yet, doesn't order logs by access and deduplicates them +// inside the VM, hence this auxiliary struct. 
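// Worked example: raw logs [write A: 1 -> 2, read B, write A: 2 -> 3, write C: 5 -> 5]
// normalize to {A: 1 -> 3}. Repeated writes to a key collapse into a single entry
// keeping the final value, reads and no-op writes are dropped, and the `BTreeMap`
// makes the comparison independent of access order.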
+
+// The new VM doesn't support read logs yet, doesn't order logs by access and deduplicates them
+// inside the VM, hence this auxiliary struct.
+#[derive(PartialEq)]
+struct UniqueStorageLogs(BTreeMap<StorageKey, StorageLogWithPreviousValue>);
+
+impl fmt::Debug for UniqueStorageLogs {
+    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let mut map = formatter.debug_map();
+        for log in self.0.values() {
+            map.entry(
+                &format!("{:?}:{:?}", log.log.key.address(), log.log.key.key()),
+                &format!("{:?} -> {:?}", log.previous_value, log.log.value),
+            );
+        }
+        map.finish()
+    }
+}
+
+impl UniqueStorageLogs {
+    fn new(logs: &[StorageLogWithPreviousValue]) -> Self {
+        let mut unique_logs = BTreeMap::<StorageKey, StorageLogWithPreviousValue>::new();
+        for log in logs {
+            if !log.log.is_write() {
+                continue;
+            }
+            if let Some(existing_log) = unique_logs.get_mut(&log.log.key) {
+                existing_log.log.value = log.log.value;
+            } else {
+                unique_logs.insert(log.log.key, *log);
+            }
+        }
+
+        // Remove no-op write logs (i.e., X -> X writes) produced by the old VM.
+        unique_logs.retain(|_, log| log.previous_value != log.log.value);
+        Self(unique_logs)
+    }
+}
+
+impl<S, Main, Shadow> VmInterfaceHistoryEnabled for ShadowVm<S, Main, Shadow>
+where
+    S: ReadStorage,
+    Main: VmInterfaceHistoryEnabled + VmTrackingContracts,
+    Shadow: VmInterfaceHistoryEnabled,
+{
+    fn make_snapshot(&mut self) {
+        if let Some(shadow) = self.shadow.get_mut() {
+            shadow.vm.make_snapshot();
+        }
+        self.main.make_snapshot();
+    }
+
+    fn rollback_to_the_latest_snapshot(&mut self) {
+        if let Some(shadow) = self.shadow.get_mut() {
+            shadow.vm.rollback_to_the_latest_snapshot();
+        }
+        self.main.rollback_to_the_latest_snapshot();
+    }
+
+    fn pop_snapshot_no_rollback(&mut self) {
+        if let Some(shadow) = self.shadow.get_mut() {
+            shadow.vm.pop_snapshot_no_rollback();
+        }
+        self.main.pop_snapshot_no_rollback();
+    }
+}
diff --git a/core/lib/vm_interface/src/vm.rs b/core/lib/vm_interface/src/vm.rs
index f70be52bd86..a380f0659e6 100644
--- a/core/lib/vm_interface/src/vm.rs
+++ b/core/lib/vm_interface/src/vm.rs
@@ -11,7 +11,7 @@
 //! Generally speaking, in most cases, the tracer dispatcher is a wrapper around `Vec<Box<dyn VmTracer>>`,
 //! where `VmTracer` is a trait implemented for a specific VM version.
 
-use zksync_types::Transaction;
+use zksync_types::{Transaction, H256};
 
 use crate::{
     storage::StoragePtr, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv,
@@ -103,3 +103,9 @@
     /// (i.e., the VM must not panic in this case).
     fn pop_snapshot_no_rollback(&mut self);
 }
+
+/// VM that tracks decommitment of bytecodes during execution. This is required to create a [`VmDump`].
+pub trait VmTrackingContracts: VmInterface {
+    /// Returns hashes of all decommitted bytecodes.
+    fn used_contract_hashes(&self) -> Vec<H256>;
+}
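A dump-creation flow can combine `used_contract_hashes()` with factory-dep lookups to capture every bytecode a batch touched. A sketch under the assumption that `ReadStorage::load_factory_dep` is available in scope (the helper itself is illustrative, not part of this diff):

```rust
// Illustrative helper: resolve decommitted bytecode hashes to actual bytecodes.
// Assumes `VmTrackingContracts` and `ReadStorage` from this crate are in scope.
use zksync_types::H256;

fn collect_used_bytecodes<VM: VmTrackingContracts>(
    vm: &VM,
    storage: &mut impl ReadStorage,
) -> Vec<(H256, Option<Vec<u8>>)> {
    vm.used_contract_hashes()
        .into_iter()
        // `load_factory_dep` returns `None` for hashes unknown to storage,
        // which a dump writer may want to surface as an error instead.
        .map(|hash| (hash, storage.load_factory_dep(hash)))
        .collect()
}
```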
diff --git a/core/lib/web3_decl/src/namespaces/zks.rs b/core/lib/web3_decl/src/namespaces/zks.rs
index 4ef3310368a..b27bf262877 100644
--- a/core/lib/web3_decl/src/namespaces/zks.rs
+++ b/core/lib/web3_decl/src/namespaces/zks.rs
@@ -6,8 +6,8 @@
 use jsonrpsee::proc_macros::rpc;
 use zksync_types::{
     api::{
         state_override::StateOverride, BlockDetails, BridgeAddresses, L1BatchDetails,
-        L2ToL1LogProof, LeafAggProof, Proof, ProtocolVersion, TransactionDetailedResult,
-        TransactionDetails,
+        L1ProcessingDetails, L2ToL1LogProof, LeafAggProof, Proof, ProtocolVersion,
+        TransactionDetailedResult, TransactionDetails,
     },
     fee::Fee,
     fee_model::{FeeParams, PubdataIndependentBatchFeeModelInput},
@@ -123,6 +123,12 @@
     async fn get_l1_batch_details(&self, batch: L1BatchNumber) -> RpcResult<Option<L1BatchDetails>>;
 
+    #[method(name = "getL1ProcessingDetails")]
+    async fn get_l1_processing_details(
+        &self,
+        batch: L1BatchNumber,
+    ) -> RpcResult<Option<L1ProcessingDetails>>;
+
     #[method(name = "getBytecodeByHash")]
     async fn get_bytecode_by_hash(&self, hash: H256) -> RpcResult<Option<Vec<u8>>>;
 
diff --git a/core/node/api_server/src/execution_sandbox/tests.rs b/core/node/api_server/src/execution_sandbox/tests.rs
index ba99354657d..3c69352cfa5 100644
--- a/core/node/api_server/src/execution_sandbox/tests.rs
+++ b/core/node/api_server/src/execution_sandbox/tests.rs
@@ -236,7 +236,7 @@
         gas_per_pubdata_limit: gas_per_pubdata.into(),
     };
     L2Tx::new_signed(
-        Address::random(),
+        Some(Address::random()),
         vec![],
         Nonce(0),
         fee,
diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs
index 00cd4393021..c32a5e10ad0 100644
--- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs
+++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs
@@ -4,8 +4,8 @@
 use zksync_multivm::interface::VmEvent;
 use zksync_types::{
     api::{
         state_override::StateOverride, ApiStorageLog, BlockDetails, BridgeAddresses,
-        L1BatchDetails, L2ToL1LogProof, LeafAggProof, Log, Proof, ProtocolVersion,
-        TransactionDetailedResult, TransactionDetails,
+        L1BatchDetails, L1ProcessingDetails, L2ToL1LogProof, LeafAggProof, Log, Proof,
+        ProtocolVersion, TransactionDetailedResult, TransactionDetails,
     },
     fee::Fee,
     fee_model::{FeeParams, PubdataIndependentBatchFeeModelInput},
@@ -163,6 +163,15 @@
             .map_err(|err| self.current_method().map_err(err))
     }
 
+    async fn get_l1_processing_details(
+        &self,
+        batch_number: L1BatchNumber,
+    ) -> RpcResult<Option<L1ProcessingDetails>> {
+        self.get_l1_processing_details_impl(batch_number)
+            .await
+            .map_err(|err| self.current_method().map_err(err))
+    }
+
     async fn get_bytecode_by_hash(&self, hash: H256) -> RpcResult<Option<Vec<u8>>> {
         self.get_bytecode_by_hash_impl(hash)
             .await
diff --git a/core/node/api_server/src/web3/namespaces/zks.rs b/core/node/api_server/src/web3/namespaces/zks.rs
index a80340c5262..09f1ead54a5 100644
--- a/core/node/api_server/src/web3/namespaces/zks.rs
+++ b/core/node/api_server/src/web3/namespaces/zks.rs
@@ -11,8 +11,8 @@
 use zksync_system_constants::DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE;
 use zksync_types::{
     api::{
         state_override::StateOverride, BlockDetails, BridgeAddresses, ChainAggProof, GetLogsFilter,
-        L1BatchDetails, L2ToL1LogProof, LeafAggProof, LeafChainProof, Proof, ProtocolVersion,
-        StorageProof, TransactionDetails,
+        L1BatchDetails, L1ProcessingDetails, L2ToL1LogProof, LeafAggProof, LeafChainProof, Proof,
+
ProtocolVersion, StorageProof, TransactionDetails, }, ethabi, fee::Fee, @@ -32,7 +32,7 @@ use zksync_utils::{address_to_h256, h256_to_u256, u256_to_h256}; use zksync_web3_decl::{ client::{Client, L2}, error::Web3Error, - namespaces::ZksNamespaceClient, + namespaces::{EthNamespaceClient, ZksNamespaceClient}, types::{Address, Token, H256}, }; @@ -352,22 +352,19 @@ impl ZksNamespace { let (root, mut proof) = MiniMerkleTree::new(merkle_tree_leaves, Some(tree_size)) .merkle_root_and_path(l1_log_index); + // FIXME Definitely refactor all of it // For now it is always 0 - let aggregated_root = batch_meta.metadata.aggregation_root; + let aggregated_root = batch_meta.metadata.aggregation_root.unwrap(); let final_root = KeccakHasher.compress(&root, &aggregated_root); proof.push(aggregated_root); - println!("Trying to get the final proof! {}", l1_batch_number); + println!("\n\nTrying to get the final proof! {}\n\n", l1_batch_number); - // FIXME Definitely refactor all of it - const EXPECTED_SYNC_LAYER_CHAIN_ID: u64 = 270; + const EXPECTED_SYNC_LAYER_CHAIN_ID: u64 = 505; let mut log_leaf_proof = LogLeafProof::new(proof); - let settlement_layer: u64 = std::env::var("ETH_CLIENT_CHAIN_ID") - .unwrap() - .parse() - .unwrap(); + let settlement_layer: u64 = self.state.api_config.sl_chain_id.0; if settlement_layer == EXPECTED_SYNC_LAYER_CHAIN_ID { println!("\nI am on sync layer!!\n"); @@ -376,7 +373,10 @@ impl ZksNamespace { // Create a client for pinging the RPC. let client: Client = Client::http( - std::env::var("GATEWAY_API_WEB3_JSON_RPC_HTTP_URL") + self.state + .api_config + .settlement_layer_url + .clone() .unwrap() .parse() .unwrap(), @@ -517,14 +517,14 @@ impl ZksNamespace { let Some(l1_batch_number_with_agg_batch) = l1_batch_number_with_agg_batch else { return Ok(None); }; - println!("hee3"); + println!("\n\nhee3 -- {}\n\n", l1_batch_number_with_agg_batch); let local_msg_root = storage .blocks_dal() .get_l1_batch_metadata(L1BatchNumber(l1_batch_number_with_agg_batch)) .await .map_err(DalError::generalize)? 
- .map(|metadata| metadata.metadata.local_root); + .and_then(|metadata| metadata.metadata.local_root); let Some(local_msg_root) = local_msg_root else { return Ok(None); @@ -539,14 +539,23 @@ impl ZksNamespace { l1_batch_number_with_agg_batch, ) .await?; - let Some((leaf_proof, batch_added_block_number)) = batch_proof else { + let Some(leaf_proof) = batch_proof else { + return Ok(None); + }; + let correct_l2_block_number = storage + .blocks_dal() + .get_l2_block_range_of_l1_batch(L1BatchNumber(l1_batch_number_with_agg_batch)) + .await + .map_err(DalError::generalize)?; + let Some((_, max_l2_block_number)) = correct_l2_block_number else { return Ok(None); }; + let chain_proof = self .get_chain_inclusion_proof_impl( message_root_addr, searched_chain_id, - batch_added_block_number, + max_l2_block_number, local_msg_root, ) .await?; @@ -569,7 +578,7 @@ impl ZksNamespace { searched_chain_id: u32, latest_sealed_block_number: L2BlockNumber, l1_batch_number_with_agg_batch: u32, - ) -> Result, Web3Error> { + ) -> Result, Web3Error> { let mut storage = self.state.acquire_connection().await?; // FIXME: move as api config @@ -585,7 +594,8 @@ impl ZksNamespace { addresses: vec![message_root_addr], topics: vec![(1, vec![*MESSAGE_ROOT_ADDED_CHAIN_BATCH_ROOT_EVENT])], }, - self.state.api_config.req_entities_limit, + // FIXME: this is a bit inefficient, better ways need to be created + i32::MAX as usize, ) .await .map_err(DalError::generalize)?; @@ -597,7 +607,6 @@ impl ZksNamespace { let mut chain_id_merkle_tree = MiniMerkleTree::<[u8; 96], KeccakHasher>::new(Vec::<[u8; 96]>::new().into_iter(), None); let mut cnt = 0; - let mut batch_added_block_number = None; for add_batch_log in add_batch_logs.iter() { let Some(batch_num) = add_batch_log.l1_batch_number else { @@ -619,8 +628,6 @@ impl ZksNamespace { if batch_number.as_u32() == searched_batch_number.0 { println!("relevant batch found! 
{:#?}", add_batch_log); batch_leaf_proof_mask = Some(cnt); - batch_added_block_number = - Some(L2BlockNumber(add_batch_log.block_number.unwrap().as_u32())); } println!("appended log: {:#?}", add_batch_log); @@ -645,23 +652,17 @@ impl ZksNamespace { searched_chain_id, batch_leaf_proof_mask, root ); - if batch_added_block_number.is_none() { - return Ok(None); - } - Ok(Some(( - LeafChainProof { - batch_leaf_proof, - batch_leaf_proof_mask: batch_leaf_proof_mask.into(), - }, - batch_added_block_number.unwrap(), - ))) + Ok(Some(LeafChainProof { + batch_leaf_proof, + batch_leaf_proof_mask: batch_leaf_proof_mask.into(), + })) } pub async fn get_chain_inclusion_proof_impl( &self, message_root_addr: Address, searched_chain_id: u32, - batch_added_block_number: L2BlockNumber, + block_number: L2BlockNumber, local_msg_root: H256, ) -> Result, Web3Error> { let mut storage = self.state.acquire_connection().await?; @@ -672,7 +673,7 @@ impl ZksNamespace { ); let chain_count = storage .storage_web3_dal() - .get_historical_value_unchecked(storage_key.hashed_key(), batch_added_block_number) + .get_historical_value_unchecked(storage_key.hashed_key(), block_number) .await .map_err(DalError::generalize)?; let chain_count_integer = chain_count.0[31]; @@ -684,11 +685,11 @@ impl ZksNamespace { for i in 0..chain_count_integer { let chain_id = self - .get_chain_id_from_index_impl(i.into(), batch_added_block_number) + .get_chain_id_from_index_impl(i.into(), block_number) .await .unwrap(); let chain_root = self - .get_chain_root_from_id_impl(chain_id, batch_added_block_number) + .get_chain_root_from_id_impl(chain_id, block_number) .await .unwrap(); full_chain_merkle_tree.push(Self::chain_id_leaf_preimage(chain_root, chain_id)); @@ -785,19 +786,20 @@ impl ZksNamespace { .await .map_err(DalError::generalize)?; - let length = length_encoding.0[31] / 2; - let chain_root_slot = H256::from_slice(&keccak256( - &[ - u256_to_h256(U256::from(length)).0, - chain_sides_slot.to_fixed_bytes(), - ] - .concat(), - )); + let length = h256_to_u256(length_encoding); + // Here we assume that length is non zero + assert!(length > U256::zero(), "Length is zero"); + let last_elem_pos = length - 1; + + let sides_data_start = H256(keccak256(chain_sides_slot.as_bytes())); + + let chain_root_slot = self + .get_message_root_log_key(u256_to_h256(h256_to_u256(sides_data_start) + last_elem_pos)); println!("kl todo length_encoding = {:#?}", length_encoding); println!("kl todo chain_root_slot = {:#?}", chain_root_slot); let chain_root = storage .storage_web3_dal() - .get_historical_value_unchecked(chain_root_slot, block_number) + .get_historical_value_unchecked(chain_root_slot.hashed_key(), block_number) .await .map_err(DalError::generalize)?; println!("kl todo chain_root = {:#?}", chain_root); @@ -937,6 +939,97 @@ impl ZksNamespace { .map_err(DalError::generalize)?) 
    }
 
+    pub async fn get_l1_processing_details_impl(
+        &self,
+        batch_number: L1BatchNumber,
+    ) -> Result<Option<L1ProcessingDetails>, Web3Error> {
+        let mut storage = self.state.acquire_connection().await?;
+        println!("\n\nHey1\n\n");
+        self.state
+            .start_info
+            .ensure_not_pruned(batch_number, &mut storage)
+            .await?;
+
+        let batch_details = storage
+            .blocks_web3_dal()
+            .get_l1_batch_details(batch_number)
+            .await
+            .map_err(DalError::generalize)?;
+
+        let Some(batch_details) = batch_details else {
+            return Ok(None);
+        };
+
+        let settlement_info = storage
+            .eth_sender_dal()
+            .get_batch_finalization_info(batch_number)
+            .await
+            .map_err(DalError::generalize)?;
+
+        let Some(info) = settlement_info else {
+            return Ok(None);
+        };
+
+        // FIXME: this method should eventually also provide data about L1 commit and L1 prove.
+
+        let (execute_tx_hash, executed_at) =
+            if info.settlement_layer_id.0 == self.state.api_config.l1_chain_id.0 {
+                (
+                    batch_details.base.execute_tx_hash,
+                    batch_details.base.executed_at,
+                )
+            } else {
+                // This is an SL-based chain, so we need to query the batch info from the SL.
+                // Create a client for pinging the RPC.
+                let client: Client<L2> = Client::http(
+                    self.state
+                        .api_config
+                        .settlement_layer_url
+                        .clone()
+                        .unwrap()
+                        .parse()
+                        .unwrap(),
+                )?
+                .for_network(L2::from(L2ChainId(self.state.api_config.l1_chain_id.0)))
+                .build();
+
+                let info = client
+                    .get_transaction_receipt(info.settlement_layer_tx_hash)
+                    .await
+                    .expect("Failed to query the SL");
+                let Some(info) = info else {
+                    return Ok(None);
+                };
+                let sl_l1_batch_number = info.l1_batch_number;
+                let Some(sl_l1_batch_number) = sl_l1_batch_number else {
+                    return Ok(None);
+                };
+                let batch_details = client
+                    .get_l1_batch_details(L1BatchNumber(sl_l1_batch_number.as_u32()))
+                    .await
+                    .expect("Failed to query the SL2");
+                let Some(batch_details) = batch_details else {
+                    return Ok(None);
+                };
+
+                (
+                    batch_details.base.execute_tx_hash,
+                    batch_details.base.executed_at,
+                )
+            };
+
+        let details = L1ProcessingDetails {
+            commit_tx_hash: None,
+            committed_at: None,
+            prove_tx_hash: None,
+            proven_at: None,
+            execute_tx_hash,
+            executed_at,
+        };
+
+        Ok(Some(details))
+    }
+
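On the wire the new method is exposed as `zks_getL1ProcessingDetails` (per the `#[method(...)]` attribute earlier in this diff). A hedged client-side sketch using the crate's generated `ZksNamespaceClient` trait; the URL is a placeholder:

```rust
// Sketch: querying the new endpoint from a client; URL is a placeholder.
use zksync_types::L1BatchNumber;
use zksync_web3_decl::{
    client::{Client, L2},
    namespaces::ZksNamespaceClient,
};

async fn fetch_processing_details() -> anyhow::Result<()> {
    let client: Client<L2> = Client::http("http://localhost:3050".parse()?)?.build();
    if let Some(details) = client.get_l1_processing_details(L1BatchNumber(1)).await? {
        // Only execution info is populated for now; the commit/prove fields stay
        // `None` until the FIXME above is addressed.
        println!(
            "executed at {:?} in tx {:?}",
            details.executed_at, details.execute_tx_hash
        );
    }
    Ok(())
}
```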
     pub async fn get_bytecode_by_hash_impl(
         &self,
         hash: H256,
diff --git a/core/node/api_server/src/web3/state.rs b/core/node/api_server/src/web3/state.rs
index 5f97d556464..203852346a9 100644
--- a/core/node/api_server/src/web3/state.rs
+++ b/core/node/api_server/src/web3/state.rs
@@ -21,7 +21,7 @@
 use zksync_metadata_calculator::api_server::TreeApiClient;
 use zksync_node_sync::SyncState;
 use zksync_types::{
     api, commitment::L1BatchCommitmentMode, l2::L2Tx, transaction_request::CallRequest, Address,
-    L1BatchNumber, L1ChainId, L2BlockNumber, L2ChainId, H256, U256, U64,
+    L1BatchNumber, L1ChainId, L2BlockNumber, L2ChainId, SLChainId, H256, U256, U64,
 };
 use zksync_web3_decl::{error::Web3Error, types::Filter};
@@ -97,7 +97,9 @@ pub struct InternalApiConfig {
     /// Chain ID of the L1 network. Note, that it may be different from the chain id of the settlement layer.
     pub l1_chain_id: L1ChainId,
+    pub sl_chain_id: SLChainId,
     pub l2_chain_id: L2ChainId,
+    pub settlement_layer_url: Option<String>,
     pub max_tx_size: usize,
     pub estimate_gas_scale_factor: f64,
     pub estimate_gas_acceptable_overestimation: u32,
@@ -135,6 +137,8 @@ impl InternalApiConfig {
         Self {
             l1_chain_id: genesis_config.l1_chain_id,
             l2_chain_id: genesis_config.l2_chain_id,
+            sl_chain_id: genesis_config.settlement_layer_id(),
+            settlement_layer_url: web3_config.settlement_layer_url.clone(),
             max_tx_size: web3_config.max_tx_size,
             estimate_gas_scale_factor: web3_config.estimate_gas_scale_factor,
             estimate_gas_acceptable_overestimation: web3_config
diff --git a/core/node/api_server/src/web3/testonly.rs b/core/node/api_server/src/web3/testonly.rs
index a77498d4341..18ee3a641d0 100644
--- a/core/node/api_server/src/web3/testonly.rs
+++ b/core/node/api_server/src/web3/testonly.rs
@@ -182,7 +182,7 @@ async fn spawn_server(
     let (pub_sub_events_sender, pub_sub_events_receiver) = mpsc::unbounded_channel();
 
     let mut namespaces = Namespace::DEFAULT.to_vec();
-    namespaces.extend([Namespace::Debug, Namespace::Snapshots]);
+    namespaces.extend([Namespace::Debug, Namespace::Snapshots, Namespace::Unstable]);
 
     let server_builder = match transport {
         ApiTransportLabel::Http => ApiBuilder::jsonrpsee_backend(api_config, pool).http(0),
diff --git a/core/node/api_server/src/web3/tests/mod.rs b/core/node/api_server/src/web3/tests/mod.rs
index 635620e9c52..fe90f1483a5 100644
--- a/core/node/api_server/src/web3/tests/mod.rs
+++ b/core/node/api_server/src/web3/tests/mod.rs
@@ -63,6 +63,7 @@
 use crate::web3::testonly::{spawn_http_server, spawn_ws_server};
 
 mod debug;
 mod filters;
 mod snapshots;
+mod unstable;
 mod vm;
 mod ws;
 
diff --git a/core/node/api_server/src/web3/tests/unstable.rs b/core/node/api_server/src/web3/tests/unstable.rs
new file mode 100644
index 00000000000..1d425f8b951
--- /dev/null
+++ b/core/node/api_server/src/web3/tests/unstable.rs
@@ -0,0 +1,69 @@
+//! Tests for the `unstable` Web3 namespace.
+ +use zksync_types::tee_types::TeeType; +use zksync_web3_decl::namespaces::UnstableNamespaceClient; + +use super::*; + +#[derive(Debug)] +struct GetTeeProofsTest {} + +impl GetTeeProofsTest { + fn new() -> Self { + Self {} + } +} + +#[async_trait] +impl HttpTest for GetTeeProofsTest { + async fn test( + &self, + client: &DynClient, + pool: &ConnectionPool, + ) -> anyhow::Result<()> { + let batch_no = L1BatchNumber(1337); + let tee_type = TeeType::Sgx; + let proof = client.tee_proofs(batch_no, Some(tee_type)).await?; + + assert!(proof.is_empty()); + + let mut storage = pool.connection().await.unwrap(); + storage + .tee_verifier_input_producer_dal() + .create_tee_verifier_input_producer_job(batch_no) + .await?; + + let pubkey = vec![0xDE, 0xAD, 0xBE, 0xEF]; + let attestation = vec![0xC0, 0xFF, 0xEE]; + let mut tee_proof_generation_dal = storage.tee_proof_generation_dal(); + tee_proof_generation_dal + .save_attestation(&pubkey, &attestation) + .await?; + tee_proof_generation_dal + .insert_tee_proof_generation_job(batch_no, tee_type) + .await?; + + let signature = vec![0, 1, 2, 3, 4]; + let proof_vec = vec![5, 6, 7, 8, 9]; + tee_proof_generation_dal + .save_proof_artifacts_metadata(batch_no, tee_type, &pubkey, &signature, &proof_vec) + .await?; + + let proofs = client.tee_proofs(batch_no, Some(tee_type)).await?; + assert!(proofs.len() == 1); + let proof = &proofs[0]; + assert!(proof.l1_batch_number == batch_no); + assert!(proof.tee_type == Some(tee_type)); + assert!(proof.pubkey.as_ref() == Some(&pubkey)); + assert!(proof.signature.as_ref() == Some(&signature)); + assert!(proof.proof.as_ref() == Some(&proof_vec)); + assert!(proof.attestation.as_ref() == Some(&attestation)); + + Ok(()) + } +} + +#[tokio::test] +async fn get_tee_proofs() { + test_http_server(GetTeeProofsTest::new()).await; +} diff --git a/core/node/consensus/src/config.rs b/core/node/consensus/src/config.rs index 22f8fc01192..cada58b0756 100644 --- a/core/node/consensus/src/config.rs +++ b/core/node/consensus/src/config.rs @@ -1,5 +1,5 @@ //! Configuration utilities for the consensus component. -use std::collections::HashMap; +use std::collections::{BTreeMap, HashMap}; use anyhow::Context as _; use secrecy::{ExposeSecret as _, Secret}; @@ -44,6 +44,7 @@ pub(super) struct GenesisSpec { pub(super) attesters: Option, pub(super) leader_selection: validator::LeaderSelectionMode, pub(super) registry_address: Option, + pub(super) seed_peers: BTreeMap, } impl GenesisSpec { @@ -55,6 +56,7 @@ impl GenesisSpec { attesters: cfg.genesis.attesters.clone(), leader_selection: cfg.genesis.leader_selection.clone(), registry_address: cfg.registry_address, + seed_peers: cfg.seed_peers.clone(), } } @@ -98,6 +100,19 @@ impl GenesisSpec { Some(attester::Committee::new(attesters).context("attesters")?) }, registry_address: x.registry_address, + seed_peers: x + .seed_peers + .iter() + .map(|(key, addr)| { + anyhow::Ok(( + Text::new(&key.0) + .decode::() + .context("key")?, + net::Host(addr.0.clone()), + )) + }) + .collect::>() + .context("seed_peers")?, }) } } @@ -109,9 +124,18 @@ pub(super) fn node_key(secrets: &ConsensusSecrets) -> anyhow::Result, ) -> anyhow::Result { - let mut gossip_static_outbound = HashMap::new(); + // Always connect to seed peers. + // Once we implement dynamic peer discovery, + // we won't establish a persistent connection to seed peers + // but rather just ask them for more peers. 
+ let mut gossip_static_outbound: HashMap<_, _> = global_config + .seed_peers + .iter() + .map(|(k, v)| (k.clone(), v.clone())) + .collect(); { let mut append = |key: &NodePublicKey, addr: &Host| { gossip_static_outbound.insert( diff --git a/core/node/consensus/src/en.rs b/core/node/consensus/src/en.rs index a52393c0f48..0b78662f8c2 100644 --- a/core/node/consensus/src/en.rs +++ b/core/node/consensus/src/en.rs @@ -125,7 +125,7 @@ impl EN { )); let executor = executor::Executor { - config: config::executor(&cfg, &secrets, build_version)?, + config: config::executor(&cfg, &secrets, &global_config, build_version)?, block_store, batch_store, validator: config::validator_key(&secrets) @@ -304,6 +304,7 @@ impl EN { Ok(consensus_dal::GlobalConfig { genesis: zksync_protobuf::serde::deserialize(&genesis.0).context("deserialize()")?, registry_address: None, + seed_peers: [].into(), }) } diff --git a/core/node/consensus/src/mn.rs b/core/node/consensus/src/mn.rs index 4d428346ebe..f80bfe58954 100644 --- a/core/node/consensus/src/mn.rs +++ b/core/node/consensus/src/mn.rs @@ -76,12 +76,12 @@ pub async fn run_main_node( s.spawn_bg(run_attestation_controller( ctx, &pool, - global_config, + global_config.clone(), attestation.clone(), )); let executor = executor::Executor { - config: config::executor(&cfg, &secrets, None)?, + config: config::executor(&cfg, &secrets, &global_config, None)?, block_store, batch_store, validator: Some(executor::Validator { diff --git a/core/node/consensus/src/registry/testonly.rs b/core/node/consensus/src/registry/testonly.rs index a0c55a557fe..07a87e3b676 100644 --- a/core/node/consensus/src/registry/testonly.rs +++ b/core/node/consensus/src/registry/testonly.rs @@ -13,7 +13,7 @@ pub(crate) fn make_tx( ) -> Transaction { account.get_l2_tx_for_execute( Execute { - contract_address: *address, + contract_address: Some(*address), calldata: call.calldata().unwrap(), value: U256::zero(), factory_deps: vec![], diff --git a/core/node/consensus/src/storage/connection.rs b/core/node/consensus/src/storage/connection.rs index 512b37e81a1..2c297eed727 100644 --- a/core/node/consensus/src/storage/connection.rs +++ b/core/node/consensus/src/storage/connection.rs @@ -317,6 +317,7 @@ impl<'a> Connection<'a> { } .with_hash(), registry_address: spec.registry_address, + seed_peers: spec.seed_peers.clone(), }; txn.try_update_global_config(ctx, &new) diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs index 01be1155771..35802d14940 100644 --- a/core/node/consensus/src/testonly.rs +++ b/core/node/consensus/src/testonly.rs @@ -91,11 +91,8 @@ impl ConfigSet { } } -pub(super) fn new_configs( - rng: &mut impl Rng, - setup: &Setup, - gossip_peers: usize, -) -> Vec { +pub(super) fn new_configs(rng: &mut impl Rng, setup: &Setup, seed_peers: usize) -> Vec { + let net_cfgs = network::testonly::new_configs(rng, setup, 0); let genesis_spec = config::GenesisSpec { chain_id: setup.genesis.chain_id.0.try_into().unwrap(), protocol_version: config::ProtocolVersion(setup.genesis.protocol_version.0), @@ -117,8 +114,17 @@ pub(super) fn new_configs( .collect(), leader: config::ValidatorPublicKey(setup.validator_keys[0].public().encode()), registry_address: None, + seed_peers: net_cfgs[..seed_peers] + .iter() + .map(|c| { + ( + config::NodePublicKey(c.gossip.key.public().encode()), + config::Host(c.public_addr.0.clone()), + ) + }) + .collect(), }; - network::testonly::new_configs(rng, setup, gossip_peers) + net_cfgs .into_iter() .enumerate() .map(|(i, net)| ConfigSet { diff --git 
a/core/node/consensus/src/tests/attestation.rs b/core/node/consensus/src/tests/attestation.rs index abd35508c7f..e783dbecdc3 100644 --- a/core/node/consensus/src/tests/attestation.rs +++ b/core/node/consensus/src/tests/attestation.rs @@ -47,6 +47,7 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { &consensus_dal::GlobalConfig { genesis: setup.genesis.clone(), registry_address: None, + seed_peers: [].into(), }, ) .await diff --git a/core/node/consensus/src/tests/mod.rs b/core/node/consensus/src/tests/mod.rs index 91f01f865a2..aabfff462a8 100644 --- a/core/node/consensus/src/tests/mod.rs +++ b/core/node/consensus/src/tests/mod.rs @@ -49,6 +49,7 @@ async fn test_validator_block_store(version: ProtocolVersionId) { &consensus_dal::GlobalConfig { genesis: setup.genesis.clone(), registry_address: None, + seed_peers: [].into(), }, ) .await diff --git a/core/node/consensus/src/vm.rs b/core/node/consensus/src/vm.rs index 11b6b5c67e3..c93cafc09f9 100644 --- a/core/node/consensus/src/vm.rs +++ b/core/node/consensus/src/vm.rs @@ -56,7 +56,7 @@ impl VM { call: abi::Call, ) -> ctx::Result { let tx = L2Tx::new( - *address, + Some(*address), call.calldata().context("call.calldata()")?, Nonce(0), Fee { diff --git a/core/node/eth_sender/src/eth_fees_oracle.rs b/core/node/eth_sender/src/eth_fees_oracle.rs index 271a33d49c3..0c708fee369 100644 --- a/core/node/eth_sender/src/eth_fees_oracle.rs +++ b/core/node/eth_sender/src/eth_fees_oracle.rs @@ -35,13 +35,24 @@ pub(crate) struct GasAdjusterFeesOracle { } impl GasAdjusterFeesOracle { + fn assert_fee_is_not_zero(&self, value: u64, fee_type: &'static str) { + if value == 0 { + panic!( + "L1 RPC incorrectly reported {fee_type} prices, either it doesn't return them at \ + all or returns 0's, eth-sender cannot continue without proper {fee_type} prices!" + ); + } + } fn calculate_fees_with_blob_sidecar( &self, previous_sent_tx: &Option, ) -> Result { let base_fee_per_gas = self.gas_adjuster.get_blob_tx_base_fee(); + self.assert_fee_is_not_zero(base_fee_per_gas, "base"); let priority_fee_per_gas = self.gas_adjuster.get_blob_tx_priority_fee(); - let blob_base_fee_per_gas = Some(self.gas_adjuster.get_blob_tx_blob_base_fee()); + let blob_base_fee_per_gas = self.gas_adjuster.get_blob_tx_blob_base_fee(); + self.assert_fee_is_not_zero(blob_base_fee_per_gas, "blob"); + let blob_base_fee_per_gas = Some(blob_base_fee_per_gas); if let Some(previous_sent_tx) = previous_sent_tx { // for blob transactions on re-sending need to double all gas prices @@ -72,6 +83,7 @@ impl GasAdjusterFeesOracle { time_in_mempool: u32, ) -> Result { let mut base_fee_per_gas = self.gas_adjuster.get_base_fee(time_in_mempool); + self.assert_fee_is_not_zero(base_fee_per_gas, "base"); if let Some(previous_sent_tx) = previous_sent_tx { self.verify_base_fee_not_too_low_on_resend( previous_sent_tx.id, diff --git a/core/node/eth_sender/src/eth_tx_manager.rs b/core/node/eth_sender/src/eth_tx_manager.rs index 0d78ab71c62..450443a652d 100644 --- a/core/node/eth_sender/src/eth_tx_manager.rs +++ b/core/node/eth_sender/src/eth_tx_manager.rs @@ -361,7 +361,8 @@ impl EthTxManager { // then `tx` is mined and confirmed (either successful or reverted). // Only then we will check the history to find the receipt. // Otherwise, `tx` is mined but not confirmed, so we skip to the next one. 
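The guard right below this comment is relaxed from `<=` to `<` and flagged with a FIXME by the diff author. Under the usual convention that an account's nonce equals the number of its executed transactions, the arithmetic looks like this (a sketch of that assumption, not code from the repo):

```rust
// Sketch of the nonce arithmetic (assuming `finalized` is the operator account
// nonce at the finalized block, i.e. the count of finalized operator txs).
fn tx_is_finalized(finalized_nonce: u64, tx_nonce: u64) -> bool {
    finalized_nonce > tx_nonce
}

fn main() {
    // With finalized == 5, txs with nonces 0..=4 are finalized.
    assert!(tx_is_finalized(5, 4));
    // The old guard (`finalized <= tx.nonce` -> skip) skips a nonce-5 tx; the new
    // guard (`finalized < tx.nonce`) lets it fall through to the receipt lookup
    // even though it cannot be finalized yet, which may be what the FIXME questions.
    assert!(!tx_is_finalized(5, 5));
}
```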
- if operator_nonce.finalized <= tx.nonce { + // FIXME: WHY THIS CHANGE + if operator_nonce.finalized < tx.nonce { continue; } diff --git a/core/node/eth_sender/src/tester.rs b/core/node/eth_sender/src/tester.rs index d04b06fdfa2..1d6dde5846e 100644 --- a/core/node/eth_sender/src/tester.rs +++ b/core/node/eth_sender/src/tester.rs @@ -154,7 +154,7 @@ impl EthSenderTester { .into_iter() .map(|base_fee_per_gas| BaseFees { base_fee_per_gas, - base_fee_per_blob_gas: 0.into(), + base_fee_per_blob_gas: 1.into(), l2_pubdata_price: 0.into(), }) .collect(); @@ -162,8 +162,8 @@ impl EthSenderTester { let gateway = MockSettlementLayer::builder() .with_fee_history( std::iter::repeat_with(|| BaseFees { - base_fee_per_gas: 0, - base_fee_per_blob_gas: 0.into(), + base_fee_per_gas: 1, + base_fee_per_blob_gas: 1.into(), l2_pubdata_price: 0.into(), }) .take(Self::WAIT_CONFIRMATIONS as usize) @@ -182,8 +182,8 @@ impl EthSenderTester { let l2_gateway: MockSettlementLayer = MockSettlementLayer::builder() .with_fee_history( std::iter::repeat_with(|| BaseFees { - base_fee_per_gas: 0, - base_fee_per_blob_gas: 0.into(), + base_fee_per_gas: 1, + base_fee_per_blob_gas: 1.into(), l2_pubdata_price: 0.into(), }) .take(Self::WAIT_CONFIRMATIONS as usize) @@ -202,8 +202,8 @@ impl EthSenderTester { let gateway_blobs = MockSettlementLayer::builder() .with_fee_history( std::iter::repeat_with(|| BaseFees { - base_fee_per_gas: 0, - base_fee_per_blob_gas: 0.into(), + base_fee_per_gas: 1, + base_fee_per_blob_gas: 1.into(), l2_pubdata_price: 0.into(), }) .take(Self::WAIT_CONFIRMATIONS as usize) diff --git a/core/node/eth_sender/src/tests.rs b/core/node/eth_sender/src/tests.rs index 5de757270c4..db49564093f 100644 --- a/core/node/eth_sender/src/tests.rs +++ b/core/node/eth_sender/src/tests.rs @@ -83,9 +83,10 @@ pub(crate) fn default_l1_batch_metadata() -> L1BatchMetadata { events_queue_commitment: Some(H256::zero()), bootloader_initial_content_commitment: Some(H256::zero()), state_diffs_compressed: vec![], - state_diff_hash: H256::default(), - local_root: H256::zero(), - aggregation_root: H256::zero(), + state_diff_hash: Some(H256::default()), + local_root: Some(H256::default()), + aggregation_root: Some(H256::default()), + da_inclusion_data: Some(vec![]), } } diff --git a/core/node/eth_watch/Cargo.toml b/core/node/eth_watch/Cargo.toml index d2e47035e26..7acd163f78e 100644 --- a/core/node/eth_watch/Cargo.toml +++ b/core/node/eth_watch/Cargo.toml @@ -25,6 +25,7 @@ anyhow.workspace = true thiserror.workspace = true async-trait.workspace = true tracing.workspace = true +async-recursion.workspace = true [dev-dependencies] zksync_concurrency.workspace = true diff --git a/core/node/eth_watch/src/client.rs b/core/node/eth_watch/src/client.rs index d82e24e0bb6..ea36daa816d 100644 --- a/core/node/eth_watch/src/client.rs +++ b/core/node/eth_watch/src/client.rs @@ -104,21 +104,24 @@ impl EthHttpQueryClient { .collect() } + #[async_recursion::async_recursion] async fn get_events_inner( &self, from: BlockNumber, to: BlockNumber, - topics1: Vec, - topics2: Vec, - addresses: Vec
, + topics1: Option>, + topics2: Option>, + addresses: Option>, retries_left: usize, ) -> EnrichedClientResult> { - let filter = FilterBuilder::default() + let mut builder = FilterBuilder::default() .from_block(from) .to_block(to) - .topics(Some(topics1.clone()), Some(topics2.clone()), None, None) - .address(addresses) - .build(); + .topics(topics1.clone(), topics2.clone(), None, None); + if let Some(addresses) = addresses.clone() { + builder = builder.address(addresses); + } + let filter = builder.build(); let mut result = self.client.logs(&filter).await; // This code is compatible with both Infura and Alchemy API providers. @@ -172,20 +175,22 @@ impl EthHttpQueryClient { tracing::warn!("Splitting block range in half: {from:?} - {mid:?} - {to:?}"); let mut first_half = self - .get_events( + .get_events_inner( from, BlockNumber::Number(mid), topics1.clone(), topics2.clone(), + addresses.clone(), RETRY_LIMIT, ) .await?; let mut second_half = self - .get_events( + .get_events_inner( BlockNumber::Number(mid + 1u64), to, topics1, topics2, + addresses, RETRY_LIMIT, ) .await?; @@ -195,7 +200,7 @@ impl EthHttpQueryClient { } else if should_retry(err_code, err_message) && retries_left > 0 { tracing::warn!("Retrying. Retries left: {retries_left}"); result = self - .get_events(from, to, topics1, topics2, retries_left - 1) + .get_events_inner(from, to, topics1, topics2, addresses, retries_left - 1) .await; } } @@ -234,9 +239,9 @@ impl EthClient for EthHttpQueryClient { .get_events_inner( from_block.into(), to_block.into(), - vec![self.new_upgrade_cut_data_signature], - vec![packed_version], - vec![state_transition_manager_address], + Some(vec![self.new_upgrade_cut_data_signature]), + Some(vec![packed_version]), + Some(vec![state_transition_manager_address]), RETRY_LIMIT, ) .await?; @@ -255,9 +260,9 @@ impl EthClient for EthHttpQueryClient { self.get_events_inner( from, to, - topics1, - topics2, - self.get_default_address_list(), + Some(topics1), + Some(topics2), + Some(self.get_default_address_list()), retries_left, ) .await diff --git a/core/node/eth_watch/src/tests.rs b/core/node/eth_watch/src/tests.rs index 9c202fb0196..abacdbb0087 100644 --- a/core/node/eth_watch/src/tests.rs +++ b/core/node/eth_watch/src/tests.rs @@ -183,7 +183,7 @@ impl EthClient for MockEthClient { fn build_l1_tx(serial_id: u64, eth_block: u64) -> L1Tx { let tx = L1Tx { execute: Execute { - contract_address: Address::repeat_byte(0x11), + contract_address: Some(Address::repeat_byte(0x11)), calldata: vec![1, 2, 3], factory_deps: vec![], value: U256::zero(), @@ -214,7 +214,7 @@ fn build_l1_tx(serial_id: u64, eth_block: u64) -> L1Tx { fn build_upgrade_tx(id: ProtocolVersionId, eth_block: u64) -> ProtocolUpgradeTx { let tx = ProtocolUpgradeTx { execute: Execute { - contract_address: Address::repeat_byte(0x11), + contract_address: Some(Address::repeat_byte(0x11)), calldata: vec![1, 2, 3], factory_deps: vec![], value: U256::zero(), diff --git a/core/node/external_proof_integration_api/src/lib.rs b/core/node/external_proof_integration_api/src/lib.rs index 4355896e2a2..d152ea265cb 100644 --- a/core/node/external_proof_integration_api/src/lib.rs +++ b/core/node/external_proof_integration_api/src/lib.rs @@ -19,10 +19,7 @@ use types::{ExternalProof, ProofGenerationDataResponse}; use zksync_basic_types::L1BatchNumber; pub use crate::processor::Processor; -use crate::{ - metrics::{CallOutcome, Method}, - middleware::MetricsMiddleware, -}; +use crate::{metrics::Method, middleware::MetricsMiddleware}; /// External API implementation. 
#[derive(Debug)] @@ -37,11 +34,7 @@ impl Api { axum::middleware::from_fn(move |req: Request, next: Next| async move { let middleware = MetricsMiddleware::new(method); let response = next.run(req).await; - let outcome = match response.status().is_success() { - true => CallOutcome::Success, - false => CallOutcome::Failure, - }; - middleware.observe(outcome); + middleware.observe(response.status()); response }) }; diff --git a/core/node/external_proof_integration_api/src/metrics.rs b/core/node/external_proof_integration_api/src/metrics.rs index f43b49b7b1c..6b909a278d6 100644 --- a/core/node/external_proof_integration_api/src/metrics.rs +++ b/core/node/external_proof_integration_api/src/metrics.rs @@ -2,13 +2,6 @@ use std::time::Duration; use vise::{EncodeLabelSet, EncodeLabelValue, Histogram, LabeledFamily, Metrics}; -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet, EncodeLabelValue)] -#[metrics(label = "outcome", rename_all = "snake_case")] -pub(crate) enum CallOutcome { - Success, - Failure, -} - #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet, EncodeLabelValue)] #[metrics(label = "type", rename_all = "snake_case")] pub(crate) enum Method { @@ -20,8 +13,8 @@ pub(crate) enum Method { #[derive(Debug, Metrics)] #[metrics(prefix = "external_proof_integration_api")] pub(crate) struct ProofIntegrationApiMetrics { - #[metrics(labels = ["method", "outcome"], buckets = vise::Buckets::LATENCIES)] - pub call_latency: LabeledFamily<(Method, CallOutcome), Histogram, 2>, + #[metrics(labels = ["method", "status"], buckets = vise::Buckets::LATENCIES)] + pub call_latency: LabeledFamily<(Method, u16), Histogram, 2>, } #[vise::register] diff --git a/core/node/external_proof_integration_api/src/middleware.rs b/core/node/external_proof_integration_api/src/middleware.rs index 1dc6aefe917..ebd4ef5bfb0 100644 --- a/core/node/external_proof_integration_api/src/middleware.rs +++ b/core/node/external_proof_integration_api/src/middleware.rs @@ -1,6 +1,7 @@ +use axum::http::StatusCode; use tokio::time::Instant; -use crate::metrics::{CallOutcome, Method, METRICS}; +use crate::metrics::{Method, METRICS}; #[derive(Debug)] pub(crate) struct MetricsMiddleware { @@ -16,7 +17,8 @@ impl MetricsMiddleware { } } - pub fn observe(&self, outcome: CallOutcome) { - METRICS.call_latency[&(self.method, outcome)].observe(self.started_at.elapsed()); + pub fn observe(&self, status_code: StatusCode) { + METRICS.call_latency[&(self.method, status_code.as_u16())] + .observe(self.started_at.elapsed()); } } diff --git a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs index e6842b92fdb..e43de3e34bf 100644 --- a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs +++ b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs @@ -85,8 +85,8 @@ impl GasAdjuster { anyhow::ensure!(client.gateway_mode, "Must be L2 client in L2 mode"); anyhow::ensure!( - matches!(pubdata_sending_mode, PubdataSendingMode::RelayedL2Calldata), - "Only relayed L2 calldata is available for L2 mode, got: {pubdata_sending_mode:?}" + matches!(pubdata_sending_mode, PubdataSendingMode::RelayedL2Calldata | PubdataSendingMode::Custom), + "Only relayed L2 calldata or Custom is available for L2 mode, got: {pubdata_sending_mode:?}" ); } else { anyhow::ensure!(!client.gateway_mode, "Must be L1 client in L1 mode"); diff --git a/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs b/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs index 2643e4b3c42..47023203de0 
100644 --- a/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs +++ b/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs @@ -152,7 +152,7 @@ async fn kept_updated_l2(commitment_mode: L1BatchCommitmentMode) { .zip(TEST_PUBDATA_PRICES) .map(|(block, pubdata)| BaseFees { base_fee_per_gas: block, - base_fee_per_blob_gas: 0.into(), + base_fee_per_blob_gas: 1.into(), l2_pubdata_price: pubdata.into(), }) .collect(); diff --git a/core/node/node_framework/src/implementations/layers/eth_sender/aggregator.rs b/core/node/node_framework/src/implementations/layers/eth_sender/aggregator.rs index b252101e333..58180ae0501 100644 --- a/core/node/node_framework/src/implementations/layers/eth_sender/aggregator.rs +++ b/core/node/node_framework/src/implementations/layers/eth_sender/aggregator.rs @@ -1,6 +1,6 @@ use anyhow::Context; use zksync_circuit_breaker::l1_txs::FailedL1TransactionChecker; -use zksync_config::configs::{eth_sender::EthConfig, ContractsConfig}; +use zksync_config::configs::{eth_sender::EthConfig, gateway::GatewayChainConfig, ContractsConfig}; use zksync_eth_client::BoundEthInterface; use zksync_eth_sender::{Aggregator, EthTxAggregator}; use zksync_types::{commitment::L1BatchCommitmentMode, settlement::SettlementMode, L2ChainId}; @@ -44,7 +44,7 @@ use crate::{ pub struct EthTxAggregatorLayer { eth_sender_config: EthConfig, contracts_config: ContractsConfig, - gateway_contracts_config: Option, + gateway_contracts_config: Option, zksync_network_id: L2ChainId, l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, settlement_mode: SettlementMode, @@ -75,7 +75,7 @@ impl EthTxAggregatorLayer { pub fn new( eth_sender_config: EthConfig, contracts_config: ContractsConfig, - gateway_contracts_config: Option, + gateway_contracts_config: Option, zksync_network_id: L2ChainId, l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, settlement_mode: SettlementMode, @@ -109,11 +109,31 @@ impl WiringLayer for EthTxAggregatorLayer { tracing::info!("Contracts: {:?}", self.contracts_config); tracing::info!("Gateway contracts: {:?}", self.gateway_contracts_config); // Get resources. 
- let contracts_config = if self.settlement_mode.is_gateway() { - self.gateway_contracts_config.unwrap() - } else { - self.contracts_config - }; + + let (validator_timelock_addr, multicall3_addr, diamond_proxy_addr) = + if self.settlement_mode.is_gateway() { + ( + self.gateway_contracts_config + .clone() + .unwrap() + .validator_timelock_addr, + self.gateway_contracts_config + .clone() + .unwrap() + .multicall3_addr, + self.gateway_contracts_config + .clone() + .unwrap() + .diamond_proxy_addr, + ) + } else { + ( + self.contracts_config.validator_timelock_addr, + self.contracts_config.l1_multicall3_addr, + self.contracts_config.diamond_proxy_addr, + ) + }; + let eth_client = if self.settlement_mode.is_gateway() { input.eth_client_gateway.unwrap().0 } else { @@ -145,9 +165,9 @@ impl WiringLayer for EthTxAggregatorLayer { config.clone(), aggregator, eth_client, - contracts_config.validator_timelock_addr, - contracts_config.l1_multicall3_addr, - contracts_config.diamond_proxy_addr, + validator_timelock_addr, + multicall3_addr, + diamond_proxy_addr, self.zksync_network_id, eth_client_blobs_addr, self.settlement_mode, diff --git a/core/node/node_framework/src/implementations/layers/eth_watch.rs b/core/node/node_framework/src/implementations/layers/eth_watch.rs index 29516988b4a..8cadb4b5348 100644 --- a/core/node/node_framework/src/implementations/layers/eth_watch.rs +++ b/core/node/node_framework/src/implementations/layers/eth_watch.rs @@ -1,4 +1,4 @@ -use zksync_config::{ContractsConfig, EthWatchConfig}; +use zksync_config::{configs::gateway::GatewayChainConfig, ContractsConfig, EthWatchConfig}; use zksync_contracts::{chain_admin_contract, governance_contract}; use zksync_eth_watch::{EthHttpQueryClient, EthWatch}; use zksync_types::settlement::SettlementMode; @@ -23,7 +23,7 @@ use crate::{ pub struct EthWatchLayer { eth_watch_config: EthWatchConfig, contracts_config: ContractsConfig, - gateway_contracts_config: Option, + gateway_contracts_config: Option, settlement_mode: SettlementMode, } @@ -47,7 +47,7 @@ impl EthWatchLayer { pub fn new( eth_watch_config: EthWatchConfig, contracts_config: ContractsConfig, - gateway_contracts_config: Option, + gateway_contracts_config: Option, settlement_mode: SettlementMode, ) -> Self { Self { @@ -106,9 +106,7 @@ impl WiringLayer for EthWatchLayer { EthHttpQueryClient::new( gateway_client, contracts_config.diamond_proxy_addr, - contracts_config - .ecosystem_contracts - .map(|a| a.state_transition_proxy_addr), + Some(contracts_config.state_transition_proxy_addr), contracts_config.chain_admin_addr, contracts_config.governance_addr, self.eth_watch_config.confirmations_for_eth_event, diff --git a/core/node/node_framework/src/implementations/layers/pk_signing_eth_client.rs b/core/node/node_framework/src/implementations/layers/pk_signing_eth_client.rs index 73ddd11134e..528601ca91e 100644 --- a/core/node/node_framework/src/implementations/layers/pk_signing_eth_client.rs +++ b/core/node/node_framework/src/implementations/layers/pk_signing_eth_client.rs @@ -1,6 +1,6 @@ use anyhow::Context as _; use zksync_config::{ - configs::{wallets, ContractsConfig}, + configs::{gateway::GatewayChainConfig, wallets, ContractsConfig}, EthConfig, }; use zksync_eth_client::clients::PKSigningClient; @@ -20,7 +20,7 @@ use crate::{ pub struct PKSigningEthClientLayer { eth_sender_config: EthConfig, contracts_config: ContractsConfig, - gateway_contracts_config: Option, + gateway_contracts_config: Option, sl_chain_id: SLChainId, wallets: wallets::EthSender, } @@ -45,7 +45,7 @@ impl 
PKSigningEthClientLayer { pub fn new( eth_sender_config: EthConfig, contracts_config: ContractsConfig, - gateway_contracts_config: Option, + gateway_contracts_config: Option, sl_chain_id: SLChainId, wallets: wallets::EthSender, ) -> Self { @@ -98,18 +98,18 @@ impl WiringLayer for PKSigningEthClientLayer { BoundEthInterfaceForBlobsResource(Box::new(signing_client_for_blobs)) }); let signing_client_for_gateway = if input.gateway_client.is_some() { - self.wallets.gateway.map(|gateway_operator| { - let private_key = gateway_operator.private_key(); - let GatewayEthInterfaceResource(gateway_client) = input.gateway_client.unwrap(); - let signing_client_for_blobs = PKSigningClient::new_raw( - private_key.clone(), - self.gateway_contracts_config.unwrap().diamond_proxy_addr, - gas_adjuster_config.default_priority_fee_per_gas, - self.sl_chain_id, - gateway_client, - ); - BoundEthInterfaceForL2Resource(Box::new(signing_client_for_blobs)) - }) + let private_key = self.wallets.operator.private_key(); + let GatewayEthInterfaceResource(gateway_client) = input.gateway_client.unwrap(); + let signing_client_for_blobs = PKSigningClient::new_raw( + private_key.clone(), + self.gateway_contracts_config.unwrap().diamond_proxy_addr, + gas_adjuster_config.default_priority_fee_per_gas, + self.sl_chain_id, + gateway_client, + ); + Some(BoundEthInterfaceForL2Resource(Box::new( + signing_client_for_blobs, + ))) } else { None }; diff --git a/core/node/node_framework/src/implementations/layers/vm_runner/playground.rs b/core/node/node_framework/src/implementations/layers/vm_runner/playground.rs index ee1be98319b..e4eb8b38a69 100644 --- a/core/node/node_framework/src/implementations/layers/vm_runner/playground.rs +++ b/core/node/node_framework/src/implementations/layers/vm_runner/playground.rs @@ -13,6 +13,7 @@ use zksync_vm_runner::{ use crate::{ implementations::resources::{ healthcheck::AppHealthCheckResource, + object_store::ObjectStoreResource, pools::{PoolResource, ReplicaPool}, }, StopReceiver, Task, TaskId, WiringError, WiringLayer, @@ -38,6 +39,7 @@ impl VmPlaygroundLayer { pub struct Input { // We use a replica pool because VM playground doesn't write anything to the DB by design. 
pub replica_pool: PoolResource, + pub dumps_object_store: Option, #[context(default)] pub app_health: AppHealthCheckResource, } @@ -65,6 +67,7 @@ impl WiringLayer for VmPlaygroundLayer { async fn wire(self, input: Self::Input) -> Result { let Input { replica_pool, + dumps_object_store, app_health, } = input; @@ -95,6 +98,7 @@ impl WiringLayer for VmPlaygroundLayer { }; let (playground, tasks) = VmPlayground::new( connection_pool, + dumps_object_store.map(|resource| resource.0), self.config.fast_vm_mode, storage, self.zksync_network_id, diff --git a/core/node/node_sync/src/external_io.rs b/core/node/node_sync/src/external_io.rs index b7b8930c495..6075ff048bf 100644 --- a/core/node/node_sync/src/external_io.rs +++ b/core/node/node_sync/src/external_io.rs @@ -49,7 +49,7 @@ impl ExternalIO { main_node_client: Box, chain_id: L2ChainId, ) -> anyhow::Result { - let l1_batch_params_provider = L1BatchParamsProvider::new(); + let l1_batch_params_provider = L1BatchParamsProvider::uninitialized(); Ok(Self { pool, l1_batch_params_provider, diff --git a/core/node/proof_data_handler/src/request_processor.rs b/core/node/proof_data_handler/src/request_processor.rs index ee266a88971..eea3925bdd7 100644 --- a/core/node/proof_data_handler/src/request_processor.rs +++ b/core/node/proof_data_handler/src/request_processor.rs @@ -17,7 +17,7 @@ use zksync_types::{ basic_fri_types::Eip4844Blobs, commitment::{serialize_commitments, L1BatchCommitmentMode}, web3::keccak256, - L1BatchNumber, H256, + L1BatchNumber, ProtocolVersionId, H256, STATE_DIFF_HASH_KEY_PRE_GATEWAY, }; use crate::{errors::RequestProcessorError, metrics::METRICS}; @@ -226,58 +226,63 @@ impl RequestProcessor { .unwrap() .expect("Proved block without metadata"); - let is_pre_boojum = l1_batch + let protocol_version = l1_batch .header .protocol_version - .map(|v| v.is_pre_boojum()) - .unwrap_or(true); - if !is_pre_boojum { - let events_queue_state = l1_batch - .metadata - .events_queue_commitment - .expect("No events_queue_commitment"); - let bootloader_heap_initial_content = l1_batch - .metadata - .bootloader_initial_content_commitment - .expect("No bootloader_initial_content_commitment"); - - if events_queue_state != events_queue_state_from_prover - || bootloader_heap_initial_content - != bootloader_heap_initial_content_from_prover - { - let server_values = format!("events_queue_state = {events_queue_state}, bootloader_heap_initial_content = {bootloader_heap_initial_content}"); - let prover_values = format!("events_queue_state = {events_queue_state_from_prover}, bootloader_heap_initial_content = {bootloader_heap_initial_content_from_prover}"); - panic!( - "Auxilary output doesn't match, server values: {} prover values: {}", - server_values, prover_values - ); - } + .unwrap_or_else(ProtocolVersionId::last_potentially_undefined); + + let events_queue_state = l1_batch + .metadata + .events_queue_commitment + .expect("No events_queue_commitment"); + let bootloader_heap_initial_content = l1_batch + .metadata + .bootloader_initial_content_commitment + .expect("No bootloader_initial_content_commitment"); + + if events_queue_state != events_queue_state_from_prover + || bootloader_heap_initial_content + != bootloader_heap_initial_content_from_prover + { + let server_values = format!("events_queue_state = {events_queue_state}, bootloader_heap_initial_content = {bootloader_heap_initial_content}"); + let prover_values = format!("events_queue_state = {events_queue_state_from_prover}, bootloader_heap_initial_content = 
{bootloader_heap_initial_content_from_prover}"); + panic!( + "Auxilary output doesn't match, server values: {} prover values: {}", + server_values, prover_values + ); } let system_logs = serialize_commitments(&l1_batch.header.system_logs); let system_logs_hash = H256(keccak256(&system_logs)); - if !is_pre_boojum { - let state_diff_hash = l1_batch + let state_diff_hash = if protocol_version.is_pre_gateway() { + l1_batch .header .system_logs - .into_iter() - .find(|elem| elem.0.key == H256::from_low_u64_be(2)) - .expect("No state diff hash key") - .0 - .value; - - if state_diff_hash != state_diff_hash_from_prover - || system_logs_hash != system_logs_hash_from_prover - { - let server_values = format!("system_logs_hash = {system_logs_hash}, state_diff_hash = {state_diff_hash}"); - let prover_values = format!("system_logs_hash = {system_logs_hash_from_prover}, state_diff_hash = {state_diff_hash_from_prover}"); - panic!( - "Auxilary output doesn't match, server values: {} prover values: {}", - server_values, prover_values - ); - } + .iter() + .find_map(|log| { + (log.0.key == H256::from_low_u64_be(STATE_DIFF_HASH_KEY_PRE_GATEWAY)) + .then_some(log.0.value) + }) + .expect("Failed to get state_diff_hash from system logs") + } else { + l1_batch + .metadata + .state_diff_hash + .expect("Failed to get state_diff_hash from metadata") + }; + + if state_diff_hash != state_diff_hash_from_prover + || system_logs_hash != system_logs_hash_from_prover + { + let server_values = format!("system_logs_hash = {system_logs_hash}, state_diff_hash = {state_diff_hash}"); + let prover_values = format!("system_logs_hash = {system_logs_hash_from_prover}, state_diff_hash = {state_diff_hash_from_prover}"); + panic!( + "Auxilary output doesn't match, server values: {} prover values: {}", + server_values, prover_values + ); } + storage .proof_generation_dal() .save_proof_artifacts_metadata(l1_batch_number, &blob_url) diff --git a/core/node/proof_data_handler/src/tests.rs b/core/node/proof_data_handler/src/tests.rs index f95e3bfa323..4319fce6216 100644 --- a/core/node/proof_data_handler/src/tests.rs +++ b/core/node/proof_data_handler/src/tests.rs @@ -132,9 +132,9 @@ async fn submit_tee_proof() { // send a request to the /tee/submit_proofs endpoint, using a mocked TEE proof let tee_proof_request_str = r#"{ - "signature": [ 0, 1, 2, 3, 4 ], - "pubkey": [ 5, 6, 7, 8, 9 ], - "proof": [ 10, 11, 12, 13, 14 ], + "signature": "0001020304", + "pubkey": "0506070809", + "proof": "0A0B0C0D0E", "tee_type": "sgx" }"#; let tee_proof_request = diff --git a/core/node/state_keeper/src/executor/tests/mod.rs b/core/node/state_keeper/src/executor/tests/mod.rs index 7f52b7992ca..cb96b3c1b00 100644 --- a/core/node/state_keeper/src/executor/tests/mod.rs +++ b/core/node/state_keeper/src/executor/tests/mod.rs @@ -70,7 +70,7 @@ async fn execute_l2_tx(storage_type: StorageType, vm_mode: FastVmMode) { let message_root_init_txn = alice.get_l2_tx_for_execute( Execute { - contract_address: L2_MESSAGE_ROOT_ADDRESS, + contract_address: Some(L2_MESSAGE_ROOT_ADDRESS), calldata: encoded_data, value: U256::zero(), factory_deps: vec![], @@ -135,7 +135,7 @@ async fn execute_l2_tx_after_snapshot_recovery( let message_root_init_txn = alice.get_l2_tx_for_execute( Execute { - contract_address: L2_MESSAGE_ROOT_ADDRESS, + contract_address: Some(L2_MESSAGE_ROOT_ADDRESS), calldata: encoded_data, value: U256::zero(), factory_deps: vec![], @@ -186,7 +186,7 @@ async fn execute_l1_tx(vm_mode: FastVmMode) { let message_root_init_txn = alice.get_l2_tx_for_execute( Execute { - 
contract_address: L2_MESSAGE_ROOT_ADDRESS, + contract_address: Some(L2_MESSAGE_ROOT_ADDRESS), calldata: encoded_data, value: U256::zero(), factory_deps: vec![], @@ -229,7 +229,7 @@ async fn execute_l2_and_l1_txs(vm_mode: FastVmMode) { let message_root_init_txn = alice.get_l2_tx_for_execute( Execute { - contract_address: L2_MESSAGE_ROOT_ADDRESS, + contract_address: Some(L2_MESSAGE_ROOT_ADDRESS), calldata: encoded_data, value: U256::zero(), factory_deps: vec![], @@ -277,7 +277,7 @@ async fn rollback(vm_mode: FastVmMode) { let message_root_init_txn = alice.get_l2_tx_for_execute( Execute { - contract_address: L2_MESSAGE_ROOT_ADDRESS, + contract_address: Some(L2_MESSAGE_ROOT_ADDRESS), calldata: encoded_data, value: U256::zero(), factory_deps: vec![], @@ -352,7 +352,7 @@ async fn too_big_gas_limit(vm_mode: FastVmMode) { let message_root_init_txn = alice.get_l2_tx_for_execute( Execute { - contract_address: L2_MESSAGE_ROOT_ADDRESS, + contract_address: Some(L2_MESSAGE_ROOT_ADDRESS), calldata: encoded_data, value: U256::zero(), factory_deps: vec![], @@ -417,7 +417,7 @@ async fn deploy_and_call_loadtest(vm_mode: FastVmMode) { let message_root_init_txn = alice.get_l2_tx_for_execute( Execute { - contract_address: L2_MESSAGE_ROOT_ADDRESS, + contract_address: Some(L2_MESSAGE_ROOT_ADDRESS), calldata: encoded_data, value: U256::zero(), factory_deps: vec![], @@ -492,7 +492,7 @@ async fn execute_reverted_tx(vm_mode: FastVmMode) { let message_root_init_txn = bob.get_l2_tx_for_execute( Execute { - contract_address: L2_MESSAGE_ROOT_ADDRESS, + contract_address: Some(L2_MESSAGE_ROOT_ADDRESS), calldata: encoded_data, value: U256::zero(), factory_deps: vec![], @@ -548,7 +548,7 @@ async fn execute_realistic_scenario(vm_mode: FastVmMode) { let message_root_init_txn = alice.get_l2_tx_for_execute( Execute { - contract_address: L2_MESSAGE_ROOT_ADDRESS, + contract_address: Some(L2_MESSAGE_ROOT_ADDRESS), calldata: encoded_data, value: U256::zero(), factory_deps: vec![], @@ -708,7 +708,7 @@ async fn catchup_rocksdb_cache() { let message_root_init_txn = alice.get_l2_tx_for_execute( Execute { - contract_address: L2_MESSAGE_ROOT_ADDRESS, + contract_address: Some(L2_MESSAGE_ROOT_ADDRESS), calldata: encoded_data, value: U256::zero(), factory_deps: vec![], diff --git a/core/node/state_keeper/src/executor/tests/tester.rs b/core/node/state_keeper/src/executor/tests/tester.rs index 55418a656d3..9b48fce8b79 100644 --- a/core/node/state_keeper/src/executor/tests/tester.rs +++ b/core/node/state_keeper/src/executor/tests/tester.rs @@ -461,7 +461,7 @@ impl AccountLoadNextExecutable for Account { self.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: LoadnextContractExecutionParams { reads: 100, writes: writes as usize, @@ -497,7 +497,7 @@ impl AccountLoadNextExecutable for Account { self.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata, value: Default::default(), factory_deps: vec![], diff --git a/core/node/state_keeper/src/io/common/tests.rs b/core/node/state_keeper/src/io/common/tests.rs index 9ea699234f8..b2a24acb495 100644 --- a/core/node/state_keeper/src/io/common/tests.rs +++ b/core/node/state_keeper/src/io/common/tests.rs @@ -103,8 +103,7 @@ async fn waiting_for_l1_batch_params_with_genesis() { .await .unwrap(); - let mut provider = L1BatchParamsProvider::new(); - provider.initialize(&mut storage).await.unwrap(); + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); let (hash, timestamp) = provider 
.wait_for_l1_batch_params(&mut storage, L1BatchNumber(0)) .await @@ -143,8 +142,7 @@ async fn waiting_for_l1_batch_params_after_snapshot_recovery() { let snapshot_recovery = prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), L2BlockNumber(42), &[]).await; - let mut provider = L1BatchParamsProvider::new(); - provider.initialize(&mut storage).await.unwrap(); + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); let (hash, timestamp) = provider .wait_for_l1_batch_params(&mut storage, snapshot_recovery.l1_batch_number) .await @@ -192,8 +190,7 @@ async fn getting_first_l2_block_in_batch_with_genesis() { .await .unwrap(); - let mut provider = L1BatchParamsProvider::new(); - provider.initialize(&mut storage).await.unwrap(); + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); let mut batches_and_l2_blocks = HashMap::from([ (L1BatchNumber(0), Ok(Some(L2BlockNumber(0)))), (L1BatchNumber(1), Ok(Some(L2BlockNumber(1)))), @@ -264,8 +261,7 @@ async fn getting_first_l2_block_in_batch_after_snapshot_recovery() { let snapshot_recovery = prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), L2BlockNumber(42), &[]).await; - let mut provider = L1BatchParamsProvider::new(); - provider.initialize(&mut storage).await.unwrap(); + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); let mut batches_and_l2_blocks = HashMap::from([ (L1BatchNumber(1), Err(())), (snapshot_recovery.l1_batch_number, Err(())), @@ -321,24 +317,20 @@ async fn loading_pending_batch_with_genesis() { ) .await; - let mut provider = L1BatchParamsProvider::new(); - provider.initialize(&mut storage).await.unwrap(); - let first_l2_block_in_batch = provider - .load_first_l2_block_in_batch(&mut storage, L1BatchNumber(1)) - .await - .unwrap() - .expect("no first L2 block"); - assert_eq!(first_l2_block_in_batch.number(), L2BlockNumber(1)); - + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); let (system_env, l1_batch_env) = provider - .load_l1_batch_params( + .load_l1_batch_env( &mut storage, - &first_l2_block_in_batch, + L1BatchNumber(1), u32::MAX, L2ChainId::default(), ) .await - .unwrap(); + .unwrap() + .expect("no L1 batch"); + + assert_eq!(l1_batch_env.first_l2_block.number, 1); + let pending_batch = load_pending_batch(&mut storage, system_env, l1_batch_env) .await .unwrap(); @@ -403,27 +395,17 @@ async fn loading_pending_batch_after_snapshot_recovery() { ) .await; - let mut provider = L1BatchParamsProvider::new(); - provider.initialize(&mut storage).await.unwrap(); - let first_l2_block_in_batch = provider - .load_first_l2_block_in_batch(&mut storage, snapshot_recovery.l1_batch_number + 1) - .await - .unwrap() - .expect("no first L2 block"); - assert_eq!( - first_l2_block_in_batch.number(), - snapshot_recovery.l2_block_number + 1 - ); - + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); let (system_env, l1_batch_env) = provider - .load_l1_batch_params( + .load_l1_batch_env( &mut storage, - &first_l2_block_in_batch, + snapshot_recovery.l1_batch_number + 1, u32::MAX, L2ChainId::default(), ) .await - .unwrap(); + .unwrap() + .expect("no L1 batch"); let pending_batch = load_pending_batch(&mut storage, system_env, l1_batch_env) .await .unwrap(); @@ -466,8 +448,7 @@ async fn getting_batch_version_with_genesis() { .await .unwrap(); - let mut provider = L1BatchParamsProvider::new(); - provider.initialize(&mut storage).await.unwrap(); + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); let version = provider 
.load_l1_batch_protocol_version(&mut storage, L1BatchNumber(0)) .await @@ -506,8 +487,7 @@ async fn getting_batch_version_after_snapshot_recovery() { let snapshot_recovery = prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), L2BlockNumber(42), &[]).await; - let mut provider = L1BatchParamsProvider::new(); - provider.initialize(&mut storage).await.unwrap(); + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); let version = provider .load_l1_batch_protocol_version(&mut storage, snapshot_recovery.l1_batch_number) .await diff --git a/core/node/state_keeper/src/io/mempool.rs b/core/node/state_keeper/src/io/mempool.rs index a743b22cf70..ca20a2f6000 100644 --- a/core/node/state_keeper/src/io/mempool.rs +++ b/core/node/state_keeper/src/io/mempool.rs @@ -101,30 +101,18 @@ impl StateKeeperIO for MempoolIO { L2BlockSealProcess::clear_pending_l2_block(&mut storage, cursor.next_l2_block - 1).await?; - let pending_l2_block_header = self + let Some((system_env, l1_batch_env)) = self .l1_batch_params_provider - .load_first_l2_block_in_batch(&mut storage, cursor.l1_batch) - .await - .with_context(|| { - format!( - "failed loading first L2 block for L1 batch #{}", - cursor.l1_batch - ) - })?; - let Some(pending_l2_block_header) = pending_l2_block_header else { - return Ok((cursor, None)); - }; - - let (system_env, l1_batch_env) = self - .l1_batch_params_provider - .load_l1_batch_params( + .load_l1_batch_env( &mut storage, - &pending_l2_block_header, + cursor.l1_batch, self.validation_computational_gas_limit, self.chain_id, ) - .await - .with_context(|| format!("failed loading params for L1 batch #{}", cursor.l1_batch))?; + .await? + else { + return Ok((cursor, None)); + }; let pending_batch_data = load_pending_batch(&mut storage, system_env, l1_batch_env) .await .with_context(|| { @@ -447,7 +435,7 @@ impl MempoolIO { l2_block_max_payload_size_sealer: L2BlockMaxPayloadSizeSealer::new(config), filter: L2TxFilter::default(), // ^ Will be initialized properly on the first newly opened batch - l1_batch_params_provider: L1BatchParamsProvider::new(), + l1_batch_params_provider: L1BatchParamsProvider::uninitialized(), fee_account, l2_da_validator_address, pubdata_type, diff --git a/core/node/state_keeper/src/testonly/mod.rs b/core/node/state_keeper/src/testonly/mod.rs index 0a1c59b6bfb..efa8d738ad0 100644 --- a/core/node/state_keeper/src/testonly/mod.rs +++ b/core/node/state_keeper/src/testonly/mod.rs @@ -163,7 +163,7 @@ pub fn fee(gas_limit: u32) -> Fee { pub fn l2_transaction(account: &mut Account, gas_limit: u32) -> Transaction { account.get_l2_tx_for_execute( Execute { - contract_address: Address::random(), + contract_address: Some(Address::random()), calldata: vec![], value: Default::default(), factory_deps: vec![], @@ -175,7 +175,7 @@ pub fn l2_transaction(account: &mut Account, gas_limit: u32) -> Transaction { pub fn l1_transaction(account: &mut Account, serial_id: PriorityOpId) -> Transaction { account.get_l1_tx( Execute { - contract_address: Address::random(), + contract_address: Some(Address::random()), value: Default::default(), calldata: vec![], factory_deps: vec![], diff --git a/core/node/tee_verifier_input_producer/src/lib.rs b/core/node/tee_verifier_input_producer/src/lib.rs index 08382903ad6..8a99aa07ae5 100644 --- a/core/node/tee_verifier_input_producer/src/lib.rs +++ b/core/node/tee_verifier_input_producer/src/lib.rs @@ -77,34 +77,24 @@ impl TeeVerifierInputProducer { .with_context(|| format!("header is missing for L1 batch #{l1_batch_number}"))? 
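The `L1BatchParamsProvider` changes repeated through the hunks above collapse two two-step rituals into single calls: `new()` now initializes from storage (replacing `new()` + `initialize()`), and `load_l1_batch_env` folds "find the first L2 block, then load params" into one lookup returning `Option<(SystemEnv, L1BatchEnv)>`. A sketch of the resulting usage, with the signature inferred from these call sites (the `zksync_vm_executor` import path is an assumption; it is not visible in the diff):

    use anyhow::Context as _;
    use zksync_dal::{Connection, Core};
    use zksync_types::{L1BatchNumber, L2ChainId};
    use zksync_vm_executor::storage::L1BatchParamsProvider; // import path assumed
    use zksync_vm_interface::{L1BatchEnv, SystemEnv};

    async fn batch_env_for(
        storage: &mut Connection<'_, Core>,
        number: L1BatchNumber,
        chain_id: L2ChainId,
    ) -> anyhow::Result<(SystemEnv, L1BatchEnv)> {
        // One-step construction; the old separate `initialize()` call is gone.
        let provider = L1BatchParamsProvider::new(storage)
            .await
            .context("failed initializing L1 batch params provider")?;
        // Returns `None` when the batch is not yet in storage.
        provider
            .load_l1_batch_env(storage, number, u32::MAX, chain_id)
            .await?
            .with_context(|| format!("L1 batch #{number} not found in storage"))
    }

Callers that previously special-cased a missing first L2 block, such as `MempoolIO` below, now simply match on the `None`.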
.unwrap(); - let mut l1_batch_params_provider = L1BatchParamsProvider::new(); - l1_batch_params_provider - .initialize(&mut connection) + let l1_batch_params_provider = L1BatchParamsProvider::new(&mut connection) .await .context("failed initializing L1 batch params provider")?; - let first_miniblock_in_batch = l1_batch_params_provider - .load_first_l2_block_in_batch(&mut connection, l1_batch_number) - .await - .with_context(|| { - format!("failed loading first miniblock in L1 batch #{l1_batch_number}") - })? - .with_context(|| format!("no miniblocks persisted for L1 batch #{l1_batch_number}"))?; - // In the state keeper, this value is used to reject execution. // All batches have already been executed by State Keeper. // This means we don't want to reject any execution, therefore we're using MAX as an allow all. let validation_computational_gas_limit = u32::MAX; let (system_env, l1_batch_env) = l1_batch_params_provider - .load_l1_batch_params( + .load_l1_batch_env( &mut connection, - &first_miniblock_in_batch, + l1_batch_number, validation_computational_gas_limit, l2_chain_id, ) - .await - .context("expected miniblock to be executed and sealed")?; + .await? + .with_context(|| format!("expected L1 batch #{l1_batch_number} to be sealed"))?; let used_contract_hashes = l1_batch_header .used_contract_hashes diff --git a/core/node/test_utils/src/lib.rs b/core/node/test_utils/src/lib.rs index cd68a1787d4..3caadaaf573 100644 --- a/core/node/test_utils/src/lib.rs +++ b/core/node/test_utils/src/lib.rs @@ -97,9 +97,10 @@ pub fn create_l1_batch_metadata(number: u32) -> L1BatchMetadata { events_queue_commitment: Some(H256::zero()), bootloader_initial_content_commitment: Some(H256::zero()), state_diffs_compressed: vec![], - state_diff_hash: H256::zero(), - local_root: H256::zero(), - aggregation_root: H256::zero(), + state_diff_hash: Some(H256::zero()), + local_root: Some(H256::zero()), + aggregation_root: Some(H256::zero()), + da_inclusion_data: Some(vec![]), } } @@ -114,13 +115,13 @@ pub fn l1_batch_metadata_to_commitment_artifacts( commitment: metadata.commitment, }, l2_l1_merkle_root: metadata.l2_l1_merkle_root, - local_root: metadata.local_root, - aggregation_root: metadata.aggregation_root, + local_root: metadata.local_root.unwrap(), + aggregation_root: metadata.aggregation_root.unwrap(), compressed_state_diffs: Some(metadata.state_diffs_compressed.clone()), compressed_initial_writes: metadata.initial_writes_compressed.clone(), compressed_repeated_writes: metadata.repeated_writes_compressed.clone(), zkporter_is_available: ZKPORTER_IS_AVAILABLE, - state_diff_hash: metadata.state_diff_hash, + state_diff_hash: metadata.state_diff_hash.unwrap(), aux_commitments: match ( metadata.bootloader_initial_content_commitment, metadata.events_queue_commitment, @@ -145,7 +146,7 @@ pub fn create_l2_transaction(fee_per_gas: u64, gas_per_pubdata: u64) -> L2Tx { gas_per_pubdata_limit: gas_per_pubdata.into(), }; let mut tx = L2Tx::new_signed( - Address::random(), + Some(Address::random()), vec![], Nonce(0), fee, diff --git a/core/node/vm_runner/Cargo.toml b/core/node/vm_runner/Cargo.toml index ceb11a98247..9c235ad6b29 100644 --- a/core/node/vm_runner/Cargo.toml +++ b/core/node/vm_runner/Cargo.toml @@ -24,6 +24,7 @@ zksync_vm_executor.workspace = true zksync_health_check.workspace = true serde.workspace = true +serde_json.workspace = true tokio = { workspace = true, features = ["time"] } anyhow.workspace = true async-trait.workspace = true diff --git a/core/node/vm_runner/src/impls/bwip.rs 
b/core/node/vm_runner/src/impls/bwip.rs index f23f63533ff..6c2933635b4 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -52,9 +52,9 @@ impl BasicWitnessInputProducer { ConcurrentOutputHandlerFactory::new(pool.clone(), io.clone(), output_handler_factory); let vm_runner = VmRunner::new( pool, - Box::new(io), + Arc::new(io), Arc::new(loader), - Box::new(output_handler_factory), + Arc::new(output_handler_factory), batch_executor_factory, ); Ok(( @@ -168,7 +168,7 @@ impl OutputHandler for BasicWitnessInputProducerOutputHandler { )] async fn handle_l1_batch(self: Box, output: Arc) -> anyhow::Result<()> { let l1_batch_number = self.l1_batch_number; - let mut connection = self.pool.connection().await?; + let mut connection = self.pool.connection_tagged("bwip").await?; tracing::info!(%l1_batch_number, "Started saving VM run data"); @@ -381,7 +381,7 @@ struct BasicWitnessInputProducerOutputHandlerFactory { #[async_trait] impl OutputHandlerFactory for BasicWitnessInputProducerOutputHandlerFactory { async fn create_handler( - &mut self, + &self, system_env: SystemEnv, l1_batch_env: L1BatchEnv, ) -> anyhow::Result> { diff --git a/core/node/vm_runner/src/impls/playground.rs b/core/node/vm_runner/src/impls/playground.rs index 091fa15fc95..4bab43d1d0f 100644 --- a/core/node/vm_runner/src/impls/playground.rs +++ b/core/node/vm_runner/src/impls/playground.rs @@ -1,4 +1,5 @@ use std::{ + hash::{DefaultHasher, Hash, Hasher}, io, num::NonZeroU32, path::{Path, PathBuf}, @@ -14,10 +15,14 @@ use tokio::{ }; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck}; +use zksync_object_store::{Bucket, ObjectStore}; use zksync_state::RocksdbStorage; use zksync_types::{vm::FastVmMode, L1BatchNumber, L2ChainId}; use zksync_vm_executor::batch::MainBatchExecutorFactory; -use zksync_vm_interface::{L1BatchEnv, L2BlockEnv, SystemEnv}; +use zksync_vm_interface::{ + utils::{DivergenceHandler, VmDump}, + L1BatchEnv, L2BlockEnv, SystemEnv, +}; use crate::{ storage::{PostgresLoader, StorageLoader}, @@ -95,6 +100,7 @@ impl VmPlayground { /// Creates a new playground. pub async fn new( pool: ConnectionPool, + dumps_object_store: Option>, vm_mode: FastVmMode, storage: VmPlaygroundStorageOptions, chain_id: L2ChainId, @@ -129,6 +135,23 @@ impl VmPlayground { let mut batch_executor_factory = MainBatchExecutorFactory::new(false, false); batch_executor_factory.set_fast_vm_mode(vm_mode); + batch_executor_factory.observe_storage_metrics(); + let handle = tokio::runtime::Handle::current(); + if let Some(store) = dumps_object_store { + tracing::info!("Using object store for VM dumps: {store:?}"); + + let handler = DivergenceHandler::new(move |err, dump| { + let err_message = err.to_string(); + if let Err(err) = handle.block_on(Self::dump_vm_state(&*store, &err_message, &dump)) + { + let l1_batch_number = dump.l1_batch_number(); + tracing::error!( + "Saving VM dump for L1 batch #{l1_batch_number} failed: {err:#}" + ); + } + }); + batch_executor_factory.set_divergence_handler(handler); + } let io = VmPlaygroundIo { cursor_file_path, @@ -175,6 +198,27 @@ impl VmPlayground { )) } + async fn dump_vm_state( + object_store: &dyn ObjectStore, + err_message: &str, + dump: &VmDump, + ) -> anyhow::Result<()> { + // Deduplicate VM dumps by the error hash so that we don't create a lot of dumps for the same error. 
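The comment above states the dedup intent; the same naming logic as a stand-alone, hypothetical helper (not part of this change):

    use std::hash::{DefaultHasher, Hash, Hasher};

    /// Identical divergence messages hash to the same file name, so a recurring
    /// error overwrites its previous dump instead of piling up new objects.
    fn dump_filename(batch_number: u32, err_message: &str) -> String {
        let mut hasher = DefaultHasher::new();
        err_message.hash(&mut hasher);
        let err_hash = hasher.finish();
        format!("shadow_vm_dump_batch{batch_number:08}_{err_hash:x}.json")
    }

    fn main() {
        let first = dump_filename(42, "storage divergence at slot 3");
        let second = dump_filename(42, "storage divergence at slot 3");
        assert_eq!(first, second); // same error => same object key
    }

Note that `DefaultHasher` output is not guaranteed to be stable across Rust releases, which is acceptable for best-effort dedup but would matter if the file names were load-bearing.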
+ let mut hasher = DefaultHasher::new(); + err_message.hash(&mut hasher); + let err_hash = hasher.finish(); + let batch_number = dump.l1_batch_number().0; + let dump_filename = format!("shadow_vm_dump_batch{batch_number:08}_{err_hash:x}.json"); + + tracing::info!("Dumping diverged VM state to `{dump_filename}`"); + let dump = serde_json::to_string(&dump).context("failed serializing VM dump")?; + object_store + .put_raw(Bucket::VmDumps, &dump_filename, dump.into_bytes()) + .await + .context("failed putting VM dump to object store")?; + Ok(()) + } + /// Returns a health check for this component. pub fn health_check(&self) -> ReactiveHealthCheck { self.io.health_updater.subscribe() @@ -246,9 +290,9 @@ impl VmPlayground { }; let vm_runner = VmRunner::new( self.pool, - Box::new(self.io), + Arc::new(self.io), loader, - Box::new(self.output_handler_factory), + Arc::new(self.output_handler_factory), Box::new(self.batch_executor_factory), ); vm_runner.run(&stop_receiver).await @@ -412,7 +456,7 @@ impl OutputHandler for VmPlaygroundOutputHandler { #[async_trait] impl OutputHandlerFactory for VmPlaygroundOutputHandler { async fn create_handler( - &mut self, + &self, _system_env: SystemEnv, _l1_batch_env: L1BatchEnv, ) -> anyhow::Result> { diff --git a/core/node/vm_runner/src/impls/protective_reads.rs b/core/node/vm_runner/src/impls/protective_reads.rs index b620675b78e..b1aff9fe382 100644 --- a/core/node/vm_runner/src/impls/protective_reads.rs +++ b/core/node/vm_runner/src/impls/protective_reads.rs @@ -41,9 +41,9 @@ impl ProtectiveReadsWriter { let batch_processor = MainBatchExecutorFactory::new(false, false); let vm_runner = VmRunner::new( pool, - Box::new(io), + Arc::new(io), Arc::new(loader), - Box::new(output_handler_factory), + Arc::new(output_handler_factory), Box::new(batch_processor), ); Ok(( @@ -219,7 +219,7 @@ struct ProtectiveReadsOutputHandlerFactory { #[async_trait] impl OutputHandlerFactory for ProtectiveReadsOutputHandlerFactory { async fn create_handler( - &mut self, + &self, _system_env: SystemEnv, l1_batch_env: L1BatchEnv, ) -> anyhow::Result> { diff --git a/core/node/vm_runner/src/io.rs b/core/node/vm_runner/src/io.rs index 2e118f6cfd1..6d758f816f8 100644 --- a/core/node/vm_runner/src/io.rs +++ b/core/node/vm_runner/src/io.rs @@ -1,4 +1,4 @@ -use std::fmt::Debug; +use std::{fmt::Debug, sync::Arc}; use async_trait::async_trait; use zksync_dal::{Connection, Core}; @@ -31,8 +31,9 @@ pub trait VmRunnerIo: Debug + Send + Sync + 'static { conn: &mut Connection<'_, Core>, ) -> anyhow::Result; - /// Marks the specified batch as being in progress. Must be called before a batch can be marked - /// as completed. + /// Marks the specified batch as being in progress. Will be called at least once before a batch can be marked + /// as completed; can be called multiple times in case of a crash. The order in which this method is called + /// is not specified; i.e., it is **not** guaranteed to be called sequentially. /// /// # Errors /// @@ -44,7 +45,8 @@ pub trait VmRunnerIo: Debug + Send + Sync + 'static { ) -> anyhow::Result<()>; /// Marks the specified batch as the latest completed batch. All earlier batches are considered - /// to be completed too. No guarantees about later batches. + /// to be completed too. No guarantees about later batches. This method is guaranteed to be called + /// with monotonically increasing batch numbers. 
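The sharpened contract above (at-least-once, possibly out-of-order `mark_l1_batch_as_processing`; strictly monotonic `mark_l1_batch_as_completed`) can be pinned down with a toy state machine. This sketch is illustrative only; the real trait is async and Postgres-backed:

    use std::collections::BTreeSet;

    /// Hypothetical in-memory cursor obeying the documented contract.
    #[derive(Default)]
    struct Cursor {
        processing: BTreeSet<u32>,
        completed: u32,
    }

    impl Cursor {
        /// At-least-once and unordered: duplicates after a crash are fine.
        fn mark_processing(&mut self, batch: u32) {
            self.processing.insert(batch);
        }

        /// Monotonic: completing a batch implicitly completes all earlier ones.
        fn mark_completed(&mut self, batch: u32) {
            assert!(batch > self.completed, "completion must move forward");
            self.completed = batch;
            self.processing.retain(|&b| b > batch);
        }
    }

    fn main() {
        let mut cursor = Cursor::default();
        cursor.mark_processing(2);
        cursor.mark_processing(1); // out of order: allowed
        cursor.mark_processing(2); // duplicate: allowed
        cursor.mark_completed(1);
        cursor.mark_completed(2);
    }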
    ///
    /// # Errors
    ///
    /// Propagates DB errors.
    async fn mark_l1_batch_as_completed(
        &self,
        conn: &mut Connection<'_, Core>,
        l1_batch_number: L1BatchNumber,
    ) -> anyhow::Result<()>;
}
+
+#[async_trait]
+impl<Io: VmRunnerIo + ?Sized> VmRunnerIo for Arc<Io> {
+    fn name(&self) -> &'static str {
+        (**self).name()
+    }
+
+    async fn latest_processed_batch(
+        &self,
+        conn: &mut Connection<'_, Core>,
+    ) -> anyhow::Result<L1BatchNumber> {
+        (**self).latest_processed_batch(conn).await
+    }
+
+    async fn last_ready_to_be_loaded_batch(
+        &self,
+        conn: &mut Connection<'_, Core>,
+    ) -> anyhow::Result<L1BatchNumber> {
+        (**self).last_ready_to_be_loaded_batch(conn).await
+    }
+
+    async fn mark_l1_batch_as_processing(
+        &self,
+        conn: &mut Connection<'_, Core>,
+        l1_batch_number: L1BatchNumber,
+    ) -> anyhow::Result<()> {
+        (**self)
+            .mark_l1_batch_as_processing(conn, l1_batch_number)
+            .await
+    }
+
+    async fn mark_l1_batch_as_completed(
+        &self,
+        conn: &mut Connection<'_, Core>,
+        l1_batch_number: L1BatchNumber,
+    ) -> anyhow::Result<()> {
+        (**self)
+            .mark_l1_batch_as_completed(conn, l1_batch_number)
+            .await
+    }
+}
diff --git a/core/node/vm_runner/src/metrics.rs b/core/node/vm_runner/src/metrics.rs
index 4252ad5f0d4..cc588fd0263 100644
--- a/core/node/vm_runner/src/metrics.rs
+++ b/core/node/vm_runner/src/metrics.rs
@@ -2,7 +2,28 @@
 use std::time::Duration;

-use vise::{Buckets, Gauge, Histogram, Metrics};
+use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, Metrics, Unit};
+use zksync_state::OwnedStorage;
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)]
+#[metrics(label = "storage", rename_all = "snake_case")]
+pub(super) enum StorageKind {
+    Postgres,
+    Snapshot,
+    Rocksdb,
+    Unknown,
+}
+
+impl StorageKind {
+    pub fn new(storage: &OwnedStorage) -> Self {
+        match storage {
+            OwnedStorage::Rocksdb(_) | OwnedStorage::RocksdbWithMemory(_) => Self::Rocksdb,
+            OwnedStorage::Postgres(_) => Self::Postgres,
+            OwnedStorage::Snapshot(_) => Self::Snapshot,
+            OwnedStorage::Boxed(_) => Self::Unknown,
+        }
+    }
+}

 #[derive(Debug, Metrics)]
 #[metrics(prefix = "vm_runner")]
@@ -16,6 +37,9 @@ pub(super) struct VmRunnerMetrics {
     /// Total latency of loading an L1 batch (RocksDB mode only).
     #[metrics(buckets = Buckets::LATENCIES)]
     pub storage_load_time: Histogram<Duration>,
+    /// Latency of loading data and storage for a batch, grouped by the storage kind.
+    #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)]
+    pub data_and_storage_latency: Family<StorageKind, Histogram<Duration>>,
     /// Total latency of running VM on an L1 batch.
     #[metrics(buckets = Buckets::LATENCIES)]
     pub run_vm_time: Histogram<Duration>,
diff --git a/core/node/vm_runner/src/output_handler.rs b/core/node/vm_runner/src/output_handler.rs
index 25eae5e3684..7a8d1e41e52 100644
--- a/core/node/vm_runner/src/output_handler.rs
+++ b/core/node/vm_runner/src/output_handler.rs
@@ -61,7 +61,7 @@ pub trait OutputHandler: fmt::Debug + Send {
 /// simultaneously. Implementing this trait signifies that this property is held for the data the
 /// implementation is responsible for.
 #[async_trait]
-pub trait OutputHandlerFactory: fmt::Debug + Send {
+pub trait OutputHandlerFactory: fmt::Debug + Send + Sync {
     /// Creates a [`StateKeeperOutputHandler`] implementation for the provided L1 batch. Only
     /// supposed to be used for the L1 batch data it was created against. Using it for anything else
     /// will lead to errors.
     ///
     /// # Errors
     ///
     /// Propagates DB errors.
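The blanket impl above means a shared `Arc` handle can be passed wherever `impl VmRunnerIo` is expected. The underlying pattern, reduced to a synchronous sketch with hypothetical names:

    use std::sync::Arc;

    trait Io {
        fn name(&self) -> &'static str;
    }

    // Same shape as the blanket impl above: deref through the smart pointer
    // and delegate, making `Arc<T>` usable wherever `impl Io` is expected.
    impl<T: Io + ?Sized> Io for Arc<T> {
        fn name(&self) -> &'static str {
            (**self).name()
        }
    }

    struct Mock;

    impl Io for Mock {
        fn name(&self) -> &'static str {
            "mock"
        }
    }

    fn takes_io(io: impl Io) -> &'static str {
        io.name()
    }

    fn main() {
        let shared = Arc::new(Mock);
        // The same handle can be cloned into several components.
        assert_eq!(takes_io(Arc::clone(&shared)), "mock");
    }

This is what lets the runner and the tests below share one `Arc`-wrapped mock instead of moving a `Box` in.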
async fn create_handler( - &mut self, + &self, system_env: SystemEnv, l1_batch_env: L1BatchEnv, ) -> anyhow::Result>; @@ -139,7 +139,7 @@ impl OutputHandlerFactory for ConcurrentOutputHandlerFactory { async fn create_handler( - &mut self, + &self, system_env: SystemEnv, l1_batch_env: L1BatchEnv, ) -> anyhow::Result> { diff --git a/core/node/vm_runner/src/process.rs b/core/node/vm_runner/src/process.rs index e2a678ccdce..4f7ac1f9728 100644 --- a/core/node/vm_runner/src/process.rs +++ b/core/node/vm_runner/src/process.rs @@ -1,20 +1,26 @@ -use std::{sync::Arc, time::Duration}; +use std::{ + sync::Arc, + time::{Duration, Instant}, +}; use anyhow::Context; -use tokio::{sync::watch, task::JoinHandle}; +use tokio::{ + sync::{watch, Mutex}, + task::JoinHandle, +}; use zksync_dal::{ConnectionPool, Core}; use zksync_state::OwnedStorage; -use zksync_types::{block::L2BlockExecutionData, L1BatchNumber}; -use zksync_vm_interface::{ - executor::{BatchExecutor, BatchExecutorFactory}, - L2BlockEnv, -}; +use zksync_types::L1BatchNumber; +use zksync_vm_interface::{executor::BatchExecutorFactory, L2BlockEnv}; use crate::{ - metrics::METRICS, output_handler::OutputHandler, storage::StorageLoader, L1BatchOutput, - L2BlockOutput, OutputHandlerFactory, VmRunnerIo, + metrics::{StorageKind, METRICS}, + storage::StorageLoader, + L1BatchOutput, L2BlockOutput, OutputHandlerFactory, VmRunnerIo, }; +const SLEEP_INTERVAL: Duration = Duration::from_millis(50); + /// VM runner represents a logic layer of L1 batch / L2 block processing flow akin to that of state /// keeper. The difference is that VM runner is designed to be run on batches/blocks that have /// already been processed by state keeper but still require some extra handling as regulated by @@ -26,13 +32,13 @@ use crate::{ /// /// You can think of VM runner as a concurrent processor of a continuous stream of newly committed /// batches/blocks. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct VmRunner { pool: ConnectionPool, - io: Box, + io: Arc, loader: Arc, - output_handler_factory: Box, - batch_executor_factory: Box>, + output_handler_factory: Arc, + batch_executor_factory: Arc>>>, } impl VmRunner { @@ -44,9 +50,9 @@ impl VmRunner { /// an underlying implementation of [`OutputHandlerFactory`]. pub fn new( pool: ConnectionPool, - io: Box, + io: Arc, loader: Arc, - output_handler_factory: Box, + output_handler_factory: Arc, batch_executor_factory: Box>, ) -> Self { Self { @@ -54,17 +60,42 @@ impl VmRunner { io, loader, output_handler_factory, - batch_executor_factory, + batch_executor_factory: Arc::new(Mutex::new(batch_executor_factory)), } } - async fn process_batch( - mut batch_executor: Box>, - l2_blocks: Vec, - mut output_handler: Box, - ) -> anyhow::Result<()> { + async fn process_batch(self, number: L1BatchNumber) -> anyhow::Result<()> { + let stage_started_at = Instant::now(); + let (batch_data, storage) = loop { + match self.loader.load_batch(number).await? 
{ + Some(data_and_storage) => break data_and_storage, + None => { + // Next batch has not been loaded yet + tokio::time::sleep(SLEEP_INTERVAL).await; + } + } + }; + let kind = StorageKind::new(&storage); + METRICS.data_and_storage_latency[&kind].observe(stage_started_at.elapsed()); + + let mut batch_executor = self.batch_executor_factory.lock().await.init_batch( + storage, + batch_data.l1_batch_env.clone(), + batch_data.system_env.clone(), + ); + let mut output_handler = self + .output_handler_factory + .create_handler(batch_data.system_env, batch_data.l1_batch_env) + .await?; + self.io + .mark_l1_batch_as_processing( + &mut self.pool.connection_tagged("vm_runner").await?, + number, + ) + .await?; + let latency = METRICS.run_vm_time.start(); - for (i, l2_block) in l2_blocks.into_iter().enumerate() { + for (i, l2_block) in batch_data.l2_blocks.into_iter().enumerate() { let block_env = L2BlockEnv::from_l2_block_data(&l2_block); if i > 0 { // First L2 block in every batch is already preloaded @@ -112,14 +143,12 @@ impl VmRunner { /// Consumes VM runner to execute a loop that continuously pulls data from [`VmRunnerIo`] and /// processes it. - pub async fn run(mut self, stop_receiver: &watch::Receiver) -> anyhow::Result<()> { - const SLEEP_INTERVAL: Duration = Duration::from_millis(50); - + pub async fn run(self, stop_receiver: &watch::Receiver) -> anyhow::Result<()> { // Join handles for asynchronous tasks that are being run in the background let mut task_handles: Vec<(L1BatchNumber, JoinHandle>)> = Vec::new(); let mut next_batch = self .io - .latest_processed_batch(&mut self.pool.connection().await?) + .latest_processed_batch(&mut self.pool.connection_tagged("vm_runner").await?) .await? + 1; loop { @@ -148,7 +177,7 @@ impl VmRunner { let last_ready_batch = self .io - .last_ready_to_be_loaded_batch(&mut self.pool.connection().await?) + .last_ready_to_be_loaded_batch(&mut self.pool.connection_tagged("vm_runner").await?) .await?; METRICS.last_ready_batch.set(last_ready_batch.0.into()); if next_batch > last_ready_batch { @@ -156,31 +185,8 @@ impl VmRunner { tokio::time::sleep(SLEEP_INTERVAL).await; continue; } - let Some((batch_data, storage)) = self.loader.load_batch(next_batch).await? 
else { - // Next batch has not been loaded yet - tokio::time::sleep(SLEEP_INTERVAL).await; - continue; - }; - let batch_executor = self.batch_executor_factory.init_batch( - storage, - batch_data.l1_batch_env.clone(), - batch_data.system_env.clone(), - ); - let output_handler = self - .output_handler_factory - .create_handler(batch_data.system_env, batch_data.l1_batch_env) - .await?; - - self.io - .mark_l1_batch_as_processing(&mut self.pool.connection().await?, next_batch) - .await?; - let handle = tokio::task::spawn(Self::process_batch( - batch_executor, - batch_data.l2_blocks, - output_handler, - )); + let handle = tokio::spawn(self.clone().process_batch(next_batch)); task_handles.push((next_batch, handle)); - next_batch += 1; } } diff --git a/core/node/vm_runner/src/storage.rs b/core/node/vm_runner/src/storage.rs index baee426007c..2285455ba24 100644 --- a/core/node/vm_runner/src/storage.rs +++ b/core/node/vm_runner/src/storage.rs @@ -48,9 +48,8 @@ pub(crate) struct PostgresLoader { impl PostgresLoader { pub async fn new(pool: ConnectionPool, chain_id: L2ChainId) -> anyhow::Result { - let mut l1_batch_params_provider = L1BatchParamsProvider::new(); - let mut conn = pool.connection().await?; - l1_batch_params_provider.initialize(&mut conn).await?; + let mut conn = pool.connection_tagged("vm_runner").await?; + let l1_batch_params_provider = L1BatchParamsProvider::new(&mut conn).await?; Ok(Self { pool, l1_batch_params_provider, @@ -72,7 +71,7 @@ impl StorageLoader for PostgresLoader { &self, l1_batch_number: L1BatchNumber, ) -> anyhow::Result> { - let mut conn = self.pool.connection().await?; + let mut conn = self.pool.connection_tagged("vm_runner").await?; let Some(data) = load_batch_execute_data( &mut conn, l1_batch_number, @@ -86,7 +85,7 @@ impl StorageLoader for PostgresLoader { if let Some(snapshot) = OwnedStorage::snapshot(&mut conn, l1_batch_number).await? 
{ let postgres = OwnedStorage::postgres(conn, l1_batch_number - 1).await?; - let storage = snapshot.with_fallback(postgres, self.shadow_snapshots); + let storage = snapshot.with_fallback(postgres.into(), self.shadow_snapshots); let storage = OwnedStorage::from(storage); return Ok(Some((data, storage))); } @@ -94,7 +93,7 @@ impl StorageLoader for PostgresLoader { tracing::info!( "Incomplete data to create storage snapshot for batch; will use sequential storage" ); - let conn = self.pool.connection().await?; + let conn = self.pool.connection_tagged("vm_runner").await?; let storage = OwnedStorage::postgres(conn, l1_batch_number - 1).await?; Ok(Some((data, storage.into()))) } @@ -151,12 +150,11 @@ impl VmRunnerStorage { chain_id: L2ChainId, ) -> anyhow::Result<(Self, StorageSyncTask)> { let mut conn = pool.connection_tagged(io.name()).await?; - let mut l1_batch_params_provider = L1BatchParamsProvider::new(); - l1_batch_params_provider - .initialize(&mut conn) + let l1_batch_params_provider = L1BatchParamsProvider::new(&mut conn) .await .context("Failed initializing L1 batch params provider")?; drop(conn); + let state = Arc::new(RwLock::new(State { rocksdb: None, l1_batch_number: L1BatchNumber(0), @@ -263,9 +261,7 @@ impl StorageSyncTask { state: Arc>, ) -> anyhow::Result { let mut conn = pool.connection_tagged(io.name()).await?; - let mut l1_batch_params_provider = L1BatchParamsProvider::new(); - l1_batch_params_provider - .initialize(&mut conn) + let l1_batch_params_provider = L1BatchParamsProvider::new(&mut conn) .await .context("Failed initializing L1 batch params provider")?; let target_l1_batch_number = io.latest_processed_batch(&mut conn).await?; @@ -398,29 +394,20 @@ pub(crate) async fn load_batch_execute_data( l1_batch_params_provider: &L1BatchParamsProvider, chain_id: L2ChainId, ) -> anyhow::Result> { - let first_l2_block_in_batch = l1_batch_params_provider - .load_first_l2_block_in_batch(conn, l1_batch_number) - .await - .with_context(|| { - format!( - "Failed loading first L2 block for L1 batch #{}", - l1_batch_number - ) - })?; - let Some(first_l2_block_in_batch) = first_l2_block_in_batch else { - return Ok(None); - }; - let (system_env, l1_batch_env) = l1_batch_params_provider - .load_l1_batch_params( + let Some((system_env, l1_batch_env)) = l1_batch_params_provider + .load_l1_batch_env( conn, - &first_l2_block_in_batch, + l1_batch_number, // `validation_computational_gas_limit` is only relevant when rejecting txs, but we // are re-executing so none of them should be rejected u32::MAX, chain_id, ) - .await - .with_context(|| format!("Failed loading params for L1 batch #{}", l1_batch_number))?; + .await? 
+    else {
+        return Ok(None);
+    };
+
    let l2_blocks = conn
        .transactions_dal()
        .get_l2_blocks_to_execute_for_l1_batch(l1_batch_number)
diff --git a/core/node/vm_runner/src/tests/mod.rs b/core/node/vm_runner/src/tests/mod.rs
index 53001640814..53bef106a8f 100644
--- a/core/node/vm_runner/src/tests/mod.rs
+++ b/core/node/vm_runner/src/tests/mod.rs
@@ -39,7 +39,7 @@ struct IoMock {
 }

 #[async_trait]
-impl VmRunnerIo for Arc<RwLock<IoMock>> {
+impl VmRunnerIo for RwLock<IoMock> {
     fn name(&self) -> &'static str {
         "io_mock"
     }
@@ -153,7 +153,7 @@ struct TestOutputFactory {
 }

 #[async_trait]
 impl OutputHandlerFactory for TestOutputFactory {
     async fn create_handler(
-        &mut self,
+        &self,
         _system_env: SystemEnv,
         l1_batch_env: L1BatchEnv,
     ) -> anyhow::Result<Box<dyn OutputHandler>> {
@@ -202,7 +202,7 @@ pub fn create_l2_transaction(
     };
     let tx = account.get_l2_tx_for_execute(
         Execute {
-            contract_address: Address::random(),
+            contract_address: Some(Address::random()),
             calldata: vec![],
             value: Default::default(),
             factory_deps: vec![],
diff --git a/core/node/vm_runner/src/tests/playground.rs b/core/node/vm_runner/src/tests/playground.rs
index aaaf4b45b1a..92cd149f405 100644
--- a/core/node/vm_runner/src/tests/playground.rs
+++ b/core/node/vm_runner/src/tests/playground.rs
@@ -74,6 +74,7 @@ async fn run_playground(
     let (playground, playground_tasks) = VmPlayground::new(
         pool.clone(),
+        None,
         FastVmMode::Shadow,
         storage,
         genesis_params.config().l2_chain_id,
@@ -255,6 +256,7 @@ async fn using_larger_window_size(window_size: u32) {
     };
     let (playground, playground_tasks) = VmPlayground::new(
         pool.clone(),
+        None,
         FastVmMode::Shadow,
         VmPlaygroundStorageOptions::from(&rocksdb_dir),
         genesis_params.config().l2_chain_id,
diff --git a/core/node/vm_runner/src/tests/process.rs b/core/node/vm_runner/src/tests/process.rs
index e010b81f666..ab6d1703a84 100644
--- a/core/node/vm_runner/src/tests/process.rs
+++ b/core/node/vm_runner/src/tests/process.rs
@@ -53,16 +53,16 @@
 //     let output_stop_receiver = stop_receiver.clone();
 //     tokio::task::spawn(async move { task.run(output_stop_receiver).await.unwrap() });

-//     let storage = Arc::new(storage);
-//     let batch_executor = MainBatchExecutorFactory::new(false, false);
-//     let vm_runner = VmRunner::new(
-//         connection_pool,
-//         Box::new(io.clone()),
-//         storage,
-//         Box::new(output_factory),
-//         Box::new(batch_executor),
-//     );
-//     tokio::task::spawn(async move { vm_runner.run(&stop_receiver).await.unwrap() });
+//     let storage = Arc::new(storage);
+//     let batch_executor = MainBatchExecutorFactory::new(false, false);
+//     let vm_runner = VmRunner::new(
+//         connection_pool,
+//         io.clone(),
+//         storage,
+//         Arc::new(output_factory),
+//         Box::new(batch_executor),
+//     );
+//     tokio::task::spawn(async move { vm_runner.run(&stop_receiver).await.unwrap() });

 //     wait::for_batch_progressively(io, L1BatchNumber(batch_count), TEST_TIMEOUT).await?;
 //     Ok(())
diff --git a/core/node/vm_runner/src/tests/storage_writer.rs b/core/node/vm_runner/src/tests/storage_writer.rs
index 76d0867125a..c377cf95b5a 100644
--- a/core/node/vm_runner/src/tests/storage_writer.rs
+++ b/core/node/vm_runner/src/tests/storage_writer.rs
@@ -57,6 +57,8 @@ impl VmRunnerIo for StorageWriterIo {
         l1_batch_number: L1BatchNumber,
     ) -> anyhow::Result<()> {
         assert_eq!(l1_batch_number, self.batch() + 1);
+        // ^ The assertion works because of the `last_ready_to_be_loaded_batch()` implementation;
+        // it wouldn't hold if we allowed processing multiple batches concurrently.
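As the comment above notes, the assertion only holds while batches are processed one at a time; the `process.rs` rewrite earlier in this patch is what enables concurrency (`VmRunner` is now `Clone`, its executor factory sits behind a `tokio::sync::Mutex`, and each batch is spawned on its own task, polling until its data is loadable). A reduced sketch of that pattern, with hypothetical names:

    use std::{sync::Arc, time::Duration};

    use tokio::sync::Mutex;

    const SLEEP_INTERVAL: Duration = Duration::from_millis(50);

    // Stand-in for the loader: always reports the batch as ready in this sketch.
    async fn load_batch(number: u32) -> anyhow::Result<Option<u32>> {
        Ok(Some(number))
    }

    #[derive(Clone)]
    struct Runner {
        // The factory keeps `&mut self` methods, hence the `Mutex`; everything
        // else is shared through `Arc`s so the runner stays cheaply cloneable.
        factory: Arc<Mutex<u32>>,
    }

    impl Runner {
        async fn process_batch(self, number: u32) -> anyhow::Result<()> {
            // Poll until the batch data is available instead of bailing out.
            let data = loop {
                match load_batch(number).await? {
                    Some(data) => break data,
                    None => tokio::time::sleep(SLEEP_INTERVAL).await,
                }
            };
            // Lock only while "initializing the executor" for this batch.
            *self.factory.lock().await += data;
            Ok(())
        }
    }

    #[tokio::main]
    async fn main() -> anyhow::Result<()> {
        let runner = Runner {
            factory: Arc::new(Mutex::new(0)),
        };
        // One task per batch, as in the reworked run loop.
        let handles: Vec<_> = (1..=3)
            .map(|n| tokio::spawn(runner.clone().process_batch(n)))
            .collect();
        for handle in handles {
            handle.await??;
        }
        Ok(())
    }

Putting the factory behind a mutex keeps its `&mut self` API unchanged while still letting batch tasks run side by side.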
Ok(()) } @@ -147,7 +149,7 @@ impl OutputHandler for StorageWriterIo { #[async_trait] impl OutputHandlerFactory for StorageWriterIo { async fn create_handler( - &mut self, + &self, _system_env: SystemEnv, l1_batch_env: L1BatchEnv, ) -> anyhow::Result> { @@ -167,7 +169,7 @@ pub(super) async fn write_storage_logs(pool: ConnectionPool, insert_protec .unwrap() .expect("No L1 batches in storage"); drop(conn); - let io = Box::new(StorageWriterIo { + let io = Arc::new(StorageWriterIo { last_processed_batch: Arc::new(watch::channel(L1BatchNumber(0)).0), last_processed_block: L2BlockNumber(0), pool: pool.clone(), @@ -240,9 +242,9 @@ async fn storage_writer_works(insert_protective_reads: bool) { let batch_executor = MainBatchExecutorFactory::new(false, false); let vm_runner = VmRunner::new( pool, - Box::new(io.clone()), + io.clone(), loader, - Box::new(output_factory), + Arc::new(output_factory), Box::new(batch_executor), ); diff --git a/core/tests/loadnext/src/sdk/operations/deploy_contract.rs b/core/tests/loadnext/src/sdk/operations/deploy_contract.rs index 161d156a53e..67e877ae8ef 100644 --- a/core/tests/loadnext/src/sdk/operations/deploy_contract.rs +++ b/core/tests/loadnext/src/sdk/operations/deploy_contract.rs @@ -145,7 +145,7 @@ where let mut factory_deps = self.factory_deps.clone().unwrap_or_default(); factory_deps.push(bytecode); let l2_tx = L2Tx::new( - CONTRACT_DEPLOYER_ADDRESS, + Some(CONTRACT_DEPLOYER_ADDRESS), Execute::encode_deploy_params_create(Default::default(), main_contract_hash, calldata), Nonce(0), Default::default(), diff --git a/core/tests/loadnext/src/sdk/operations/execute_contract.rs b/core/tests/loadnext/src/sdk/operations/execute_contract.rs index d5fe57c7b79..627e889ed01 100644 --- a/core/tests/loadnext/src/sdk/operations/execute_contract.rs +++ b/core/tests/loadnext/src/sdk/operations/execute_contract.rs @@ -144,7 +144,7 @@ where .unwrap_or_default(); let execute = L2Tx::new( - contract_address, + Some(contract_address), calldata, Nonce(0), Default::default(), diff --git a/core/tests/loadnext/src/sdk/operations/transfer.rs b/core/tests/loadnext/src/sdk/operations/transfer.rs index 94ee3aeb608..651fabeb788 100644 --- a/core/tests/loadnext/src/sdk/operations/transfer.rs +++ b/core/tests/loadnext/src/sdk/operations/transfer.rs @@ -153,7 +153,7 @@ where let tx = if token.is_zero() || token == L2_BASE_TOKEN_ADDRESS { // ETH estimate Execute { - contract_address: to, + contract_address: Some(to), calldata: Default::default(), factory_deps: vec![], value: amount, @@ -161,7 +161,7 @@ where } else { // ERC-20 estimate Execute { - contract_address: token, + contract_address: Some(token), calldata: create_transfer_calldata(to, amount), factory_deps: vec![], value: Default::default(), diff --git a/core/tests/loadnext/src/sdk/signer.rs b/core/tests/loadnext/src/sdk/signer.rs index 0f4b1cf2971..6f98f674ed9 100644 --- a/core/tests/loadnext/src/sdk/signer.rs +++ b/core/tests/loadnext/src/sdk/signer.rs @@ -51,7 +51,7 @@ impl Signer { // Sign Ether transfer if token.is_zero() || token == L2_BASE_TOKEN_ADDRESS { let mut transfer = L2Tx::new( - to, + Some(to), Default::default(), nonce, fee, @@ -73,7 +73,7 @@ impl Signer { // Sign ERC-20 transfer let data = create_transfer_calldata(to, amount); let mut transfer = L2Tx::new( - token, + Some(token), data, nonce, fee, @@ -122,7 +122,7 @@ impl Signer { paymaster_params: PaymasterParams, ) -> Result { let mut execute_contract = L2Tx::new( - contract, + Some(contract), calldata, nonce, fee, diff --git a/core/tests/recovery-test/src/index.ts 
b/core/tests/recovery-test/src/index.ts index 6599e7c5d29..462404af606 100644 --- a/core/tests/recovery-test/src/index.ts +++ b/core/tests/recovery-test/src/index.ts @@ -271,7 +271,7 @@ export class FundedWallet { await depositTx.waitFinalize(); } - /** Generates at least one L1 batch by transfering funds to itself. */ + /** Generates at least one L1 batch by transferring funds to itself. */ async generateL1Batch(): Promise { const transactionResponse = await this.wallet.transfer({ to: this.wallet.address, @@ -279,15 +279,15 @@ export class FundedWallet { token: zksync.utils.ETH_ADDRESS }); console.log('Generated a transaction from funded wallet', transactionResponse); - const receipt = await transactionResponse.wait(); - console.log('Got finalized transaction receipt', receipt); - // Wait until an L1 batch with the transaction is sealed. - const pastL1BatchNumber = await this.wallet.provider.getL1BatchNumber(); - let newL1BatchNumber: number; - while ((newL1BatchNumber = await this.wallet.provider.getL1BatchNumber()) <= pastL1BatchNumber) { + let receipt: zksync.types.TransactionReceipt; + while (!(receipt = await transactionResponse.wait()).l1BatchNumber) { + console.log('Transaction is not included in L1 batch; sleeping'); await sleep(1000); } + + console.log('Got finalized transaction receipt', receipt); + const newL1BatchNumber = receipt.l1BatchNumber; console.log(`Sealed L1 batch #${newL1BatchNumber}`); return newL1BatchNumber; } diff --git a/core/tests/test_account/src/lib.rs b/core/tests/test_account/src/lib.rs index 28e3d609e63..d0c97abab72 100644 --- a/core/tests/test_account/src/lib.rs +++ b/core/tests/test_account/src/lib.rs @@ -129,7 +129,7 @@ impl Account { .expect("failed to encode parameters"); let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata, factory_deps, value: U256::zero(), @@ -158,7 +158,7 @@ impl Account { tx: abi::L2CanonicalTransaction { tx_type: PRIORITY_OPERATION_L2_TX_TYPE.into(), from: address_to_u256(&self.address), - to: address_to_u256(&execute.contract_address), + to: address_to_u256(&execute.contract_address.unwrap_or_default()), gas_limit, gas_per_pubdata_byte_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), max_fee_per_gas, @@ -216,7 +216,7 @@ impl Account { .expect("failed to encode parameters"); let execute = Execute { - contract_address: address, + contract_address: Some(address), calldata, value: value.unwrap_or_default(), factory_deps: vec![], @@ -235,7 +235,7 @@ impl Account { ) -> Transaction { let calldata = params.to_bytes(); let execute = Execute { - contract_address: address, + contract_address: Some(address), calldata, value: U256::zero(), factory_deps: vec![], diff --git a/core/tests/ts-integration/contracts/zkasm/deep_stak.zkasm b/core/tests/ts-integration/contracts/zkasm/deep_stak.zkasm index bdfb4e70de7..4cdf8542cab 100644 --- a/core/tests/ts-integration/contracts/zkasm/deep_stak.zkasm +++ b/core/tests/ts-integration/contracts/zkasm/deep_stak.zkasm @@ -1,26 +1,25 @@ .text - .file "loop.yul" - .globl __entry + .file "loop.yul" + .globl __entry __entry: .func_begin0: - and! 1, r2, r1 - jump.ne @.BB0_2 + and! 
1, r2, r1 + jump.ne @.BB0_2 __LOOP: - near_call r0, @__LOOP, @__LOOP + call r0, @__LOOP, @__LOOP .BB0_1: - add 1, r0, r2 - ret + add 1, r0, r2 + ret .BB0_2: - add 32, r0, r1 - st.2 256, r1 - st.2 288, r0 - add @CPI0_0[0], r0, r1 - ret.ok.to_label r1, @DEFAULT_FAR_RETURN + add 32, r0, r1 + stm.ah 256, r1 + stm.ah 288, r0 + add code[@CPI0_0], r0, r1 + retl r1, @DEFAULT_FAR_RETURN .func_end0: - -.func_end1: - - .note.GNU-stack - .rodata +;; landing pad for returns +DEFAULT_FAR_RETURN: + retl @DEFAULT_FAR_RETURN + .rodata CPI0_0: - .cell 53919893334301279589334030174039261352344891250716429051063678533632 \ No newline at end of file + .cell 53919893334301279589334030174039261352344891250716429051063678533632 diff --git a/core/tests/ts-integration/hardhat.config.ts b/core/tests/ts-integration/hardhat.config.ts index 00abe2b32ef..a96a83ca3ee 100644 --- a/core/tests/ts-integration/hardhat.config.ts +++ b/core/tests/ts-integration/hardhat.config.ts @@ -4,14 +4,14 @@ import '@matterlabs/hardhat-zksync-vyper'; export default { zksolc: { - version: '1.3.21', + version: '1.5.3', compilerSource: 'binary', settings: { - isSystem: true + enableEraVMExtensions: true } }, zkvyper: { - version: '1.3.13', + version: '1.5.4', compilerSource: 'binary' }, networks: { @@ -20,7 +20,11 @@ export default { } }, solidity: { - version: '0.8.23' + version: '0.8.26', + eraVersion: '1.0.1', + settings: { + evmVersion: 'cancun' + } }, vyper: { version: '0.3.10' diff --git a/core/tests/ts-integration/jest.config.json b/core/tests/ts-integration/jest.config.json index 8fa5ea1eb72..1756de1bb02 100644 --- a/core/tests/ts-integration/jest.config.json +++ b/core/tests/ts-integration/jest.config.json @@ -14,6 +14,7 @@ "testTimeout": 605000, "globalSetup": "/src/jest-setup/global-setup.ts", "globalTeardown": "/src/jest-setup/global-teardown.ts", + "testEnvironment": "/src/jest-setup/env.ts", "setupFilesAfterEnv": [ "/src/jest-setup/add-matchers.ts" ], diff --git a/core/tests/ts-integration/package.json b/core/tests/ts-integration/package.json index 097cc3a3e98..624f10a4482 100644 --- a/core/tests/ts-integration/package.json +++ b/core/tests/ts-integration/package.json @@ -13,9 +13,9 @@ "build-yul": "hardhat run scripts/compile-yul.ts" }, "devDependencies": { - "@matterlabs/hardhat-zksync-deploy": "^1.3.0", - "@matterlabs/hardhat-zksync-solc": "^1.1.4", - "@matterlabs/hardhat-zksync-vyper": "^1.0.8", + "@matterlabs/hardhat-zksync-deploy": "^1.5.0", + "@matterlabs/hardhat-zksync-solc": "^1.2.4", + "@matterlabs/hardhat-zksync-vyper": "^1.1.0", "@nomiclabs/hardhat-vyper": "^3.0.6", "@types/jest": "^29.0.3", "@types/node": "^18.19.15", @@ -25,6 +25,7 @@ "ethers": "^6.7.1", "hardhat": "=2.22.2", "jest": "^29.0.3", + "jest-environment-node": "^29.0.3", "jest-matcher-utils": "^29.0.3", "node-fetch": "^2.6.1", "ts-jest": "^29.0.1", diff --git a/core/tests/ts-integration/scripts/compile-yul.ts b/core/tests/ts-integration/scripts/compile-yul.ts index dda65456a6c..876caacdfab 100644 --- a/core/tests/ts-integration/scripts/compile-yul.ts +++ b/core/tests/ts-integration/scripts/compile-yul.ts @@ -7,7 +7,7 @@ import { getZksolcUrl, saltFromUrl } from '@matterlabs/hardhat-zksync-solc'; import { getCompilersDir } from 'hardhat/internal/util/global-dir'; import path from 'path'; -const COMPILER_VERSION = '1.3.21'; +const COMPILER_VERSION = '1.5.3'; const IS_COMPILER_PRE_RELEASE = false; async function compilerLocation(): Promise { @@ -48,18 +48,24 @@ export async function compile( } let paths = preparePaths(pathToHome, path, files, 
outputDirName); - let systemMode = type === 'yul' ? '--system-mode --optimization 3' : ''; + let eraVmExtensions = type === 'yul' ? '--enable-eravm-extensions --optimization 3' : ''; const zksolcLocation = await compilerLocation(); await spawn( - `${zksolcLocation} ${paths.absolutePathSources}/${paths.outputDir} ${systemMode} --${type} --bin --overwrite -o ${paths.absolutePathArtifacts}/${paths.outputDir}` + `${zksolcLocation} ${paths.absolutePathSources}/${paths.outputDir} ${eraVmExtensions} --${type} --bin --overwrite -o ${paths.absolutePathArtifacts}/${paths.outputDir}` ); } export async function compileFolder(pathToHome: string, path: string, type: string) { + let compilationMode; + if (type === 'zkasm') { + compilationMode = 'eravm-assembly'; + } else { + compilationMode = type; + } let files: string[] = (await fs.promises.readdir(path)).filter((fn) => fn.endsWith(`.${type}`)); for (const file of files) { - await compile(pathToHome, path, [file], `${file}`, type); + await compile(pathToHome, path, [file], `${file}`, compilationMode); } } diff --git a/core/tests/ts-integration/src/context-owner.ts b/core/tests/ts-integration/src/context-owner.ts index efce638e6f5..3c3c1d0802f 100644 --- a/core/tests/ts-integration/src/context-owner.ts +++ b/core/tests/ts-integration/src/context-owner.ts @@ -398,7 +398,7 @@ export class TestContextOwner { gasPrice }, // specify gas limit manually, until EVM-554 is fixed - l2GasLimit: 1000000 + l2GasLimit: 2000000 }) .then(async (tx) => { const amount = ethers.formatEther(l2ETHAmountToDeposit); diff --git a/core/tests/ts-integration/src/helpers.ts b/core/tests/ts-integration/src/helpers.ts index f0d1c541772..354dfe64fdf 100644 --- a/core/tests/ts-integration/src/helpers.ts +++ b/core/tests/ts-integration/src/helpers.ts @@ -101,6 +101,38 @@ export async function waitUntilBlockFinalized(wallet: zksync.Wallet, blockNumber } } +async function getL1BatchFinalizationStatus(provider: zksync.Provider, number: number) { + const result = await provider.send('zks_getL1ProcessingDetails', [number]); + + if (result == null) { + return null; + } + if (result.executedAt != null) { + return { + finalizedHash: result.executeTxHash, + finalizedAt: result.executedAt + }; + } + return null; +} + +export async function waitForBlockToBeFinalizedOnL1(wallet: zksync.Wallet, blockNumber: number) { + // Waiting for the block to be finalized on the immediate settlement layer. 
+ await waitUntilBlockFinalized(wallet, blockNumber); + + const provider = wallet.provider; + + const batchNumber = (await provider.getBlockDetails(blockNumber)).l1BatchNumber; + + let result = await getL1BatchFinalizationStatus(provider, batchNumber); + + while (result == null) { + await zksync.utils.sleep(provider.pollingInterval); + + result = await getL1BatchFinalizationStatus(provider, batchNumber); + } +} + /** * Returns an increased gas price to decrease chances of L1 transactions being stuck * diff --git a/core/tests/ts-integration/src/jest-setup/env.ts b/core/tests/ts-integration/src/jest-setup/env.ts new file mode 100644 index 00000000000..77bbfc92911 --- /dev/null +++ b/core/tests/ts-integration/src/jest-setup/env.ts @@ -0,0 +1,14 @@ +import NodeEnvironment from 'jest-environment-node'; +import type { EnvironmentContext, JestEnvironmentConfig } from '@jest/environment'; + +export default class IntegrationTestEnvironment extends NodeEnvironment { + constructor(config: JestEnvironmentConfig, context: EnvironmentContext) { + super(config, context); + } + + override async setup() { + await super.setup(); + // Provide access to raw console in order to produce less cluttered debug messages + this.global.rawWriteToConsole = console.log; + } +} diff --git a/core/tests/ts-integration/src/jest-setup/global-setup.ts b/core/tests/ts-integration/src/jest-setup/global-setup.ts index d84d70fe69d..ffb1a8c3503 100644 --- a/core/tests/ts-integration/src/jest-setup/global-setup.ts +++ b/core/tests/ts-integration/src/jest-setup/global-setup.ts @@ -11,11 +11,12 @@ declare global { */ async function performSetup(_globalConfig: any, _projectConfig: any) { // Perform the test initialization. - // This is an expensive operation that preceeds running any tests, as we need + // This is an expensive operation that precedes running any tests, as we need // to deposit & distribute funds, deploy some contracts, and perform basic server checks. // Jest writes an initial message without a newline, so we have to do it manually. console.log(''); + globalThis.rawWriteToConsole = console.log; // Before starting any actual logic, we need to ensure that the server is running (it may not // be the case, for example, right after deployment on stage). diff --git a/core/tests/ts-integration/src/l1-provider.ts b/core/tests/ts-integration/src/l1-provider.ts new file mode 100644 index 00000000000..39b0397cd06 --- /dev/null +++ b/core/tests/ts-integration/src/l1-provider.ts @@ -0,0 +1,82 @@ +import { + ethers, + JsonRpcProvider, + Network, + TransactionRequest, + TransactionResponse, + TransactionResponseParams +} from 'ethers'; +import { Reporter } from './reporter'; +import { AugmentedTransactionResponse } from './transaction-response'; + +export class L1Provider extends JsonRpcProvider { + readonly reporter: Reporter; + + constructor(url: string, reporter?: Reporter) { + super(url, undefined, { batchMaxCount: 1 }); + this.reporter = reporter ?? 
new Reporter(); + } + + override _wrapTransactionResponse(tx: TransactionResponseParams, network: Network): L1TransactionResponse { + const base = super._wrapTransactionResponse(tx, network); + return new L1TransactionResponse(base, this.reporter); + } +} + +class L1TransactionResponse extends ethers.TransactionResponse implements AugmentedTransactionResponse { + public readonly kind = 'L1'; + private isWaitingReported: boolean = false; + private isReceiptReported: boolean = false; + + constructor(base: ethers.TransactionResponse, public readonly reporter: Reporter) { + super(base, base.provider); + } + + override async wait(confirmations?: number, timeout?: number) { + if (!this.isWaitingReported) { + this.reporter.debug( + `Started waiting for L1 transaction ${this.hash} (from=${this.from}, nonce=${this.nonce})` + ); + this.isWaitingReported = true; + } + + const receipt = await super.wait(confirmations, timeout); + if (receipt !== null && !this.isReceiptReported) { + this.reporter.debug( + `Obtained receipt for L1 transaction ${this.hash}: blockNumber=${receipt.blockNumber}, status=${receipt.status}` + ); + this.isReceiptReported = true; + } + return receipt; + } + + override replaceableTransaction(startBlock: number): L1TransactionResponse { + const base = super.replaceableTransaction(startBlock); + return new L1TransactionResponse(base, this.reporter); + } +} + +/** Wallet that retries `sendTransaction` requests on "nonce expired" errors, provided that it's possible (i.e., no nonce is set in the request). */ +export class RetryableL1Wallet extends ethers.Wallet { + constructor(key: string, provider: L1Provider) { + super(key, provider); + } + + override async sendTransaction(tx: TransactionRequest): Promise { + const reporter = (this.provider!).reporter; + while (true) { + try { + return await super.sendTransaction(tx); + } catch (err: any) { + // For unknown reason, `reth` sometimes returns outdated transaction count under load, leading to transactions getting rejected. + // This is a workaround for this issue. + reporter.debug('L1 transaction request failed', tx, err); + if (err.code === 'NONCE_EXPIRED' && (tx.nonce === null || tx.nonce === undefined)) { + reporter.debug('Retrying L1 transaction request', tx); + } else { + throw err; + } + } + } + } +} diff --git a/core/tests/ts-integration/src/matchers/transaction.ts b/core/tests/ts-integration/src/matchers/transaction.ts index 89e90b6d5f1..ac5bf8e77ea 100644 --- a/core/tests/ts-integration/src/matchers/transaction.ts +++ b/core/tests/ts-integration/src/matchers/transaction.ts @@ -1,7 +1,8 @@ import { TestMessage } from './matcher-helpers'; import { MatcherModifier } from '../modifiers'; import * as zksync from 'zksync-ethers'; -import { AugmentedTransactionResponse } from '../retry-provider'; +import { AugmentedTransactionResponse } from '../transaction-response'; +import { ethers } from 'ethers'; // This file contains implementation of matchers for ZKsync/ethereum transaction. // For actual doc-comments, see `typings/jest.d.ts` file. @@ -207,7 +208,7 @@ function fail(message: string) { * * @returns If check has failed, returns a Jest error object. Otherwise, returns `undefined`. 
*/ -function checkReceiptFields(request: zksync.types.TransactionResponse, receipt: zksync.types.TransactionReceipt) { +function checkReceiptFields(request: ethers.TransactionResponseParams, receipt: zksync.types.TransactionReceipt) { const errorMessageBuilder = new TestMessage() .matcherHint('.checkReceiptFields') .line('Transaction receipt is not properly formatted. Transaction request:') diff --git a/core/tests/ts-integration/src/modifiers/balance-checker.ts b/core/tests/ts-integration/src/modifiers/balance-checker.ts index bdf04db0598..12e2c70c53d 100644 --- a/core/tests/ts-integration/src/modifiers/balance-checker.ts +++ b/core/tests/ts-integration/src/modifiers/balance-checker.ts @@ -53,7 +53,8 @@ export async function shouldChangeTokenBalances( ): Promise { return await ShouldChangeBalance.create(token, balanceChanges, { noAutoFeeCheck: true, - l1: params?.l1 ?? false + l1: params?.l1 ?? false, + ignoreUndeployedToken: params?.ignoreUndeployedToken ?? false }); } @@ -80,6 +81,7 @@ export interface Params { noAutoFeeCheck?: boolean; l1?: boolean; l1ToL2?: boolean; + ignoreUndeployedToken?: boolean; } /** @@ -114,7 +116,7 @@ class ShouldChangeBalance extends MatcherModifier { for (const entry of balanceChanges) { const wallet = entry.wallet; const address = entry.addressToCheck ?? entry.wallet.address; - const initialBalance = await getBalance(l1, wallet, address, token); + const initialBalance = await getBalance(l1, wallet, address, token, params?.ignoreUndeployedToken); populatedBalanceChanges.push({ wallet: entry.wallet, change: entry.change, @@ -282,11 +284,21 @@ function extractRefundForL1ToL2(receipt: zksync.types.TransactionReceipt, refund * @param token Address of the token * @returns Token balance */ -async function getBalance(l1: boolean, wallet: zksync.Wallet, address: string, token: string): Promise { +async function getBalance( + l1: boolean, + wallet: zksync.Wallet, + address: string, + token: string, + ignoreUndeployedToken?: boolean +): Promise { const provider = l1 ? wallet.providerL1! : wallet.provider; if (zksync.utils.isETH(token)) { return await provider.getBalance(address); } else { + if (ignoreUndeployedToken && (await provider.getCode(token)) === '0x') { + return 0n; + } + const erc20contract = IERC20Factory.connect(token, provider); return await erc20contract.balanceOf(address); } diff --git a/core/tests/ts-integration/src/reporter.ts b/core/tests/ts-integration/src/reporter.ts index 903ff3101ef..e6a11f0725b 100644 --- a/core/tests/ts-integration/src/reporter.ts +++ b/core/tests/ts-integration/src/reporter.ts @@ -102,7 +102,7 @@ export class Reporter { // Timestamps only make sense to include in tests. const timestampString = testName === undefined ? '' : timestamp(`${new Date().toISOString()} `); const testString = testName ? 
info(` [${testName}]`) : ''; - console.log(this.indent(`${timestampString}DEBUG${testString}: ${message}`), ...args); + rawWriteToConsole(this.indent(`${timestampString}DEBUG${testString}: ${message}`), ...args); } } diff --git a/core/tests/ts-integration/src/retry-provider.ts b/core/tests/ts-integration/src/retry-provider.ts index 1763c0e4edf..51d88357c6c 100644 --- a/core/tests/ts-integration/src/retry-provider.ts +++ b/core/tests/ts-integration/src/retry-provider.ts @@ -1,12 +1,15 @@ import * as zksync from 'zksync-ethers'; import * as ethers from 'ethers'; import { Reporter } from './reporter'; +import { AugmentedTransactionResponse } from './transaction-response'; +import { L1Provider, RetryableL1Wallet } from './l1-provider'; /** * RetryProvider retries every RPC request if it detects a timeout-related issue on the server side. */ export class RetryProvider extends zksync.Provider { private readonly reporter: Reporter; + private readonly knownTransactionHashes: Set = new Set(); constructor(_url?: string | { url: string; timeout: number }, network?: ethers.Networkish, reporter?: Reporter) { let url; @@ -55,15 +58,63 @@ export class RetryProvider extends zksync.Provider { } } + override _wrapTransactionResponse(txResponse: any): L2TransactionResponse { + const base = super._wrapTransactionResponse(txResponse); + this.knownTransactionHashes.add(base.hash); + return new L2TransactionResponse(base, this.reporter); + } + override _wrapTransactionReceipt(receipt: any): zksync.types.TransactionReceipt { const wrapped = super._wrapTransactionReceipt(receipt); - this.reporter.debug( - `Obtained receipt for transaction ${receipt.transactionHash}: blockNumber=${receipt.blockNumber}, status=${receipt.status}` - ); + if (!this.knownTransactionHashes.has(receipt.transactionHash)) { + this.knownTransactionHashes.add(receipt.transactionHash); + this.reporter.debug( + `Obtained receipt for L2 transaction ${receipt.transactionHash}: blockNumber=${receipt.blockNumber}, status=${receipt.status}` + ); + } return wrapped; } } -export interface AugmentedTransactionResponse extends zksync.types.TransactionResponse { - readonly reporter?: Reporter; +class L2TransactionResponse extends zksync.types.TransactionResponse implements AugmentedTransactionResponse { + public readonly kind = 'L2'; + private isWaitingReported: boolean = false; + private isReceiptReported: boolean = false; + + constructor(base: zksync.types.TransactionResponse, public readonly reporter: Reporter) { + super(base, base.provider); + } + + override async wait(confirmations?: number) { + if (!this.isWaitingReported) { + this.reporter.debug( + `Started waiting for L2 transaction ${this.hash} (from=${this.from}, nonce=${this.nonce})` + ); + this.isWaitingReported = true; + } + const receipt = await super.wait(confirmations); + if (receipt !== null && !this.isReceiptReported) { + this.reporter.debug( + `Obtained receipt for L2 transaction ${this.hash}: blockNumber=${receipt.blockNumber}, status=${receipt.status}` + ); + this.isReceiptReported = true; + } + return receipt; + } + + override replaceableTransaction(startBlock: number): L2TransactionResponse { + const base = super.replaceableTransaction(startBlock); + return new L2TransactionResponse(base, this.reporter); + } +} + +/** Wallet that retries expired nonce errors for L1 transactions. 
*/ +export class RetryableWallet extends zksync.Wallet { + constructor(privateKey: string, l2Provider: RetryProvider, l1Provider: L1Provider) { + super(privateKey, l2Provider, l1Provider); + } + + override ethWallet(): RetryableL1Wallet { + return new RetryableL1Wallet(this.privateKey, this._providerL1()); + } } diff --git a/core/tests/ts-integration/src/test-master.ts b/core/tests/ts-integration/src/test-master.ts index b63c9f3c231..d2be3237a7f 100644 --- a/core/tests/ts-integration/src/test-master.ts +++ b/core/tests/ts-integration/src/test-master.ts @@ -2,10 +2,11 @@ import * as zksync from 'zksync-ethers'; import * as ethers from 'ethers'; import { TestEnvironment, TestContext } from './types'; import { claimEtherBack } from './context-owner'; -import { RetryProvider } from './retry-provider'; +import { RetryableWallet, RetryProvider } from './retry-provider'; import { Reporter } from './reporter'; import { isNetworkLocal } from 'utils'; import { bigIntReviver } from './helpers'; +import { L1Provider } from './l1-provider'; /** * Test master is a singleton class (per suite) that is capable of providing wallets to the suite. @@ -20,8 +21,8 @@ export class TestMaster { private readonly env: TestEnvironment; readonly reporter: Reporter; - private readonly l1Provider: ethers.JsonRpcProvider; - private readonly l2Provider: zksync.Provider; + private readonly l1Provider: L1Provider; + private readonly l2Provider: RetryProvider; private readonly mainWallet: zksync.Wallet; private readonly subAccounts: zksync.Wallet[] = []; @@ -53,7 +54,7 @@ export class TestMaster { if (!suiteWalletPK) { throw new Error(`Wallet for ${suiteName} suite was not provided`); } - this.l1Provider = new ethers.JsonRpcProvider(this.env.l1NodeUrl); + this.l1Provider = new L1Provider(this.env.l1NodeUrl, this.reporter); this.l2Provider = new RetryProvider( { url: this.env.l2NodeUrl, @@ -72,7 +73,7 @@ export class TestMaster { this.l2Provider.pollingInterval = 5000; } - this.mainWallet = new zksync.Wallet(suiteWalletPK, this.l2Provider, this.l1Provider); + this.mainWallet = new RetryableWallet(suiteWalletPK, this.l2Provider, this.l1Provider); } /** @@ -113,7 +114,7 @@ export class TestMaster { */ newEmptyAccount(): zksync.Wallet { const randomPK = ethers.Wallet.createRandom().privateKey; - const newWallet = new zksync.Wallet(randomPK, this.l2Provider, this.l1Provider); + const newWallet = new RetryableWallet(randomPK, this.l2Provider, this.l1Provider); this.subAccounts.push(newWallet); return newWallet; } diff --git a/core/tests/ts-integration/src/transaction-response.ts b/core/tests/ts-integration/src/transaction-response.ts new file mode 100644 index 00000000000..a104b0107ed --- /dev/null +++ b/core/tests/ts-integration/src/transaction-response.ts @@ -0,0 +1,9 @@ +import { ethers } from 'ethers'; +import { Reporter } from './reporter'; + +export interface AugmentedTransactionResponse extends ethers.TransactionResponseParams { + readonly kind: 'L1' | 'L2'; + readonly reporter?: Reporter; + + wait(confirmations?: number, timeout?: number): Promise<ethers.TransactionReceipt | null>; +} diff --git a/core/tests/ts-integration/tests/api/contract-verification.test.ts b/core/tests/ts-integration/tests/api/contract-verification.test.ts index 7202898c4c6..519991ea9dc 100644 --- a/core/tests/ts-integration/tests/api/contract-verification.test.ts +++ b/core/tests/ts-integration/tests/api/contract-verification.test.ts @@ -10,11 +10,11 @@ import { NodeMode } from '../../src/types'; // Regular expression to match ISO dates.
const DATE_REGEX = /\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{6})?/; -const ZKSOLC_VERSION = 'v1.3.21'; -const SOLC_VERSION = '0.8.23'; -const ZK_VM_SOLC_VERSION = 'zkVM-0.8.23-1.0.0'; +const ZKSOLC_VERSION = 'v1.5.3'; +const SOLC_VERSION = '0.8.26'; +const ZK_VM_SOLC_VERSION = 'zkVM-0.8.26-1.0.1'; -const ZKVYPER_VERSION = 'v1.3.13'; +const ZKVYPER_VERSION = 'v1.5.4'; const VYPER_VERSION = '0.3.10'; type HttpMethod = 'POST' | 'GET'; @@ -54,32 +54,6 @@ describe.skip('Tests for the contract verification API', () => { const counterContract = await deployContract(alice, contracts.counter, []); const constructorArguments = counterContract.interface.encodeDeploy([]); - const requestBody = { - contractAddress: await counterContract.getAddress(), - contractName: 'contracts/counter/counter.sol:Counter', - sourceCode: getContractSource('counter/counter.sol'), - compilerZksolcVersion: ZKSOLC_VERSION, - compilerSolcVersion: SOLC_VERSION, - optimizationUsed: true, - constructorArguments, - isSystem: true - }; - let requestId = await query('POST', '/contract_verification', undefined, requestBody); - - await expectVerifyRequestToSucceed(requestId, requestBody); - }); - - test('should test zkVM solc contract verification', async () => { - let artifact = contracts.counter; - // TODO: use plugin compilation when it's ready instead of pre-compiled bytecode. - artifact.bytecode = fs.readFileSync( - `${testMaster.environment().pathToHome}/core/tests/ts-integration/contracts/counter/zkVM_bytecode.txt`, - 'utf8' - ); - - const counterContract = await deployContract(alice, artifact, []); - const constructorArguments = counterContract.interface.encodeDeploy([]); - const requestBody = { contractAddress: await counterContract.getAddress(), contractName: 'contracts/counter/counter.sol:Counter', @@ -127,7 +101,7 @@ describe.skip('Tests for the contract verification API', () => { sourceCode: standardJsonInput, codeFormat: 'solidity-standard-json-input', compilerZksolcVersion: ZKSOLC_VERSION, - compilerSolcVersion: SOLC_VERSION, + compilerSolcVersion: ZK_VM_SOLC_VERSION, optimizationUsed: true, constructorArguments }; @@ -144,8 +118,8 @@ describe.skip('Tests for the contract verification API', () => { const bytecodePath = `${ testMaster.environment().pathToHome - }/core/tests/ts-integration/contracts/yul/artifacts/Empty.yul/Empty.yul.zbin`; - const bytecode = fs.readFileSync(bytecodePath); + }/core/tests/ts-integration/contracts/yul/artifacts/Empty.yul/yul/Empty.yul.zbin`; + const bytecode = fs.readFileSync(bytecodePath, 'utf8'); const contractFactory = new zksync.ContractFactory([], bytecode, alice); const deployTx = await contractFactory.deploy(); @@ -157,7 +131,7 @@ describe.skip('Tests for the contract verification API', () => { sourceCode, codeFormat: 'yul-single-file', compilerZksolcVersion: ZKSOLC_VERSION, - compilerSolcVersion: SOLC_VERSION, + compilerSolcVersion: ZK_VM_SOLC_VERSION, optimizationUsed: true, constructorArguments: '0x', isSystem: true diff --git a/core/tests/ts-integration/tests/api/debug.test.ts b/core/tests/ts-integration/tests/api/debug.test.ts index dd1ea141a41..054aa57cf64 100644 --- a/core/tests/ts-integration/tests/api/debug.test.ts +++ b/core/tests/ts-integration/tests/api/debug.test.ts @@ -29,8 +29,8 @@ describe('Debug methods', () => { test('Should not fail for infinity recursion', async () => { const bytecodePath = `${ testMaster.environment().pathToHome - }/core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin`; - const bytecode = 
fs.readFileSync(bytecodePath); + }/core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/zkasm/deep_stak.zkasm.zbin`; + const bytecode = fs.readFileSync(bytecodePath, 'utf-8'); const contractFactory = new zksync.ContractFactory([], bytecode, testMaster.mainAccount()); const deployTx = await contractFactory.deploy(); diff --git a/core/tests/ts-integration/tests/base-token.test.ts b/core/tests/ts-integration/tests/base-token.test.ts index 70df1dff928..7f7974205dc 100644 --- a/core/tests/ts-integration/tests/base-token.test.ts +++ b/core/tests/ts-integration/tests/base-token.test.ts @@ -7,7 +7,7 @@ import { Token } from '../src/types'; import * as zksync from 'zksync-ethers'; import * as ethers from 'ethers'; -import { scaledGasPrice } from '../src/helpers'; +import { scaledGasPrice, waitForBlockToBeFinalizedOnL1 } from '../src/helpers'; const SECONDS = 1000; jest.setTimeout(100 * SECONDS); @@ -168,7 +168,8 @@ describe('base ERC20 contract checks', () => { const withdrawalPromise = alice.withdraw({ token: baseTokenDetails.l2Address, amount }); await expect(withdrawalPromise).toBeAccepted([]); const withdrawalTx = await withdrawalPromise; - await withdrawalTx.waitFinalize(); + const l2Receipt = await withdrawalTx.wait(); + await waitForBlockToBeFinalizedOnL1(alice, l2Receipt!.blockNumber); await expect(alice.finalizeWithdrawal(withdrawalTx.hash)).toBeAccepted([]); const receipt = await alice._providerL2().getTransactionReceipt(withdrawalTx.hash); diff --git a/core/tests/ts-integration/tests/erc20.test.ts b/core/tests/ts-integration/tests/erc20.test.ts index 659d0368844..382c625ac70 100644 --- a/core/tests/ts-integration/tests/erc20.test.ts +++ b/core/tests/ts-integration/tests/erc20.test.ts @@ -8,9 +8,8 @@ import { shouldChangeTokenBalances, shouldOnlyTakeFee } from '../src/modifiers/b import * as zksync from 'zksync-ethers'; import * as ethers from 'ethers'; -import { scaledGasPrice, waitUntilBlockFinalized } from '../src/helpers'; +import { scaledGasPrice, waitForBlockToBeFinalizedOnL1 } from '../src/helpers'; import { L2_DEFAULT_ETH_PER_ACCOUNT } from '../src/context-owner'; -import { sleep } from 'zksync-ethers/build/utils'; describe('L1 ERC20 contract checks', () => { let testMaster: TestMaster; @@ -176,8 +175,7 @@ describe('L1 ERC20 contract checks', () => { await expect(withdrawalPromise).toBeAccepted([l2BalanceChange, feeCheck]); const withdrawalTx = await withdrawalPromise; const l2TxReceipt = await alice.provider.getTransactionReceipt(withdrawalTx.hash); - await waitUntilBlockFinalized(alice, l2TxReceipt!.blockNumber); - // await withdrawalTx.waitFinalize(); + await waitForBlockToBeFinalizedOnL1(alice, l2TxReceipt!.blockNumber); // Note: For L1 we should use L1 token address. const l1BalanceChange = await shouldChangeTokenBalances( @@ -187,7 +185,6 @@ describe('L1 ERC20 contract checks', () => { l1: true } ); - await sleep(60000); await expect(alice.finalizeWithdrawal(withdrawalTx.hash)).toBeAccepted([l1BalanceChange]); }); @@ -219,8 +216,7 @@ describe('L1 ERC20 contract checks', () => { // It throws once it gets status == 0 in the receipt and doesn't wait for the finalization. const l2Hash = zksync.utils.getL2HashFromPriorityOp(l1Receipt, await alice.provider.getMainContractAddress()); const l2TxReceipt = await alice.provider.getTransactionReceipt(l2Hash); - await waitUntilBlockFinalized(alice, l2TxReceipt!.blockNumber); - await sleep(35000); + await waitForBlockToBeFinalizedOnL1(alice, l2TxReceipt!.blockNumber); // Claim failed deposit. 
await expect(alice.claimFailedDeposit(l2Hash)).toBeAccepted(); await expect(alice.getBalanceL1(tokenDetails.l1Address)).resolves.toEqual(initialBalance); diff --git a/core/tests/ts-integration/tests/ether.test.ts b/core/tests/ts-integration/tests/ether.test.ts index 5bb1138654d..abc9237025a 100644 --- a/core/tests/ts-integration/tests/ether.test.ts +++ b/core/tests/ts-integration/tests/ether.test.ts @@ -11,8 +11,7 @@ import { import { checkReceipt } from '../src/modifiers/receipt-check'; import * as zksync from 'zksync-ethers'; -import { scaledGasPrice, waitUntilBlockFinalized } from '../src/helpers'; -import { sleep } from 'utils'; +import { scaledGasPrice, waitForBlockToBeFinalizedOnL1 } from '../src/helpers'; import { ethers } from 'ethers'; describe('ETH token checks', () => { @@ -50,7 +49,9 @@ describe('ETH token checks', () => { ? await shouldChangeETHBalances([{ wallet: alice, change: amount }], { l1ToL2: true }) - : await shouldChangeTokenBalances(l2EthTokenAddressNonBase, [{ wallet: alice, change: amount }]); + : await shouldChangeTokenBalances(l2EthTokenAddressNonBase, [{ wallet: alice, change: amount }], { + ignoreUndeployedToken: true + }); // Variables used only for base token implementation const l1BaseTokenBalanceBefore = await alice.getBalanceL1(baseTokenAddress); @@ -254,10 +255,7 @@ describe('ETH token checks', () => { await expect(withdrawalPromise).toBeAccepted([l2ethBalanceChange]); const withdrawalTx = await withdrawalPromise; const l2TxReceipt = await alice.provider.getTransactionReceipt(withdrawalTx.hash); - await waitUntilBlockFinalized(alice, l2TxReceipt!.blockNumber); - // await withdrawalTx.waitFinalize(); - - await sleep(60); + await waitForBlockToBeFinalizedOnL1(alice, l2TxReceipt!.blockNumber); // TODO (SMA-1374): Enable L1 ETH checks as soon as they're supported. 
await expect(alice.finalizeWithdrawal(withdrawalTx.hash)).toBeAccepted(); diff --git a/core/tests/ts-integration/typings/jest.d.ts b/core/tests/ts-integration/typings/jest.d.ts index 4d8f1c3530c..3bb62732cf7 100644 --- a/core/tests/ts-integration/typings/jest.d.ts +++ b/core/tests/ts-integration/typings/jest.d.ts @@ -1,6 +1,8 @@ import { MatcherModifier } from '../src/matchers/transaction-modifiers'; export declare global { + function rawWriteToConsole(message: string, ...args: any[]); + namespace jest { interface Matchers<R> { // Generic matchers diff --git a/core/tests/vm-benchmark/src/transaction.rs b/core/tests/vm-benchmark/src/transaction.rs index 90e1c6360b8..d5fedfa4df9 100644 --- a/core/tests/vm-benchmark/src/transaction.rs +++ b/core/tests/vm-benchmark/src/transaction.rs @@ -47,7 +47,7 @@ pub fn get_deploy_tx_with_gas_limit(code: &[u8], gas_limit: u32, nonce: u32) -> .collect(); let mut signed = L2Tx::new_signed( - CONTRACT_DEPLOYER_ADDRESS, + Some(CONTRACT_DEPLOYER_ADDRESS), calldata, Nonce(nonce), tx_fee(gas_limit), @@ -76,7 +76,7 @@ fn tx_fee(gas_limit: u32) -> Fee { pub fn get_transfer_tx(nonce: u32) -> Transaction { let mut signed = L2Tx::new_signed( - PRIVATE_KEY.address(), + Some(PRIVATE_KEY.address()), vec![], // calldata Nonce(nonce), tx_fee(1_000_000), @@ -109,7 +109,7 @@ pub fn get_load_test_deploy_tx() -> Transaction { factory_deps.push(LOAD_TEST_CONTRACT.bytecode.clone()); let mut signed = L2Tx::new_signed( - CONTRACT_DEPLOYER_ADDRESS, + Some(CONTRACT_DEPLOYER_ADDRESS), create_calldata, Nonce(0), tx_fee(100_000_000), @@ -147,7 +147,7 @@ pub fn get_load_test_tx(nonce: u32, gas_limit: u32, params: LoadTestParams) -> T .expect("cannot encode `execute` inputs"); let mut signed = L2Tx::new_signed( - *LOAD_TEST_CONTRACT_ADDRESS, + Some(*LOAD_TEST_CONTRACT_ADDRESS), calldata, Nonce(nonce), tx_fee(gas_limit), diff --git a/core/tests/vm-benchmark/src/vm.rs b/core/tests/vm-benchmark/src/vm.rs index 0aeefbf5a19..24662ff6350 100644 --- a/core/tests/vm-benchmark/src/vm.rs +++ b/core/tests/vm-benchmark/src/vm.rs @@ -88,7 +88,7 @@ impl BenchmarkingVmFactory for Fast { system_env: SystemEnv, storage: &'static InMemoryStorage, ) -> Self::Instance { - vm_fast::Vm::new(batch_env, system_env, storage) + vm_fast::Vm::custom(batch_env, system_env, storage) } } diff --git a/docker-compose-cpu-runner.yml b/docker-compose-cpu-runner.yml index e0f751130eb..beb54f3ade9 100644 --- a/docker-compose-cpu-runner.yml +++ b/docker-compose-cpu-runner.yml @@ -11,7 +11,7 @@ services: source: ./etc/reth/chaindata target: /chaindata - command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 600ms --chain /chaindata/reth_config + command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config ports: - 127.0.0.1:8545:8545 diff --git a/docker-compose-gpu-runner-cuda-12-0.yml b/docker-compose-gpu-runner-cuda-12-0.yml index f2089446a41..35a0faeb962 100644 --- a/docker-compose-gpu-runner-cuda-12-0.yml +++ b/docker-compose-gpu-runner-cuda-12-0.yml @@ -11,7 +11,7 @@ services: source: ./etc/reth/chaindata target: /chaindata - command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 600ms --chain /chaindata/reth_config + command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config ports: -
127.0.0.1:8545:8545 diff --git a/docker-compose-gpu-runner.yml b/docker-compose-gpu-runner.yml index 35c6c3778f2..f95ae0d5f54 100644 --- a/docker-compose-gpu-runner.yml +++ b/docker-compose-gpu-runner.yml @@ -11,7 +11,7 @@ services: source: ./etc/reth/chaindata target: /chaindata - command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 600ms --chain /chaindata/reth_config + command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config ports: - 127.0.0.1:8545:8545 diff --git a/docker-compose.yml b/docker-compose.yml index 7e1b52f8334..1e3a273ec9a 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -13,7 +13,7 @@ services: source: ./etc/reth/chaindata target: /chaindata - command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 600ms --chain /chaindata/reth_config + command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config postgres: diff --git a/docker/build-base/Dockerfile b/docker/build-base/Dockerfile index be3c6ddb452..16ea566cef7 100644 --- a/docker/build-base/Dockerfile +++ b/docker/build-base/Dockerfile @@ -13,3 +13,4 @@ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ rustup default nightly-2024-08-01 RUN cargo install sqlx-cli --version 0.8.0 +RUN cargo install sccache --version 0.8.1 diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index 7ed1906b857..7fcc695bf70 100644 --- a/docker/contract-verifier/Dockerfile +++ b/docker/contract-verifier/Dockerfile @@ -1,14 +1,27 @@ # syntax=docker/dockerfile:experimental -FROM matterlabs/zksync-build-base:latest as builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder + +ARG CUDA_ARCH=89 +ENV CUDAARCHS=${CUDA_ARCH} +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} WORKDIR /usr/src/zksync COPY . . RUN cargo build --release -FROM debian:bookworm-slim +FROM ghcr.io/matter-labs/zksync-runtime-base:latest -RUN apt-get update && apt-get install -y curl libpq5 ca-certificates wget python3 jq && rm -rf /var/lib/apt/lists/* +RUN apt-get update && apt-get install -y wget python3 jq && rm -rf /var/lib/apt/lists/* # install zksolc 1.3.x RUN skip_versions="v1.3.12 v1.3.15 v1.3.20" && \ diff --git a/docker/external-node/Dockerfile b/docker/external-node/Dockerfile index dc989f9ba4e..79b79cbc5f6 100644 --- a/docker/external-node/Dockerfile +++ b/docker/external-node/Dockerfile @@ -1,14 +1,23 @@ # Will work locally only after prior contracts build -FROM matterlabs/zksync-build-base:latest as builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder + +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} WORKDIR /usr/src/zksync COPY . . 
RUN cargo build --release -FROM debian:bookworm-slim - -RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* +FROM ghcr.io/matter-labs/zksync-runtime-base:latest COPY --from=builder /usr/src/zksync/target/release/zksync_external_node /usr/bin COPY --from=builder /usr/src/zksync/target/release/block_reverter /usr/bin @@ -18,6 +27,8 @@ COPY contracts/system-contracts/bootloader/build/artifacts/ /contracts/system-co COPY contracts/system-contracts/contracts-preprocessed/artifacts/ /contracts/system-contracts/contracts-preprocessed/artifacts/ COPY contracts/system-contracts/contracts-preprocessed/precompiles/artifacts/ /contracts/system-contracts/contracts-preprocessed/precompiles/artifacts/ COPY contracts/system-contracts/artifacts-zk /contracts/system-contracts/artifacts-zk +COPY contracts/l1-contracts/out/ /contracts/l1-contracts/out/ +# TODO Remove once we use foundry inside contracts repo COPY contracts/l1-contracts/artifacts/ /contracts/l1-contracts/artifacts/ COPY contracts/l2-contracts/artifacts-zk/ /contracts/l2-contracts/artifacts-zk/ COPY etc/tokens/ /etc/tokens/ diff --git a/docker/proof-fri-gpu-compressor/Dockerfile b/docker/proof-fri-gpu-compressor/Dockerfile index 45f2ffa51b0..e744787c825 100644 --- a/docker/proof-fri-gpu-compressor/Dockerfile +++ b/docker/proof-fri-gpu-compressor/Dockerfile @@ -1,10 +1,20 @@ # Will work locally only after prior universal setup key download -FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 as builder +FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 AS builder ARG DEBIAN_FRONTEND=noninteractive ARG CUDA_ARCH=89 ENV CUDAARCHS=${CUDA_ARCH} +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} RUN apt-get update && apt-get install -y curl clang openssl libssl-dev gcc g++ git \ pkg-config build-essential libclang-dev && \ @@ -22,6 +32,14 @@ RUN curl -Lo cmake-3.24.2-linux-x86_64.sh https://github.com/Kitware/CMake/relea chmod +x cmake-3.24.2-linux-x86_64.sh && \ ./cmake-3.24.2-linux-x86_64.sh --skip-license --prefix=/usr/local +# install sccache +RUN curl -Lo sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz https://github.com/mozilla/sccache/releases/download/v0.8.1/sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz && \ + tar -xzf sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz && \ + cp sccache-v0.8.1-x86_64-unknown-linux-musl/sccache /usr/local/sbin/ && \ + rm -rf sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz && \ + rm -rf sccache-v0.8.1-x86_64-unknown-linux-musl && \ + chmod +x /usr/local/sbin/sccache + WORKDIR /usr/src/zksync COPY . . 
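The builder stages patched above all repeat the same sccache wiring: four build args (`SCCACHE_GCS_BUCKET`, `SCCACHE_GCS_SERVICE_ACCOUNT`, `SCCACHE_GCS_RW_MODE`, `RUSTC_WRAPPER`) default to empty strings and are re-exported as env vars, so compilation caching stays off unless the caller opts in. A minimal sketch of such an opt-in build follows; the bucket, service account, and image tag are illustrative placeholders, not values taken from this repo:

```bash
# Hypothetical invocation: build the external-node image with sccache
# writing its compilation cache to a GCS bucket. All four values below
# are placeholders; only the --build-arg names come from the Dockerfiles.
docker build \
  --build-arg SCCACHE_GCS_BUCKET=my-sccache-bucket \
  --build-arg SCCACHE_GCS_SERVICE_ACCOUNT=ci-builder@my-project.iam.gserviceaccount.com \
  --build-arg SCCACHE_GCS_RW_MODE=READ_WRITE \
  --build-arg RUSTC_WRAPPER=sccache \
  -f docker/external-node/Dockerfile \
  -t zksync-external-node:local .
```

With the args left unset, `RUSTC_WRAPPER` stays empty and cargo compiles without a wrapper, so the same images still build on machines with no GCS access.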
diff --git a/docker/prover-fri-gateway/Dockerfile b/docker/prover-fri-gateway/Dockerfile index de59451fee8..3e631b35156 100644 --- a/docker/prover-fri-gateway/Dockerfile +++ b/docker/prover-fri-gateway/Dockerfile @@ -1,14 +1,24 @@ -FROM matterlabs/zksync-build-base:latest as builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder ARG DEBIAN_FRONTEND=noninteractive +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} + WORKDIR /usr/src/zksync COPY . . RUN cd prover && cargo build --release --bin zksync_prover_fri_gateway -FROM debian:bookworm-slim -RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* +FROM ghcr.io/matter-labs/zksync-runtime-base:latest # copy VK required for proof wrapping COPY prover/data/keys/ /prover/data/keys/ diff --git a/docker/prover-gpu-fri/Dockerfile b/docker/prover-gpu-fri/Dockerfile index ad3ff1ff719..2a680a49c5d 100644 --- a/docker/prover-gpu-fri/Dockerfile +++ b/docker/prover-gpu-fri/Dockerfile @@ -1,10 +1,21 @@ -FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 as builder +FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 AS builder ARG DEBIAN_FRONTEND=noninteractive ARG CUDA_ARCH=89 ENV CUDAARCHS=${CUDA_ARCH} +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} + RUN apt-get update && apt-get install -y curl clang openssl libssl-dev gcc g++ \ pkg-config build-essential libclang-dev && \ rm -rf /var/lib/apt/lists/* @@ -21,6 +32,14 @@ RUN curl -Lo cmake-3.24.2-linux-x86_64.sh https://github.com/Kitware/CMake/relea chmod +x cmake-3.24.2-linux-x86_64.sh && \ ./cmake-3.24.2-linux-x86_64.sh --skip-license --prefix=/usr/local +# install sccache +RUN curl -Lo sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz https://github.com/mozilla/sccache/releases/download/v0.8.1/sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz && \ + tar -xzf sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz && \ + cp sccache-v0.8.1-x86_64-unknown-linux-musl/sccache /usr/local/sbin/ && \ + rm -rf sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz && \ + rm -rf sccache-v0.8.1-x86_64-unknown-linux-musl && \ + chmod +x /usr/local/sbin/sccache + WORKDIR /usr/src/zksync COPY . . diff --git a/docker/prover-job-monitor/Dockerfile b/docker/prover-job-monitor/Dockerfile index 25d5dcd3af9..88b46df27ff 100644 --- a/docker/prover-job-monitor/Dockerfile +++ b/docker/prover-job-monitor/Dockerfile @@ -1,14 +1,24 @@ -FROM matterlabs/zksync-build-base:latest as builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder ARG DEBIAN_FRONTEND=noninteractive +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} + WORKDIR /usr/src/zksync COPY . . 
RUN cd prover && cargo build --release --bin zksync_prover_job_monitor -FROM debian:bookworm-slim -RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* +FROM ghcr.io/matter-labs/zksync-runtime-base:latest COPY --from=builder /usr/src/zksync/prover/target/release/zksync_prover_job_monitor /usr/bin/ diff --git a/docker/runtime-base/Dockerfile b/docker/runtime-base/Dockerfile new file mode 100644 index 00000000000..09d920b1c43 --- /dev/null +++ b/docker/runtime-base/Dockerfile @@ -0,0 +1,9 @@ +FROM debian:bookworm-slim + +RUN apt-get update && \ + apt-get install -y \ + curl \ + libpq5 \ + ca-certificates \ + && \ + rm -rf /var/lib/apt/lists/* diff --git a/docker/server-v2/Dockerfile b/docker/server-v2/Dockerfile index e5d378c3b6d..460ac70c622 100644 --- a/docker/server-v2/Dockerfile +++ b/docker/server-v2/Dockerfile @@ -1,6 +1,17 @@ # Will work locally only after prior contracts build # syntax=docker/dockerfile:experimental -FROM matterlabs/zksync-build-base:latest as builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder + +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} WORKDIR /usr/src/zksync @@ -8,9 +19,9 @@ COPY . . RUN cargo build --release --features=rocksdb/io-uring -FROM debian:bookworm-slim +FROM ghcr.io/matter-labs/zksync-runtime-base:latest -RUN apt-get update && apt-get install -y curl libpq5 liburing-dev ca-certificates && \ +RUN apt-get update && apt-get install -y liburing-dev && \ rm -rf /var/lib/apt/lists/* EXPOSE 3000 @@ -24,6 +35,8 @@ COPY contracts/system-contracts/bootloader/build/artifacts/ /contracts/system-co COPY contracts/system-contracts/contracts-preprocessed/artifacts/ /contracts/system-contracts/contracts-preprocessed/artifacts/ COPY contracts/system-contracts/contracts-preprocessed/precompiles/artifacts/ /contracts/system-contracts/contracts-preprocessed/precompiles/artifacts/ COPY contracts/system-contracts/artifacts-zk /contracts/system-contracts/artifacts-zk +COPY contracts/l1-contracts/out/ /contracts/l1-contracts/out/ +# TODO Remove once we use foundry inside contracts repo COPY contracts/l1-contracts/artifacts/ /contracts/l1-contracts/artifacts/ COPY contracts/l2-contracts/artifacts-zk/ /contracts/l2-contracts/artifacts-zk/ COPY etc/tokens/ /etc/tokens/ diff --git a/docker/snapshots-creator/Dockerfile b/docker/snapshots-creator/Dockerfile index 10eef06dfbb..2d3c8306498 100644 --- a/docker/snapshots-creator/Dockerfile +++ b/docker/snapshots-creator/Dockerfile @@ -1,14 +1,25 @@ # syntax=docker/dockerfile:experimental -FROM matterlabs/zksync-build-base:latest as builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder + +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} WORKDIR /usr/src/zksync COPY . . 
RUN cargo build --release --bin snapshots_creator -FROM debian:bookworm-slim +FROM ghcr.io/matter-labs/zksync-runtime-base:latest -RUN apt-get update && apt-get install -y curl libpq5 liburing-dev ca-certificates && \ +RUN apt-get update && apt-get install -y liburing-dev && \ rm -rf /var/lib/apt/lists/* COPY --from=builder /usr/src/zksync/target/release/snapshots_creator /usr/bin diff --git a/docker/verified-sources-fetcher/Dockerfile b/docker/verified-sources-fetcher/Dockerfile index 972f85d0faf..87475f3187f 100644 --- a/docker/verified-sources-fetcher/Dockerfile +++ b/docker/verified-sources-fetcher/Dockerfile @@ -1,14 +1,26 @@ -FROM matterlabs/zksync-build-base:latest as builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder ARG DEBIAN_FRONTEND=noninteractive +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} + WORKDIR /usr/src/zksync COPY . . RUN cargo build --release --bin verified_sources_fetcher -FROM debian:bookworm-slim -RUN apt-get update && apt-get install -y apt-transport-https ca-certificates gnupg curl git && rm -rf /var/lib/apt/lists/* +FROM ghcr.io/matter-labs/zksync-runtime-base:latest + +RUN apt-get update && apt-get install -y apt-transport-https gnupg git && rm -rf /var/lib/apt/lists/* RUN curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | gpg --dearmor -o /usr/share/keyrings/cloud.google.gpg RUN echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list diff --git a/docker/witness-generator/Dockerfile b/docker/witness-generator/Dockerfile index 2eebe07515e..06d836c9fa5 100644 --- a/docker/witness-generator/Dockerfile +++ b/docker/witness-generator/Dockerfile @@ -1,17 +1,26 @@ -FROM matterlabs/zksync-build-base:latest AS builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder ARG DEBIAN_FRONTEND=noninteractive -ARG RUST_FLAGS="" +ARG RUST_FLAGS="--cfg=no_cuda" ENV RUSTFLAGS=${RUST_FLAGS} +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} + WORKDIR /usr/src/zksync COPY . . 
RUN cd prover && cargo build --release --bin zksync_witness_generator -FROM debian:bookworm-slim - -RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* +FROM ghcr.io/matter-labs/zksync-runtime-base:latest COPY prover/data/keys/ /prover/data/keys/ diff --git a/docker/witness-vector-generator/Dockerfile b/docker/witness-vector-generator/Dockerfile index e315f670101..eb46b459c69 100644 --- a/docker/witness-vector-generator/Dockerfile +++ b/docker/witness-vector-generator/Dockerfile @@ -1,15 +1,26 @@ -FROM matterlabs/zksync-build-base:latest as builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder ARG DEBIAN_FRONTEND=noninteractive +ARG RUST_FLAGS="--cfg=no_cuda" +ENV RUSTFLAGS=${RUST_FLAGS} + +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} WORKDIR /usr/src/zksync COPY . . RUN cd prover && cargo build --release --bin zksync_witness_vector_generator -FROM debian:bookworm-slim - -RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* +FROM ghcr.io/matter-labs/zksync-runtime-base:latest # copy finalization hints required for witness vector generation COPY prover/data/keys/ /prover/data/keys/ diff --git a/docs/guides/external-node/00_quick_start.md b/docs/guides/external-node/00_quick_start.md index 3902fdc1556..5eb601e3d59 100644 --- a/docs/guides/external-node/00_quick_start.md +++ b/docs/guides/external-node/00_quick_start.md @@ -65,12 +65,12 @@ The HTTP JSON-RPC API can be accessed on port `3060` and WebSocket API can be ac > [!NOTE] > -> To stop state growth, you can enable state pruning by uncommenting `EN_PRUNING_ENABLED: true` in docker compose file, -> you can read more about pruning in +> To stop historical DB growth, you can enable DB pruning by uncommenting `EN_PRUNING_ENABLED: true` in docker compose +> file, you can read more about pruning in > [08_pruning.md](https://github.com/matter-labs/zksync-era/blob/main/docs/guides/external-node/08_pruning.md) - 32 GB of RAM and a relatively modern CPU -- 30 GB of storage for testnet nodes +- 50 GB of storage for testnet nodes - 300 GB of storage for mainnet nodes - 100 Mbps connection (1 Gbps+ recommended) diff --git a/docs/guides/external-node/09_decentralization.md b/docs/guides/external-node/09_decentralization.md index 41f59486bef..caf93a85a92 100644 --- a/docs/guides/external-node/09_decentralization.md +++ b/docs/guides/external-node/09_decentralization.md @@ -7,85 +7,8 @@ will eventually be used instead of ZKsync API for synchronizing data. On the gossipnet, the data integrity will be protected by the BFT (byzantine fault-tolerant) consensus algorithm (currently data is signed just by the main node though). -## Enabling gossipnet on your node - -> [!NOTE] -> -> Because the data transmitted over the gossipnet is signed by the main node (and eventually by the consensus quorum), -> the signatures need to be backfilled to the node's local storage the first time you switch from centralized (ZKsync -> API based) synchronization to the decentralized (gossipnet based) synchronization (this is a one-time thing). 
With the -> current implementation it may take a couple of hours and gets faster the more nodes you add to the -> `gossip_static_outbound` list (see below). We are working to remove this inconvenience. - -> [!NOTE] -> -> The minimal supported server version for this is -> [24.11.0](https://github.com/matter-labs/zksync-era/releases/tag/core-v24.11.0) - -### Generating secrets - -Each participant node of the gossipnet has to have an identity (a public/secret key pair). When running your node for -the first time, generate the secrets by running: - -``` -docker run --entrypoint /usr/bin/zksync_external_node "matterlabs/external-node:2.0-v24.12.0" generate-secrets > consensus_secrets.yaml -chmod 600 consensus_secrets.yaml -``` - -> [!NOTE] -> -> NEVER reveal the secret keys used by your node. Otherwise, someone can impersonate your node on the gossipnet. If you -> suspect that your secret key has been leaked, you can generate fresh keys using the same tool. -> -> If you want someone else to connect to your node, give them your PUBLIC key instead. Both public and secret keys are -> present in the `consensus_secrets.yaml` (public keys are in comments). - -### Preparing configuration file - -Copy the template of the consensus configuration file (for -[mainnet](https://github.com/matter-labs/zksync-era/blob/main/docs/guides/external-node/prepared_configs/mainnet_consensus_config.yaml) -or -[testnet](https://github.com/matter-labs/zksync-era/blob/main/docs/guides/external-node/prepared_configs/testnet_consensus_config.yaml) -). - -> [!NOTE] -> -> You need to fill in the `public_addr` field. This is the address that will (not implemented yet) be advertised over -> gossipnet to other nodes, so that they can establish connections to your node. If you don't want to expose your node -> to the public internet, you can use IP in your local network. - -Currently the config contains the following fields (refer to config -[schema](https://github.com/matter-labs/zksync-era/blob/990676c5f84afd2ff8cd337f495c82e8d1f305a4/core/lib/protobuf_config/src/proto/core/consensus.proto#L66) -for more details): - -- `server_addr` - local TCP socket address that the node should listen on for incoming connections. Note that this is an - additional TCP port that will be opened by the node. -- `public_addr` - the public address of your node that will be advertised over the gossipnet. -- `max_payload_size` - limit (in bytes) on the sized of the ZKsync ERA block received from the gossipnet. This protects - your node from getting DoS`ed by too large network messages. Use the value from the template. -- `gossip_dynamic_inbound_limit` - maximal number of unauthenticated concurrent inbound connections that can be - established to your node. This is a DDoS protection measure. -- `gossip_static_outbound` - list of trusted peers that your node should always try to connect to. The template contains - the nodes maintained by Matterlabs, but you can add more if you know any. Note that the list contains both the network - address AND the public key of the node - this prevents spoofing attacks. - -### Setting environment variables - -Uncomment (or add) the following lines in your `.env` config: - -``` -EN_CONSENSUS_CONFIG_PATH=... -EN_CONSENSUS_SECRETS_PATH=... -``` - -These variables should point to your consensus config and secrets files that we have just created. Tweak the paths to -the files if you have placed them differently. 
- ### Add `--enable-consensus` flag to your entry point command -For the consensus configuration to take effect you have to add `--enable-consensus` flag to the command line when -running the node, for example: - -``` -docker run "matterlabs/external-node:2.0-v24.12.0" --enable-consensus -``` +For the consensus configuration to take effect you have to add `--enable-consensus` flag when running the node. You can +do that by editing the docker compose files (mainnet-external-node-docker-compose.yml or +testnet-external-node-docker-compose.yml) and uncommenting the line with `--enable-consensus`. diff --git a/docs/guides/external-node/building-from-scratch/Dockerfile b/docs/guides/external-node/building-from-scratch/Dockerfile index da098df91d5..5b015a4545b 100644 --- a/docs/guides/external-node/building-from-scratch/Dockerfile +++ b/docs/guides/external-node/building-from-scratch/Dockerfile @@ -11,8 +11,7 @@ ENV ZKSYNC_HOME=/usr/src/zksync/zksync-era ENV PATH="${ZKSYNC_HOME}/bin:${PATH}" # build zk tool -RUN zk -RUN yarn zk build +RUN zkt # build rust RUN cargo build --release @@ -20,12 +19,7 @@ RUN cp target/release/zksync_external_node /usr/bin # build contracts RUN git submodule update --init --recursive -RUN zk run yarn -RUN zk compiler all || true -RUN rm /root/.cache/hardhat-nodejs/compilers-v2/linux-amd64/solc-*.does.not.work || true -RUN zk compiler all -RUN zk contract build -RUN zk f yarn run l2-contracts build +RUN zk_supervisor contracts # copy migrations (node expects them to be in specific directory) RUN cp -r core/lib/dal/migrations/ migrations diff --git a/docs/guides/external-node/docker-compose-examples/configs/generate_secrets.sh b/docs/guides/external-node/docker-compose-examples/configs/generate_secrets.sh new file mode 100755 index 00000000000..e4d8ceed67b --- /dev/null +++ b/docs/guides/external-node/docker-compose-examples/configs/generate_secrets.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +if [ ! 
-s $1 ]; then + /usr/bin/zksync_external_node generate-secrets > $1 +fi diff --git a/docs/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml b/docs/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml new file mode 100644 index 00000000000..01c9d323a93 --- /dev/null +++ b/docs/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml @@ -0,0 +1,10 @@ +server_addr: '0.0.0.0:3054' +public_addr: '127.0.0.1:3054' +max_payload_size: 5000000 +gossip_dynamic_inbound_limit: 100 +gossip_static_outbound: + # preconfigured ENs owned by Matterlabs that you can connect to + - key: 'node:public:ed25519:68d29127ab03408bf5c838553b19c32bdb3aaaae9bf293e5e078c3a0d265822a' + addr: 'external-node-consensus-mainnet.zksync.dev:3054' + - key: 'node:public:ed25519:b521e1bb173d04bc83d46b859d1296378e94a40427a6beb9e7fdd17cbd934c11' + addr: 'external-node-moby-consensus-mainnet.zksync.dev:3054' diff --git a/docs/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml b/docs/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml new file mode 100644 index 00000000000..cfcc6b9d43e --- /dev/null +++ b/docs/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml @@ -0,0 +1,10 @@ +server_addr: '0.0.0.0:3054' +public_addr: '127.0.0.1:3054' +max_payload_size: 5000000 +gossip_dynamic_inbound_limit: 100 +gossip_static_outbound: + # preconfigured ENs owned by Matterlabs that you can connect to + - key: 'node:public:ed25519:4a94067664e7b8d0927ab1443491dab71a1d0c63f861099e1852f2b6d0831c3e' + addr: 'external-node-consensus-sepolia.zksync.dev:3054' + - key: 'node:public:ed25519:cfbbebc74127099680584f07a051a2573e2dd7463abdd000d31aaa44a7985045' + addr: 'external-node-moby-consensus-sepolia.zksync.dev:3054' diff --git a/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml b/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml index 369ce50be0b..64bef02b17a 100644 --- a/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml +++ b/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml @@ -1,16 +1,16 @@ -version: "3.2" +name: "mainnet-node" services: prometheus: image: prom/prometheus:v2.35.0 volumes: - - mainnet-prometheus-data:/prometheus + - prometheus-data:/prometheus - ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml expose: - 9090 grafana: image: grafana/grafana:9.3.6 volumes: - - mainnet-grafana-data:/var/lib/grafana + - grafana-data:/var/lib/grafana - ./grafana/provisioning:/etc/grafana/provisioning environment: GF_AUTH_ANONYMOUS_ORG_ROLE: "Admin" @@ -37,7 +37,7 @@ services: expose: - 5430 volumes: - - mainnet-postgres:/var/lib/postgresql/data + - postgres:/var/lib/postgresql/data healthcheck: interval: 1s timeout: 3s @@ -49,17 +49,39 @@ services: environment: - POSTGRES_PASSWORD=notsecurepassword - PGPORT=5430 + # Generation of consensus secrets. + # The secrets are generated iff the secrets file doesn't already exist. 
+ generate-secrets: + image: "matterlabs/external-node:2.0-v24.16.0" + entrypoint: + [ + "/configs/generate_secrets.sh", + "/configs/mainnet_consensus_secrets.yaml", + ] + volumes: + - ./configs:/configs external-node: image: "matterlabs/external-node:2.0-v24.16.0" + entrypoint: + [ + "/usr/bin/entrypoint.sh", + # Uncomment the following line to enable consensus + # "--enable-consensus", + ] + restart: always depends_on: postgres: condition: service_healthy + generate-secrets: + condition: service_completed_successfully ports: + - "0.0.0.0:3054:3054" # consensus public port - "127.0.0.1:3060:3060" - "127.0.0.1:3061:3061" - "127.0.0.1:3081:3081" volumes: - - mainnet-rocksdb:/db + - rocksdb:/db + - ./configs:/configs expose: - 3322 environment: @@ -83,8 +105,11 @@ services: EN_SNAPSHOTS_OBJECT_STORE_MODE: "GCSAnonymousReadOnly" RUST_LOG: "warn,zksync=info,zksync_core::metadata_calculator=debug,zksync_state=debug,zksync_utils=debug,zksync_web3_decl::client=error" + EN_CONSENSUS_CONFIG_PATH: "/configs/mainnet_consensus_config.yaml" + EN_CONSENSUS_SECRETS_PATH: "/configs/mainnet_consensus_secrets.yaml" + volumes: - mainnet-postgres: {} - mainnet-rocksdb: {} - mainnet-prometheus-data: {} - mainnet-grafana-data: {} + postgres: {} + rocksdb: {} + prometheus-data: {} + grafana-data: {} diff --git a/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml b/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml index 1417c6cc360..f865f500c5b 100644 --- a/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml +++ b/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml @@ -1,16 +1,16 @@ -version: "3.2" +name: "testnet-node" services: prometheus: image: prom/prometheus:v2.35.0 volumes: - - testnet-prometheus-data:/prometheus + - prometheus-data:/prometheus - ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml expose: - 9090 grafana: image: grafana/grafana:9.3.6 volumes: - - testnet-grafana-data:/var/lib/grafana + - grafana-data:/var/lib/grafana - ./grafana/provisioning:/etc/grafana/provisioning environment: GF_AUTH_ANONYMOUS_ORG_ROLE: "Admin" @@ -37,7 +37,7 @@ services: expose: - 5430 volumes: - - testnet-postgres:/var/lib/postgresql/data + - postgres:/var/lib/postgresql/data healthcheck: interval: 1s timeout: 3s @@ -49,17 +49,39 @@ services: environment: - POSTGRES_PASSWORD=notsecurepassword - PGPORT=5430 + # Generation of consensus secrets. + # The secrets are generated iff the secrets file doesn't already exist. 
+ generate-secrets: + image: "matterlabs/external-node:2.0-v24.16.0" + entrypoint: + [ + "/configs/generate_secrets.sh", + "/configs/testnet_consensus_secrets.yaml", + ] + volumes: + - ./configs:/configs external-node: image: "matterlabs/external-node:2.0-v24.16.0" + entrypoint: + [ + "/usr/bin/entrypoint.sh", + # Uncomment the following line to enable consensus + # "--enable-consensus", + ] + restart: always depends_on: postgres: condition: service_healthy + generate-secrets: + condition: service_completed_successfully ports: + - "0.0.0.0:3054:3054" # consensus public port - "127.0.0.1:3060:3060" - "127.0.0.1:3061:3061" - "127.0.0.1:3081:3081" volumes: - - testnet-rocksdb:/db + - rocksdb:/db + - ./configs:/configs expose: - 3322 environment: @@ -83,8 +105,11 @@ services: EN_SNAPSHOTS_OBJECT_STORE_MODE: "GCSAnonymousReadOnly" RUST_LOG: "warn,zksync=info,zksync_core::metadata_calculator=debug,zksync_state=debug,zksync_utils=debug,zksync_web3_decl::client=error" + EN_CONSENSUS_CONFIG_PATH: "/configs/testnet_consensus_config.yaml" + EN_CONSENSUS_SECRETS_PATH: "/configs/testnet_consensus_secrets.yaml" + volumes: - testnet-postgres: {} - testnet-rocksdb: {} - testnet-prometheus-data: {} - testnet-grafana-data: {} + postgres: {} + rocksdb: {} + prometheus-data: {} + grafana-data: {} diff --git a/docs/guides/setup-dev.md b/docs/guides/setup-dev.md index 10eb329628c..7781e65e5bf 100644 --- a/docs/guides/setup-dev.md +++ b/docs/guides/setup-dev.md @@ -48,6 +48,10 @@ cargo install sqlx-cli --version 0.8.1 # Foundry curl -L https://foundry.paradigm.xyz | bash foundryup --branch master + +# Non GPU setup, can be skipped if the machine has a GPU configured for provers +echo "export RUSTFLAGS='--cfg=no_cuda'" >> ~/.bashrc + # You will need to reload your `*rc` file here # Clone the repo to the desired location @@ -237,6 +241,28 @@ Go to the zksync folder and run `nix develop`. After it finishes, you are in a s [Foundry](https://book.getfoundry.sh/getting-started/installation) can be utilized for deploying smart contracts. For commands related to deployment, you can pass flags for Foundry integration. +## Non-GPU setup + +Circuit Prover requires a GPU (& CUDA bindings) to run. If you still want to be able to build everything locally on +non-GPU setup, you'll need to change your rustflags. + +For a single run, it's enough to export it on the shell: + +``` +export RUSTFLAGS='--cfg=no_cuda' +``` + +For persistent runs, you can either echo it in your ~/.rc file (discouraged), or configure it for your taste in +`config.toml`. + +For project level configuration, edit `/path/to/zksync/.cargo/config.toml`. For global cargo setup, +`~/.cargo/config.toml`. Add the following: + +```toml +[build] +rustflags = ["--cfg=no_cuda"] +``` + ## Environment Edit the lines below and add them to your shell profile file (e.g. 
`~/.bash_profile`, `~/.zshrc`): diff --git a/etc/env/base/prover_job_monitor.toml b/etc/env/base/prover_job_monitor.toml index 40cdf76b8b1..ce206c74ffd 100644 --- a/etc/env/base/prover_job_monitor.toml +++ b/etc/env/base/prover_job_monitor.toml @@ -13,3 +13,4 @@ proof_compressor_queue_reporter_run_interval_ms = 10000 prover_queue_reporter_run_interval_ms = 10000 witness_generator_queue_reporter_run_interval_ms = 10000 witness_job_queuer_run_interval_ms = 10000 +http_port = 3074 diff --git a/etc/env/base/rust.toml b/etc/env/base/rust.toml index 1bb69374ab1..d8bef020c64 100644 --- a/etc/env/base/rust.toml +++ b/etc/env/base/rust.toml @@ -7,7 +7,7 @@ RUST_LOG="""\ zksync_node_framework=info,\ zksync_block_reverter=info,\ -zksync_commitment_generator=info,\ +zksync_commitment_generator=debug,\ zksync_node_db_pruner=info,\ zksync_eth_sender=info,\ zksync_node_fee_model=info,\ diff --git a/etc/env/configs/ext-node.toml b/etc/env/configs/ext-node.toml index b2f74006559..a5eb22db5ec 100644 --- a/etc/env/configs/ext-node.toml +++ b/etc/env/configs/ext-node.toml @@ -63,7 +63,7 @@ zksync_node_consensus=info,\ zksync_consensus_bft=info,\ zksync_consensus_network=info,\ zksync_consensus_storage=info,\ -zksync_commitment_generator=info,\ +zksync_commitment_generator=debug,\ zksync_core=debug,\ zksync_dal=info,\ zksync_db_connection=info,\ diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index 864bff15ded..6a36f65c97c 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -36,7 +36,7 @@ api: filters_limit: 10000 subscriptions_limit: 10000 pubsub_polling_interval: 200 - max_nonce_ahead: 20 + max_nonce_ahead: 40 gas_price_scale_factor: 1.5 estimate_gas_scale_factor: 1.3 estimate_gas_acceptable_overestimation: 5000 @@ -287,6 +287,7 @@ prover_job_monitor: prover_queue_reporter_run_interval_ms: 10000 witness_generator_queue_reporter_run_interval_ms: 10000 witness_job_queuer_run_interval_ms: 10000 + http_port: 3074 base_token_adjuster: @@ -311,7 +312,7 @@ prometheus: observability: log_format: plain - log_directives: "zksync_node_test_utils=info,zksync_state_keeper=info,zksync_reorg_detector=info,zksync_consistency_checker=info,zksync_metadata_calculator=info,zksync_node_sync=info,zksync_node_consensus=info,zksync_contract_verification_server=info,zksync_node_api_server=info,zksync_tee_verifier_input_producer=info,zksync_node_framework=info,zksync_block_reverter=info,zksync_commitment_generator=info,zksync_node_db_pruner=info,zksync_eth_sender=info,zksync_node_fee_model=info,zksync_node_genesis=info,zksync_house_keeper=info,zksync_proof_data_handler=info,zksync_shared_metrics=info,zksync_node_test_utils=info,zksync_vm_runner=info,zksync_consensus_bft=info,zksync_consensus_network=info,zksync_consensus_storage=info,zksync_core_leftovers=debug,zksync_server=debug,zksync_contract_verifier=debug,zksync_dal=info,zksync_db_connection=info,zksync_eth_client=info,zksync_eth_watch=debug,zksync_storage=info,zksync_db_manager=info,zksync_merkle_tree=info,zksync_state=debug,zksync_utils=debug,zksync_queued_job_processor=info,zksync_types=info,zksync_mempool=debug,loadnext=info,vm=info,zksync_object_store=info,zksync_external_node=info,zksync_witness_generator=info,zksync_prover_fri=info,zksync_witness_vector_generator=info,zksync_web3_decl=debug,zksync_health_check=debug,zksync_proof_fri_compressor=info,vise_exporter=error,snapshots_creator=debug,zksync_base_token_adjuster=debug,zksync_external_price_api=debug,zksync_external_proof_integration_api=info" + 
log_directives: "zksync_node_test_utils=info,zksync_state_keeper=info,zksync_reorg_detector=info,zksync_consistency_checker=info,zksync_metadata_calculator=info,zksync_node_sync=info,zksync_node_consensus=info,zksync_contract_verification_server=info,zksync_node_api_server=info,zksync_tee_verifier_input_producer=info,zksync_node_framework=info,zksync_block_reverter=info,zksync_commitment_generator=debug,zksync_node_db_pruner=info,zksync_eth_sender=info,zksync_node_fee_model=info,zksync_node_genesis=info,zksync_house_keeper=info,zksync_proof_data_handler=info,zksync_shared_metrics=info,zksync_node_test_utils=info,zksync_vm_runner=info,zksync_consensus_bft=info,zksync_consensus_network=info,zksync_consensus_storage=info,zksync_core_leftovers=debug,zksync_server=debug,zksync_contract_verifier=debug,zksync_dal=info,zksync_db_connection=info,zksync_eth_client=info,zksync_eth_watch=debug,zksync_storage=info,zksync_db_manager=info,zksync_merkle_tree=info,zksync_state=debug,zksync_utils=debug,zksync_queued_job_processor=info,zksync_types=info,zksync_mempool=debug,loadnext=info,vm=info,zksync_object_store=info,zksync_external_node=info,zksync_witness_generator=info,zksync_prover_fri=info,zksync_witness_vector_generator=info,zksync_web3_decl=debug,zksync_health_check=debug,zksync_proof_fri_compressor=info,vise_exporter=error,snapshots_creator=debug,zksync_base_token_adjuster=debug,zksync_external_price_api=debug,zksync_external_proof_integration_api=info" # Uncomment only if needed # sentry: # url: unset diff --git a/etc/env/file_based/genesis.yaml b/etc/env/file_based/genesis.yaml index 71328fdb8f5..0338e3327e4 100644 --- a/etc/env/file_based/genesis.yaml +++ b/etc/env/file_based/genesis.yaml @@ -1,15 +1,14 @@ -genesis_root: 0x98c875e50c7bb347f1fa5b5107b2cb4a0c0b37b9781d51672c5f5f3ba4398bb7 -genesis_rollup_leaf_index: 54 -genesis_batch_commitment: 0x2d00e5f8d77afcebf58a6b82ae56ba967566fe7dfbcb6760319fb0d215d18ffd -genesis_protocol_semantic_version: '0.24.2' -# deprecated -genesis_protocol_version: 24 -default_aa_hash: 0x0100055dd60e287d65a886ec1a868dcb4f400e67efffaf7df2615da552f00777 -bootloader_hash: 0x010008ebbfd1b493a9e7030a84799f367a68086f1750b19bcb2e9945752975ef +genesis_root: 0xf9030b78c5bf5ac997a76962aa32c90a6d8e8ebce9838c8eeb388d73e1f7659a +genesis_rollup_leaf_index: 64 +genesis_batch_commitment: 0x34c1b220363e0cde7eaf10fe95754d61de097e0f9d9a1dc56c8026562e395259 +genesis_protocol_version: 25 +default_aa_hash: 0x0100055d760f11a3d737e7fd1816e600a4cd874a9f17f7a225d1f1c537c51a1e +bootloader_hash: 0x010008c753336bc8d1ddca235602b9f31d346412b2d463cd342899f7bfb73baf l1_chain_id: 9 l2_chain_id: 270 fee_account: '0x0000000000000000000000000000000000000001' prover: - recursion_scheduler_level_vk_hash: 0x14f97b81e54b35fe673d8708cc1a19e1ea5b5e348e12d31e39824ed4f42bbca2 dummy_verifier: true + snark_wrapper_vk_hash: 0x14f97b81e54b35fe673d8708cc1a19e1ea5b5e348e12d31e39824ed4f42bbca2 +genesis_protocol_semantic_version: 0.25.0 l1_batch_commit_data_generator_mode: Rollup diff --git a/etc/env/file_based/overrides/mainnet/general.yaml b/etc/env/file_based/overrides/mainnet.yaml similarity index 92% rename from etc/env/file_based/overrides/mainnet/general.yaml rename to etc/env/file_based/overrides/mainnet.yaml index 7abe8eb5472..0600abf694c 100644 --- a/etc/env/file_based/overrides/mainnet/general.yaml +++ b/etc/env/file_based/overrides/mainnet.yaml @@ -10,12 +10,13 @@ eth: aggregated_block_prove_deadline: 300 aggregated_block_execute_deadline: 300 timestamp_criteria_max_allowed_lag: 104000 # 29h + 
wait_confirmations: null gas_adjuster: pricing_formula_parameter_a: 1.06 internal_l1_pricing_multiplier: 1 internal_pubdata_pricing_multiplier: 1.50 poll_period: 60 + watcher: + confirmations_for_eth_event: null observability: log_directives: zksync=info,zksync_state_keeper=debug,zksync_core=debug,zksync_server=debug,zksync_contract_verifier=debug,zksync_dal=debug,zksync_state=debug,zksync_utils=debug,zksync_eth_sender=debug,loadnext=debug,dev_ticker=info,vm=info,block_sizes_test=info,setup_key_generator_and_server=info,zksync_queued_job_processor=debug,slot_index_consistency_checker=debug,zksync_health_check=debug,zksync_consensus_bft=debug,zksync_consensus_network=debug,zksync_consensus_storage=debug,zksync_consensus_executor=debug, - -# remove eth_sender_wait_confirmations, eth_watcher_confirmations_for_eth_event variables diff --git a/etc/env/file_based/overrides/only_real_proofs.yaml b/etc/env/file_based/overrides/only_real_proofs.yaml new file mode 100644 index 00000000000..52747467511 --- /dev/null +++ b/etc/env/file_based/overrides/only_real_proofs.yaml @@ -0,0 +1,3 @@ +eth: + sender: + proof_sending_mode: ONLY_REAL_PROOFS diff --git a/etc/env/file_based/overrides/testnet/general.yaml b/etc/env/file_based/overrides/testnet.yaml similarity index 95% rename from etc/env/file_based/overrides/testnet/general.yaml rename to etc/env/file_based/overrides/testnet.yaml index 43a62f3f0dd..e4da1ac96e2 100644 --- a/etc/env/file_based/overrides/testnet/general.yaml +++ b/etc/env/file_based/overrides/testnet.yaml @@ -10,6 +10,7 @@ eth: aggregated_block_prove_deadline: 300 aggregated_block_execute_deadline: 300 timestamp_criteria_max_allowed_lag: 104000 # 29h + wait_confirmations: null gas_adjuster: pricing_formula_parameter_a: 1.1 internal_l1_pricing_multiplier: 1 @@ -18,5 +19,3 @@ eth: confirmations_for_eth_event: 10 observability: log_directives: zksync=info,zksync_state_keeper=debug,zksync_core=debug,zksync_server=debug,zksync_contract_verifier=debug,zksync_dal=debug,zksync_state=debug,zksync_utils=debug,zksync_eth_sender=debug,loadnext=debug,dev_ticker=info,vm=info,block_sizes_test=info,setup_key_generator_and_server=info,zksync_queued_job_processor=debug,slot_index_consistency_checker=debug,zksync_health_check=debug,zksync_consensus_bft=debug,zksync_consensus_network=debug,zksync_consensus_storage=debug,zksync_consensus_executor=debug, - -# remove eth_sender_wait_confirmations variable diff --git a/etc/env/file_based/overrides/tests/loadtest-new.yaml b/etc/env/file_based/overrides/tests/loadtest-new.yaml new file mode 100644 index 00000000000..2167f7347e0 --- /dev/null +++ b/etc/env/file_based/overrides/tests/loadtest-new.yaml @@ -0,0 +1,7 @@ +db: + merkle_tree: + mode: LIGHTWEIGHT +experimental_vm: + state_keeper_fast_vm_mode: NEW +mempool: + delay_interval: 50 diff --git a/etc/env/file_based/overrides/tests/loadtest-old.yaml b/etc/env/file_based/overrides/tests/loadtest-old.yaml new file mode 100644 index 00000000000..a2d66d1cf4a --- /dev/null +++ b/etc/env/file_based/overrides/tests/loadtest-old.yaml @@ -0,0 +1,7 @@ +db: + merkle_tree: + mode: LIGHTWEIGHT +experimental_vm: + state_keeper_fast_vm_mode: OLD +mempool: + delay_interval: 50 diff --git a/etc/env/file_based/overrides/validium.yaml b/etc/env/file_based/overrides/validium.yaml new file mode 100644 index 00000000000..1af02dd9589 --- /dev/null +++ b/etc/env/file_based/overrides/validium.yaml @@ -0,0 +1,6 @@ +eth: + sender: + pubdata_sending_mode: CUSTOM +state_keeper: + pubdata_overhead_part: 0 + compute_overhead_part: 1 diff --git 
a/etc/lint-config/ignore.yaml b/etc/lint-config/ignore.yaml index 108192b1843..3d0c4869df8 100644 --- a/etc/lint-config/ignore.yaml +++ b/etc/lint-config/ignore.yaml @@ -22,5 +22,6 @@ dirs: [ "system-contracts", "artifacts-zk", "cache-zk", - "contracts/" + "contracts/", + "era-observability" ] diff --git a/etc/nix/tee_prover.nix b/etc/nix/tee_prover.nix index 50273b91fb5..0b424522dff 100644 --- a/etc/nix/tee_prover.nix +++ b/etc/nix/tee_prover.nix @@ -7,4 +7,8 @@ craneLib.buildPackage (commonArgs // { version = (builtins.fromTOML (builtins.readFile ../../core/bin/zksync_tee_prover/Cargo.toml)).package.version; cargoExtraArgs = "-p zksync_tee_prover --bin zksync_tee_prover"; inherit cargoArtifacts; + + postInstall = '' + strip $out/bin/zksync_tee_prover + ''; }) diff --git a/etc/selector-generator-data/selectors.json b/etc/selector-generator-data/selectors.json index 6ea986e4263..f8a6d2e825b 100644 --- a/etc/selector-generator-data/selectors.json +++ b/etc/selector-generator-data/selectors.json @@ -1,518 +1,1025 @@ { - "d0f2c663": "getBatchNumberAndTimestamp", - "2a79c611": "getCommitment", - "8129fc1c": "initialize", - "e2a9d554": "setUpgradeTimestamp", - "84c2ff75": "stmAssetId", - "7ac3a553": "withdrawLegacyBridge", - "e91659ae": "addNewChainIfNeeded", - "06d49e5b": "getPubdataPricingMode", - "1ff5a783": "execute", - "8310f2c6": "transferFundsFromSharedBridge", - "80b41246": "getBlockHashEVM", - "7da01cd6": "executeUpgrade", - "74044673": "addStateTransitionManager", - "82b57749": "forwardedBridgeMint", - "6478d8ed": "chainAdmin", - "4af63f02": "deploy", - "d0707b67": "aggregate", - "e0ab6368": "assetIdIsRegistered", - "27e86d6e": "getLastBlockHash", - "13bc9f20": "isOperationReady", - "4a2e35ba": "withdraw", - "1e4fba05": "getChainRoot", - "762008c2": "executeBatchesSharedBridge", - "155fd27a": "setValueUnderNonce", - "a6ae0aac": "coinbase", - "86d516e8": "getCurrentBlockGasLimit", - "3659cfe6": "upgradeTo", - "fa8f7ea6": "getAllHyperchains", - "7b510fe8": "getAccountInfo", - "40c10f19": "mint", - "e02e1bfd": "chainCount", - "015f58d7": "genesisUpgrade", - "28e439f3": "tryBlockAndAggregate", - "e76db865": "setPubdataPricingMode", - "62f84b24": "sendToL1", - "1c9f0149": "updateChainBalancesFromSharedBridge", - "38720778": "sharedBridge", - "64e130cf": "nativeTokenVault", - "adfca15e": "facetFunctionSelectors", - "af500fb7": "readBytes32", - "7b315630": "upgradeChainFromVersion", - "b6ea1757": "pushNewLeaf", - "e66c8c44": "validatorTimelock", + "e341eaa4": "sign", + "600903ad": "keyExistsToml", + "174dea71": "aggregate3Value", + "21ed2977": "assertApproxEqRelDecimal", + "90c5013b": "stopPrank", + "e4948f43": "proveL2MessageInclusion", "4f1ef286": "upgradeToAndCall", - "fe26699e": "getTotalBlocksCommitted", - "805b9869": "executeTransactionFromOutside", - "aa4593dc": "revertReceive", - "64b554ad": "forwardedBridgeBurn", - "ba238947": "getProtocolVersion", - "07f8c636": "multicall", - "39607382": "getTotalBlocksExecuted", - "796b89b9": "getBlockTimestamp", - "9cd939e4": "l2LogsRootHash", - "b298e36b": "push", + "e985e9c5": "isApprovedForAll", + "6ba3ba2b": "createFork", + "23b872dd": "transferFrom", + "740211ce": "commitValidatorCommittee", + "b292f5f1": "proveL1ToL2TransactionStatus", + "dbfe3e96": "updateSecurityCouncil", + "b12fc005": "assertLt", + "868085b1": "getBatchProofPublicInput", + "64d62353": "updateDelay", + "48c3241f": "closeFile", + "60f9bb11": "readFile", + "c88a5e6d": "deal", + "6f497ac6": "executeBatchesSharedBridge", + "966c523e": "blockAndAggregate", + "c438a9f2": 
"L2_LEGACY_SHARED_BRIDGE", + "59890bcb": "setExecutedBatches", + "e30c3978": "pendingOwner", + "31ba3498": "createFork", + "669efca7": "assertNotEqDecimal", + "c75ac8fa": "processL2Logs", + "ddc2651b": "envBytes", + "4de2e468": "getRawCodeHash", + "129de7eb": "blobhashes", + "b3e47705": "envOr", + "e543e5bf": "setChainCreationParams", + "8466f415": "assertLe", + "cc3fbc63": "setEraPostDiamondUpgradeFirstBatch", + "be6f11cf": "setPriorityTxMaxGasLimit", + "8da5cb5b": "owner", + "9f5684a2": "readLink", + "994057ef": "changeAttesterKey", + "711043ac": "assertEq", + "5875da2b": "changeValidatorKey", + "5139839c": "nodeOwners", + "7b048ccd": "parseJsonInt", + "a37dc1d4": "forwardedBridgeClaimFailedBurn", + "adfca15e": "facetFunctionSelectors", + "3e64a696": "getBasefee", + "99624cb6": "getAttesterCommittee", + "1497876c": "readDir", + "d48bfca7": "addToken", + "17d7de7c": "getName", + "eeb8cb09": "executeTransactionFromOutside", + "0c56efe9": "initializeV2", + "01eae183": "depositAmount", + "561cd6f3": "serializeString", + "b7909320": "assertNotEq", + "41c841c3": "L1_WETH_TOKEN", + "7475e9ea": "chainAdminAcceptAdmin", + "ca669fa7": "prank", + "c1899c1d": "createBatchCommitment", + "38a78092": "increaseMinNonce", + "a9059cbb": "transfer", + "f54266a2": "l1TokenAddress", + "e0eb04d4": "isFile", + "e9f18c17": "forceDeployOnAddresses", + "3d1fe08a": "assertGeDecimal", + "a2b1a1ae": "expectCall", + "5d83b6da": "__DEPRECATED_baseToken", "7890e5da": "side", - "5e1ac65f": "hashOperation", - "1806aa18": "getCodeSize", - "d4a4ca0d": "getBlockNumberAndTimestamp", - "06bed036": "setL2Block", - "aa970773": "validateAndPayForPaymasterTransaction", - "6223258e": "setDAValidatorPair", + "40f0b4e0": "assertLtDecimal", + "71aad10d": "toString", + "29f172ad": "unsafeOverrideBatch", + "8289e621": "assertApproxEqAbs", + "eccd2437": "assertGtDecimal", + "cdf25430": "L1_ASSET_ROUTER", + "a22cb465": "setApprovalForAll", + "9983c28a": "parseJsonIntArray", + "f67a965b": "broadcast", + "aa5cf788": "assertLeDecimal", + "8775a591": "assertNotEq", + "57f3921f": "stmAssetIdToAddress", + "2f2fd63f": "getMappingLength", + "2c431917": "scheduleTransparent", + "7b574586": "publishedBlobCommitments", + "31d50750": "isOperation", + "21f603d7": "setTransactionFilterer", + "8129fc1c": "initialize", "728cb93b": "bridgeClaimFailedBurn", - "d6abe642": "getAssetId", - "d2ef1b0e": "storedBatchZero", - "51b3c157": "hyperbridgingEnabled", - "53e61bdc": "processL2RollupDAValidatorOutputHash", - "95d89b41": "symbol", - "a37dc1d4": "forwardedBridgeClaimFailedBurn", - "db1f0bf9": "getTotalBatchesCommitted", - "beda594a": "setHyperchain", + "19698bc9": "infiniteFunction", + "3ce969e6": "revokePersistent", + "a322c40e": "toString", + "c29f093f": "setSTM", + "fd921be8": "parseJsonBytes", + "0d4aae9b": "stopMappingRecording", + "e03e9177": "assertEq", + "0a30b771": "assertGe", + "5e1ac65f": "hashOperation", + "9a188371": "requestL2TransactionDirect", + "62c6f9fb": "assertNotEq", + "b8c2f66f": "getTotalBatchesExecuted", + "42181150": "envInt", + "d17d4b0d": "assertLe", + "6ab8f82e": "proveL2LogInclusion", + "580d6bff": "updateAllNodesAtHeight", + "c3bbd2d7": "isFacetFreezable", + "8a75bb09": "saveL2LogsRootHash", + "71dce7da": "toString", + "1091a261": "assertNotEq", + "6ee1dc20": "validateNonceUsage", + "65bc9481": "accesses", + "f088ccdc": "callCodeOracle", + "0956441b": "stopExpectSafeMemory", + "5aa6fa1f": "NATIVE_TOKEN_VAULT", + "8310f2c6": "transferFundsFromSharedBridge", + "484f0505": "getHyperchainLegacy", "3977d71c": "getAggregatedRoot", - 
"c4d252f5": "cancel", - "2878fe74": "genesisUpgrade", - "2ab0f529": "isOperationDone", - "5d4edca7": "BRIDGE_HUB", - "d4b9f4fa": "messageRoot", - "fb1a9a57": "getDeploymentNonce", - "bb0fd610": "extendedAccountVersion", - "3cda3351": "create2", - "3a9d7f8d": "stmDeployer", - "db541184": "setShouldRevertOnExecuteBatches", - "74f4f547": "bridgeBurn", - "b852ad36": "l1SharedBridge", - "6ef25c3a": "baseFee", - "eb39e6d5": "stateTransitionManager", - "381c3f13": "checkDA", + "0008efda": "runDefaultUpgrader", + "328ef4fe": "setBaseTokenGasMultiplierPrice", + "ad31b9fa": "envAddress", + "65c428e7": "parseTomlAddressArray", + "bf529569": "setFreezability", + "ef3f0bae": "getTotalBatchesVerified", + "ce8365f9": "envExists", + "30bda03e": "setL1Erc20Bridge", + "cdc4878b": "nodeCount", + "015f58d7": "genesisUpgrade", + "a84328dd": "assertGe", + "1cc5d103": "setPorterAvailability", + "588570a5": "initialize", + "ae00b630": "runDeployConsensusRegistry", + "a31ee5b0": "initialize", + "9a7fbd8f": "assertNotEq", + "e23d2563": "getEraChainId", + "24fd57fb": "requestL2TransactionTwoBridges", + "0cc9ee84": "assertEq", + "f56ff18b": "getBlobhashes", "f92ad219": "initialize", - "9fa8826b": "depositHappened", - "01d23d4b": "diamondCut", - "55d35d18": "getValueUnderNonce", - "ee7fb38b": "calculateRoot", - "64d62353": "updateDelay", - "fd3c6b55": "processCalldataDA", - "39b34c6e": "requestBytecodeL1Publication", + "f2830f7b": "rollFork", + "2f90b184": "L1_CHAIN_ID", + "5de097b1": "nullifyChainBalanceByNTV", + "14e75680": "assertNotEqDecimal", + "204e1c7a": "getProxyImplementation", + "240f839d": "assertApproxEqAbs", "71623274": "l2TransactionBaseCost", - "53b9e632": "assetHandlerAddress", - "c987336c": "upgrade", - "5c975abb": "paused", - "4623c91d": "setValidator", - "4f1e1be0": "storeAccountConstructingCodeHash", - "b0f40a17": "processBatch", - "2c4f2a58": "bridgehubDepositBaseToken", - "ced531eb": "setHashes", - "18160ddd": "totalSupply", - "7cb9357e": "gasPerPubdataByte", - "7877a797": "blockGasLimit", - "cdc4878b": "nodeCount", - "c2eeeebd": "l1Address", - "0f23da43": "revertBatchesSharedBridge", - "e1239cd8": "incrementMinNonceIfEquals", - "8456cb59": "pause", - "9a42c2c2": "zeroPointerTest", - "f9f3ee2d": "setResult", - "7ba8be34": "decodeUint8", + "d9a3c4d2": "assertGt", + "e23cd19f": "writeJson", + "a0803ef7": "currentBlockInfo", + "aa4593dc": "revertReceive", + "d0468156": "getPendingAdmin", + "3e9173c5": "assertEq", + "a6368557": "deleteSnapshot", + "f413f0b6": "assertEq", + "e03fe177": "getCodeHash", + "812a44b2": "parseTomlKeys", + "80b41246": "getBlockHashEVM", + "1f7b4f30": "roll", + "c87b56dd": "tokenURI", + "dc8e4b26": "registerSettlementLayer", + "949431dc": "approvalBased", + "35e1349b": "eth_getLogs", + "5d4edca7": "BRIDGE_HUB", + "607457f2": "setShouldRevertOnCommitBatches", + "18717dc1": "setPorterAvailability", + "ede25608": "protocolVersionToUpgradeTimestamp", + "9ebf6827": "selectFork", + "84d9fedd": "popFront", + "06d49e5b": "getPubdataPricingMode", + "b3a056d7": "loadAllocs", + "49a7cc72": "payForTransaction", + "b381724e": "setFeeParams", + "d30dced6": "parseTomlBool", + "72c7e0b5": "assertNotEq", + "6223258e": "setDAValidatorPair", + "f90eb963": "getPorterAvailability", + "3f58f5b5": "createNewChain", + "c126e860": "hashOperation", + "491cc7c2": "expectEmit", + "7eff275e": "changeProxyAdmin", + "252dba42": "aggregate", + "01ffc9a7": "supportsInterface", + "a5748aad": "getNonce", + "69c76df2": "readUint32", + "d0bf6fd4": "setSharedBridge", + "dbe8d88b": "assertLtDecimal", + "3d1f16d4": 
"commitAttesterCommittee", + "48016c04": "assertEqDecimal", + "8dd14802": "setBridge", + "923b3b56": "forceDeployOnAddress", + "def9d6af": "protocolVersionIsActive", + "714a2f13": "assertEq", + "cf1c049c": "assertEq", + "087e6e81": "parseBytes32", + "6d016688": "expectSafeMemory", + "aa970773": "validateAndPayForPaymasterTransaction", "a635f01d": "delegateCall", - "2f90b184": "L1_CHAIN_ID", - "6c0960f9": "finalizeEthWithdrawal", - "31d50750": "isOperation", - "59ec65a2": "baseToken", - "a9b0d128": "setPriorityTreeStartIndex", - "c4879440": "bridgehubDepositBaseToken", - "823f1d96": "l2TokenProxyBytecodeHash", - "18876a04": "chunkPubdataToBlobs", - "699b0fb9": "bridgeBurn", - "17338945": "unfreezeDiamond", - "8a75bb09": "saveL2LogsRootHash", - "91b19874": "validators", - "63dc94b1": "forceDeploy", - "5a590335": "getDAValidatorPair", - "60144197": "setTokenMultiplierSetter", - "938b5f32": "origin", - "36ba0355": "bridgeMint", - "6dde7209": "l2TokenBeacon", - "bf54096e": "MAX_NUMBER_OF_HYPERCHAINS", - "7e44bc5e": "setImmutables", - "8e8acf87": "getL2BlockNumberAndTimestamp", - "e30c3978": "pendingOwner", + "2f103f22": "activeFork", + "8102d70d": "readDir", + "3cf78e28": "assertNotEq", + "97949042": "envBytes32", "f5e69a47": "publishCompressedBytecode", - "84da1fb4": "getNewAddressCreate2", - "47fcedb8": "setFeeParams", - "b22dd78e": "storedBatchHash", + "39509351": "increaseAllowance", + "97bb3ce9": "tokenAddress", + "f9f3ee2d": "setResult", + "ae65def1": "node", + "d124dc4f": "send", + "64b554ad": "forwardedBridgeBurn", + "f0259e92": "breakpoint", + "ebc73ab4": "getMappingSlotAt", + "0e18b681": "acceptAdmin", + "f3385fb6": "forceDeployOnAddress", + "667f9d70": "load", + "8f5d232d": "parseBytes", + "515361f6": "assertEq", + "6d315d7e": "blobBaseFee", + "4724c5b9": "assertNotEq", + "3425eb89": "tokenMultiplierSetter", + "2d0335ab": "getNonce", + "07ee9355": "l2BridgeAddress", + "189a5a17": "nodes", + "9a8a0592": "chainId", + "1d9e269e": "makePersistent", + "3d5bc8bc": "assertApproxEqAbsDecimal", + "701f58c5": "commitBatches", + "7877a797": "blockGasLimit", + "e6962cdb": "broadcast", + "5aa9b6b5": "getRawNonce", + "876e24e6": "getMappingKeyAndParentOf", + "b67187f3": "assertNotEq", + "933999fb": "deposit", + "4074e0a8": "makePersistent", + "0dbad27e": "upgradeChainFromVersion", + "3601e63e": "bridgeRecoverFailedTransfer", + "f710b062": "assertApproxEqAbs", + "e516761e": "markFactoryDeps", "57e6246b": "initialCutHash", - "2b805192": "setNewVersionUpgrade", - "dbfe3e96": "updateSecurityCouncil", - "e03fe177": "getCodeHash", - "02fa5779": "setNewBatch", - "a225efcb": "setPubdataInfo", - "9cc395d0": "bridgeCheckCounterpartAddress", - "868085b1": "getBatchProofPublicInput", - "6a0cd1f5": "removeValidator", - "2ae9c600": "protocolVersion", - "61f91b2e": "initialForceDeploymentHash", - "72425d9d": "getCurrentBlockDifficulty", - "8c2a993e": "bridgeMint", - "b473318e": "l2TransactionBaseCost", - "f851a440": "admin", - "681fe70c": "isEmpty", - "ef3f0bae": "getTotalBatchesVerified", + "c0406226": "run", + "7b30c8da": "getL2SystemContractsUpgradeTxHash", + "74637a7a": "computeCreateAddress", + "ba334825": "hyperchain", + "ca408c23": "bridgehubDeposit", + "6352211e": "ownerOf", + "9f86dc91": "parseJsonBool", "ba75bbd8": "front", - "cdffacc6": "facetAddress", - "89f9a072": "validatePubdata", - "66869d49": "changeFeeParams", - "e8b99b1b": "deposit", - "4d4a1eca": "setTokenMultiplier", - "a0803ef7": "currentBlockInfo", - "fb4baf17": "changeFeeParams", - "3591c1a0": "getBridgehub", - "fd791f3c": 
"getL2DefaultAccountBytecodeHash", - "ec8067c7": "updateNonceOrdering", - "a3912ec8": "receiveEther", - "79823c9a": "getFirstUnprocessedPriorityTx", - "235d9eb5": "setTokenMultiplier", - "dd354a06": "calculateCreate2TokenAddress", - "7efda2ae": "proveL2LeafInclusion", - "f120e6c4": "encodeTxDataHash", - "f5f15168": "l2TokenAddress", - "4d2301cc": "getEthBalance", - "ab07b2e9": "getL2GasPrice", - "363bf964": "setAddresses", - "607457f2": "setShouldRevertOnCommitBatches", - "d1ba7e97": "hyperchainAddress", - "841a9d42": "aggregate3Value", + "b473318e": "l2TransactionBaseCost", + "c304aab7": "assertLeDecimal", + "64bf8d66": "changeFeeParams", + "f4844814": "expectRevert", + "2878fe74": "genesisUpgrade", + "89160467": "ffi", + "27af7d9c": "assertEqDecimal", + "ebe4a3d7": "getTransactionHashes", + "7fec2a8d": "startBroadcast", + "d505accf": "permit", + "09824a80": "registerToken", + "c21a38e2": "proveL2MessageInclusion", + "c31eb0e0": "expectRevert", + "561fe540": "envOr", + "f8d33b9b": "assertGt", "ea6c029c": "baseTokenGasPriceMultiplierNominator", - "de8fa431": "getSize", - "24a55db9": "markBytecodeAsPublished", - "c438a9f2": "L2_LEGACY_SHARED_BRIDGE", - "ddeaa8e6": "getBatchHash", - "8f31f052": "isWithdrawalFinalized", - "41cf49bb": "prepareChainCommitment", - "5d382700": "create2Account", - "6d9860e1": "l1AssetRouter", + "ef277d72": "assertApproxEqRel", + "c846f6df": "transferFundsFromLegacy", + "7ca29682": "createFork", + "5ca1e165": "getRoot", + "37736e08": "parseToml", + "29b98c67": "isDiamondStorageFrozen", + "f0e9da23": "readAddress", + "1206c8a8": "rpc", + "f8ccbf47": "IS_SCRIPT", + "dd62ed3e": "allowance", + "56ca623e": "toString", + "f6370c7b": "setChainCreationParams", + "06447d56": "startPrank", + "05838bf4": "expectSafeMemoryCall", "e1ad1162": "transfer", - "bf1fe420": "setGasPrice", - "a1954fc5": "getTotalPriorityTxs", - "c0a16dda": "setAssetDeploymentTracker", - "4145ca27": "removePriorityQueueFront", - "09e14277": "setStateTransitionManager", - "1f067457": "revertTransfer", - "b8c2f66f": "getTotalBatchesExecuted", - "07ee9355": "l2BridgeAddress", - "095ea7b3": "approve", - "84b0196e": "eip712Domain", - "18b1771f": "getAssetId", - "f85894c5": "forwardedBridgeBurn", + "c7ca373c": "initFromCommitment", + "5a362d45": "assertGt", + "14b02bc9": "envString", + "f7d39a8d": "breakpoint", + "b2dad155": "trim", + "4ad0bac9": "readCallers", + "53ce2061": "revertBatches", "bd7c5412": "isEthWithdrawalFinalized", - "70a08231": "balanceOf", - "3425eb89": "tokenMultiplierSetter", - "5aa9b6b5": "getRawNonce", - "7ab08472": "finalizeWithdrawalLegacyErc20Bridge", - "205c2878": "withdrawTo", - "ec3d5f88": "setPriorityTxMaxGasLimit", - "8eb7db57": "bridgehubConfirmL2Transaction", - "2a72b707": "bridgehubRequestL2Transaction", - "0f3fa211": "setNativeTokenVault", - "4bed8212": "isWithdrawalFinalized", - "0c56efe9": "initializeV2", - "501e60d5": "setUpgradeDiamondCut", - "c29f093f": "setSTM", - "f2fde38b": "transferOwnership", - "8c5a3445": "general", - "ca8f93f1": "setLegacyBaseTokenAssetId", - "71abd109": "upgrade", - "eced0bf0": "__DEPRECATED_tokenIsRegistered", - "dc8e4b26": "registerSettlementLayer", - "310ab089": "getImmutable", - "19cae462": "difficulty", - "77421056": "setFunctionToCall", - "3997d064": "tryAggregate", - "f1d357e5": "L1_SHARED_BRIDGE", - "952a3ee7": "getERC20Getters", - "29b98c67": "isDiamondStorageFrozen", - "17d7de7c": "getName", - "e81e0ba1": "isFunctionFreezable", - "7ebba672": "setTokenMultiplier", - "6ee1dc20": "validateNonceUsage", - "6a27e8b5": "getSettlementLayer", - 
"7a28adb2": "proveL2LogInclusion", - "671a7131": "settlementLayer", - "accdd16c": "freezeChain", - "c3bbd2d7": "isFacetFreezable", - "99a88ec4": "upgrade", + "a6ae0aac": "coinbase", + "ab07b2e9": "getL2GasPrice", + "e02e1bfd": "chainCount", + "d92d8efd": "isPersistent", + "79823c9a": "getFirstUnprocessedPriorityTx", + "78bdcea7": "assertNotEq", + "699b0fb9": "bridgeBurn", + "18e3a941": "getVerifierParams", + "ee7fb38b": "calculateRoot", + "f851a440": "admin", "95f11a40": "bridgeInitialize", - "c9f5c932": "requestL2TransactionTwoBridges", - "f1a78aa6": "postTransaction", - "ca65fe79": "finalizeDeposit", - "5518c73b": "getStateTransitionManager", - "b5b18fe5": "processL2Logs", - "969b53da": "l1Bridge", - "e8a71ca9": "forwardedBridgeMint", - "505e6d47": "updateAllLeaves", - "ecf95b8a": "createAccount", - "84d9fedd": "popFront", "3f4ba83a": "unpause", - "1f98fa08": "createNewChain", - "313ce567": "decimals", - "3ce695e7": "registerSTMAssetOnL1", - "73c58a2d": "publishBlobs", - "f0e9da23": "readAddress", - "e23d2563": "getEraChainId", - "0ec6b0b7": "getPriorityTxMaxGasLimit", - "fdbb0301": "__DEPRECATED_l2BridgeAddress", - "52d1902d": "proxiableUUID", - "97bb3ce9": "tokenAddress", - "5d83b6da": "__DEPRECATED_baseToken", - "966c523e": "blockAndAggregate", - "f4943a20": "protocolVersionDeadline", - "46746c7d": "commitBatchesSharedBridge", - "87d9d023": "verify", - "57f3921f": "stmAssetIdToAddress", - "e516761e": "markFactoryDeps", - "daa51a8c": "pushBack", - "2e1a7d4d": "withdraw", - "af6ed122": "executeUpgrade", - "a6f2c076": "setDataToBeReturnedInFinalizeWithdrawal", - "01eae183": "depositAmount", - "9e8945d2": "verificationKeyHash", - "a3bd0112": "genesisUpgrade", - "927c4bf7": "upgradeExternal", - "56079ac8": "sendL2ToL1Log", - "d92f86a2": "setLegacyChainAddress", - "be6f11cf": "setPriorityTxMaxGasLimit", - "7321c485": "dummySetValidator", - "c0991525": "claimFailedDeposit", - "72d74cd7": "reinitializeToken", - "ab93d6f3": "requestL2TransactionToGatewayMailbox", - "3601e63e": "bridgeRecoverFailedTransfer", - "eb672419": "requestL2Transaction", - "af6a2dcd": "getTotalBlocksVerified", - "27eb6c0f": "securityCouncil", - "4c6314f0": "getMarker", - "49a7cc72": "payForTransaction", - "f20265d2": "setRevertTransfer", + "33ce93fe": "getProtocolVersion", + "d9caed12": "withdraw", + "c37533bb": "proveBatchesSharedBridge", + "07168226": "deployBeaconProxy", + "9cd939e4": "l2LogsRootHash", + "af500fb7": "readBytes32", + "db4235f6": "keyExistsJson", + "8c5a3445": "general", + "607e2cb2": "setRevertReceive", + "27ae4c16": "freezeDiamond", + "e0bf0850": "setShouldRevertOnProveBatches", + "8cb7f3d0": "forceDeployOnAddresses", + "47fcedb8": "setFeeParams", + "5d18c73a": "assertEq", + "efb77a75": "makePersistent", + "421ae469": "deleteSnapshots", + "39607382": "getTotalBlocksExecuted", + "d145736c": "envOr", + "76eadd36": "stopBroadcast", + "2f9c8f0d": "add", "84bc3eb0": "withdrawWithMessage", - "79c4f929": "markBytecodeAsPublished", - "580d6bff": "updateAllNodesAtHeight", + "a28c1aee": "prepareForPaymaster", + "150b7a02": "onERC721Received", + "45c62011": "removeDir", + "8cf2b2f0": "uncheckedInc", + "74da756b": "execute", + "1c50cfea": "addTokenAssetId", + "c2e4ff97": "markAccountCodeHashAsConstructed", + "e91659ae": "addNewChainIfNeeded", + "59ec65a2": "baseToken", + "350d56bf": "envAddress", + "4d8abc4b": "transact", + "2077337e": "assertLtDecimal", + "938b5f32": "origin", + "f2fde38b": "transferOwnership", + "0d4651aa": "storeAccountConstructedCodeHash", + "b993549e": "getCommittedBatchTimestamp", + 
"7c84c69b": "assertEq", + "70f5c679": "setMessageRoot", + "e1239cd8": "incrementMinNonceIfEquals", + "04a5c7ab": "assertGtDecimal", + "f20265d2": "setRevertTransfer", + "95d89b41": "symbol", + "11a2ccc1": "finalizeWithdrawal", + "65b7b7cc": "expectCall", + "1f21fc80": "writeFileBinary", + "a457c2d7": "decreaseAllowance", + "7fb5297f": "startBroadcast", + "9ff531e3": "assertLt", + "127cfe9a": "parseTomlBoolArray", + "2b805192": "setNewVersionUpgrade", + "d241f618": "genesisUpgrade", + "a3bd0112": "genesisUpgrade", + "39b34c6e": "requestBytecodeL1Publication", + "3f33db60": "serializeInt", + "d3977322": "assertNotEq", + "bdfacbe8": "assertNotEq", + "8bb75533": "split", + "7ed1ec7d": "envBool", + "d2ef1b0e": "storedBatchZero", + "edecd035": "assertNotEq", + "7ebba672": "setTokenMultiplier", + "9d2ad72a": "rpcUrlStructs", + "a6f2c076": "setDataToBeReturnedInFinalizeWithdrawal", + "f8f7cd76": "validateTransaction", + "4777f3cf": "envOr", + "505e6d47": "updateAllLeaves", + "8eb7db57": "bridgehubConfirmL2Transaction", + "2f2769d1": "assertEq", + "548a5a33": "setAssetHandlerAddressThisChain", + "8c2a993e": "bridgeMint", + "3e9705c0": "startMappingRecording", + "d1a5b36f": "pauseGasMetering", + "84d52b7a": "createSelectFork", + "6dde7209": "l2TokenBeacon", + "dead6f7f": "getHyperchain", + "bbcb713e": "envOr", + "c0a16dda": "setAssetDeploymentTracker", + "ca65fe79": "finalizeDeposit", + "579952fc": "transferFromTo", + "805b5b74": "tokenIsRegistered", + "709ecd3f": "dumpState", + "c657c718": "label", + "fb4baf17": "changeFeeParams", + "e8295588": "zeros", + "85940ef1": "parseJson", + "213e4198": "parseJsonKeys", + "3f8be2c8": "toBase64", + "16d207c6": "assertApproxEqAbs", + "f26f3c8f": "proveL2MessageInclusion", + "b88d4fde": "safeTransferFrom", + "1e4fba05": "getChainRoot", + "facd743b": "isValidator", + "2c4f2a58": "bridgehubDepositBaseToken", + "82d6c8fd": "assertApproxEqRelDecimal", + "85df51fd": "blockHash", + "897e0a97": "writeFile", + "263b7f8e": "proveL2LogInclusion", + "ab93d6f3": "requestL2TransactionToGatewayMailbox", + "f5ba4232": "removeStateTransitionManager", + "f85894c5": "forwardedBridgeBurn", + "584b153e": "isOperationPending", + "95570d12": "getValidatorCommittee", + "70ca10bb": "store", + "aaaddeaf": "envBool", + "d0707b67": "aggregate", + "ec3d5f88": "setPriorityTxMaxGasLimit", + "8456cb59": "pause", + "6cd8c355": "reinitializeChainGovernance", + "48f50c0f": "txGasPrice", + "d930a0e6": "projectRoot", + "628b636e": "publishPubdataAndClearState", + "b5a85e9d": "forceDeploy", + "6a82600a": "parseJson", + "a85a8418": "rpcUrls", "e5355c75": "getL2SystemContractsUpgradeBatchNumber", - "ca408c23": "bridgehubDeposit", - "6ab8f82e": "proveL2LogInclusion", - "7528c2c6": "applyL1ToL2Alias", - "59890bcb": "setExecutedBatches", + "9cb1c0d4": "prevrandao", + "6a8237b3": "assertNotEq", + "26782247": "pendingAdmin", + "66869d49": "changeFeeParams", + "762008c2": "executeBatchesSharedBridge", + "426cb766": "attestersCommit", + "88da6d35": "serializeString", + "1e356e1a": "serializeAddress", + "8a0807b7": "indexOf", + "566338a9": "getL1TokenAddress", + "2555d2c1": "chunkAndPublishPubdata", + "e2f318e3": "payForTransaction", + "72d74cd7": "reinitializeToken", + "3997d064": "tryAggregate", + "57180981": "updateAccountVersion", + "f28dceb3": "expectRevert", + "db1f0bf9": "getTotalBatchesCommitted", + "51b3c157": "hyperbridgingEnabled", + "4be99e1d": "getCurrentPubdataCost", + "e34a329a": "executeUpgrade", + "546b6d2a": "SHARED_BRIDGE", + "7ab08472": "finalizeWithdrawalLegacyErc20Bridge", + 
"6a27e8b5": "getSettlementLayer", + "17338945": "unfreezeDiamond", + "df9c1589": "executeTransaction", + "99c16d1a": "proveL2MessageInclusion", + "98acd7a6": "getBaseToken", + "f4c004e3": "assertNotEq", + "a54a87d8": "copyFile", + "c1fa1ed0": "assertEq", + "47b4a7a6": "changeAttesterWeight", + "be646da1": "transact", + "d1132332": "attesterPubKeyHashes", + "30e5ccbd": "incrementTxNumberInBatch", + "7958004c": "getOperationState", + "045c55ce": "assertApproxEqAbsDecimal", + "56079ac8": "sendL2ToL1Log", + "d86970d8": "getL2BootloaderBytecodeHash", + "e8a71ca9": "forwardedBridgeMint", + "9a42c2c2": "zeroPointerTest", + "06fdde03": "name", + "086a56f8": "getBaseTokenBridge", + "18b1771f": "getAssetId", + "306395c6": "incrementDeploymentNonce", + "63dc94b1": "forceDeploy", + "1c9f0149": "updateChainBalancesFromSharedBridge", + "3591c1a0": "getBridgehub", + "d6abe642": "getAssetId", + "95fd154e": "assertLe", + "72425d9d": "getCurrentBlockDifficulty", "b19f0ade": "executeUpgradeNoOverlap", - "15f9a2fe": "prepareForPaymaster", - "6e9d7899": "legacyBridge", - "ef0e2ff4": "setChainId", - "e52db4ca": "baseTokenAssetId", - "0f28c97d": "getCurrentBlockTimestamp", + "44d7f0a4": "revertTo", + "51cff8d9": "withdraw", + "817b17f0": "postTransaction", + "33949f0b": "assertNotEqDecimal", + "83eddd19": "governanceAcceptOwner", + "301e7765": "getChainAdmin", + "a8b0574e": "getCurrentBlockCoinbase", + "52c9eacb": "upgradeCutHash", + "9c4d535b": "create", + "e8de12df": "validatorsCommit", + "b11a19e8": "toString", + "6d9860e1": "l1AssetRouter", + "9ec3f927": "changeValidatorWeight", + "9507540e": "assertNotEq", + "c0865ba7": "writeToml", + "e81e0ba1": "isFunctionFreezable", + "49c4fac8": "parseJsonString", + "0b72f4ef": "assertNotEq", + "235d9eb5": "setTokenMultiplier", + "c3d93e7c": "executeBatches", + "4af63f02": "deploy", + "a0ed82fa": "governanceAcceptAdmin", + "60144197": "setTokenMultiplierSetter", + "7ba8be34": "decodeUint8", + "f5a55558": "assertNotEqDecimal", + "6631aa99": "parseJsonBytesArray", + "b2332f51": "assertNotEq", + "08e4e116": "expectCallMinGas", "d0e30db0": "deposit", - "9623609d": "upgradeAndCall", - "5ca1e165": "getRoot", - "fe173b97": "gasPrice", - "a851ae78": "setTxOrigin", - "18717dc1": "setPorterAvailability", - "cbcf2e3c": "isTransactionAllowed", + "fd791f3c": "getL2DefaultAccountBytecodeHash", + "b197c247": "parseTomlBytesArray", + "3e716f81": "parseTomlBytes32Array", + "74f4d30d": "storedBlockHash", + "2a72b707": "bridgehubRequestL2Transaction", + "7e44bc5e": "setImmutables", + "927c4bf7": "upgradeExternal", + "440ed10d": "expectEmit", + "b7b080ab": "transferTokenToSharedBridge", + "db541184": "setShouldRevertOnExecuteBatches", + "eff6b27d": "assertEq", + "fdbb0301": "__DEPRECATED_l2BridgeAddress", + "b873634c": "assertNotEq", + "7a675bb6": "createWallet", + "399542e9": "tryBlockAndAggregate", + "81bad6f3": "expectEmit", + "ae1f6aaf": "l2Bridge", + "46cc92d9": "difficulty", + "498fdcf4": "parseJsonStringArray", + "d3522ae6": "parseTomlIntArray", + "ae5a2ae8": "serializeUintToHex", + "23dc4a09": "keccakPerformUpgrade", + "07f8c636": "multicall", + "1dd93b33": "keccakValidationTest", + "fd3c6b55": "processCalldataDA", + "e6d9923b": "proveL2LogInclusion", + "c3077fa9": "blockAndAggregate", + "c6ce059d": "parseAddress", + "cf347e17": "setValidator", + "b22dd78e": "storedBatchHash", + "f21d52c7": "serializeBytes", + "975d5a12": "assertEq", + "7ac3a553": "withdrawLegacyBridge", "c4d66de8": "initialize", - "7c9bd1f3": "publishTimestampDataToL1", - "69c76df2": "readUint32", - "a75b496d": 
"getAllHyperchainChainIDs", - "f5ba4232": "removeStateTransitionManager", - "42cbb15c": "getBlockNumber", - "607e2cb2": "setRevertReceive", - "328ef4fe": "setBaseTokenGasMultiplierPrice", - "1c50cfea": "addTokenAssetId", - "6d1d8363": "scheduleShadow", - "9cc7f708": "balanceOf", - "933999fb": "deposit", - "c2e047ff": "aggregate3", - "bb7044b6": "stateTransitionManagerIsRegistered", - "d4ce08c2": "addNewChain", - "f34d1868": "setExecutionDelay", - "9caf9bac": "setX", + "d77bfdb9": "parseTomlBytes", + "7b315630": "upgradeChainFromVersion", + "168b64d3": "createDir", + "cdffacc6": "facetAddress", + "e0ab6368": "assetIdIsRegistered", + "e25242c0": "assertGe", + "d566afd3": "createBatchCommitment", + "afc98040": "broadcast", + "cc7b0487": "parseTomlUint", + "3fdf4e15": "clearMockedCalls", + "ee82ac5e": "getBlockHash", + "79c4f929": "markBytecodeAsPublished", + "e5d6bf02": "warp", + "03e0aca9": "revertToAndDelete", + "d83e4e03": "genesisUpgrade", + "4a2e35ba": "withdraw", + "ba238947": "getProtocolVersion", + "88b44c85": "assertEq", + "fee9a469": "serializeUint", + "09e14277": "setStateTransitionManager", + "c63c4e9b": "minDelay", + "95218ecd": "executeInstant", + "abbf21cc": "assertApproxEqRelDecimal", + "addde2b6": "parseJsonUint", + "a1a7cddb": "runDeploySharedBridge", + "3644e515": "DOMAIN_SEPARATOR", + "6a5066d4": "assertApproxEqAbsDecimal", + "6edd4f12": "commitBatchesSharedBridge", + "78611f0e": "assertGtDecimal", "f113c88b": "createNewChain", - "1cc5d103": "setPorterAvailability", - "cdf25430": "L1_ASSET_ROUTER", - "def9d6af": "protocolVersionIsActive", - "c21a38e2": "proveL2MessageInclusion", - "e543e5bf": "setChainCreationParams", - "4be99e1d": "getCurrentPubdataCost", - "74f4d30d": "storedBlockHash", - "f8f7cd76": "validateTransaction", - "7a0ed627": "facets", - "38a78092": "increaseMinNonce", - "8cb7f3d0": "forceDeployOnAddresses", - "a2d5a0cc": "proveBatchesSharedBridge", - "301e7765": "getChainAdmin", - "fb644fc5": "addChainBatchRoot", + "f3dec099": "envUint", + "4700d74b": "envOr", + "ed7c5462": "createWallet", + "7676e127": "serializeInt", + "b6ea1757": "pushNewLeaf", + "b2ded522": "initialize", + "b298e36b": "push", + "c987336c": "upgrade", + "3cda3351": "create2", + "dd85df2d": "setEraLegacyBridgeLastDepositTime", + "32c8176d": "deriveKey", + "e9420f8c": "whitelistedSettlementLayers", + "e24fed00": "assertEq", + "d323826a": "computeCreate2Address", "6006d8b5": "verifyCompressedStateDiffs", - "39509351": "increaseAllowance", - "51cff8d9": "withdraw", - "8ffe1b81": "setBridgeHubAddress", - "95ce3e93": "decodeString", - "09824a80": "registerToken", - "d86970d8": "getL2BootloaderBytecodeHash", - "a31ee5b0": "initialize", - "0d4651aa": "storeAccountConstructedCodeHash", - "9a188371": "requestL2TransactionDirect", - "ed1d7d97": "chainIndexToId", - "c63c4e9b": "minDelay", - "546b6d2a": "SHARED_BRIDGE", - "187598a5": "getNewAddressCreate", - "bf529569": "setFreezability", + "1e279d41": "promptSecret", + "0d14edf7": "registerAlreadyDeployedHyperchain", + "5d382700": "create2Account", + "38720778": "sharedBridge", + "4d2301cc": "getEthBalance", + "11d1364a": "assertLeDecimal", + "1ff5a783": "execute", + "5c975abb": "paused", + "cf22e3c9": "startStateDiffRecording", + "64bc3e64": "envOr", + "55d35d18": "getValueUnderNonce", + "592151f0": "parseToml", + "8fbb3711": "claimFailedDepositLegacyErc20Bridge", + "53e61bdc": "processL2RollupDAValidatorOutputHash", + "41cf49bb": "prepareChainCommitment", + "86d516e8": "getCurrentBlockGasLimit", "cfe7af7c": "finalizeDeposit", - "bcf284e5": 
"executeTransaction", - "3437949a": "l1GenesisUpgrade", - "f54266a2": "l1TokenAddress", + "3ce695e7": "registerSTMAssetOnL1", + "b71bcf90": "reinitializeToken", + "db07fcd2": "assertGt", + "969b53da": "l1Bridge", + "e717bab7": "proveL1ToL2TransactionStatusViaGateway", + "1f067457": "revertTransfer", + "eb85e83b": "envOr", + "19cae462": "difficulty", + "7a1d8d3a": "safeTransferFundsFromLegacy", + "619d897f": "writeLine", + "5518c73b": "getStateTransitionManager", + "ca8f93f1": "setLegacyBaseTokenAssetId", + "eced0bf0": "__DEPRECATED_tokenIsRegistered", + "51d218f7": "unfreezeChain", + "4c6314f0": "getMarker", + "b760faf9": "depositTo", + "a1954fc5": "getTotalPriorityTxs", + "f120e6c4": "encodeTxDataHash", + "a2d5a0cc": "proveBatchesSharedBridge", + "8cf25ef4": "assertApproxEqRel", + "b8776d4d": "chainRegistered", + "7528c2c6": "applyL1ToL2Alias", + "91c75bc3": "parseJsonBytes32Array", + "22100064": "rememberKey", + "46657fe9": "getVerifier", + "e8b99b1b": "deposit", + "f1a78aa6": "postTransaction", + "a888cc3a": "bridgehubRequestL2TransactionOnGateway", + "2bcd50e0": "resumeGasMetering", + "4db19e7e": "assertEq", + "af6ed122": "executeUpgrade", + "47e50cce": "prank", + "03c5d8af": "forwardTransactionOnGateway", + "d23cd037": "mockCallRevert", + "16ed7bc4": "readFileBinary", + "28e439f3": "tryBlockAndAggregate", + "ced531eb": "setHashes", + "038a24bc": "validateAndPayForPaymasterTransaction", + "4cc5b15e": "diamondCut", + "27e86d6e": "getLastBlockHash", + "ae3165b3": "toBase64URL", + "ce817d47": "startBroadcast", + "823f1d96": "l2TokenProxyBytecodeHash", + "4b561753": "addValidator", + "d9bbf3a1": "rollFork", + "6fadcf72": "forward", + "bd6af434": "expectCall", + "4dd18bf5": "setPendingAdmin", + "84c2ff75": "stmAssetId", + "681fe70c": "isEmpty", + "fe26699e": "getTotalBlocksCommitted", + "a75b496d": "getAllHyperchainChainIDs", + "35d6ad46": "writeJson", + "40c10f19": "mint", + "06e7517b": "appendTransactionToCurrentL2Block", + "b852ad36": "l1SharedBridge", + "3ea053eb": "deactivate", + "e02da327": "readUint256", + "3ebf73b4": "getDeployedCode", + "86b7f856": "publishPubdataAndClearState", + "4145ca27": "removePriorityQueueFront", + "ef0e2ff4": "setChainId", + "68c09202": "executeUpgradeNoOverlap", + "402efc91": "stateTransitionManager", "c9d1c097": "stmAssetIdFromChainId", - "39d7d4aa": "getPriorityTreeRoot", - "41c841c3": "L1_WETH_TOKEN", - "19fa7f62": "claimFailedDeposit", - "5c60da1b": "implementation", - "dd62ed3e": "allowance", - "9cd45184": "chainBalance", - "7958004c": "getOperationState", - "8cf2b2f0": "uncheckedInc", - "715018a6": "renounceOwnership", - "30bda03e": "setL1Erc20Bridge", + "315fff4e": "THIS_ADDRESS", + "3558c188": "executeBatches", "c0d5b949": "getCurrentPubdataSpent", - "4de2e468": "getRawCodeHash", - "7ecebe00": "nonces", - "0e18b681": "acceptAdmin", - "d0468156": "getPendingAdmin", - "d83e4e03": "genesisUpgrade", - "49eb3b50": "getTransactionHashes", - "ebf0c717": "root", - "8da5cb5b": "owner", - "11a2ccc1": "finalizeWithdrawal", - "1dd93b33": "keccakValidationTest", - "f088ccdc": "callCodeOracle", - "aad74262": "setProtocolVersionDeadline", - "72c84445": "callKeccak", - "21f603d7": "setTransactionFilterer", - "52ef6b2c": "facetAddresses", - "9e6ea417": "depositLegacyErc20Bridge", + "3d5923ee": "setEnv", + "57e22dde": "makePersistent", + "2b589b28": "lastCallGas", + "896909dc": "getMinNonce", + "dbaad147": "mockCallRevert", + "3437949a": "l1GenesisUpgrade", + "c4d252f5": "cancel", + "dd82d13e": "skip", "960dcf24": "getBaseTokenAssetId", - "a888cc3a": 
"bridgehubRequestL2TransactionOnGateway", - "c7ca373c": "initFromCommitment", - "548a5a33": "setAssetHandlerAddressThisChain", - "402efc91": "stateTransitionManager", - "7b30c8da": "getL2SystemContractsUpgradeTxHash", - "0ef26743": "height", - "79ba5097": "acceptOwnership", - "584b153e": "isOperationPending", - "06fdde03": "name", - "e717bab7": "proveL1ToL2TransactionStatusViaGateway", - "a8b0574e": "getCurrentBlockCoinbase", - "30e5ccbd": "incrementTxNumberInBatch", - "ef011dff": "ERA_CHAIN_ID", - "f8c1f0d2": "upgradeChainFromVersion", + "1777e59d": "parseJsonBytes32", + "2e522851": "setNewVersionUpgrade", + "d4ce08c2": "addNewChain", + "c74e9deb": "envOr", + "f320d963": "assertEq", + "a5277a02": "initialize", + "9366518b": "createNewChain", + "7e77b0c5": "assertEqDecimal", + "29233b1f": "deriveKey", + "dc28c0f1": "assertGeDecimal", + "975a6ce9": "rpcUrl", + "65e7c844": "parseTomlAddress", + "e13a1834": "expectCallMinGas", + "4d7baf06": "envBytes", + "5c60da1b": "implementation", + "0f3fa211": "setNativeTokenVault", + "46746c7d": "commitBatchesSharedBridge", + "cbcf2e3c": "isTransactionAllowed", + "bcf284e5": "executeTransaction", + "a8d4d1d9": "assertGe", + "7a28adb2": "proveL2LogInclusion", + "972c6062": "serializeAddress", + "b4866c43": "setFeeParams", + "08dc3360": "validatorPubKeyHashes", + "e66c8c44": "validatorTimelock", + "d74c83a4": "rollFork", + "2ab0f529": "isOperationDone", + "187598a5": "getNewAddressCreate", "f3b7dead": "getProxyAdmin", - "f26f3c8f": "proveL2MessageInclusion", - "3558c188": "executeBatches", - "bcd1b23d": "updateFullTree", + "952a3ee7": "getERC20Getters", + "0f23da43": "revertBatchesSharedBridge", + "87d9d023": "verify", + "0ec6b0b7": "getPriorityTxMaxGasLimit", + "b25c5a25": "sign", + "3408e470": "getChainId", + "707df785": "assertEq", + "7a0ed627": "facets", + "85e4e16a": "assetDeploymentTracker", + "d0f2c663": "getBatchNumberAndTimestamp", + "01d23d4b": "diamondCut", "3a3f36f9": "codeOracleTest", - "1de72e34": "baseTokenGasPriceMultiplierDenominator", + "42842e0e": "safeTransferFrom", + "28a249b0": "getLabel", + "625387dc": "unixTime", + "997a0222": "revokePersistent", + "6478d8ed": "chainAdmin", + "823447c8": "setResult", + "47eaf474": "prompt", + "d4b9f4fa": "messageRoot", + "e76db865": "setPubdataPricingMode", + "f8e18b57": "setNonce", + "d4a4ca0d": "getBlockNumberAndTimestamp", + "6bcb2c1b": "deriveKey", + "89f9a072": "validatePubdata", + "6a0cd1f5": "removeValidator", + "fe173b97": "gasPrice", + "2fce7883": "parseJsonAddressArray", + "1f98fa08": "createNewChain", + "796b89b9": "getBlockTimestamp", + "9cd45184": "chainBalance", + "363bf964": "setAddresses", + "8e8acf87": "getL2BlockNumberAndTimestamp", + "191553a4": "getRecordedLogs", + "7b510fe8": "getAccountInfo", + "9caf9bac": "setX", + "fe74f05b": "assertEq", + "e00ad03e": "replace", + "97624631": "assertEq", + "6e9d7899": "legacyBridge", + "5e97348f": "envOr", + "7ba04809": "assertFalse", + "fc57565f": "upgradeChainFromVersion", + "3635f3e6": "resetTxNumberInBatch", + "522074ab": "parseJsonUintArray", + "97c09d34": "revertBatches", + "c87325f1": "finalizeWithdrawal", + "64af255d": "isContext", + "5af231c1": "envBytes32", + "b96213e4": "mockCall", + "f877cb19": "envString", + "6900a3ae": "toString", + "e52db4ca": "baseTokenAssetId", + "60429eb2": "assertApproxEqAbsDecimal", + "1dcd1f68": "assertNotEq", + "73c58a2d": "publishBlobs", + "7f61885c": "proveBatches", + "7ecebe00": "nonces", + "26e4ae25": "initialize", + "bb7044b6": "stateTransitionManagerIsRegistered", + "91b19874": "validators", + 
"c1adbbff": "expectCall", + "06bed036": "setL2Block", + "02fa5779": "setNewBatch", + "0f29772b": "rollFork", + "4f1e1be0": "storeAccountConstructingCodeHash", + "7c9bd1f3": "publishTimestampDataToL1", + "a3912ec8": "receiveEther", "81d100a3": "scheduleTransparent", - "85e4e16a": "assetDeploymentTracker", - "204e1c7a": "getProxyImplementation", - "d566afd3": "createBatchCommitment", - "70f5c679": "setMessageRoot", - "07168226": "deployBeaconProxy", - "7b574586": "publishedBlobCommitments", - "fcc73360": "updateLeaf", - "631f4bac": "getPriorityQueueSize", - "3e64a696": "getBasefee", - "facd743b": "isValidator", - "7fb67816": "setValidatorTimelock", - "ee82ac5e": "getBlockHash", - "6e9960c3": "getAdmin", - "98acd7a6": "getBaseToken", - "06e7517b": "appendTransactionToCurrentL2Block", - "b993549e": "getCommittedBatchTimestamp", - "23dc4a09": "keccakPerformUpgrade", - "cf347e17": "setValidator", - "3408e470": "getChainId", - "ae1f6aaf": "l2Bridge", - "c2e90293": "bridgeRecoverFailedTransfer", - "86b7f856": "publishPubdataAndClearState", - "b292f5f1": "proveL1ToL2TransactionStatus", - "7a592065": "calculateRoot", - "a5277a02": "initialize", - "ef939455": "keccakUpgradeTest", - "3644e515": "DOMAIN_SEPARATOR", - "306395c6": "incrementDeploymentNonce", + "2ae9c600": "protocolVersion", + "ed1d7d97": "chainIndexToId", + "c4bc59e0": "readDir", "b277f199": "uncheckedAdd", - "6fadcf72": "forward", - "ae65def1": "node", - "e0bf0850": "setShouldRevertOnProveBatches", - "a457c2d7": "decreaseAllowance", - "9f3f89dc": "getZero", - "4dd18bf5": "setPendingAdmin", - "33ce93fe": "getProtocolVersion", - "c87325f1": "finalizeWithdrawal", - "40a434d5": "transferTokenToNTV", - "e9420f8c": "whitelistedSettlementLayers", - "3f704d2a": "setAssetHandlerAddress", - "ede25608": "protocolVersionToUpgradeTimestamp", - "042901c7": "proveL1ToL2TransactionStatus", + "a5982885": "assertFalse", + "98680034": "createSelectFork", + "aa5cf90e": "stopAndReturnStateDiff", + "3b925549": "prevrandao", + "fcc73360": "updateLeaf", "cab7e8eb": "isNonceUsed", - "5aa6fa1f": "NATIVE_TOKEN_VAULT", - "b8776d4d": "chainRegistered", - "8fbb3711": "claimFailedDepositLegacyErc20Bridge", - "8dd14802": "setBridge", + "7404f1d2": "createWallet", + "7321c485": "dummySetValidator", + "501e60d5": "setUpgradeDiamondCut", + "fa9d8713": "sleep", + "310ab089": "getImmutable", + "2e1a7d4d": "withdraw", + "2986c0e5": "index", + "2f745c59": "tokenOfOwnerByIndex", + "52d1902d": "proxiableUUID", + "898e83fc": "assertNotEq", + "4f6ccce7": "tokenByIndex", + "8e214810": "parseTomlBytes32", + "15f9a2fe": "prepareForPaymaster", + "91f3b94f": "parseJsonBoolArray", + "be65940a": "setEraPostLegacyBridgeUpgradeFirstBatch", + "40a434d5": "transferTokenToNTV", + "ef011dff": "ERA_CHAIN_ID", + "c2aaf9c4": "receiveEth", + "84b0196e": "eip712Domain", + "074ae3d7": "toUppercase", + "313ce567": "decimals", + "a9f6d941": "executeUpgrade", + "d0cbbdef": "assertEqDecimal", + "4049ddd2": "chainId", + "8466d8d1": "getBridgeHubAddress", + "bcd1b23d": "updateFullTree", + "fccc11c4": "assertApproxEqRelDecimal", + "f34d1868": "setExecutionDelay", + "892a0c61": "envInt", + "41af2f52": "recordLogs", + "b4a85892": "envOr", + "ad7e232e": "setImmutables", + "74f4f547": "bridgeBurn", + "f5407abe": "setValues", + "b1fde1a8": "sharedTree", + "a972d037": "assertLtDecimal", + "a225efcb": "setPubdataInfo", + "b0f40a17": "processBatch", + "d1ba7e97": "hyperchainAddress", + "4dfe692c": "assertLe", + "7fb67816": "setValidatorTimelock", + "36f656d8": "assertEq", + "890c283b": "computeCreate2Address", + 
"83211b40": "signP256", + "98461504": "setUpgradeDiamondCut", + "18876a04": "chunkPubdataToBlobs", + "f30c7ba3": "expectCall", + "eb672419": "requestL2Transaction", + "7069d0c0": "executeInstant", + "6229498b": "deriveKey", + "1ecb7d33": "assertApproxEqRel", + "bf1fe420": "setGasPrice", + "72c84445": "callKeccak", + "1c72346d": "resetNonce", + "4cd88b76": "initialize", + "8c374c65": "ensNamehash", + "b12e1694": "assertNotEq", + "e5fb9b4a": "assertEq", + "bf54096e": "MAX_NUMBER_OF_HYPERCHAINS", + "24a55db9": "markBytecodeAsPublished", + "fa91454d": "parseUint", + "62ee05f4": "promptAddress", "b3160bad": "executeBatchesSharedBridge", + "08284e57": "upgrade", + "48ceb85e": "chainIndex", + "70f55728": "readLine", + "e4441b98": "initialize", + "65d5c135": "assertLt", + "8bff9133": "assertGeDecimal", + "191f1b30": "assertEq", + "ff483c54": "coinbase", + "3f704d2a": "setAssetHandlerAddress", + "c0991525": "claimFailedDeposit", + "86b9620d": "expectEmit", + "9cc7f708": "balanceOf", + "56f29cba": "assertNotEq", + "eb39e6d5": "stateTransitionManager", + "9cc395d0": "bridgeCheckCounterpartAddress", + "51ac6a33": "writeToml", + "0f28c97d": "getCurrentBlockTimestamp", + "99a88ec4": "upgrade", + "beda594a": "setHyperchain", + "60f78733": "chainSetTokenMultiplierSetter", + "9e8945d2": "verificationKeyHash", + "61f91b2e": "initialForceDeploymentHash", + "fa8f7ea6": "getAllHyperchains", + "205c2878": "withdrawTo", + "36ba0355": "bridgeMint", + "4c63e562": "assume", + "62f84b24": "sendToL1", + "1f6d6ef7": "getBlobBaseFee", + "b5b18fe5": "processL2Logs", + "9e6ea417": "depositLegacyErc20Bridge", + "c8bd0e4a": "toBase64URL", + "2281f367": "envOr", + "75fe6a99": "pushBack", + "ecf95b8a": "createAccount", + "ebf0c717": "root", + "81409b91": "mockCall", + "715018a6": "renounceOwnership", + "56142d7a": "priorityQueueFrontOperation", "f5c1182c": "getSemverProtocolVersion", + "13bc9f20": "isOperationReady", + "a9b0d128": "setPriorityTreeStartIndex", + "95ce3e93": "decodeString", + "042901c7": "proveL1ToL2TransactionStatus", + "f7fe3477": "assertEq", + "98296c54": "assertEq", + "ac22e971": "serializeBool", + "92925aa1": "serializeBool", + "3868ac34": "assertEq", + "74044673": "addStateTransitionManager", + "1e19e657": "parseJsonAddress", + "9b3358b0": "serializeJson", + "671a7131": "settlementLayer", + "ddeaa8e6": "getBatchHash", + "7da01cd6": "executeUpgrade", + "0c9fd581": "assertTrue", + "39d7d4aa": "getPriorityTreeRoot", + "53b9e632": "assetHandlerAddress", + "9884b232": "serializeBytes", + "8d1cc925": "getCode", + "23361207": "expectCall", + "77421056": "setFunctionToCall", + "af368a08": "fsMetadata", + "689992b3": "undoL1ToL2Alias", + "5a590335": "getDAValidatorPair", + "19fa7f62": "claimFailedDeposit", + "2a79c611": "getCommitment", + "202bcce7": "validateTransaction", + "155fd27a": "setValueUnderNonce", + "bb0fd610": "extendedAccountVersion", + "46d0b252": "assertNotEq", + "f5f15168": "l2TokenAddress", + "fb644fc5": "addChainBatchRoot", + "3a9d7f8d": "stmDeployer", "8b257989": "executionDelay", - "588570a5": "initialize", + "3e914080": "assertLt", + "c2eeeebd": "l1Address", + "71ee464d": "createSelectFork", + "a34edc03": "assertTrue", + "8bb8dd43": "parseTomlString", + "236e4d66": "assertNotEq", + "64e130cf": "nativeTokenVault", + "aad74262": "setProtocolVersionDeadline", + "9711715a": "snapshot", + "64949a8d": "assertGtDecimal", + "4d4a1eca": "setTokenMultiplier", + "79ba5097": "acceptOwnership", + "e2a9d554": "setUpgradeTimestamp", + "27eb6c0f": "securityCouncil", + "45b56078": "startPrank", + "652fd489": 
"promptUint", + "4623c91d": "setValidator", + "82b57749": "forwardedBridgeMint", + "f1afe04d": "removeFile", + "8ffe1b81": "setBridgeHubAddress", + "accdd16c": "freezeChain", + "12f43dab": "bridgehubRequestL2Transaction", + "c9f5c932": "requestL2TransactionTwoBridges", + "98f9bdbd": "assertNotEq", + "42346c5e": "parseInt", + "9b67b21c": "setNonceUnsafe", + "9f629281": "parseTomlStringArray", + "3659cfe6": "upgradeTo", + "631f4bac": "getPriorityQueueSize", + "bce38bd7": "tryAggregate", + "fea2d14f": "assertApproxEqRel", + "2d812b44": "serializeBytes32", + "94ca304b": "numNodes", + "799cd333": "sign", + "c2e90293": "bridgeRecoverFailedTransfer", + "69340beb": "multicall", + "70a08231": "balanceOf", "4cd40a02": "setLegacyTokenAssetId", - "d124dc4f": "send", - "23b872dd": "transferFrom", - "086a56f8": "getBaseTokenBridge", - "689992b3": "undoL1ToL2Alias", - "03c5d8af": "forwardTransactionOnGateway", - "48ceb85e": "chainIndex", - "ba334825": "hyperchain", - "b1fde1a8": "sharedTree", - "7069d0c0": "executeInstant", - "c2aaf9c4": "receiveEth", - "2986c0e5": "index", - "b5872958": "timestamps", - "c2e4ff97": "markAccountCodeHashAsConstructed", - "9c4d535b": "create", - "923b3b56": "forceDeployOnAddress", - "3635f3e6": "resetTxNumberInBatch", - "19698bc9": "infiniteFunction", - "315fff4e": "THIS_ADDRESS", - "52c9eacb": "upgradeCutHash", - "18e3a941": "getVerifierParams", - "29f172ad": "unsafeOverrideBatch", - "4b561753": "addValidator", - "a9059cbb": "transfer", - "949431dc": "approvalBased", + "4bed8212": "isWithdrawalFinalized", + "84da1fb4": "getNewAddressCreate2", + "ffa18649": "addr", + "9f3f89dc": "getZero", + "081812fc": "getApproved", + "6ef25c3a": "baseFee", + "29092d0e": "remove", + "201e43e2": "serializeBytes32", + "74318528": "envOr", + "ec8067c7": "updateNonceOrdering", + "841a9d42": "aggregate3Value", + "b5df27c8": "parseTomlUintArray", + "f1d357e5": "L1_SHARED_BRIDGE", + "1de72e34": "baseTokenGasPriceMultiplierDenominator", + "a5cbfe65": "toBase64", + "528a683c": "keyExists", + "71abd109": "upgrade", + "381c3f13": "checkDA", + "0ef26743": "height", + "7fefbbe0": "assertLeDecimal", + "ef939455": "keccakUpgradeTest", + "1806aa18": "getCodeSize", + "f4943a20": "protocolVersionDeadline", + "de8fa431": "getSize", + "a851ae78": "setTxOrigin", + "f45c1ce7": "tryFfi", + "39b37ab0": "fee", + "261a323e": "exists", + "7cb9357e": "gasPerPubdataByte", "8f283970": "changeAdmin", - "85df51fd": "blockHash", - "dead6f7f": "getHyperchain", - "896909dc": "getMinNonce", - "7eff275e": "changeProxyAdmin", - "27ae4c16": "freezeDiamond", - "566338a9": "getL1TokenAddress", + "266cf109": "record", + "7efda2ae": "proveL2LeafInclusion", + "246a61de": "ERA_DIAMOND_PROXY", + "b4d6c782": "etch", + "c05afaa6": "initializeDevBridge", + "52ef6b2c": "facetAddresses", + "8c1aa205": "sign", + "129e9002": "serializeUint", + "d52471c1": "requestL2TransactionDirect", + "c2e047ff": "aggregate3", + "e48a8f8d": "assertEq", + "027f12e1": "changeFeeParams", + "095ea7b3": "approve", + "8f31f052": "isWithdrawalFinalized", + "7a592065": "calculateRoot", + "1c5a9d9c": "activate", + "dd354a06": "calculateCreate2TokenAddress", + "9d1b5a81": "getL2SystemContractsUpgradeBlockNumber", + "fb1a9a57": "getDeploymentNonce", + "6d1d8363": "scheduleShadow", + "42cbb15c": "getBlockNumber", + "18160ddd": "totalSupply", + "c1350739": "parseTomlInt", + "859216bc": "envOr", + "d92f86a2": "setLegacyChainAddress", + "f280efbe": "initializeChainGovernance", + "6e9960c3": "getAdmin", + "af6a2dcd": "getTotalBlocksVerified", + "50bb0884": "toLowercase", 
+ "9623609d": "upgradeAndCall", "8ac84c0e": "txNumberInBlock", - "53ce2061": "revertBatches", - "9a8a0592": "chainId", - "f5407abe": "setValues", - "46657fe9": "getVerifier", - "484f0505": "getHyperchainLegacy", - "b760faf9": "depositTo", - "5de097b1": "nullifyChainBalanceByNTV", - "e8295588": "zeros", - "f90eb963": "getPorterAvailability", - "57180981": "updateAccountVersion", - "579952fc": "transferFromTo", - "d505accf": "permit", - "e02da327": "readUint256", - "51d218f7": "unfreezeChain", - "8466d8d1": "getBridgeHubAddress", - "b381724e": "setFeeParams", - "d9caed12": "withdraw", - "9d1b5a81": "getL2SystemContractsUpgradeBlockNumber" + "9fa8826b": "depositHappened", + "b5872958": "timestamps", + "49eb3b50": "getTransactionHashes", + "6c0960f9": "finalizeEthWithdrawal", + "c1978d1f": "envUint", + "c4879440": "bridgehubDepositBaseToken", + "0603ea68": "assertNotEq", + "c924de35": "transferEthToSharedBridge", + "f8c1f0d2": "upgradeChainFromVersion", + "7d15d019": "isDir", + "ea060291": "allowCheatcodes", + "82ad56cb": "aggregate3", + "805b9869": "executeTransactionFromOutside", + "1624f6c6": "initialize", + "daa51a8c": "pushBack", + "286fafea": "assertNotEq", + "5df93c9b": "assertGeDecimal", + "974ef924": "parseBool" } \ No newline at end of file diff --git a/etc/utils/src/index.ts b/etc/utils/src/index.ts index 4131ae88f05..0e2d5e25eda 100644 --- a/etc/utils/src/index.ts +++ b/etc/utils/src/index.ts @@ -25,7 +25,8 @@ const IGNORED_DIRS = [ 'artifacts-zk', 'cache-zk', // Ignore directories with OZ and forge submodules. - 'contracts/l1-contracts/lib' + 'contracts/l1-contracts/lib', + 'era-observability' ]; const IGNORED_FILES = ['KeysWithPlonkVerifier.sol', 'TokenInit.sol', '.tslintrc.js', '.prettierrc.js']; @@ -33,6 +34,7 @@ const IGNORED_FILES = ['KeysWithPlonkVerifier.sol', 'TokenInit.sol', '.tslintrc. 
 // spawns a new shell and can execute arbitrary commands, like "ls -la | grep .env"
 // returns { stdout, stderr }
 const promisified = promisify(_exec);
+
 export function exec(command: string) {
     command = command.replace(/\n/g, ' ');
     return promisified(command);
diff --git a/flake.nix b/flake.nix
index cc14faebfed..ef618816f9c 100644
--- a/flake.nix
+++ b/flake.nix
@@ -67,7 +67,6 @@
       };
 
       craneLib = (crane.mkLib pkgs).overrideToolchain rustVersion;
-      NIX_OUTPATH_USED_AS_RANDOM_SEED = "aaaaaaaaaa";
 
       commonArgs = {
         nativeBuildInputs = with pkgs;[
@@ -81,6 +80,8 @@
           snappy.dev
           lz4.dev
           bzip2.dev
+          rocksdb
+          snappy.dev
         ];
 
         src = with pkgs.lib.fileset; toSource {
@@ -97,7 +98,9 @@
 
         env = {
           OPENSSL_NO_VENDOR = "1";
-          inherit NIX_OUTPATH_USED_AS_RANDOM_SEED;
+          ROCKSDB_LIB_DIR = "${pkgs.rocksdb.out}/lib";
+          SNAPPY_LIB_DIR = "${pkgs.snappy.out}/lib";
+          NIX_OUTPATH_USED_AS_RANDOM_SEED = "aaaaaaaaaa";
         };
 
         doCheck = false;
diff --git a/infrastructure/zk/src/docker.ts b/infrastructure/zk/src/docker.ts
index 27de68d1d98..035061a8ed0 100644
--- a/infrastructure/zk/src/docker.ts
+++ b/infrastructure/zk/src/docker.ts
@@ -114,7 +114,7 @@ async function _build(image: string, tagList: string[], dockerOrg: string, platf
     if (platform != '') {
         buildArgs += `--platform=${platform} `;
     }
-    if (image === 'prover-gpu-fri') {
+    if (image === 'prover-gpu-fri' || image === 'proof-fri-gpu-compressor') {
         const cudaArch = process.env.CUDA_ARCH;
         buildArgs += `--build-arg CUDA_ARCH='${cudaArch}' `;
     }
@@ -126,6 +126,8 @@ async function _build(image: string, tagList: string[], dockerOrg: string, platf
     }
     buildArgs += extraArgs;
 
+    console.log('Build args: ', buildArgs);
+
     const buildCommand =
         `DOCKER_BUILDKIT=1 docker buildx build ${tagsToBuild}` +
         (buildArgs ? ` ${buildArgs}` : '') +
diff --git a/prover/Cargo.lock b/prover/Cargo.lock
index 80d6325f4d1..4748acaf12b 100644
--- a/prover/Cargo.lock
+++ b/prover/Cargo.lock
@@ -313,6 +313,8 @@ dependencies = [
  "http 1.1.0",
  "http-body 1.0.1",
  "http-body-util",
+ "hyper 1.4.1",
+ "hyper-util",
  "itoa",
  "matchit",
  "memchr",
@@ -321,10 +323,15 @@ dependencies = [
  "pin-project-lite",
  "rustversion",
  "serde",
+ "serde_json",
+ "serde_path_to_error",
+ "serde_urlencoded",
  "sync_wrapper 1.0.1",
+ "tokio",
  "tower",
  "tower-layer",
  "tower-service",
+ "tracing",
 ]
 
 [[package]]
@@ -345,6 +352,7 @@ dependencies = [
  "sync_wrapper 0.1.2",
  "tower-layer",
  "tower-service",
+ "tracing",
 ]
 
 [[package]]
@@ -5110,9 +5118,9 @@
 checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4"
 
 [[package]]
 name = "serde"
-version = "1.0.204"
+version = "1.0.210"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12"
+checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a"
 dependencies = [
  "serde_derive",
 ]
@@ -5129,9 +5137,9 @@ dependencies = [
 
 [[package]]
 name = "serde_derive"
-version = "1.0.204"
+version = "1.0.210"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222"
+checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f"
 dependencies = [
  "proc-macro2 1.0.86",
  "quote 1.0.36",
@@ -5150,6 +5158,16 @@ dependencies = [
  "serde",
 ]
 
+[[package]]
+name = "serde_path_to_error"
+version = "0.1.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6"
+dependencies = [
+ "itoa",
+ "serde",
+]
+
 [[package]]
name = "serde_urlencoded" version = "0.7.1" @@ -7330,6 +7348,33 @@ dependencies = [ "zksync_pairing", ] +[[package]] +name = "zksync_circuit_prover" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "bincode", + "clap 4.5.11", + "shivini", + "tokio", + "tokio-util", + "tracing", + "vise", + "zkevm_test_harness", + "zksync_config", + "zksync_core_leftovers", + "zksync_env_config", + "zksync_object_store", + "zksync_prover_dal", + "zksync_prover_fri_types", + "zksync_prover_fri_utils", + "zksync_prover_keystore", + "zksync_queued_job_processor", + "zksync_types", + "zksync_utils", +] + [[package]] name = "zksync_concurrency" version = "0.1.1" @@ -7519,6 +7564,7 @@ dependencies = [ "tokio", "tracing", "vise", + "zksync_concurrency", "zksync_consensus_roles", "zksync_consensus_storage", "zksync_contracts", @@ -7653,7 +7699,6 @@ dependencies = [ "hex", "itertools 0.10.5", "once_cell", - "pretty_assertions", "thiserror", "tracing", "vise", @@ -7916,13 +7961,16 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", + "axum", "clap 4.5.11", "ctrlc", + "serde", "tokio", "tracing", "vise", "zksync_config", "zksync_core_leftovers", + "zksync_db_connection", "zksync_prover_dal", "zksync_types", "zksync_utils", @@ -7936,6 +7984,7 @@ dependencies = [ "anyhow", "bincode", "circuit_definitions", + "futures 0.3.30", "hex", "md5", "once_cell", @@ -7943,6 +7992,7 @@ dependencies = [ "serde_json", "sha3 0.10.8", "shivini", + "tokio", "tracing", "zkevm_test_harness", "zksync_basic_types", @@ -7991,6 +8041,7 @@ dependencies = [ "secp256k1", "serde", "serde_json", + "serde_with", "strum", "thiserror", "tracing", @@ -8074,7 +8125,7 @@ dependencies = [ [[package]] name = "zksync_vm2" version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=cd6136c42ec56856e0abcf2a98d1a9e120161482#cd6136c42ec56856e0abcf2a98d1a9e120161482" +source = "git+https://github.com/matter-labs/vm2.git?rev=74577d9be13b1bff9d1a712389731f669b179e47#74577d9be13b1bff9d1a712389731f669b179e47" dependencies = [ "enum_dispatch", "primitive-types", @@ -8086,7 +8137,7 @@ dependencies = [ [[package]] name = "zksync_vm2_interface" version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=cd6136c42ec56856e0abcf2a98d1a9e120161482#cd6136c42ec56856e0abcf2a98d1a9e120161482" +source = "git+https://github.com/matter-labs/vm2.git?rev=74577d9be13b1bff9d1a712389731f669b179e47#74577d9be13b1bff9d1a712389731f669b179e47" dependencies = [ "primitive-types", ] @@ -8098,6 +8149,7 @@ dependencies = [ "anyhow", "async-trait", "hex", + "pretty_assertions", "serde", "thiserror", "tracing", diff --git a/prover/Cargo.toml b/prover/Cargo.toml index 624661adc8d..b21ad800afa 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -19,6 +19,7 @@ categories = ["cryptography"] [workspace.dependencies] # Common dependencies anyhow = "1.0" +axum = "0.7.5" async-trait = "0.1" bincode = "1" chrono = "0.4.38" @@ -50,6 +51,7 @@ structopt = "0.3.26" strum = { version = "0.26" } tempfile = "3" tokio = "1" +tokio-util = "0.7.11" toml_edit = "0.14.4" tracing = "0.1" tracing-subscriber = "0.3" diff --git a/prover/crates/bin/circuit_prover/Cargo.toml b/prover/crates/bin/circuit_prover/Cargo.toml new file mode 100644 index 00000000000..a5751a4cd9a --- /dev/null +++ b/prover/crates/bin/circuit_prover/Cargo.toml @@ -0,0 +1,38 @@ +[package] +name = "zksync_circuit_prover" +version.workspace = true +edition.workspace = true +authors.workspace = true +homepage.workspace = true +repository.workspace = true +license.workspace = true 
+keywords.workspace = true +categories.workspace = true + +[dependencies] +tokio = { workspace = true, features = ["macros", "time"] } +tokio-util.workspace = true +anyhow.workspace = true +async-trait.workspace = true +tracing.workspace = true +bincode.workspace = true +clap = { workspace = true, features = ["derive"] } + +zksync_config.workspace = true +zksync_object_store.workspace = true +zksync_prover_dal.workspace = true +zksync_prover_fri_types.workspace = true +zksync_prover_fri_utils.workspace = true +zksync_queued_job_processor.workspace = true +zksync_types.workspace = true +zksync_prover_keystore = { workspace = true, features = ["gpu"] } +zksync_env_config.workspace = true +zksync_core_leftovers.workspace = true +zksync_utils.workspace = true + +vise.workspace = true +shivini = { workspace = true, features = [ + "circuit_definitions", + "zksync", +] } +zkevm_test_harness.workspace = true diff --git a/prover/crates/bin/circuit_prover/src/backoff.rs b/prover/crates/bin/circuit_prover/src/backoff.rs new file mode 100644 index 00000000000..6ddb3d94be3 --- /dev/null +++ b/prover/crates/bin/circuit_prover/src/backoff.rs @@ -0,0 +1,39 @@ +use std::{ops::Mul, time::Duration}; + +/// Backoff - convenience structure that takes care of backoff timings. +#[derive(Debug, Clone)] +pub struct Backoff { + base_delay: Duration, + current_delay: Duration, + max_delay: Duration, +} + +impl Backoff { + /// The delay multiplication coefficient. + // Currently it's hardcoded, but could be provided in the constructor. + const DELAY_MULTIPLIER: u32 = 2; + + /// Create a backoff with base_delay (first delay) and max_delay (maximum delay possible). + pub fn new(base_delay: Duration, max_delay: Duration) -> Self { + Backoff { + base_delay, + current_delay: base_delay, + max_delay, + } + } + + /// Get current delay, handling future delays if needed + pub fn delay(&mut self) -> Duration { + let delay = self.current_delay; + self.current_delay = self + .current_delay + .mul(Self::DELAY_MULTIPLIER) + .min(self.max_delay); + delay + } + + /// Reset the backoff time to the base delay + pub fn reset(&mut self) { + self.current_delay = self.base_delay; + } +}
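A quick sanity sketch (not part of the diff) of how the Backoff type above behaves; `Backoff` is re-exported from the crate's lib.rs further below, and the delays here are arbitrary:

use std::time::Duration;
use zksync_circuit_prover::Backoff;

fn main() {
    let mut backoff = Backoff::new(Duration::from_secs(5), Duration::from_secs(30));
    assert_eq!(backoff.delay(), Duration::from_secs(5)); // returns the current delay, then doubles it
    assert_eq!(backoff.delay(), Duration::from_secs(10));
    assert_eq!(backoff.delay(), Duration::from_secs(20));
    assert_eq!(backoff.delay(), Duration::from_secs(30)); // capped at max_delay
    backoff.reset();
    assert_eq!(backoff.delay(), Duration::from_secs(5)); // back to base_delay
}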
diff --git a/prover/crates/bin/circuit_prover/src/circuit_prover.rs b/prover/crates/bin/circuit_prover/src/circuit_prover.rs new file mode 100644 index 00000000000..1a5f8aa0d97 --- /dev/null +++ b/prover/crates/bin/circuit_prover/src/circuit_prover.rs @@ -0,0 +1,397 @@ +use std::{sync::Arc, time::Instant}; + +use anyhow::Context; +use shivini::{ + gpu_proof_config::GpuProofConfig, gpu_prove_from_external_witness_data, ProverContext, + ProverContextConfig, +}; +use tokio::{sync::mpsc::Receiver, task::JoinHandle}; +use tokio_util::sync::CancellationToken; +use zkevm_test_harness::prover_utils::{verify_base_layer_proof, verify_recursion_layer_proof}; +use zksync_object_store::ObjectStore; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; +use zksync_prover_fri_types::{ + circuit_definitions::{ + base_layer_proof_config, + boojum::{ + cs::implementations::{pow::NoPow, witness::WitnessVec}, + field::goldilocks::GoldilocksField, + worker::Worker, + }, + circuit_definitions::{ + base_layer::ZkSyncBaseLayerProof, recursion_layer::ZkSyncRecursionLayerProof, + }, + recursion_layer_proof_config, + }, + CircuitWrapper, FriProofWrapper, ProverArtifacts, WitnessVectorArtifactsTemp, +}; +use zksync_prover_keystore::GoldilocksGpuProverSetupData; +use zksync_types::protocol_version::ProtocolSemanticVersion; +use zksync_utils::panic_extractor::try_extract_panic_message; + +use crate::{ + metrics::CIRCUIT_PROVER_METRICS, + types::{DefaultTranscript, DefaultTreeHasher, Proof, VerificationKey}, + SetupDataCache, +}; + +/// In charge of proving circuits, given a Witness Vector source. +/// Both job runner & job executor. +#[derive(Debug)] +pub struct CircuitProver { + connection_pool: ConnectionPool<Prover>, + object_store: Arc<dyn ObjectStore>, + protocol_version: ProtocolSemanticVersion, + /// Witness Vector source receiver + receiver: Receiver<WitnessVectorArtifactsTemp>, + /// Setup Data used for proving & proof verification + setup_data_cache: SetupDataCache, +} + +impl CircuitProver { + pub fn new( + connection_pool: ConnectionPool<Prover>, + object_store: Arc<dyn ObjectStore>, + protocol_version: ProtocolSemanticVersion, + receiver: Receiver<WitnessVectorArtifactsTemp>, + max_allocation: Option<usize>, + setup_data_cache: SetupDataCache, + ) -> anyhow::Result<(Self, ProverContext)> { + // VRAM allocation + let prover_context = match max_allocation { + Some(max_allocation) => ProverContext::create_with_config( + ProverContextConfig::default().with_maximum_device_allocation(max_allocation), + ) + .context("failed initializing fixed gpu prover context")?, + None => ProverContext::create().context("failed initializing gpu prover context")?, + }; + Ok(( + Self { + connection_pool, + object_store, + protocol_version, + receiver, + setup_data_cache, + }, + prover_context, + )) + } + + /// Continuously polls `receiver` for Witness Vectors and proves them. + /// All job executions are persisted. + pub async fn run(mut self, cancellation_token: CancellationToken) -> anyhow::Result<()> { + while !cancellation_token.is_cancelled() { + let time = Instant::now(); + + let artifact = self + .receiver + .recv() + .await + .context("no Witness Vector Generators are available")?; + tracing::info!( + "Circuit Prover received job {:?} after: {:?}", + artifact.prover_job.job_id, + time.elapsed() + ); + CIRCUIT_PROVER_METRICS.job_wait_time.observe(time.elapsed()); + + self.prove(artifact, cancellation_token.clone()) + .await + .context("failed to prove circuit proof")?; + } + tracing::info!("Circuit Prover shut down."); + Ok(()) + } + + /// Proves a job, with persistence of execution. + async fn prove( + &self, + artifact: WitnessVectorArtifactsTemp, + cancellation_token: CancellationToken, + ) -> anyhow::Result<()> { + let time = Instant::now(); + let block_number = artifact.prover_job.block_number; + let job_id = artifact.prover_job.job_id; + let job_start_time = artifact.time; + let setup_data_key = artifact.prover_job.setup_data_key.crypto_setup_key(); + let setup_data = self + .setup_data_cache + .get(&setup_data_key) + .context(format!( + "failed to get setup data for key {setup_data_key:?}" + ))? + .clone(); + let task = tokio::task::spawn_blocking(move || { + let _span = tracing::info_span!("prove_circuit_proof", %block_number).entered(); + Self::prove_circuit_proof(artifact, setup_data).context("failed to prove circuit") + }); + + self.finish_task( + job_id, + time, + job_start_time, + task, + cancellation_token.clone(), + ) + .await?; + tracing::info!( + "Circuit Prover finished job {:?} in: {:?}", + job_id, + time.elapsed() + ); + CIRCUIT_PROVER_METRICS + .job_finished_time + .observe(time.elapsed()); + CIRCUIT_PROVER_METRICS + .full_proving_time + .observe(job_start_time.elapsed()); + Ok(()) + } + + /// Proves a job using crypto primitives (proof generation & proof verification).
+ #[tracing::instrument( + name = "Prover::prove_circuit_proof", + skip_all, + fields(l1_batch = % witness_vector_artifacts.prover_job.block_number) + )] + pub fn prove_circuit_proof( + witness_vector_artifacts: WitnessVectorArtifactsTemp, + setup_data: Arc<GoldilocksGpuProverSetupData>, + ) -> anyhow::Result<ProverArtifacts> { + let time = Instant::now(); + let WitnessVectorArtifactsTemp { + witness_vector, + prover_job, + .. + } = witness_vector_artifacts; + + let job_id = prover_job.job_id; + let circuit_wrapper = prover_job.circuit_wrapper; + let block_number = prover_job.block_number; + + let (proof, circuit_id) = + Self::generate_proof(&circuit_wrapper, witness_vector, &setup_data) + .context(format!("failed to generate proof for job id {job_id}"))?; + + Self::verify_proof(&circuit_wrapper, &proof, &setup_data.vk).context(format!( + "failed to verify proof with job_id {job_id}, circuit_id: {circuit_id}" + ))?; + + let proof_wrapper = match &circuit_wrapper { + CircuitWrapper::Base(_) => { + FriProofWrapper::Base(ZkSyncBaseLayerProof::from_inner(circuit_id, proof)) + } + CircuitWrapper::Recursive(_) => { + FriProofWrapper::Recursive(ZkSyncRecursionLayerProof::from_inner(circuit_id, proof)) + } + CircuitWrapper::BasePartial(_) => { + return Self::partial_proof_error(); + } + }; + CIRCUIT_PROVER_METRICS + .crypto_primitives_time + .observe(time.elapsed()); + Ok(ProverArtifacts::new(block_number, proof_wrapper)) + } + + /// Generates a proof from crypto primitives. + fn generate_proof( + circuit_wrapper: &CircuitWrapper, + witness_vector: WitnessVec<GoldilocksField>, + setup_data: &Arc<GoldilocksGpuProverSetupData>, + ) -> anyhow::Result<(Proof, u8)> { + let time = Instant::now(); + + let worker = Worker::new(); + + let (gpu_proof_config, proof_config, circuit_id) = match circuit_wrapper { + CircuitWrapper::Base(circuit) => ( + GpuProofConfig::from_base_layer_circuit(circuit), + base_layer_proof_config(), + circuit.numeric_circuit_type(), + ), + CircuitWrapper::Recursive(circuit) => ( + GpuProofConfig::from_recursive_layer_circuit(circuit), + recursion_layer_proof_config(), + circuit.numeric_circuit_type(), + ), + CircuitWrapper::BasePartial(_) => { + return Self::partial_proof_error(); + } + }; + + let proof = + gpu_prove_from_external_witness_data::<DefaultTranscript, DefaultTreeHasher, NoPow, _>( + &gpu_proof_config, + &witness_vector, + proof_config, + &setup_data.setup, + &setup_data.vk, + (), + &worker, + ) + .context("crypto primitive: failed to generate proof")?; + CIRCUIT_PROVER_METRICS + .generate_proof_time + .observe(time.elapsed()); + Ok((proof.into(), circuit_id)) + } + + /// Verifies a proof from crypto primitives + fn verify_proof( + circuit_wrapper: &CircuitWrapper, + proof: &Proof, + verification_key: &VerificationKey, + ) -> anyhow::Result<()> { + let time = Instant::now(); + + let is_valid = match circuit_wrapper { + CircuitWrapper::Base(base_circuit) => { + verify_base_layer_proof::<NoPow>(base_circuit, proof, verification_key) + } + CircuitWrapper::Recursive(recursive_circuit) => { + verify_recursion_layer_proof::<NoPow>(recursive_circuit, proof, verification_key) + } + CircuitWrapper::BasePartial(_) => { + return Self::partial_proof_error(); + } + }; + + CIRCUIT_PROVER_METRICS + .verify_proof_time + .observe(time.elapsed()); + + if !is_valid { + return Err(anyhow::anyhow!("crypto primitive: failed to verify proof")); + } + Ok(()) + } + + /// This code path should never trigger. All proofs are hydrated during Witness Vector Generator. + /// If this triggers, it means that proof hydration in Witness Vector Generator was not done -- logic bug.
+ fn partial_proof_error<T>() -> anyhow::Result<T> { + Err(anyhow::anyhow!("received unexpected dehydrated proof")) + } + + /// Runs task to completion and persists result. + /// NOTE: Task may be cancelled mid-flight. + async fn finish_task( + &self, + job_id: u32, + time: Instant, + job_start_time: Instant, + task: JoinHandle<anyhow::Result<ProverArtifacts>>, + cancellation_token: CancellationToken, + ) -> anyhow::Result<()> { + tokio::select! { + _ = cancellation_token.cancelled() => { + tracing::info!("Stop signal received, shutting down Circuit Prover..."); + return Ok(()) + } + result = task => { + let error_message = match result { + Ok(Ok(prover_artifact)) => { + tracing::info!("Circuit Prover executed job {:?} in: {:?}", job_id, time.elapsed()); + CIRCUIT_PROVER_METRICS.execution_time.observe(time.elapsed()); + self + .save_result(job_id, job_start_time, prover_artifact) + .await.context("failed to save result")?; + return Ok(()) + } + Ok(Err(error)) => error.to_string(), + Err(error) => try_extract_panic_message(error), + }; + tracing::error!( + "Circuit Prover failed on job {:?} with error {:?}", + job_id, + error_message + ); + + self.save_failure(job_id, error_message).await.context("failed to save failure")?; + } + } + + Ok(()) + } + + /// Persists the generated proof. + /// Job metadata is saved to database, whilst artifacts go to object store. + async fn save_result( + &self, + job_id: u32, + job_start_time: Instant, + artifacts: ProverArtifacts, + ) -> anyhow::Result<()> { + let time = Instant::now(); + let mut connection = self + .connection_pool + .connection() + .await + .context("failed to get db connection")?; + let proof = artifacts.proof_wrapper; + + let (_circuit_type, is_scheduler_proof) = match &proof { + FriProofWrapper::Base(base) => (base.numeric_circuit_type(), false), + FriProofWrapper::Recursive(recursive_circuit) => match recursive_circuit { + ZkSyncRecursionLayerProof::SchedulerCircuit(_) => { + (recursive_circuit.numeric_circuit_type(), true) + } + _ => (recursive_circuit.numeric_circuit_type(), false), + }, + }; + + let upload_time = Instant::now(); + let blob_url = self + .object_store + .put(job_id, &proof) + .await + .context("failed to upload to object store")?; + CIRCUIT_PROVER_METRICS + .artifact_upload_time + .observe(upload_time.elapsed()); + + let mut transaction = connection + .start_transaction() + .await + .context("failed to start db transaction")?; + transaction + .fri_prover_jobs_dal() + .save_proof(job_id, job_start_time.elapsed(), &blob_url) + .await; + if is_scheduler_proof { + transaction + .fri_proof_compressor_dal() + .insert_proof_compression_job( + artifacts.block_number, + &blob_url, + self.protocol_version, + ) + .await; + } + transaction + .commit() + .await + .context("failed to commit db transaction")?; + + tracing::info!( + "Circuit Prover saved job {:?} after {:?}", + job_id, + time.elapsed() + ); + CIRCUIT_PROVER_METRICS.save_time.observe(time.elapsed()); + + Ok(()) + } + + /// Persists job execution error to database. + async fn save_failure(&self, job_id: u32, error: String) -> anyhow::Result<()> { + self.connection_pool + .connection() + .await + .context("failed to get db connection")?
+ .fri_prover_jobs_dal() + .save_proof_error(job_id, error) + .await; + Ok(()) + } +} diff --git a/prover/crates/bin/circuit_prover/src/lib.rs b/prover/crates/bin/circuit_prover/src/lib.rs new file mode 100644 index 00000000000..7d7ce1d9668 --- /dev/null +++ b/prover/crates/bin/circuit_prover/src/lib.rs @@ -0,0 +1,13 @@ +#![allow(incomplete_features)] // We have to use generic const exprs. +#![feature(generic_const_exprs)] +pub use backoff::Backoff; +pub use circuit_prover::CircuitProver; +pub use metrics::PROVER_BINARY_METRICS; +pub use types::{FinalizationHintsCache, SetupDataCache}; +pub use witness_vector_generator::WitnessVectorGenerator; + +mod backoff; +mod circuit_prover; +mod metrics; +mod types; +mod witness_vector_generator; diff --git a/prover/crates/bin/circuit_prover/src/main.rs b/prover/crates/bin/circuit_prover/src/main.rs new file mode 100644 index 00000000000..e26f29ca995 --- /dev/null +++ b/prover/crates/bin/circuit_prover/src/main.rs @@ -0,0 +1,201 @@ +use std::{ + path::PathBuf, + sync::Arc, + time::{Duration, Instant}, +}; + +use anyhow::Context as _; +use clap::Parser; +use tokio_util::sync::CancellationToken; +use zksync_circuit_prover::{ + Backoff, CircuitProver, FinalizationHintsCache, SetupDataCache, WitnessVectorGenerator, + PROVER_BINARY_METRICS, +}; +use zksync_config::{ + configs::{FriProverConfig, ObservabilityConfig}, + ObjectStoreConfig, +}; +use zksync_core_leftovers::temp_config_store::{load_database_secrets, load_general_config}; +use zksync_object_store::{ObjectStore, ObjectStoreFactory}; +use zksync_prover_dal::{ConnectionPool, Prover}; +use zksync_prover_fri_types::PROVER_PROTOCOL_SEMANTIC_VERSION; +use zksync_prover_keystore::keystore::Keystore; +use zksync_utils::wait_for_tasks::ManagedTasks; + +#[derive(Debug, Parser)] +#[command(author = "Matter Labs", version)] +struct Cli { + #[arg(long)] + pub(crate) config_path: Option<PathBuf>, + #[arg(long)] + pub(crate) secrets_path: Option<PathBuf>, + /// Number of WVG jobs to run in parallel. + /// Default value is 1. + #[arg(long, default_value_t = 1)] + pub(crate) witness_vector_generator_count: usize, + /// Max VRAM to allocate. Useful if you want to limit the size of VRAM used. + /// None corresponds to allocating all available VRAM.
+ #[arg(long)] + pub(crate) max_allocation: Option<usize>, +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let time = Instant::now(); + let opt = Cli::parse(); + + let (observability_config, prover_config, object_store_config) = load_configs(opt.config_path)?; + + let _observability_guard = observability_config + .install() + .context("failed to install observability")?; + + let wvg_count = opt.witness_vector_generator_count as u32; + + let (connection_pool, object_store, setup_data_cache, hints) = load_resources( + opt.secrets_path, + object_store_config, + prover_config.setup_data_path.into(), + wvg_count, + ) + .await + .context("failed to load configs")?; + + PROVER_BINARY_METRICS.start_up.observe(time.elapsed()); + + let cancellation_token = CancellationToken::new(); + let backoff = Backoff::new(Duration::from_secs(5), Duration::from_secs(30)); + + let mut tasks = vec![]; + + let (sender, receiver) = tokio::sync::mpsc::channel(5); + + tracing::info!("Starting {wvg_count} Witness Vector Generators."); + + for _ in 0..wvg_count { + let wvg = WitnessVectorGenerator::new( + object_store.clone(), + connection_pool.clone(), + PROVER_PROTOCOL_SEMANTIC_VERSION, + sender.clone(), + hints.clone(), + ); + tasks.push(tokio::spawn( + wvg.run(cancellation_token.clone(), backoff.clone()), + )); + } + + // NOTE: Prover Context is the way VRAM is allocated. If it is dropped, the claim on VRAM allocation is dropped as well. + // It has to be kept until prover dies. Whilst it may be kept in prover struct, during cancellation, prover can `drop`, but the thread doing the processing can still be alive. + // This setup prevents segmentation faults and other nasty behavior during shutdown. + let (prover, _prover_context) = CircuitProver::new( + connection_pool, + object_store, + PROVER_PROTOCOL_SEMANTIC_VERSION, + receiver, + opt.max_allocation, + setup_data_cache, + ) + .context("failed to create circuit prover")?; + tasks.push(tokio::spawn(prover.run(cancellation_token.clone()))); + + let mut tasks = ManagedTasks::new(tasks); + tokio::select! { + _ = tasks.wait_single() => {}, + result = tokio::signal::ctrl_c() => { + match result { + Ok(_) => { + tracing::info!("Stop signal received, shutting down..."); + cancellation_token.cancel(); + }, + Err(_err) => { + tracing::error!("failed to set up ctrl c listener"); + } + } + } + } + PROVER_BINARY_METRICS.run_time.observe(time.elapsed()); + tasks.complete(Duration::from_secs(5)).await; + + Ok(()) +} + +/// Loads configs necessary for proving. +/// - observability config - for observability setup +/// - prover config - necessary for setup data +/// - object store config - for retrieving artifacts for WVG & CP +fn load_configs( + config_path: Option<PathBuf>, +) -> anyhow::Result<(ObservabilityConfig, FriProverConfig, ObjectStoreConfig)> { + tracing::info!("loading configs..."); + let general_config = + load_general_config(config_path).context("failed loading general config")?; + let observability_config = general_config + .observability + .context("failed loading observability config")?; + let prover_config = general_config + .prover_config + .context("failed loading prover config")?; + let object_store_config = prover_config + .prover_object_store + .clone() + .context("failed loading prover object store config")?; + tracing::info!("Loaded configs."); + Ok((observability_config, prover_config, object_store_config)) +}
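A hedged sketch (not part of the diff) of how the clap-derived Cli above parses; clap derive turns `witness_vector_generator_count` into `--witness-vector-generator-count`, and the values here are arbitrary:

#[cfg(test)]
mod cli_sketch {
    use super::Cli;
    use clap::Parser;

    #[test]
    fn parses_kebab_case_flags() {
        // First element is the (hypothetical) binary name; 17179869184 bytes = 16 GiB.
        let cli = Cli::parse_from([
            "zksync_circuit_prover",
            "--witness-vector-generator-count",
            "3",
            "--max-allocation",
            "17179869184",
        ]);
        assert_eq!(cli.witness_vector_generator_count, 3);
        assert_eq!(cli.max_allocation, Some(17_179_869_184));
    }
}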
+ +/// Loads resources necessary for proving. +/// - connection pool - necessary to pick & store jobs from database +/// - object store - necessary for loading and storing artifacts to object store +/// - setup data - necessary for circuit proving +/// - finalization hints - necessary for generating witness vectors +async fn load_resources( + secrets_path: Option<PathBuf>, + object_store_config: ObjectStoreConfig, + setup_data_path: PathBuf, + wvg_count: u32, +) -> anyhow::Result<( + ConnectionPool<Prover>, + Arc<dyn ObjectStore>, + SetupDataCache, + FinalizationHintsCache, +)> { + let database_secrets = + load_database_secrets(secrets_path).context("failed to load database secrets")?; + let database_url = database_secrets + .prover_url + .context("no prover DB URL present")?; + + // 1 connection for the prover and one for each vector generator + let max_connections = 1 + wvg_count; + let connection_pool = ConnectionPool::<Prover>::builder(database_url, max_connections) + .build() + .await + .context("failed to build connection pool")?; + + let object_store = ObjectStoreFactory::new(object_store_config) + .create_store() + .await + .context("failed to create object store")?; + + tracing::info!("Loading mappings from disk..."); + + let keystore = Keystore::locate().with_setup_path(Some(setup_data_path)); + let setup_data_cache = keystore + .load_all_setup_key_mapping() + .await + .context("failed to load setup key mapping")?; + let finalization_hints = keystore + .load_all_finalization_hints_mapping() + .await + .context("failed to load finalization hints mapping")?; + + tracing::info!("Loaded mappings from disk."); + + Ok(( + connection_pool, + object_store, + setup_data_cache, + finalization_hints, + )) +} diff --git a/prover/crates/bin/circuit_prover/src/metrics.rs b/prover/crates/bin/circuit_prover/src/metrics.rs new file mode 100644 index 00000000000..e9f44591479 --- /dev/null +++ b/prover/crates/bin/circuit_prover/src/metrics.rs @@ -0,0 +1,80 @@ +use std::time::Duration; + +use vise::{Buckets, Histogram, Metrics}; + +#[derive(Debug, Metrics)] +#[metrics(prefix = "prover_binary")] +pub struct ProverBinaryMetrics { + /// How long does it take for the prover to load data before it can produce proofs? + #[metrics(buckets = Buckets::LATENCIES)] + pub start_up: Histogram<Duration>, + /// How long has the prover been running? + #[metrics(buckets = Buckets::LATENCIES)] + pub run_time: Histogram<Duration>, +} + +#[vise::register] +pub static PROVER_BINARY_METRICS: vise::Global<ProverBinaryMetrics> = vise::Global::new(); + +#[derive(Debug, Metrics)] +#[metrics(prefix = "witness_vector_generator")] +pub struct WitnessVectorGeneratorMetrics { + /// How long does the witness vector generator wait before a job is available? + #[metrics(buckets = Buckets::LATENCIES)] + pub job_wait_time: Histogram<Duration>, + /// How long does it take to load object store artifacts for a witness vector job? + #[metrics(buckets = Buckets::LATENCIES)] + pub artifact_download_time: Histogram<Duration>, + /// How long does the crypto witness generation primitive take? + #[metrics(buckets = Buckets::LATENCIES)] + pub crypto_primitive_time: Histogram<Duration>, + /// How long does it take for a job to be executed, from the moment it's loaded? + #[metrics(buckets = Buckets::LATENCIES)] + pub execution_time: Histogram<Duration>, + /// How long does it take to send a job to prover? + /// This is relevant because prover queue can apply back-pressure. + #[metrics(buckets = Buckets::LATENCIES)] + pub send_time: Histogram<Duration>, + /// How long does it take for a job to be considered finished, from the moment it's been loaded?
+ #[metrics(buckets = Buckets::LATENCIES)] + pub job_finished_time: Histogram<Duration>, +} + +#[vise::register] +pub static WITNESS_VECTOR_GENERATOR_METRICS: vise::Global<WitnessVectorGeneratorMetrics> = + vise::Global::new(); + +#[derive(Debug, Metrics)] +#[metrics(prefix = "circuit_prover")] +pub struct CircuitProverMetrics { + /// How long does the circuit prover wait before a job is available? + #[metrics(buckets = Buckets::LATENCIES)] + pub job_wait_time: Histogram<Duration>, + /// How long do the crypto primitives (proof generation & verification) take? + #[metrics(buckets = Buckets::LATENCIES)] + pub crypto_primitives_time: Histogram<Duration>, + /// How long does proof generation (crypto primitive) take? + #[metrics(buckets = Buckets::LATENCIES)] + pub generate_proof_time: Histogram<Duration>, + /// How long does proof verification (crypto primitive) take? + #[metrics(buckets = Buckets::LATENCIES)] + pub verify_proof_time: Histogram<Duration>, + /// How long does it take for a job to be executed, from the moment it's loaded? + #[metrics(buckets = Buckets::LATENCIES)] + pub execution_time: Histogram<Duration>, + /// How long does it take to upload proof to object store? + #[metrics(buckets = Buckets::LATENCIES)] + pub artifact_upload_time: Histogram<Duration>, + /// How long does it take to save a job? + #[metrics(buckets = Buckets::LATENCIES)] + pub save_time: Histogram<Duration>, + /// How long does it take for a job to be considered finished, from the moment it's been loaded? + #[metrics(buckets = Buckets::LATENCIES)] + pub job_finished_time: Histogram<Duration>, + /// How long does it take a job to go from witness generation to having the proof saved? + #[metrics(buckets = Buckets::LATENCIES)] + pub full_proving_time: Histogram<Duration>, +} + +#[vise::register] +pub static CIRCUIT_PROVER_METRICS: vise::Global<CircuitProverMetrics> = vise::Global::new(); diff --git a/prover/crates/bin/circuit_prover/src/types.rs b/prover/crates/bin/circuit_prover/src/types.rs new file mode 100644 index 00000000000..52cdd48b6b5 --- /dev/null +++ b/prover/crates/bin/circuit_prover/src/types.rs @@ -0,0 +1,31 @@ +use std::{collections::HashMap, sync::Arc}; + +use zksync_prover_fri_types::{ + circuit_definitions::boojum::{ + algebraic_props::{ + round_function::AbsorptionModeOverwrite, sponge::GoldilocksPoseidon2Sponge, + }, + cs::implementations::{ + proof::Proof as CryptoProof, setup::FinalizationHintsForProver, + transcript::GoldilocksPoisedon2Transcript, + verifier::VerificationKey as CryptoVerificationKey, + }, + field::goldilocks::{GoldilocksExt2, GoldilocksField}, + }, + ProverServiceDataKey, +}; +use zksync_prover_keystore::GoldilocksGpuProverSetupData; + +// prover types +pub type DefaultTranscript = GoldilocksPoisedon2Transcript; +pub type DefaultTreeHasher = GoldilocksPoseidon2Sponge<GoldilocksField, AbsorptionModeOverwrite>; + +type F = GoldilocksField; +type H = GoldilocksPoseidon2Sponge<GoldilocksField, AbsorptionModeOverwrite>; +type Ext = GoldilocksExt2; +pub type Proof = CryptoProof<F, H, Ext>; +pub type VerificationKey = CryptoVerificationKey<F, H>; + +// cache types +pub type SetupDataCache = HashMap<ProverServiceDataKey, Arc<GoldilocksGpuProverSetupData>>; +pub type FinalizationHintsCache = HashMap<ProverServiceDataKey, Arc<FinalizationHintsForProver>>;
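An illustrative note on the cache aliases above (hypothetical helper, not part of the diff): both caches are keyed by ProverServiceDataKey, so the same key addresses a job's setup data and its finalization hints; a sketch assuming the aliases and imports from types.rs are in scope:

use std::sync::Arc;

fn lookup(
    setup_data: &SetupDataCache,
    hints: &FinalizationHintsCache,
    key: &ProverServiceDataKey,
) -> Option<(Arc<GoldilocksGpuProverSetupData>, Arc<FinalizationHintsForProver>)> {
    // Arc clones are cheap; the underlying setup data stays shared across jobs.
    Some((setup_data.get(key)?.clone(), hints.get(key)?.clone()))
}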
diff --git a/prover/crates/bin/circuit_prover/src/witness_vector_generator.rs b/prover/crates/bin/circuit_prover/src/witness_vector_generator.rs new file mode 100644 index 00000000000..cb2d2a256df --- /dev/null +++ b/prover/crates/bin/circuit_prover/src/witness_vector_generator.rs @@ -0,0 +1,345 @@ +use std::{collections::HashMap, sync::Arc, time::Instant}; + +use anyhow::Context; +use tokio::{sync::mpsc::Sender, task::JoinHandle}; +use tokio_util::sync::CancellationToken; +use zksync_object_store::ObjectStore; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; +use zksync_prover_fri_types::{ + circuit_definitions::{ + boojum::{ + cs::implementations::setup::FinalizationHintsForProver, + field::goldilocks::GoldilocksField, + gadgets::queue::full_state_queue::FullStateCircuitQueueRawWitness, + }, + circuit_definitions::base_layer::ZkSyncBaseLayerCircuit, + }, + get_current_pod_name, + keys::RamPermutationQueueWitnessKey, + CircuitAuxData, CircuitWrapper, ProverJob, ProverServiceDataKey, RamPermutationQueueWitness, + WitnessVectorArtifactsTemp, +}; +use zksync_types::{protocol_version::ProtocolSemanticVersion, L1BatchNumber}; +use zksync_utils::panic_extractor::try_extract_panic_message; + +use crate::{metrics::WITNESS_VECTOR_GENERATOR_METRICS, Backoff, FinalizationHintsCache}; + +/// In charge of generating Witness Vectors and sending them to Circuit Prover. +/// Both job runner & job executor. +#[derive(Debug)] +pub struct WitnessVectorGenerator { + object_store: Arc<dyn ObjectStore>, + connection_pool: ConnectionPool<Prover>, + protocol_version: ProtocolSemanticVersion, + /// Finalization Hints used for Witness Vector generation + finalization_hints_cache: FinalizationHintsCache, + /// Witness Vector sender for Circuit Prover + sender: Sender<WitnessVectorArtifactsTemp>, + pod_name: String, +} + +impl WitnessVectorGenerator { + pub fn new( + object_store: Arc<dyn ObjectStore>, + connection_pool: ConnectionPool<Prover>, + protocol_version: ProtocolSemanticVersion, + sender: Sender<WitnessVectorArtifactsTemp>, + finalization_hints: HashMap<ProverServiceDataKey, Arc<FinalizationHintsForProver>>, + ) -> Self { + Self { + object_store, + connection_pool, + protocol_version, + finalization_hints_cache: finalization_hints, + sender, + pod_name: get_current_pod_name(), + } + } + + /// Continuously polls database for new prover jobs and generates witness vectors for them. + /// All job executions are persisted. + pub async fn run( + self, + cancellation_token: CancellationToken, + mut backoff: Backoff, + ) -> anyhow::Result<()> { + let mut get_job_timer = Instant::now(); + while !cancellation_token.is_cancelled() { + if let Some(prover_job) = self + .get_job() + .await + .context("failed to get next witness generation job")? + { + tracing::info!( + "Witness Vector Generator received job {:?} after: {:?}", + prover_job.job_id, + get_job_timer.elapsed() + ); + WITNESS_VECTOR_GENERATOR_METRICS + .job_wait_time + .observe(get_job_timer.elapsed()); + if let e @ Err(_) = self.generate(prover_job, cancellation_token.clone()).await { + // this means that the witness vector receiver is closed, no need to report the error, just return + if cancellation_token.is_cancelled() { + return Ok(()); + } + e.context("failed to generate witness")? + } + + // the waiting-for-a-job timer starts as soon as the previous job is finished + get_job_timer = Instant::now(); + backoff.reset(); + continue; + }; + self.backoff(&mut backoff, cancellation_token.clone()).await; + } + tracing::info!("Witness Vector Generator shut down."); + Ok(()) + } + + /// Retrieves a prover job from database, loads artifacts from object store and hydrates them.
+ async fn get_job(&self) -> anyhow::Result<Option<ProverJob>> { + let mut connection = self + .connection_pool + .connection() + .await + .context("failed to get db connection")?; + let prover_job_metadata = match connection + .fri_prover_jobs_dal() + .get_job(self.protocol_version, &self.pod_name) + .await + { + None => return Ok(None), + Some(job) => job, + }; + + let time = Instant::now(); + let circuit_wrapper = self + .object_store + .get(prover_job_metadata.into()) + .await + .context("failed to get circuit_wrapper from object store")?; + let artifact = match circuit_wrapper { + a @ CircuitWrapper::Base(_) => a, + a @ CircuitWrapper::Recursive(_) => a, + CircuitWrapper::BasePartial((circuit, aux_data)) => self + .fill_witness(circuit, aux_data, prover_job_metadata.block_number) + .await + .context("failed to fill witness")?, + }; + WITNESS_VECTOR_GENERATOR_METRICS + .artifact_download_time + .observe(time.elapsed()); + + let setup_data_key = ProverServiceDataKey { + circuit_id: prover_job_metadata.circuit_id, + round: prover_job_metadata.aggregation_round, + } + .crypto_setup_key(); + let prover_job = ProverJob::new( + prover_job_metadata.block_number, + prover_job_metadata.id, + artifact, + setup_data_key, + ); + Ok(Some(prover_job)) + } + + /// Prover artifact hydration. + async fn fill_witness( + &self, + circuit: ZkSyncBaseLayerCircuit, + aux_data: CircuitAuxData, + l1_batch_number: L1BatchNumber, + ) -> anyhow::Result<CircuitWrapper> { + if let ZkSyncBaseLayerCircuit::RAMPermutation(circuit_instance) = circuit { + let sorted_witness_key = RamPermutationQueueWitnessKey { + block_number: l1_batch_number, + circuit_subsequence_number: aux_data.circuit_subsequence_number as usize, + is_sorted: true, + }; + let sorted_witness: RamPermutationQueueWitness = self + .object_store + .get(sorted_witness_key) + .await + .context("failed to load sorted witness key")?; + + let unsorted_witness_key = RamPermutationQueueWitnessKey { + block_number: l1_batch_number, + circuit_subsequence_number: aux_data.circuit_subsequence_number as usize, + is_sorted: false, + }; + let unsorted_witness: RamPermutationQueueWitness = self + .object_store + .get(unsorted_witness_key) + .await + .context("failed to load unsorted witness key")?; + + let mut witness = circuit_instance.witness.take().unwrap(); + witness.unsorted_queue_witness = FullStateCircuitQueueRawWitness { + elements: unsorted_witness.witness.into(), + }; + witness.sorted_queue_witness = FullStateCircuitQueueRawWitness { + elements: sorted_witness.witness.into(), + }; + circuit_instance.witness.store(Some(witness)); + + return Ok(CircuitWrapper::Base( + ZkSyncBaseLayerCircuit::RAMPermutation(circuit_instance), + )); + } + Err(anyhow::anyhow!( + "unexpected circuit received with partial witness, expected RAM permutation, got {:?}", + circuit.short_description() + )) + } + + /// Generates witness vector, with persistence of execution. + async fn generate( + &self, + prover_job: ProverJob, + cancellation_token: CancellationToken, + ) -> anyhow::Result<()> { + let start_time = Instant::now(); + let finalization_hints = self + .finalization_hints_cache + .get(&prover_job.setup_data_key) + .context(format!( + "failed to get finalization hints for key {:?}", + &prover_job.setup_data_key + ))?
+ .clone(); + let job_id = prover_job.job_id; + let task = tokio::task::spawn_blocking(move || { + let block_number = prover_job.block_number; + let _span = tracing::info_span!("witness_vector_generator", %block_number).entered(); + Self::generate_witness_vector(prover_job, finalization_hints) + }); + + self.finish_task(job_id, start_time, task, cancellation_token.clone()) + .await?; + + tracing::info!( + "Witness Vector Generator finished job {:?} in: {:?}", + job_id, + start_time.elapsed() + ); + WITNESS_VECTOR_GENERATOR_METRICS + .job_finished_time + .observe(start_time.elapsed()); + Ok(()) + } + + /// Generates witness vector using crypto primitives. + #[tracing::instrument( + skip_all, + fields(l1_batch = % prover_job.block_number) + )] + pub fn generate_witness_vector( + prover_job: ProverJob, + finalization_hints: Arc<FinalizationHintsForProver>, + ) -> anyhow::Result<WitnessVectorArtifactsTemp> { + let time = Instant::now(); + let cs = match prover_job.circuit_wrapper.clone() { + CircuitWrapper::Base(base_circuit) => { + base_circuit.synthesis::<GoldilocksField>(&finalization_hints) + } + CircuitWrapper::Recursive(recursive_circuit) => { + recursive_circuit.synthesis::<GoldilocksField>(&finalization_hints) + } + // circuit must be hydrated during `get_job` + CircuitWrapper::BasePartial(_) => { + return Err(anyhow::anyhow!("received unexpected dehydrated proof")); + } + }; + WITNESS_VECTOR_GENERATOR_METRICS + .crypto_primitive_time + .observe(time.elapsed()); + Ok(WitnessVectorArtifactsTemp::new( + cs.witness.unwrap(), + prover_job, + time, + )) + } + + /// Runs task to completion and persists result. + /// NOTE: Task may be cancelled mid-flight. + async fn finish_task( + &self, + job_id: u32, + time: Instant, + task: JoinHandle<anyhow::Result<WitnessVectorArtifactsTemp>>, + cancellation_token: CancellationToken, + ) -> anyhow::Result<()> { + tokio::select! { + _ = cancellation_token.cancelled() => { + tracing::info!("Stop signal received, shutting down Witness Vector Generator..."); + return Ok(()) + } + result = task => { + let error_message = match result { + Ok(Ok(witness_vector)) => { + tracing::info!("Witness Vector Generator executed job {:?} in: {:?}", job_id, time.elapsed()); + WITNESS_VECTOR_GENERATOR_METRICS.execution_time.observe(time.elapsed()); + self + .save_result(witness_vector, job_id) + .await + .context("failed to save result")?; + return Ok(()) + } + Ok(Err(error)) => error.to_string(), + Err(error) => try_extract_panic_message(error), + }; + tracing::error!("Witness Vector Generator failed on job {job_id:?} with error {error_message:?}"); + + self.save_failure(job_id, error_message).await.context("failed to save failure")?; + } + } + + Ok(()) + } + + /// Sends the witness vector to Circuit Prover. + async fn save_result( + &self, + artifacts: WitnessVectorArtifactsTemp, + job_id: u32, + ) -> anyhow::Result<()> { + let time = Instant::now(); + self.sender + .send(artifacts) + .await + .context("failed to send witness vector to prover")?; + tracing::info!( + "Witness Vector Generator sent job {:?} after {:?}", + job_id, + time.elapsed() + ); + WITNESS_VECTOR_GENERATOR_METRICS + .send_time + .observe(time.elapsed()); + Ok(()) + } + + /// Persists job execution error to database + async fn save_failure(&self, job_id: u32, error: String) -> anyhow::Result<()> { + self.connection_pool + .connection() + .await + .context("failed to get db connection")? + .fri_prover_jobs_dal() + .save_proof_error(job_id, error) + .await; + Ok(()) + } + + /// Backs off, whilst being cancellation aware.
+ async fn backoff(&self, backoff: &mut Backoff, cancellation_token: CancellationToken) { + let backoff_duration = backoff.delay(); + tracing::info!("Backing off for {:?}...", backoff_duration); + // Error here corresponds to a timeout w/o receiving task cancel; we're OK with this. + tokio::time::timeout(backoff_duration, cancellation_token.cancelled()) + .await + .ok(); + } +} diff --git a/prover/crates/bin/prover_cli/src/cli.rs b/prover/crates/bin/prover_cli/src/cli.rs index 41ef9498005..de5d2d2525b 100644 --- a/prover/crates/bin/prover_cli/src/cli.rs +++ b/prover/crates/bin/prover_cli/src/cli.rs @@ -44,6 +44,8 @@ pub struct ProverCLIConfig { env("PLI__DB_URL") )] pub db_url: SensitiveUrl, + #[clap(default_value = "10")] + pub max_failure_attempts: u32, } #[derive(Subcommand)] diff --git a/prover/crates/bin/prover_cli/src/commands/status/batch.rs b/prover/crates/bin/prover_cli/src/commands/status/batch.rs index 797695b0227..dc63f6bf837 100644 --- a/prover/crates/bin/prover_cli/src/commands/status/batch.rs +++ b/prover/crates/bin/prover_cli/src/commands/status/batch.rs @@ -4,8 +4,6 @@ use anyhow::Context as _; use circuit_definitions::zkevm_circuits::scheduler::aux::BaseLayerCircuitType; use clap::Args as ClapArgs; use colored::*; -use zksync_config::configs::FriProverConfig; -use zksync_env_config::FromEnv; use zksync_prover_dal::{Connection, ConnectionPool, Prover, ProverDal}; use zksync_types::{ basic_fri_types::AggregationRound, @@ -57,9 +55,9 @@ pub(crate) async fn run(args: Args, config: ProverCLIConfig) -> anyhow::Result<( } if !args.verbose { - display_batch_status(batch_data); + display_batch_status(batch_data, config.max_failure_attempts); } else { - display_batch_info(batch_data); + display_batch_info(batch_data, config.max_failure_attempts); } } @@ -200,19 +198,19 @@ async fn get_proof_compression_job_info_for_batch<'a>( .await } -fn display_batch_status(batch_data: BatchData) { - display_status_for_stage(batch_data.basic_witness_generator); - display_status_for_stage(batch_data.leaf_witness_generator); - display_status_for_stage(batch_data.node_witness_generator); - display_status_for_stage(batch_data.recursion_tip_witness_generator); - display_status_for_stage(batch_data.scheduler_witness_generator); - display_status_for_stage(batch_data.compressor); +fn display_batch_status(batch_data: BatchData, max_failure_attempts: u32) { + display_status_for_stage(batch_data.basic_witness_generator, max_failure_attempts); + display_status_for_stage(batch_data.leaf_witness_generator, max_failure_attempts); + display_status_for_stage(batch_data.node_witness_generator, max_failure_attempts); + display_status_for_stage( + batch_data.recursion_tip_witness_generator, + max_failure_attempts, + ); + display_status_for_stage(batch_data.scheduler_witness_generator, max_failure_attempts); + display_status_for_stage(batch_data.compressor, max_failure_attempts); } -fn display_status_for_stage(stage_info: StageInfo) { - let max_attempts = FriProverConfig::from_env() - .expect("Fail to read prover config.") - .max_attempts; +fn display_status_for_stage(stage_info: StageInfo, max_attempts: u32) { display_aggregation_round(&stage_info); let status = stage_info.witness_generator_jobs_status(max_attempts); match status { @@ -231,19 +229,19 @@ fn display_status_for_stage(stage_info: StageInfo) { } } -fn display_batch_info(batch_data: BatchData) { - display_info_for_stage(batch_data.basic_witness_generator); - display_info_for_stage(batch_data.leaf_witness_generator); - 
display_info_for_stage(batch_data.node_witness_generator); - display_info_for_stage(batch_data.recursion_tip_witness_generator); - display_info_for_stage(batch_data.scheduler_witness_generator); - display_info_for_stage(batch_data.compressor); +fn display_batch_info(batch_data: BatchData, max_failure_attempts: u32) { + display_info_for_stage(batch_data.basic_witness_generator, max_failure_attempts); + display_info_for_stage(batch_data.leaf_witness_generator, max_failure_attempts); + display_info_for_stage(batch_data.node_witness_generator, max_failure_attempts); + display_info_for_stage( + batch_data.recursion_tip_witness_generator, + max_failure_attempts, + ); + display_info_for_stage(batch_data.scheduler_witness_generator, max_failure_attempts); + display_info_for_stage(batch_data.compressor, max_failure_attempts); } -fn display_info_for_stage(stage_info: StageInfo) { - let max_attempts = FriProverConfig::from_env() - .expect("Fail to read prover config.") - .max_attempts; +fn display_info_for_stage(stage_info: StageInfo, max_attempts: u32) { display_aggregation_round(&stage_info); let status = stage_info.witness_generator_jobs_status(max_attempts); match status { diff --git a/prover/crates/bin/prover_fri/src/prover_job_processor.rs b/prover/crates/bin/prover_fri/src/prover_job_processor.rs index bbfb1d5a832..5e8740d1b72 100644 --- a/prover/crates/bin/prover_fri/src/prover_job_processor.rs +++ b/prover/crates/bin/prover_fri/src/prover_job_processor.rs @@ -90,7 +90,7 @@ impl Prover { let started_at = Instant::now(); let artifact: GoldilocksProverSetupData = self .keystore - .load_cpu_setup_data_for_circuit_type(key.clone()) + .load_cpu_setup_data_for_circuit_type(key) .context("get_cpu_setup_data_for_circuit_type()")?; METRICS.gpu_setup_data_load_time[&key.circuit_id.to_string()] .observe(started_at.elapsed()); @@ -226,7 +226,7 @@ impl JobProcessor for Prover { _started_at: Instant, ) -> JoinHandle<anyhow::Result<Self::JobArtifacts>> { let config = Arc::clone(&self.config); - let setup_data = self.get_setup_data(job.setup_data_key.clone()); + let setup_data = self.get_setup_data(job.setup_data_key); tokio::task::spawn_blocking(move || { let block_number = job.block_number; let _span = tracing::info_span!("cpu_prove", %block_number).entered(); @@ -307,7 +307,7 @@ pub fn load_setup_data_cache( for prover_setup_metadata in prover_setup_metadata_list { let key = setup_metadata_to_setup_data_key(&prover_setup_metadata); let setup_data = keystore - .load_cpu_setup_data_for_circuit_type(key.clone()) + .load_cpu_setup_data_for_circuit_type(key) .context("get_cpu_setup_data_for_circuit_type()")?; cache.insert(key, Arc::new(setup_data)); } diff --git a/prover/crates/bin/prover_fri/src/utils.rs b/prover/crates/bin/prover_fri/src/utils.rs index 2941c15439a..86e6568f8e4 100644 --- a/prover/crates/bin/prover_fri/src/utils.rs +++ b/prover/crates/bin/prover_fri/src/utils.rs @@ -143,9 +143,19 @@ pub fn verify_proof( pub fn setup_metadata_to_setup_data_key( setup_metadata: &CircuitIdRoundTuple, ) -> ProverServiceDataKey { - ProverServiceDataKey { - circuit_id: setup_metadata.circuit_id, - round: setup_metadata.aggregation_round.into(), + let round = setup_metadata.aggregation_round.into(); + match round { + AggregationRound::NodeAggregation => { + // For node aggregation only one key exists for all circuit types + ProverServiceDataKey { + circuit_id: ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, + round, + } + } + _ => ProverServiceDataKey { + circuit_id: setup_metadata.circuit_id, + round, + }, } } @@ -190,7 +200,7 @@ mod tests {
round: AggregationRound::BasicCircuits, }; - let result = get_setup_data_key(key.clone()); + let result = get_setup_data_key(key); // Check if the key has remained the same assert_eq!(key, result); diff --git a/prover/crates/bin/prover_job_monitor/Cargo.toml b/prover/crates/bin/prover_job_monitor/Cargo.toml index 160d3a603e3..a4bf8765a94 100644 --- a/prover/crates/bin/prover_job_monitor/Cargo.toml +++ b/prover/crates/bin/prover_job_monitor/Cargo.toml @@ -16,6 +16,7 @@ zksync_prover_dal.workspace = true zksync_utils.workspace = true zksync_types.workspace = true zksync_config = { workspace = true, features = ["observability_ext"] } +zksync_db_connection.workspace = true vise.workspace = true @@ -25,3 +26,5 @@ clap = { workspace = true, features = ["derive"] } ctrlc = { workspace = true, features = ["termination"] } tracing.workspace = true async-trait.workspace = true +serde.workspace = true +axum.workspace = true diff --git a/prover/crates/bin/prover_job_monitor/src/autoscaler_queue_reporter.rs b/prover/crates/bin/prover_job_monitor/src/autoscaler_queue_reporter.rs new file mode 100644 index 00000000000..aff78409dbb --- /dev/null +++ b/prover/crates/bin/prover_job_monitor/src/autoscaler_queue_reporter.rs @@ -0,0 +1,176 @@ +use std::collections::HashMap; + +use axum::{ + http::StatusCode, + response::{IntoResponse, Response}, + routing::get, + Json, Router, +}; +use serde::{Deserialize, Serialize}; +use zksync_db_connection::error::DalError; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; +use zksync_types::{ + basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, + prover_dal::JobCountStatistics, +}; + +#[derive(Debug, Clone)] +pub struct AutoscalerQueueReporter { + connection_pool: ConnectionPool<Prover>, +} + +#[derive(Default, Debug, Serialize, Deserialize)] +pub struct QueueReport { + pub basic_witness_jobs: JobCountStatistics, + pub leaf_witness_jobs: JobCountStatistics, + pub node_witness_jobs: JobCountStatistics, + pub recursion_tip_witness_jobs: JobCountStatistics, + pub scheduler_witness_jobs: JobCountStatistics, + pub prover_jobs: JobCountStatistics, + pub proof_compressor_jobs: JobCountStatistics, +} + +#[derive(Default, Debug, Serialize, Deserialize)] +pub struct VersionedQueueReport { + pub version: ProtocolSemanticVersion, + pub report: QueueReport, +} + +impl AutoscalerQueueReporter { + pub fn new(connection_pool: ConnectionPool<Prover>) -> Self { + Self { connection_pool } + } + + pub async fn get_report(&self) -> Result<Json<Vec<VersionedQueueReport>>, ProcessorError> { + tracing::debug!("Received request to get queue report"); + + let mut result = HashMap::<ProtocolSemanticVersion, QueueReport>::new(); + + for round in AggregationRound::ALL_ROUNDS { + self.get_witness_jobs_report(round, &mut result).await?; + } + + self.get_prover_jobs_report(&mut result).await?; + self.get_proof_compressor_jobs_report(&mut result).await?; + + Ok(Json( + result + .into_iter() + .map(|(version, report)| VersionedQueueReport { version, report }) + .collect(), + )) + } + + async fn get_witness_jobs_report( + &self, + aggregation_round: AggregationRound, + state: &mut HashMap<ProtocolSemanticVersion, QueueReport>, + ) -> anyhow::Result<()> { + let stats = self + .connection_pool + .connection() + .await?
+ .fri_witness_generator_dal() + .get_witness_jobs_stats(aggregation_round) + .await; + + for (protocol_version, job_stats) in stats { + let report = state.entry(protocol_version).or_default(); + + match aggregation_round { + AggregationRound::BasicCircuits => report.basic_witness_jobs = job_stats, + AggregationRound::LeafAggregation => report.leaf_witness_jobs = job_stats, + AggregationRound::NodeAggregation => report.node_witness_jobs = job_stats, + AggregationRound::RecursionTip => report.recursion_tip_witness_jobs = job_stats, + AggregationRound::Scheduler => report.scheduler_witness_jobs = job_stats, + } + } + Ok(()) + } + + async fn get_prover_jobs_report( + &self, + state: &mut HashMap<ProtocolSemanticVersion, QueueReport>, + ) -> anyhow::Result<()> { + let stats = self + .connection_pool + .connection() + .await? + .fri_prover_jobs_dal() + .get_generic_prover_jobs_stats() + .await; + + for (protocol_version, stats) in stats { + let report = state.entry(protocol_version).or_default(); + + report.prover_jobs = stats; + } + Ok(()) + } + + async fn get_proof_compressor_jobs_report( + &self, + state: &mut HashMap<ProtocolSemanticVersion, QueueReport>, + ) -> anyhow::Result<()> { + let stats = self + .connection_pool + .connection() + .await? + .fri_proof_compressor_dal() + .get_jobs_stats() + .await; + + for (protocol_version, stats) in stats { + let report = state.entry(protocol_version).or_default(); + + report.proof_compressor_jobs = stats; + } + + Ok(()) + } +} + +pub fn get_queue_reporter_router(connection_pool: ConnectionPool<Prover>) -> Router { + let autoscaler_queue_reporter = AutoscalerQueueReporter::new(connection_pool); + + Router::new().route( + "/queue_report", + get(move || async move { autoscaler_queue_reporter.get_report().await }), + ) +} + +pub enum ProcessorError { + Dal(DalError), + Custom(String), +} + +impl From<DalError> for ProcessorError { + fn from(err: DalError) -> Self { + ProcessorError::Dal(err) + } +} + +impl From<anyhow::Error> for ProcessorError { + fn from(err: anyhow::Error) -> Self { + ProcessorError::Custom(err.to_string()) + } +} + +impl IntoResponse for ProcessorError { + fn into_response(self) -> Response { + let (status_code, message) = match self { + ProcessorError::Dal(err) => { + tracing::error!("Sqlx error: {:?}", err); + ( + StatusCode::INTERNAL_SERVER_ERROR, + "Failed getting data from database", + ) + } + ProcessorError::Custom(err) => { + tracing::error!("Custom error invoked: {:?}", &err); + (StatusCode::INTERNAL_SERVER_ERROR, "Internal error") + } + }; + (status_code, message).into_response() + } +}
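A hedged sketch (not part of the diff) of serving this router standalone; the PJM main.rs below binds it to the monitor's configured http_port instead, and the port here is arbitrary. GET /queue_report returns a JSON array of VersionedQueueReport values:

use zksync_prover_dal::{ConnectionPool, Prover};

async fn serve_queue_report(connection_pool: ConnectionPool<Prover>) -> anyhow::Result<()> {
    // Reuses the router constructor added above.
    let app = get_queue_reporter_router(connection_pool);
    let listener = tokio::net::TcpListener::bind("0.0.0.0:3074").await?;
    axum::serve(listener, app).await?;
    Ok(())
}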
diff --git a/prover/crates/bin/prover_job_monitor/src/lib.rs b/prover/crates/bin/prover_job_monitor/src/lib.rs index 60d8be297cf..0d6a0ebe104 100644 --- a/prover/crates/bin/prover_job_monitor/src/lib.rs +++ b/prover/crates/bin/prover_job_monitor/src/lib.rs @@ -1,4 +1,5 @@ pub mod archiver; +pub mod autoscaler_queue_reporter; pub mod job_requeuer; pub(crate) mod metrics; pub mod queue_reporter; diff --git a/prover/crates/bin/prover_job_monitor/src/main.rs b/prover/crates/bin/prover_job_monitor/src/main.rs index 734a4bac38a..9195b92882d 100644 --- a/prover/crates/bin/prover_job_monitor/src/main.rs +++ b/prover/crates/bin/prover_job_monitor/src/main.rs @@ -1,3 +1,5 @@ +use std::{future::IntoFuture, net::SocketAddr}; + use anyhow::Context as _; use clap::Parser; use tokio::{ @@ -12,6 +14,7 @@ use zksync_core_leftovers::temp_config_store::{load_database_secrets, load_gener use zksync_prover_dal::{ConnectionPool, Prover}; use zksync_prover_job_monitor::{ archiver::{GpuProverArchiver, ProverJobsArchiver}, + autoscaler_queue_reporter::get_queue_reporter_router, job_requeuer::{ProofCompressorJobRequeuer, ProverJobRequeuer, WitnessGeneratorJobRequeuer}, queue_reporter::{ ProofCompressorQueueReporter, ProverQueueReporter, WitnessGeneratorQueueReporter, @@ -85,21 +88,42 @@ async fn main() -> anyhow::Result<()> { let mut tasks = vec![tokio::spawn(exporter_config.run(stop_receiver.clone()))]; tasks.extend(get_tasks( - connection_pool, - prover_job_monitor_config, + connection_pool.clone(), + prover_job_monitor_config.clone(), proof_compressor_config, prover_config, witness_generator_config, prover_group_config, - stop_receiver, + stop_receiver.clone(), )?); let mut tasks = ManagedTasks::new(tasks); + let bind_address = SocketAddr::from(([0, 0, 0, 0], prover_job_monitor_config.http_port)); + + tracing::info!("Starting PJM server on {bind_address}"); + + let listener = tokio::net::TcpListener::bind(bind_address) + .await + .with_context(|| format!("Failed binding PJM server to {bind_address}"))?; + + let mut receiver = stop_receiver.clone(); + let app = axum::serve(listener, get_queue_reporter_router(connection_pool)) + .with_graceful_shutdown(async move { + if receiver.changed().await.is_err() { + tracing::warn!( + "Stop signal sender for PJM server was dropped without sending a signal" + ); + } + tracing::info!("Stop signal received, PJM server is shutting down"); + }) + .into_future(); + tokio::select! { _ = tasks.wait_single() => {}, _ = stop_signal_receiver => { tracing::info!("Stop signal received, shutting down"); } + _ = app => {} } stop_sender.send(true).ok(); tasks.complete(graceful_shutdown_timeout).await; diff --git a/prover/crates/bin/prover_job_monitor/src/queue_reporter/witness_generator_queue_reporter.rs b/prover/crates/bin/prover_job_monitor/src/queue_reporter/witness_generator_queue_reporter.rs index 5f507a75364..914f2e9ca85 100644 --- a/prover/crates/bin/prover_job_monitor/src/queue_reporter/witness_generator_queue_reporter.rs +++ b/prover/crates/bin/prover_job_monitor/src/queue_reporter/witness_generator_queue_reporter.rs @@ -58,7 +58,7 @@ impl Task for WitnessGeneratorQueueReporter { .fri_witness_generator_dal() .get_witness_jobs_stats(round) .await; - for ((round, semantic_protocol_version), job_stats) in stats { + for (semantic_protocol_version, job_stats) in stats { Self::emit_metrics_for_round(round, semantic_protocol_version, &job_stats); } } diff --git a/prover/crates/bin/witness_generator/src/artifacts.rs b/prover/crates/bin/witness_generator/src/artifacts.rs index f509d3b2f64..7c444da047b 100644 --- a/prover/crates/bin/witness_generator/src/artifacts.rs +++ b/prover/crates/bin/witness_generator/src/artifacts.rs @@ -6,45 +6,33 @@ use zksync_prover_dal::{ConnectionPool, Prover}; #[derive(Debug)] pub(crate) struct AggregationBlobUrls { - pub aggregations_urls: String, + pub aggregation_urls: String, pub circuit_ids_and_urls: Vec<(u8, String)>, } -#[derive(Debug)] -pub(crate) struct SchedulerBlobUrls { - pub circuit_ids_and_urls: Vec<(u8, String)>, - pub closed_form_inputs_and_urls: Vec<(u8, String, usize)>, - pub scheduler_witness_url: String, -} - -pub(crate) enum BlobUrls { - Url(String), - Aggregation(AggregationBlobUrls), - Scheduler(SchedulerBlobUrls), -} - #[async_trait] pub(crate) trait ArtifactsManager { type InputMetadata; type InputArtifacts; type OutputArtifacts; + type BlobUrls; async fn get_artifacts( metadata: &Self::InputMetadata, object_store: &dyn ObjectStore, ) -> anyhow::Result<Self::InputArtifacts>; - async fn save_artifacts( + async fn save_to_bucket( job_id: u32, artifacts:
Self::OutputArtifacts, object_store: &dyn ObjectStore, - ) -> BlobUrls; + ) -> Self::BlobUrls; - async fn update_database( + async fn save_to_database( connection_pool: &ConnectionPool<Prover>, job_id: u32, started_at: Instant, - blob_urls: BlobUrls, + blob_urls: Self::BlobUrls, artifacts: Self::OutputArtifacts, ) -> anyhow::Result<()>; } diff --git a/prover/crates/bin/witness_generator/src/basic_circuits/artifacts.rs b/prover/crates/bin/witness_generator/src/basic_circuits/artifacts.rs index 3447659f829..aa85d185e66 100644 --- a/prover/crates/bin/witness_generator/src/basic_circuits/artifacts.rs +++ b/prover/crates/bin/witness_generator/src/basic_circuits/artifacts.rs @@ -8,7 +8,7 @@ use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; use crate::{ - artifacts::{ArtifactsManager, BlobUrls}, + artifacts::ArtifactsManager, basic_circuits::{BasicCircuitArtifacts, BasicWitnessGenerator, BasicWitnessGeneratorJob}, utils::SchedulerPartialInputWrapper, }; @@ -18,6 +18,7 @@ impl ArtifactsManager for BasicWitnessGenerator { type InputMetadata = L1BatchNumber; type InputArtifacts = BasicWitnessGeneratorJob; type OutputArtifacts = BasicCircuitArtifacts; + type BlobUrls = String; async fn get_artifacts( metadata: &Self::InputMetadata, @@ -31,38 +32,31 @@ impl ArtifactsManager for BasicWitnessGenerator { }) } - async fn save_artifacts( + async fn save_to_bucket( job_id: u32, artifacts: Self::OutputArtifacts, object_store: &dyn ObjectStore, - ) -> BlobUrls { + ) -> String { let aux_output_witness_wrapper = AuxOutputWitnessWrapper(artifacts.aux_output_witness); object_store .put(L1BatchNumber(job_id), &aux_output_witness_wrapper) .await .unwrap(); let wrapper = SchedulerPartialInputWrapper(artifacts.scheduler_witness); - let url = object_store + object_store .put(L1BatchNumber(job_id), &wrapper) .await - .unwrap(); - - BlobUrls::Url(url) + .unwrap() } #[tracing::instrument(skip_all, fields(l1_batch = %job_id))] - async fn update_database( + async fn save_to_database( connection_pool: &ConnectionPool<Prover>, job_id: u32, started_at: Instant, - blob_urls: BlobUrls, - _artifacts: Self::OutputArtifacts, + blob_urls: String, + artifacts: Self::OutputArtifacts, ) -> anyhow::Result<()> { - let blob_urls = match blob_urls { - BlobUrls::Scheduler(blobs) => blobs, - _ => unreachable!(), - }; - let mut connection = connection_pool .connection() .await @@ -79,7 +73,7 @@ impl ArtifactsManager for BasicWitnessGenerator { .fri_prover_jobs_dal() .insert_prover_jobs( L1BatchNumber(job_id), - blob_urls.circuit_ids_and_urls, + artifacts.circuit_urls, AggregationRound::BasicCircuits, 0, protocol_version_id, @@ -89,8 +83,8 @@ impl ArtifactsManager for BasicWitnessGenerator { .fri_witness_generator_dal() .create_aggregation_jobs( L1BatchNumber(job_id), - &blob_urls.closed_form_inputs_and_urls, - &blob_urls.scheduler_witness_url, + &artifacts.queue_urls, + &blob_urls, get_recursive_layer_circuit_id_for_base_layer, protocol_version_id, ) diff --git a/prover/crates/bin/witness_generator/src/basic_circuits/job_processor.rs b/prover/crates/bin/witness_generator/src/basic_circuits/job_processor.rs index 08732689e3a..50e747b1ce1 100644 --- a/prover/crates/bin/witness_generator/src/basic_circuits/job_processor.rs +++ b/prover/crates/bin/witness_generator/src/basic_circuits/job_processor.rs @@ -4,13 +4,15 @@ use anyhow::Context as _; use tracing::Instrument; use zksync_prover_dal::ProverDal; use zksync_prover_fri_types::{get_current_pod_name,
AuxOutputWitnessWrapper}; +use zksync_prover_keystore::keystore::Keystore; use zksync_queued_job_processor::{async_trait, JobProcessor}; use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; use crate::{ - artifacts::{ArtifactsManager, BlobUrls, SchedulerBlobUrls}, + artifacts::ArtifactsManager, basic_circuits::{BasicCircuitArtifacts, BasicWitnessGenerator, BasicWitnessGeneratorJob}, metrics::WITNESS_GENERATOR_METRICS, + witness_generator::WitnessGenerator, }; #[async_trait] @@ -35,19 +37,15 @@ impl JobProcessor for BasicWitnessGenerator { ) .await { - Some(block_number) => { - tracing::info!( - "Processing FRI basic witness-gen for block {}", - block_number - ); - let started_at = Instant::now(); - let job = Self::get_artifacts(&block_number, &*self.object_store).await?; - - WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::BasicCircuits.into()] - .observe(started_at.elapsed()); - - Ok(Some((block_number, job))) - } + Some(block_number) => Ok(Some(( + block_number, + <Self as WitnessGenerator>::prepare_job( + block_number, + &*self.object_store, + Keystore::locate(), // todo: this should be removed + ) + .await?, + ))), None => Ok(None), } } @@ -73,11 +71,15 @@ impl JobProcessor for BasicWitnessGenerator { let max_circuits_in_flight = self.config.max_circuits_in_flight; tokio::spawn(async move { let block_number = job.block_number; - Ok( - Self::process_job_impl(object_store, job, started_at, max_circuits_in_flight) - .instrument(tracing::info_span!("basic_circuit", %block_number)) - .await, + <Self as WitnessGenerator>::process_job( + job, + object_store, + Some(max_circuits_in_flight), + started_at, ) + .instrument(tracing::info_span!("basic_circuit", %block_number)) + .await + .map(Some) }) } @@ -92,8 +94,6 @@ impl JobProcessor for BasicWitnessGenerator { None => Ok(()), Some(artifacts) => { let blob_started_at = Instant::now(); - let circuit_urls = artifacts.circuit_urls.clone(); - let queue_urls = artifacts.queue_urls.clone(); let aux_output_witness_wrapper = AuxOutputWitnessWrapper(artifacts.aux_output_witness.clone()); @@ -105,26 +105,17 @@ impl JobProcessor for BasicWitnessGenerator { .unwrap(); } - let scheduler_witness_url = - match Self::save_artifacts(job_id.0, artifacts.clone(), &*self.object_store) - .await - { - BlobUrls::Url(url) => url, - _ => unreachable!(), - }; + let blob_urls = + Self::save_to_bucket(job_id.0, artifacts.clone(), &*self.object_store).await; WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::BasicCircuits.into()] .observe(blob_started_at.elapsed()); - Self::update_database( + Self::save_to_database( &self.prover_connection_pool, job_id.0, started_at, - BlobUrls::Scheduler(SchedulerBlobUrls { - circuit_ids_and_urls: circuit_urls, - closed_form_inputs_and_urls: queue_urls, - scheduler_witness_url, - }), + blob_urls, artifacts, ) .await?; diff --git a/prover/crates/bin/witness_generator/src/basic_circuits/mod.rs b/prover/crates/bin/witness_generator/src/basic_circuits/mod.rs index c9755c333da..e76ef180c52 100644 --- a/prover/crates/bin/witness_generator/src/basic_circuits/mod.rs +++ b/prover/crates/bin/witness_generator/src/basic_circuits/mod.rs @@ -5,6 +5,7 @@ use std::{ time::Instant, }; +use async_trait::async_trait; use circuit_definitions::{ circuit_definitions::base_layer::{ZkSyncBaseLayerCircuit, ZkSyncBaseLayerStorage}, encodings::recursion_request::RecursionQueueSimulator, @@ -35,12 +36,14 @@ use zksync_object_store::ObjectStore; use zksync_prover_dal::{ConnectionPool, Prover}; use zksync_prover_fri_types::{keys::ClosedFormInputKey, CircuitAuxData}; use
zksync_prover_interface::inputs::WitnessInputData; +use zksync_prover_keystore::keystore::Keystore; use zksync_system_constants::BOOTLOADER_ADDRESS; use zksync_types::{ basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, L1BatchNumber, }; use crate::{ + artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, precalculated_merkle_paths_provider::PrecalculatedMerklePathsProvider, storage_oracle::StorageOracle, @@ -49,6 +52,7 @@ use crate::{ ClosedFormInputWrapper, KZG_TRUSTED_SETUP_FILE, }, witness::WitnessStorage, + witness_generator::WitnessGenerator, }; mod artifacts; @@ -108,17 +112,24 @@ impl BasicWitnessGenerator { protocol_version, } } +} + +#[async_trait] +impl WitnessGenerator for BasicWitnessGenerator { + type Job = BasicWitnessGeneratorJob; + type Metadata = L1BatchNumber; + type Artifacts = BasicCircuitArtifacts; - async fn process_job_impl( + async fn process_job( + job: BasicWitnessGeneratorJob, object_store: Arc<dyn ObjectStore>, - basic_job: BasicWitnessGeneratorJob, + max_circuits_in_flight: Option<usize>, started_at: Instant, - max_circuits_in_flight: usize, - ) -> Option<BasicCircuitArtifacts> { + ) -> anyhow::Result<BasicCircuitArtifacts> { let BasicWitnessGeneratorJob { block_number, data: job, - } = basic_job; + } = job; tracing::info!( "Starting witness generation of type {:?} for block {}", @@ -126,65 +137,43 @@ impl BasicWitnessGenerator { block_number.0 ); - Some( - process_basic_circuits_job( - object_store, - started_at, - block_number, - job, - max_circuits_in_flight, - ) - .await, + let (circuit_urls, queue_urls, scheduler_witness, aux_output_witness) = generate_witness( + block_number, + object_store, + job, + max_circuits_in_flight.unwrap(), ) + .await; + WITNESS_GENERATOR_METRICS.witness_generation_time[&AggregationRound::BasicCircuits.into()] + .observe(started_at.elapsed()); + tracing::info!( + "Witness generation for block {} is complete in {:?}", + block_number.0, + started_at.elapsed() + ); + + Ok(BasicCircuitArtifacts { + circuit_urls, + queue_urls, + scheduler_witness, + aux_output_witness, + }) } -} -#[tracing::instrument(skip_all, fields(l1_batch = %block_number))] -pub(super) async fn process_basic_circuits_job( - object_store: Arc<dyn ObjectStore>, - started_at: Instant, - block_number: L1BatchNumber, - job: WitnessInputData, - max_circuits_in_flight: usize, -) -> BasicCircuitArtifacts { - let (circuit_urls, queue_urls, scheduler_witness, aux_output_witness) = - generate_witness(block_number, object_store, job, max_circuits_in_flight).await; - WITNESS_GENERATOR_METRICS.witness_generation_time[&AggregationRound::BasicCircuits.into()] - .observe(started_at.elapsed()); - tracing::info!( - "Witness generation for block {} is complete in {:?}", - block_number.0, - started_at.elapsed() - ); + async fn prepare_job( + metadata: L1BatchNumber, + object_store: &dyn ObjectStore, + _keystore: Keystore, + ) -> anyhow::Result<Self::Job> { + tracing::info!("Processing FRI basic witness-gen for block {}", metadata.0); + let started_at = Instant::now(); + let job = Self::get_artifacts(&metadata, object_store).await?; - BasicCircuitArtifacts { - circuit_urls, - queue_urls, - scheduler_witness, - aux_output_witness, - } -} + WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::BasicCircuits.into()] + .observe(started_at.elapsed()); -#[tracing::instrument(skip_all, fields(l1_batch = %block_number, circuit_id = %circuit_id))] -async fn save_recursion_queue( - block_number: L1BatchNumber, - circuit_id: u8, - recursion_queue_simulator: RecursionQueueSimulator, - closed_form_inputs: Vec>, - object_store: Arc, -) -> (u8, 
String, usize) { - let key = ClosedFormInputKey { - block_number, - circuit_id, - }; - let basic_circuit_count = closed_form_inputs.len(); - let closed_form_inputs = closed_form_inputs - .iter() - .map(|x| ZkSyncBaseLayerStorage::from_inner(circuit_id, x.clone())) - .collect(); - let wrapper = ClosedFormInputWrapper(closed_form_inputs, recursion_queue_simulator); - let blob_url = object_store.put(key, &wrapper).await.unwrap(); - (circuit_id, blob_url, basic_circuit_count) + Ok(job) + } } #[tracing::instrument(skip_all, fields(l1_batch = %block_number))] @@ -464,3 +453,25 @@ async fn generate_witness( block_aux_witness, ) } + +#[tracing::instrument(skip_all, fields(l1_batch = %block_number, circuit_id = %circuit_id))] +async fn save_recursion_queue( + block_number: L1BatchNumber, + circuit_id: u8, + recursion_queue_simulator: RecursionQueueSimulator, + closed_form_inputs: Vec>, + object_store: Arc, +) -> (u8, String, usize) { + let key = ClosedFormInputKey { + block_number, + circuit_id, + }; + let basic_circuit_count = closed_form_inputs.len(); + let closed_form_inputs = closed_form_inputs + .iter() + .map(|x| ZkSyncBaseLayerStorage::from_inner(circuit_id, x.clone())) + .collect(); + let wrapper = ClosedFormInputWrapper(closed_form_inputs, recursion_queue_simulator); + let blob_url = object_store.put(key, &wrapper).await.unwrap(); + (circuit_id, blob_url, basic_circuit_count) +} diff --git a/prover/crates/bin/witness_generator/src/leaf_aggregation/artifacts.rs b/prover/crates/bin/witness_generator/src/leaf_aggregation/artifacts.rs index a94587d00ec..c83997e36b8 100644 --- a/prover/crates/bin/witness_generator/src/leaf_aggregation/artifacts.rs +++ b/prover/crates/bin/witness_generator/src/leaf_aggregation/artifacts.rs @@ -3,15 +3,15 @@ use std::time::Instant; use async_trait::async_trait; use zksync_object_store::ObjectStore; use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; -use zksync_prover_fri_types::keys::ClosedFormInputKey; +use zksync_prover_fri_types::keys::{AggregationsKey, ClosedFormInputKey}; use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; use zksync_types::{basic_fri_types::AggregationRound, prover_dal::LeafAggregationJobMetadata}; use crate::{ - artifacts::{AggregationBlobUrls, ArtifactsManager, BlobUrls}, + artifacts::{AggregationBlobUrls, ArtifactsManager}, leaf_aggregation::{LeafAggregationArtifacts, LeafAggregationWitnessGenerator}, metrics::WITNESS_GENERATOR_METRICS, - utils::{save_node_aggregations_artifacts, ClosedFormInputWrapper}, + utils::{AggregationWrapper, ClosedFormInputWrapper}, }; #[async_trait] @@ -19,6 +19,7 @@ impl ArtifactsManager for LeafAggregationWitnessGenerator { type InputMetadata = LeafAggregationJobMetadata; type InputArtifacts = ClosedFormInputWrapper; type OutputArtifacts = LeafAggregationArtifacts; + type BlobUrls = AggregationBlobUrls; async fn get_artifacts( metadata: &Self::InputMetadata, @@ -41,38 +42,40 @@ impl ArtifactsManager for LeafAggregationWitnessGenerator { skip_all, fields(l1_batch = %artifacts.block_number, circuit_id = %artifacts.circuit_id) )] - async fn save_artifacts( + async fn save_to_bucket( _job_id: u32, artifacts: Self::OutputArtifacts, object_store: &dyn ObjectStore, - ) -> BlobUrls { + ) -> AggregationBlobUrls { let started_at = Instant::now(); - let aggregations_urls = save_node_aggregations_artifacts( - artifacts.block_number, - get_recursive_layer_circuit_id_for_base_layer(artifacts.circuit_id), - 0, - artifacts.aggregations, - object_store, - ) - .await; + let key = 
AggregationsKey { + block_number: artifacts.block_number, + circuit_id: get_recursive_layer_circuit_id_for_base_layer(artifacts.circuit_id), + depth: 0, + }; + let aggregation_urls = object_store + .put(key, &AggregationWrapper(artifacts.aggregations)) + .await + .unwrap(); + WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::LeafAggregation.into()] .observe(started_at.elapsed()); - BlobUrls::Aggregation(AggregationBlobUrls { - aggregations_urls, + AggregationBlobUrls { + aggregation_urls, circuit_ids_and_urls: artifacts.circuit_ids_and_urls, - }) + } } #[tracing::instrument( skip_all, fields(l1_batch = %job_id) )] - async fn update_database( + async fn save_to_database( connection_pool: &ConnectionPool, job_id: u32, started_at: Instant, - blob_urls: BlobUrls, + blob_urls: AggregationBlobUrls, artifacts: Self::OutputArtifacts, ) -> anyhow::Result<()> { tracing::info!( @@ -82,11 +85,6 @@ impl ArtifactsManager for LeafAggregationWitnessGenerator { artifacts.circuit_id, ); - let blob_urls = match blob_urls { - BlobUrls::Aggregation(blob_urls) => blob_urls, - _ => panic!("Unexpected blob urls type"), - }; - let mut prover_connection = connection_pool.connection().await.unwrap(); let mut transaction = prover_connection.start_transaction().await.unwrap(); let number_of_dependent_jobs = blob_urls.circuit_ids_and_urls.len(); @@ -124,7 +122,7 @@ impl ArtifactsManager for LeafAggregationWitnessGenerator { get_recursive_layer_circuit_id_for_base_layer(artifacts.circuit_id), number_of_dependent_jobs, 0, - blob_urls.aggregations_urls, + blob_urls.aggregation_urls, ) .await; tracing::info!( diff --git a/prover/crates/bin/witness_generator/src/leaf_aggregation/job_processor.rs b/prover/crates/bin/witness_generator/src/leaf_aggregation/job_processor.rs index e032084151e..440636b85fa 100644 --- a/prover/crates/bin/witness_generator/src/leaf_aggregation/job_processor.rs +++ b/prover/crates/bin/witness_generator/src/leaf_aggregation/job_processor.rs @@ -10,10 +10,11 @@ use zksync_types::basic_fri_types::AggregationRound; use crate::{ artifacts::ArtifactsManager, leaf_aggregation::{ - prepare_leaf_aggregation_job, LeafAggregationArtifacts, LeafAggregationWitnessGenerator, + LeafAggregationArtifacts, LeafAggregationWitnessGenerator, LeafAggregationWitnessGeneratorJob, }, metrics::WITNESS_GENERATOR_METRICS, + witness_generator::WitnessGenerator, }; #[async_trait] @@ -37,9 +38,13 @@ impl JobProcessor for LeafAggregationWitnessGenerator { tracing::info!("Processing leaf aggregation job {:?}", metadata.id); Ok(Some(( metadata.id, - prepare_leaf_aggregation_job(metadata, &*self.object_store, self.keystore.clone()) - .await - .context("prepare_leaf_aggregation_job()")?, + ::prepare_job( + metadata, + &*self.object_store, + self.keystore.clone(), + ) + .await + .context("prepare_leaf_aggregation_job()")?, ))) } @@ -63,7 +68,13 @@ impl JobProcessor for LeafAggregationWitnessGenerator { let object_store = self.object_store.clone(); let max_circuits_in_flight = self.config.max_circuits_in_flight; tokio::spawn(async move { - Ok(Self::process_job_impl(job, started_at, object_store, max_circuits_in_flight).await) + ::process_job( + job, + object_store, + Some(max_circuits_in_flight), + started_at, + ) + .await }) } @@ -83,7 +94,7 @@ impl JobProcessor for LeafAggregationWitnessGenerator { let blob_save_started_at = Instant::now(); - let blob_urls = Self::save_artifacts(job_id, artifacts.clone(), &*self.object_store).await; + let blob_urls = Self::save_to_bucket(job_id, artifacts.clone(), &*self.object_store).await; 
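
The shape of this refactor is easiest to see in isolation: the old catch-all `BlobUrls` enum, with its `match ... unreachable!()` unwrapping, is replaced by a per-round associated type, so `save_to_bucket` and `save_to_database` agree on the URL type at compile time. Below is a minimal, self-contained sketch of the pattern; the trait and type names mirror the diff, but the bodies, URLs, and synchronous signatures are toy stand-ins rather than the real `ArtifactsManager` surface:

```rust
// Toy model of the associated-type pattern applied in this diff.
struct AggregationBlobUrls {
    aggregation_urls: String,
    circuit_ids_and_urls: Vec<(u8, String)>,
}

trait ArtifactsManagerSketch {
    // Each round picks its own URL type; no shared enum to unwrap.
    type BlobUrls;

    fn save_to_bucket(&self) -> Self::BlobUrls;
    fn save_to_database(&self, blob_urls: Self::BlobUrls);
}

struct BasicRound;

impl ArtifactsManagerSketch for BasicRound {
    // Rounds that persist a single blob just use `String`.
    type BlobUrls = String;

    fn save_to_bucket(&self) -> String {
        "scheduler_witness_url".to_owned()
    }

    fn save_to_database(&self, blob_urls: String) {
        // No `match`/`unreachable!()`: the compiler guarantees the variant.
        println!("saving url {blob_urls}");
    }
}

struct LeafRound;

impl ArtifactsManagerSketch for LeafRound {
    // Aggregation rounds carry a richer struct instead.
    type BlobUrls = AggregationBlobUrls;

    fn save_to_bucket(&self) -> AggregationBlobUrls {
        AggregationBlobUrls {
            aggregation_urls: "aggregation_url".to_owned(),
            circuit_ids_and_urls: vec![(3, "circuit_url".to_owned())],
        }
    }

    fn save_to_database(&self, blob_urls: AggregationBlobUrls) {
        println!(
            "saving {} plus {} circuit urls",
            blob_urls.aggregation_urls,
            blob_urls.circuit_ids_and_urls.len()
        );
    }
}

fn main() {
    BasicRound.save_to_database(BasicRound.save_to_bucket());
    LeafRound.save_to_database(LeafRound.save_to_bucket());
}
```

In the diff itself, the single-blob rounds (basic circuits, recursion tip, scheduler) set `type BlobUrls = String`, while the leaf and node aggregation rounds set `type BlobUrls = AggregationBlobUrls`.
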
WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::LeafAggregation.into()] .observe(blob_save_started_at.elapsed()); @@ -93,7 +104,7 @@ impl JobProcessor for LeafAggregationWitnessGenerator { block_number.0, circuit_id, ); - Self::update_database( + Self::save_to_database( &self.prover_connection_pool, job_id, started_at, diff --git a/prover/crates/bin/witness_generator/src/leaf_aggregation/mod.rs b/prover/crates/bin/witness_generator/src/leaf_aggregation/mod.rs index d669a4cc97e..960843259c3 100644 --- a/prover/crates/bin/witness_generator/src/leaf_aggregation/mod.rs +++ b/prover/crates/bin/witness_generator/src/leaf_aggregation/mod.rs @@ -1,6 +1,7 @@ use std::{sync::Arc, time::Instant}; use anyhow::Context as _; +use async_trait::async_trait; use circuit_definitions::circuit_definitions::recursion_layer::base_circuit_type_into_recursive_leaf_circuit_type; use tokio::sync::Semaphore; use zkevm_test_harness::{ @@ -36,6 +37,7 @@ use crate::{ load_proofs_for_job_ids, save_recursive_layer_prover_input_artifacts, ClosedFormInputWrapper, }, + witness_generator::WitnessGenerator, }; mod artifacts; @@ -85,69 +87,6 @@ impl LeafAggregationWitnessGenerator { keystore, } } - - #[tracing::instrument( - skip_all, - fields(l1_batch = %leaf_job.block_number, circuit_id = %leaf_job.circuit_id) - )] - pub async fn process_job_impl( - leaf_job: LeafAggregationWitnessGeneratorJob, - started_at: Instant, - object_store: Arc, - max_circuits_in_flight: usize, - ) -> LeafAggregationArtifacts { - tracing::info!( - "Starting witness generation of type {:?} for block {} with circuit {}", - AggregationRound::LeafAggregation, - leaf_job.block_number.0, - leaf_job.circuit_id, - ); - process_leaf_aggregation_job(started_at, leaf_job, object_store, max_circuits_in_flight) - .await - } -} - -#[tracing::instrument( - skip_all, - fields(l1_batch = %metadata.block_number, circuit_id = %metadata.circuit_id) -)] -pub async fn prepare_leaf_aggregation_job( - metadata: LeafAggregationJobMetadata, - object_store: &dyn ObjectStore, - keystore: Keystore, -) -> anyhow::Result { - let started_at = Instant::now(); - let closed_form_input = - LeafAggregationWitnessGenerator::get_artifacts(&metadata, object_store).await?; - - WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::LeafAggregation.into()] - .observe(started_at.elapsed()); - - let started_at = Instant::now(); - let base_vk = keystore - .load_base_layer_verification_key(metadata.circuit_id) - .context("get_base_layer_vk_for_circuit_type()")?; - - let leaf_circuit_id = base_circuit_type_into_recursive_leaf_circuit_type( - BaseLayerCircuitType::from_numeric_value(metadata.circuit_id), - ) as u8; - - let leaf_vk = keystore - .load_recursive_layer_verification_key(leaf_circuit_id) - .context("get_recursive_layer_vk_for_circuit_type()")?; - let leaf_params = compute_leaf_params(metadata.circuit_id, base_vk.clone(), leaf_vk); - - WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::LeafAggregation.into()] - .observe(started_at.elapsed()); - - Ok(LeafAggregationWitnessGeneratorJob { - circuit_id: metadata.circuit_id, - block_number: metadata.block_number, - closed_form_inputs: closed_form_input, - proofs_ids: metadata.prover_job_ids_for_proofs, - base_vk, - leaf_params, - }) } #[tracing::instrument( @@ -261,3 +200,78 @@ pub async fn process_leaf_aggregation_job( closed_form_inputs: job.closed_form_inputs.0, } } + +#[async_trait] +impl WitnessGenerator for LeafAggregationWitnessGenerator { + type Job = LeafAggregationWitnessGeneratorJob; + type Metadata = 
LeafAggregationJobMetadata; + type Artifacts = LeafAggregationArtifacts; + + #[tracing::instrument( + skip_all, + fields(l1_batch = %job.block_number, circuit_id = %job.circuit_id) + )] + async fn process_job( + job: LeafAggregationWitnessGeneratorJob, + object_store: Arc, + max_circuits_in_flight: Option, + started_at: Instant, + ) -> anyhow::Result { + tracing::info!( + "Starting witness generation of type {:?} for block {} with circuit {}", + AggregationRound::LeafAggregation, + job.block_number.0, + job.circuit_id, + ); + Ok(process_leaf_aggregation_job( + started_at, + job, + object_store, + max_circuits_in_flight.unwrap(), + ) + .await) + } + + #[tracing::instrument( + skip_all, + fields(l1_batch = %metadata.block_number, circuit_id = %metadata.circuit_id) + )] + async fn prepare_job( + metadata: LeafAggregationJobMetadata, + object_store: &dyn ObjectStore, + keystore: Keystore, + ) -> anyhow::Result { + let started_at = Instant::now(); + let closed_form_input = + LeafAggregationWitnessGenerator::get_artifacts(&metadata, object_store).await?; + + WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::LeafAggregation.into()] + .observe(started_at.elapsed()); + + let started_at = Instant::now(); + let base_vk = keystore + .load_base_layer_verification_key(metadata.circuit_id) + .context("get_base_layer_vk_for_circuit_type()")?; + + let leaf_circuit_id = base_circuit_type_into_recursive_leaf_circuit_type( + BaseLayerCircuitType::from_numeric_value(metadata.circuit_id), + ) as u8; + + let leaf_vk = keystore + .load_recursive_layer_verification_key(leaf_circuit_id) + .context("get_recursive_layer_vk_for_circuit_type()")?; + let leaf_params = compute_leaf_params(metadata.circuit_id, base_vk.clone(), leaf_vk); + + WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::LeafAggregation.into()] + .observe(started_at.elapsed()); + + Ok(LeafAggregationWitnessGeneratorJob { + circuit_id: metadata.circuit_id, + block_number: metadata.block_number, + closed_form_inputs: closed_form_input, + proofs_ids: metadata.prover_job_ids_for_proofs, + base_vk, + leaf_params, + }) + } +} diff --git a/prover/crates/bin/witness_generator/src/lib.rs b/prover/crates/bin/witness_generator/src/lib.rs index c0ac9718c6e..b24b548a49b 100644 --- a/prover/crates/bin/witness_generator/src/lib.rs +++ b/prover/crates/bin/witness_generator/src/lib.rs @@ -14,3 +14,4 @@ mod storage_oracle; mod tests; pub mod utils; mod witness; +pub mod witness_generator; diff --git a/prover/crates/bin/witness_generator/src/node_aggregation/artifacts.rs b/prover/crates/bin/witness_generator/src/node_aggregation/artifacts.rs index 245027f0d67..09f01899bf3 100644 --- a/prover/crates/bin/witness_generator/src/node_aggregation/artifacts.rs +++ b/prover/crates/bin/witness_generator/src/node_aggregation/artifacts.rs @@ -7,10 +7,10 @@ use zksync_prover_fri_types::keys::AggregationsKey; use zksync_types::{basic_fri_types::AggregationRound, prover_dal::NodeAggregationJobMetadata}; use crate::{ - artifacts::{AggregationBlobUrls, ArtifactsManager, BlobUrls}, + artifacts::{AggregationBlobUrls, ArtifactsManager}, metrics::WITNESS_GENERATOR_METRICS, node_aggregation::{NodeAggregationArtifacts, NodeAggregationWitnessGenerator}, - utils::{save_node_aggregations_artifacts, AggregationWrapper}, + utils::AggregationWrapper, }; #[async_trait] @@ -18,6 +18,7 @@ impl ArtifactsManager for NodeAggregationWitnessGenerator { type InputMetadata = NodeAggregationJobMetadata; type InputArtifacts = AggregationWrapper; type OutputArtifacts = 
NodeAggregationArtifacts; + type BlobUrls = AggregationBlobUrls; #[tracing::instrument( skip_all, @@ -46,46 +47,43 @@ impl ArtifactsManager for NodeAggregationWitnessGenerator { skip_all, fields(l1_batch = %artifacts.block_number, circuit_id = %artifacts.circuit_id) )] - async fn save_artifacts( + async fn save_to_bucket( _job_id: u32, artifacts: Self::OutputArtifacts, object_store: &dyn ObjectStore, - ) -> BlobUrls { + ) -> AggregationBlobUrls { let started_at = Instant::now(); - let aggregations_urls = save_node_aggregations_artifacts( - artifacts.block_number, - artifacts.circuit_id, - artifacts.depth, - artifacts.next_aggregations, - object_store, - ) - .await; + let key = AggregationsKey { + block_number: artifacts.block_number, + circuit_id: artifacts.circuit_id, + depth: artifacts.depth, + }; + let aggregation_urls = object_store + .put(key, &AggregationWrapper(artifacts.next_aggregations)) + .await + .unwrap(); WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::NodeAggregation.into()] .observe(started_at.elapsed()); - BlobUrls::Aggregation(AggregationBlobUrls { - aggregations_urls, + AggregationBlobUrls { + aggregation_urls, circuit_ids_and_urls: artifacts.recursive_circuit_ids_and_urls, - }) + } } #[tracing::instrument( skip_all, fields(l1_batch = % job_id) )] - async fn update_database( + async fn save_to_database( connection_pool: &ConnectionPool, job_id: u32, started_at: Instant, - blob_urls: BlobUrls, + blob_urls: AggregationBlobUrls, artifacts: Self::OutputArtifacts, ) -> anyhow::Result<()> { let mut prover_connection = connection_pool.connection().await.unwrap(); - let blob_urls = match blob_urls { - BlobUrls::Aggregation(blobs) => blobs, - _ => unreachable!(), - }; let mut transaction = prover_connection.start_transaction().await.unwrap(); let dependent_jobs = blob_urls.circuit_ids_and_urls.len(); let protocol_version_id = transaction @@ -111,7 +109,7 @@ impl ArtifactsManager for NodeAggregationWitnessGenerator { artifacts.circuit_id, Some(dependent_jobs as i32), artifacts.depth, - &blob_urls.aggregations_urls, + &blob_urls.aggregation_urls, protocol_version_id, ) .await; diff --git a/prover/crates/bin/witness_generator/src/node_aggregation/job_processor.rs b/prover/crates/bin/witness_generator/src/node_aggregation/job_processor.rs index a015462cd6f..0f66c988c10 100644 --- a/prover/crates/bin/witness_generator/src/node_aggregation/job_processor.rs +++ b/prover/crates/bin/witness_generator/src/node_aggregation/job_processor.rs @@ -11,9 +11,10 @@ use crate::{ artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, node_aggregation::{ - prepare_job, NodeAggregationArtifacts, NodeAggregationWitnessGenerator, + NodeAggregationArtifacts, NodeAggregationWitnessGenerator, NodeAggregationWitnessGeneratorJob, }, + witness_generator::WitnessGenerator, }; #[async_trait] @@ -37,9 +38,13 @@ impl JobProcessor for NodeAggregationWitnessGenerator { tracing::info!("Processing node aggregation job {:?}", metadata.id); Ok(Some(( metadata.id, - prepare_job(metadata, &*self.object_store, self.keystore.clone()) - .await - .context("prepare_job()")?, + ::prepare_job( + metadata, + &*self.object_store, + self.keystore.clone(), + ) + .await + .context("prepare_job()")?, ))) } @@ -63,7 +68,13 @@ impl JobProcessor for NodeAggregationWitnessGenerator { let object_store = self.object_store.clone(); let max_circuits_in_flight = self.config.max_circuits_in_flight; tokio::spawn(async move { - Ok(Self::process_job_impl(job, started_at, object_store, max_circuits_in_flight).await) + 
::process_job( + job, + object_store, + Some(max_circuits_in_flight), + started_at, + ) + .await }) } @@ -79,12 +90,12 @@ impl JobProcessor for NodeAggregationWitnessGenerator { ) -> anyhow::Result<()> { let blob_save_started_at = Instant::now(); - let blob_urls = Self::save_artifacts(job_id, artifacts.clone(), &*self.object_store).await; + let blob_urls = Self::save_to_bucket(job_id, artifacts.clone(), &*self.object_store).await; WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::NodeAggregation.into()] .observe(blob_save_started_at.elapsed()); - Self::update_database( + Self::save_to_database( &self.prover_connection_pool, job_id, started_at, diff --git a/prover/crates/bin/witness_generator/src/node_aggregation/mod.rs b/prover/crates/bin/witness_generator/src/node_aggregation/mod.rs index 047caa363a8..f2c9a6fb891 100644 --- a/prover/crates/bin/witness_generator/src/node_aggregation/mod.rs +++ b/prover/crates/bin/witness_generator/src/node_aggregation/mod.rs @@ -1,6 +1,7 @@ use std::{sync::Arc, time::Instant}; use anyhow::Context as _; +use async_trait::async_trait; use circuit_definitions::circuit_definitions::recursion_layer::RECURSION_ARITY; use tokio::sync::Semaphore; use zkevm_test_harness::witness::recursive_aggregation::{ @@ -30,6 +31,7 @@ use crate::{ artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, utils::{load_proofs_for_job_ids, save_recursive_layer_prover_input_artifacts}, + witness_generator::WitnessGenerator, }; mod artifacts; @@ -81,17 +83,24 @@ impl NodeAggregationWitnessGenerator { keystore, } } +} + +#[async_trait] +impl WitnessGenerator for NodeAggregationWitnessGenerator { + type Job = NodeAggregationWitnessGeneratorJob; + type Metadata = NodeAggregationJobMetadata; + type Artifacts = NodeAggregationArtifacts; #[tracing::instrument( skip_all, fields(l1_batch = % job.block_number, circuit_id = % job.circuit_id) )] - pub async fn process_job_impl( + async fn process_job( job: NodeAggregationWitnessGeneratorJob, - started_at: Instant, object_store: Arc, - max_circuits_in_flight: usize, - ) -> NodeAggregationArtifacts { + max_circuits_in_flight: Option, + started_at: Instant, + ) -> anyhow::Result { let node_vk_commitment = compute_node_vk_commitment(job.node_vk.clone()); tracing::info!( "Starting witness generation of type {:?} for block {} circuit id {} depth {}", @@ -117,7 +126,7 @@ impl NodeAggregationWitnessGenerator { proofs_ids.len() ); - let semaphore = Arc::new(Semaphore::new(max_circuits_in_flight)); + let semaphore = Arc::new(Semaphore::new(max_circuits_in_flight.unwrap())); let mut handles = vec![]; for (circuit_idx, (chunk, proofs_ids_for_chunk)) in job @@ -205,52 +214,54 @@ impl NodeAggregationWitnessGenerator { started_at.elapsed(), ); - NodeAggregationArtifacts { + Ok(NodeAggregationArtifacts { circuit_id: job.circuit_id, block_number: job.block_number, depth: job.depth + 1, next_aggregations, recursive_circuit_ids_and_urls, - } + }) } -} -#[tracing::instrument( - skip_all, - fields(l1_batch = % metadata.block_number, circuit_id = % metadata.circuit_id) -)] -pub async fn prepare_job( - metadata: NodeAggregationJobMetadata, - object_store: &dyn ObjectStore, - keystore: Keystore, -) -> anyhow::Result { - let started_at = Instant::now(); - let artifacts = NodeAggregationWitnessGenerator::get_artifacts(&metadata, object_store).await?; + #[tracing::instrument( + skip_all, + fields(l1_batch = % metadata.block_number, circuit_id = % metadata.circuit_id) + )] + async fn prepare_job( + metadata: Self::Metadata, + object_store: &dyn 
ObjectStore, + keystore: Keystore, + ) -> anyhow::Result { + let started_at = Instant::now(); + let artifacts = + NodeAggregationWitnessGenerator::get_artifacts(&metadata, object_store).await?; - WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::NodeAggregation.into()] - .observe(started_at.elapsed()); + WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::NodeAggregation.into()] + .observe(started_at.elapsed()); - let started_at = Instant::now(); - let leaf_vk = keystore - .load_recursive_layer_verification_key(metadata.circuit_id) - .context("get_recursive_layer_vk_for_circuit_type")?; - let node_vk = keystore - .load_recursive_layer_verification_key( - ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, - ) - .context("get_recursive_layer_vk_for_circuit_type()")?; + let started_at = Instant::now(); + let leaf_vk = keystore + .load_recursive_layer_verification_key(metadata.circuit_id) + .context("get_recursive_layer_vk_for_circuit_type")?; + let node_vk = keystore + .load_recursive_layer_verification_key( + ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, + ) + .context("get_recursive_layer_vk_for_circuit_type()")?; - WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::NodeAggregation.into()] - .observe(started_at.elapsed()); + WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::NodeAggregation.into()] + .observe(started_at.elapsed()); - Ok(NodeAggregationWitnessGeneratorJob { - circuit_id: metadata.circuit_id, - block_number: metadata.block_number, - depth: metadata.depth, - aggregations: artifacts.0, - proofs_ids: metadata.prover_job_ids_for_proofs, - leaf_vk, - node_vk, - all_leafs_layer_params: get_leaf_vk_params(&keystore).context("get_leaf_vk_params()")?, - }) + Ok(NodeAggregationWitnessGeneratorJob { + circuit_id: metadata.circuit_id, + block_number: metadata.block_number, + depth: metadata.depth, + aggregations: artifacts.0, + proofs_ids: metadata.prover_job_ids_for_proofs, + leaf_vk, + node_vk, + all_leafs_layer_params: get_leaf_vk_params(&keystore) + .context("get_leaf_vk_params()")?, + }) + } } diff --git a/prover/crates/bin/witness_generator/src/recursion_tip/artifacts.rs b/prover/crates/bin/witness_generator/src/recursion_tip/artifacts.rs index 8379fcf9f93..b61aa948100 100644 --- a/prover/crates/bin/witness_generator/src/recursion_tip/artifacts.rs +++ b/prover/crates/bin/witness_generator/src/recursion_tip/artifacts.rs @@ -12,7 +12,7 @@ use zksync_prover_fri_types::{keys::FriCircuitKey, CircuitWrapper, FriProofWrapp use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; use crate::{ - artifacts::{ArtifactsManager, BlobUrls}, + artifacts::ArtifactsManager, recursion_tip::{RecursionTipArtifacts, RecursionTipWitnessGenerator}, }; @@ -21,6 +21,7 @@ impl ArtifactsManager for RecursionTipWitnessGenerator { type InputMetadata = Vec<(u8, u32)>; type InputArtifacts = Vec; type OutputArtifacts = RecursionTipArtifacts; + type BlobUrls = String; /// Loads all proofs for a given recursion tip's job ids. /// Note that recursion tip may not have proofs for some specific circuits (because the batch didn't contain them). 
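
Throughout these job processors, the former free functions (`prepare_job`, `process_job_impl`, `process_job_sync`) are replaced with fully qualified calls on the new trait, of the form `<Self as WitnessGenerator>::prepare_job(..)` and `<Self as WitnessGenerator>::process_job(..)`. A toy model of that dispatch, with simplified synchronous signatures and stand-in types (`u32` and `String` instead of the real metadata and job structs):

```rust
// Toy trait; the real WitnessGenerator is async and takes more arguments.
trait WitnessGenerator {
    type Metadata;
    type Job;

    fn prepare_job(metadata: Self::Metadata) -> Result<Self::Job, String>;
}

struct RecursionTipWitnessGenerator;

impl WitnessGenerator for RecursionTipWitnessGenerator {
    type Metadata = u32; // stands in for RecursionTipJobMetadata
    type Job = String;   // stands in for RecursionTipWitnessGeneratorJob

    fn prepare_job(metadata: u32) -> Result<String, String> {
        Ok(format!("job for batch {metadata}"))
    }
}

fn main() -> Result<(), String> {
    // Fully qualified syntax, as used in the job processors above.
    let job = <RecursionTipWitnessGenerator as WitnessGenerator>::prepare_job(1)?;
    println!("{job}");
    Ok(())
}
```

The fully qualified form disambiguates the trait methods from the inherent methods that previously existed on the generator structs.
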
@@ -73,11 +74,11 @@ impl ArtifactsManager for RecursionTipWitnessGenerator { Ok(proofs) } - async fn save_artifacts( + async fn save_to_bucket( job_id: u32, artifacts: Self::OutputArtifacts, object_store: &dyn ObjectStore, - ) -> BlobUrls { + ) -> String { let key = FriCircuitKey { block_number: L1BatchNumber(job_id), circuit_id: 255, @@ -86,29 +87,22 @@ impl ArtifactsManager for RecursionTipWitnessGenerator { aggregation_round: AggregationRound::RecursionTip, }; - let blob_url = object_store + object_store .put( key, &CircuitWrapper::Recursive(artifacts.recursion_tip_circuit.clone()), ) .await - .unwrap(); - - BlobUrls::Url(blob_url) + .unwrap() } - async fn update_database( + async fn save_to_database( connection_pool: &ConnectionPool, job_id: u32, started_at: Instant, - blob_urls: BlobUrls, + blob_urls: String, _artifacts: Self::OutputArtifacts, ) -> anyhow::Result<()> { - let blob_url = match blob_urls { - BlobUrls::Url(url) => url, - _ => panic!("Unexpected blob urls type"), - }; - let mut prover_connection = connection_pool.connection().await?; let mut transaction = prover_connection.start_transaction().await?; let protocol_version_id = transaction @@ -123,7 +117,7 @@ impl ArtifactsManager for RecursionTipWitnessGenerator { 0, 0, AggregationRound::RecursionTip, - &blob_url, + &blob_urls, false, protocol_version_id, ) diff --git a/prover/crates/bin/witness_generator/src/recursion_tip/job_processor.rs b/prover/crates/bin/witness_generator/src/recursion_tip/job_processor.rs index f114724cfec..9ab7d934a3e 100644 --- a/prover/crates/bin/witness_generator/src/recursion_tip/job_processor.rs +++ b/prover/crates/bin/witness_generator/src/recursion_tip/job_processor.rs @@ -11,9 +11,10 @@ use crate::{ artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, recursion_tip::{ - prepare_job, RecursionTipArtifacts, RecursionTipWitnessGenerator, + RecursionTipArtifacts, RecursionTipJobMetadata, RecursionTipWitnessGenerator, RecursionTipWitnessGeneratorJob, }, + witness_generator::WitnessGenerator, }; #[async_trait] @@ -49,9 +50,11 @@ impl JobProcessor for RecursionTipWitnessGenerator { Ok(Some(( l1_batch_number, - prepare_job( - l1_batch_number, - final_node_proof_job_ids, + ::prepare_job( + RecursionTipJobMetadata { + l1_batch_number, + final_node_proof_job_ids, + }, &*self.object_store, self.keystore.clone(), ) @@ -77,7 +80,10 @@ impl JobProcessor for RecursionTipWitnessGenerator { job: RecursionTipWitnessGeneratorJob, started_at: Instant, ) -> tokio::task::JoinHandle> { - tokio::task::spawn_blocking(move || Ok(Self::process_job_sync(job, started_at))) + let object_store = self.object_store.clone(); + tokio::spawn(async move { + ::process_job(job, object_store, None, started_at).await + }) } #[tracing::instrument( @@ -93,12 +99,12 @@ impl JobProcessor for RecursionTipWitnessGenerator { let blob_save_started_at = Instant::now(); let blob_urls = - Self::save_artifacts(job_id.0, artifacts.clone(), &*self.object_store).await; + Self::save_to_bucket(job_id.0, artifacts.clone(), &*self.object_store).await; WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::RecursionTip.into()] .observe(blob_save_started_at.elapsed()); - Self::update_database( + Self::save_to_database( &self.prover_connection_pool, job_id.0, started_at, diff --git a/prover/crates/bin/witness_generator/src/recursion_tip/mod.rs b/prover/crates/bin/witness_generator/src/recursion_tip/mod.rs index 4abb56a7d78..40abb756c8a 100644 --- a/prover/crates/bin/witness_generator/src/recursion_tip/mod.rs +++ 
b/prover/crates/bin/witness_generator/src/recursion_tip/mod.rs @@ -1,6 +1,7 @@ use std::{sync::Arc, time::Instant}; use anyhow::Context; +use async_trait::async_trait; use circuit_definitions::{ circuit_definitions::recursion_layer::{ recursion_tip::RecursionTipCircuit, ZkSyncRecursionLayerStorageType, @@ -45,6 +46,7 @@ use zksync_types::{ use crate::{ artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, utils::ClosedFormInputWrapper, + witness_generator::WitnessGenerator, }; mod artifacts; @@ -66,6 +68,11 @@ pub struct RecursionTipArtifacts { pub recursion_tip_circuit: ZkSyncRecursiveLayerCircuit, } +pub struct RecursionTipJobMetadata { + pub l1_batch_number: L1BatchNumber, + pub final_node_proof_job_ids: Vec<(u8, u32)>, +} + #[derive(Debug)] pub struct RecursionTipWitnessGenerator { config: FriWitnessGeneratorConfig, @@ -91,15 +98,24 @@ impl RecursionTipWitnessGenerator { keystore, } } +} + +#[async_trait] +impl WitnessGenerator for RecursionTipWitnessGenerator { + type Job = RecursionTipWitnessGeneratorJob; + type Metadata = RecursionTipJobMetadata; + type Artifacts = RecursionTipArtifacts; #[tracing::instrument( skip_all, fields(l1_batch = %job.block_number) )] - pub fn process_job_sync( - job: RecursionTipWitnessGeneratorJob, + async fn process_job( + job: Self::Job, + _object_store: Arc, + _max_circuits_in_flight: Option, started_at: Instant, - ) -> RecursionTipArtifacts { + ) -> anyhow::Result { tracing::info!( "Starting fri witness generation of type {:?} for block {}", AggregationRound::RecursionTip, @@ -127,100 +143,102 @@ impl RecursionTipWitnessGenerator { started_at.elapsed() ); - RecursionTipArtifacts { + Ok(RecursionTipArtifacts { recursion_tip_circuit: ZkSyncRecursiveLayerCircuit::RecursionTipCircuit( recursive_tip_circuit, ), - } + }) } -} -#[tracing::instrument( - skip_all, - fields(l1_batch = %l1_batch_number) -)] -pub async fn prepare_job( - l1_batch_number: L1BatchNumber, - final_node_proof_job_ids: Vec<(u8, u32)>, - object_store: &dyn ObjectStore, - keystore: Keystore, -) -> anyhow::Result { - let started_at = Instant::now(); - let recursion_tip_proofs = - RecursionTipWitnessGenerator::get_artifacts(&final_node_proof_job_ids, object_store) - .await?; - WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::RecursionTip.into()] - .observe(started_at.elapsed()); - - let node_vk = keystore - .load_recursive_layer_verification_key( - ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, + #[tracing::instrument( + skip_all, + fields(l1_batch = %metadata.l1_batch_number) + )] + async fn prepare_job( + metadata: RecursionTipJobMetadata, + object_store: &dyn ObjectStore, + keystore: Keystore, + ) -> anyhow::Result { + let started_at = Instant::now(); + let recursion_tip_proofs = RecursionTipWitnessGenerator::get_artifacts( + &metadata.final_node_proof_job_ids, + object_store, ) - .context("get_recursive_layer_vk_for_circuit_type()")?; + .await?; + WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::RecursionTip.into()] + .observe(started_at.elapsed()); + + let node_vk = keystore + .load_recursive_layer_verification_key( + ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, + ) + .context("get_recursive_layer_vk_for_circuit_type()")?; + + let node_layer_vk_commitment = compute_node_vk_commitment(node_vk.clone()); + + let mut recursion_queues = vec![]; + for circuit_id in BaseLayerCircuitType::as_iter_u8() { + let key = ClosedFormInputKey { + block_number: metadata.l1_batch_number, + circuit_id, + }; + let ClosedFormInputWrapper(_, 
recursion_queue) = object_store.get(key).await?; + recursion_queues.push((circuit_id, recursion_queue)); + } + + // RECURSION_TIP_ARITY is the maximum number of proofs that a single recursion tip can support. + // Given recursion_tip has at most 1 proof per circuit, it implies we can't add more circuit types without bumping arity up. + assert!( + RECURSION_TIP_ARITY >= recursion_queues.len(), + "recursion tip received more circuits ({}) than supported ({})", + recursion_queues.len(), + RECURSION_TIP_ARITY + ); + let mut branch_circuit_type_set = [GoldilocksField::ZERO; RECURSION_TIP_ARITY]; + let mut queue_set: [_; RECURSION_TIP_ARITY] = + std::array::from_fn(|_| QueueState::placeholder_witness()); + + for (index, (circuit_id, recursion_queue)) in recursion_queues.iter().enumerate() { + branch_circuit_type_set[index] = + GoldilocksField::from_u64_unchecked(*circuit_id as u64); + queue_set[index] = take_sponge_like_queue_state_from_simulator(recursion_queue); + } - let node_layer_vk_commitment = compute_node_vk_commitment(node_vk.clone()); + let leaf_vk_commits = get_leaf_vk_params(&keystore).context("get_leaf_vk_params()")?; + assert_eq!( + leaf_vk_commits.len(), + 16, + "expected 16 leaf vk commits, which corresponds to the number of circuits, got {}", + leaf_vk_commits.len() + ); + let leaf_layer_parameters: [RecursionLeafParametersWitness<GoldilocksField>; 16] = + leaf_vk_commits + .iter() + .map(|el| el.1.clone()) + .collect::<Vec<_>>() + .try_into() + .unwrap(); + + let input = RecursionTipInputWitness { + leaf_layer_parameters, + node_layer_vk_commitment, + branch_circuit_type_set, + queue_set, + }; - let mut recursion_queues = vec![]; - for circuit_id in BaseLayerCircuitType::as_iter_u8() { - let key = ClosedFormInputKey { - block_number: l1_batch_number, - circuit_id, + let recursion_tip_witness = RecursionTipInstanceWitness { + input, + vk_witness: node_vk.clone().into_inner(), + proof_witnesses: recursion_tip_proofs.into(), }; - let ClosedFormInputWrapper(_, recursion_queue) = object_store.get(key).await?; - recursion_queues.push((circuit_id, recursion_queue)); - } - // RECURSION_TIP_ARITY is the maximum amount of proof that a single recursion tip can support. - // Given recursion_tip has at most 1 proof per circuit, it implies we can't add more circuit types without bumping arity up. 
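
Rounds whose `prepare_job` previously took several loose arguments now bundle them into a per-round metadata struct (`RecursionTipJobMetadata` here, `SchedulerWitnessJobMetadata` further below), which is what lets every round share the single `prepare_job(metadata, object_store, keystore)` signature. A toy illustration of that fold, with `u32` standing in for `L1BatchNumber` and a plain function standing in for the async trait method:

```rust
// Simplified stand-in for the metadata struct introduced in this diff.
struct RecursionTipJobMetadata {
    l1_batch_number: u32,
    final_node_proof_job_ids: Vec<(u8, u32)>,
}

// Before: prepare_job(l1_batch_number, final_node_proof_job_ids, ...).
// After: one argument struct per round keeps the trait signature uniform.
fn prepare_job(metadata: RecursionTipJobMetadata) -> String {
    format!(
        "recursion tip job for batch {} with {} node proofs",
        metadata.l1_batch_number,
        metadata.final_node_proof_job_ids.len()
    )
}

fn main() {
    let metadata = RecursionTipJobMetadata {
        l1_batch_number: 42,
        final_node_proof_job_ids: vec![(1, 100), (2, 101)],
    };
    println!("{}", prepare_job(metadata));
}
```
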
- assert!( - RECURSION_TIP_ARITY >= recursion_queues.len(), - "recursion tip received more circuits ({}) than supported ({})", - recursion_queues.len(), - RECURSION_TIP_ARITY - ); - let mut branch_circuit_type_set = [GoldilocksField::ZERO; RECURSION_TIP_ARITY]; - let mut queue_set: [_; RECURSION_TIP_ARITY] = - std::array::from_fn(|_| QueueState::placeholder_witness()); - - for (index, (circuit_id, recursion_queue)) in recursion_queues.iter().enumerate() { - branch_circuit_type_set[index] = GoldilocksField::from_u64_unchecked(*circuit_id as u64); - queue_set[index] = take_sponge_like_queue_state_from_simulator(recursion_queue); - } + WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::RecursionTip.into()] + .observe(started_at.elapsed()); - let leaf_vk_commits = get_leaf_vk_params(&keystore).context("get_leaf_vk_params()")?; - assert_eq!( - leaf_vk_commits.len(), - 16, - "expected 16 leaf vk commits, which corresponds to the numebr of circuits, got {}", - leaf_vk_commits.len() - ); - let leaf_layer_parameters: [RecursionLeafParametersWitness; 16] = - leaf_vk_commits - .iter() - .map(|el| el.1.clone()) - .collect::>() - .try_into() - .unwrap(); - - let input = RecursionTipInputWitness { - leaf_layer_parameters, - node_layer_vk_commitment, - branch_circuit_type_set, - queue_set, - }; - - let recursion_tip_witness = RecursionTipInstanceWitness { - input, - vk_witness: node_vk.clone().into_inner(), - proof_witnesses: recursion_tip_proofs.into(), - }; - - WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::RecursionTip.into()] - .observe(started_at.elapsed()); - - Ok(RecursionTipWitnessGeneratorJob { - block_number: l1_batch_number, - recursion_tip_witness, - node_vk, - }) + Ok(RecursionTipWitnessGeneratorJob { + block_number: metadata.l1_batch_number, + recursion_tip_witness, + node_vk, + }) + } } diff --git a/prover/crates/bin/witness_generator/src/scheduler/artifacts.rs b/prover/crates/bin/witness_generator/src/scheduler/artifacts.rs index b20a9764188..77d1da685d0 100644 --- a/prover/crates/bin/witness_generator/src/scheduler/artifacts.rs +++ b/prover/crates/bin/witness_generator/src/scheduler/artifacts.rs @@ -8,7 +8,7 @@ use zksync_prover_fri_types::{keys::FriCircuitKey, CircuitWrapper, FriProofWrapp use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; use crate::{ - artifacts::{ArtifactsManager, BlobUrls}, + artifacts::ArtifactsManager, scheduler::{SchedulerArtifacts, SchedulerWitnessGenerator}, }; @@ -17,6 +17,7 @@ impl ArtifactsManager for SchedulerWitnessGenerator { type InputMetadata = u32; type InputArtifacts = FriProofWrapper; type OutputArtifacts = SchedulerArtifacts; + type BlobUrls = String; async fn get_artifacts( metadata: &Self::InputMetadata, @@ -27,11 +28,11 @@ impl ArtifactsManager for SchedulerWitnessGenerator { Ok(artifacts) } - async fn save_artifacts( + async fn save_to_bucket( job_id: u32, artifacts: Self::OutputArtifacts, object_store: &dyn ObjectStore, - ) -> BlobUrls { + ) -> String { let key = FriCircuitKey { block_number: L1BatchNumber(job_id), circuit_id: 1, @@ -40,29 +41,22 @@ impl ArtifactsManager for SchedulerWitnessGenerator { aggregation_round: AggregationRound::Scheduler, }; - let blob_url = object_store + object_store .put( key, &CircuitWrapper::Recursive(artifacts.scheduler_circuit.clone()), ) .await - .unwrap(); - - BlobUrls::Url(blob_url) + .unwrap() } - async fn update_database( + async fn save_to_database( connection_pool: &ConnectionPool, job_id: u32, started_at: Instant, - blob_urls: BlobUrls, + blob_urls: String, 
_artifacts: Self::OutputArtifacts, ) -> anyhow::Result<()> { - let blob_url = match blob_urls { - BlobUrls::Url(url) => url, - _ => panic!("Unexpected blob urls type"), - }; - let mut prover_connection = connection_pool.connection().await?; let mut transaction = prover_connection.start_transaction().await?; let protocol_version_id = transaction @@ -77,7 +71,7 @@ impl ArtifactsManager for SchedulerWitnessGenerator { 0, 0, AggregationRound::Scheduler, - &blob_url, + &blob_urls, false, protocol_version_id, ) diff --git a/prover/crates/bin/witness_generator/src/scheduler/job_processor.rs b/prover/crates/bin/witness_generator/src/scheduler/job_processor.rs index fe4f2db4090..b5745f98091 100644 --- a/prover/crates/bin/witness_generator/src/scheduler/job_processor.rs +++ b/prover/crates/bin/witness_generator/src/scheduler/job_processor.rs @@ -11,8 +11,10 @@ use crate::{ artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, scheduler::{ - prepare_job, SchedulerArtifacts, SchedulerWitnessGenerator, SchedulerWitnessGeneratorJob, + SchedulerArtifacts, SchedulerWitnessGenerator, SchedulerWitnessGeneratorJob, + SchedulerWitnessJobMetadata, }, + witness_generator::WitnessGenerator, }; #[async_trait] @@ -44,9 +46,11 @@ impl JobProcessor for SchedulerWitnessGenerator { Ok(Some(( l1_batch_number, - prepare_job( - l1_batch_number, - recursion_tip_job_id, + ::prepare_job( + SchedulerWitnessJobMetadata { + l1_batch_number, + recursion_tip_job_id, + }, &*self.object_store, self.keystore.clone(), ) @@ -72,10 +76,9 @@ impl JobProcessor for SchedulerWitnessGenerator { job: SchedulerWitnessGeneratorJob, started_at: Instant, ) -> tokio::task::JoinHandle> { - tokio::task::spawn_blocking(move || { - let block_number = job.block_number; - let _span = tracing::info_span!("scheduler", %block_number).entered(); - Ok(Self::process_job_sync(job, started_at)) + let object_store = self.object_store.clone(); + tokio::spawn(async move { + ::process_job(job, object_store, None, started_at).await }) } @@ -92,12 +95,12 @@ impl JobProcessor for SchedulerWitnessGenerator { let blob_save_started_at = Instant::now(); let blob_urls = - Self::save_artifacts(job_id.0, artifacts.clone(), &*self.object_store).await; + Self::save_to_bucket(job_id.0, artifacts.clone(), &*self.object_store).await; WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::Scheduler.into()] .observe(blob_save_started_at.elapsed()); - Self::update_database( + Self::save_to_database( &self.prover_connection_pool, job_id.0, started_at, diff --git a/prover/crates/bin/witness_generator/src/scheduler/mod.rs b/prover/crates/bin/witness_generator/src/scheduler/mod.rs index 10230b35c4f..7af3d68d5a7 100644 --- a/prover/crates/bin/witness_generator/src/scheduler/mod.rs +++ b/prover/crates/bin/witness_generator/src/scheduler/mod.rs @@ -1,6 +1,7 @@ use std::{convert::TryInto, sync::Arc, time::Instant}; use anyhow::Context as _; +use async_trait::async_trait; use zkevm_test_harness::zkevm_circuits::recursion::{ leaf_layer::input::RecursionLeafParametersWitness, NUM_BASE_LAYER_CIRCUITS, }; @@ -29,7 +30,7 @@ use zksync_types::{ use crate::{ artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, - utils::SchedulerPartialInputWrapper, + utils::SchedulerPartialInputWrapper, witness_generator::WitnessGenerator, }; mod artifacts; @@ -54,6 +55,11 @@ pub struct SchedulerWitnessGeneratorJob { [RecursionLeafParametersWitness; NUM_BASE_LAYER_CIRCUITS], } +pub struct SchedulerWitnessJobMetadata { + pub l1_batch_number: L1BatchNumber, + pub recursion_tip_job_id: 
u32, +} + #[derive(Debug)] pub struct SchedulerWitnessGenerator { config: FriWitnessGeneratorConfig, @@ -79,15 +85,24 @@ impl SchedulerWitnessGenerator { keystore, } } +} + +#[async_trait] +impl WitnessGenerator for SchedulerWitnessGenerator { + type Job = SchedulerWitnessGeneratorJob; + type Metadata = SchedulerWitnessJobMetadata; + type Artifacts = SchedulerArtifacts; #[tracing::instrument( skip_all, fields(l1_batch = %job.block_number) )] - pub fn process_job_sync( + async fn process_job( job: SchedulerWitnessGeneratorJob, + _object_store: Arc, + _max_circuits_in_flight: Option, started_at: Instant, - ) -> SchedulerArtifacts { + ) -> anyhow::Result { tracing::info!( "Starting fri witness generation of type {:?} for block {}", AggregationRound::Scheduler, @@ -118,66 +133,67 @@ impl SchedulerWitnessGenerator { started_at.elapsed() ); - SchedulerArtifacts { + Ok(SchedulerArtifacts { scheduler_circuit: ZkSyncRecursiveLayerCircuit::SchedulerCircuit(scheduler_circuit), - } + }) } -} -#[tracing::instrument( - skip_all, - fields(l1_batch = %l1_batch_number) -)] -pub async fn prepare_job( - l1_batch_number: L1BatchNumber, - recursion_tip_job_id: u32, - object_store: &dyn ObjectStore, - keystore: Keystore, -) -> anyhow::Result { - let started_at = Instant::now(); - let wrapper = - SchedulerWitnessGenerator::get_artifacts(&recursion_tip_job_id, object_store).await?; - let recursion_tip_proof = match wrapper { - FriProofWrapper::Base(_) => Err(anyhow::anyhow!( - "Expected only recursive proofs for scheduler l1 batch {l1_batch_number}, got Base" - )), - FriProofWrapper::Recursive(recursive_proof) => Ok(recursive_proof.into_inner()), - }?; - WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::Scheduler.into()] - .observe(started_at.elapsed()); - - let started_at = Instant::now(); - let node_vk = keystore - .load_recursive_layer_verification_key( - ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, - ) - .context("get_recursive_layer_vk_for_circuit_type()")?; - let SchedulerPartialInputWrapper(mut scheduler_witness) = - object_store.get(l1_batch_number).await?; - - let recursion_tip_vk = keystore - .load_recursive_layer_verification_key( - ZkSyncRecursionLayerStorageType::RecursionTipCircuit as u8, - ) - .context("get_recursion_tip_vk()")?; - scheduler_witness.proof_witnesses = vec![recursion_tip_proof].into(); - - let leaf_vk_commits = get_leaf_vk_params(&keystore).context("get_leaf_vk_params()")?; - let leaf_layer_parameters = leaf_vk_commits - .iter() - .map(|el| el.1.clone()) - .collect::>() - .try_into() - .unwrap(); - - WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::Scheduler.into()] - .observe(started_at.elapsed()); - - Ok(SchedulerWitnessGeneratorJob { - block_number: l1_batch_number, - scheduler_witness, - node_vk, - leaf_layer_parameters, - recursion_tip_vk, - }) + #[tracing::instrument( + skip_all, + fields(l1_batch = %metadata.l1_batch_number) + )] + async fn prepare_job( + metadata: SchedulerWitnessJobMetadata, + object_store: &dyn ObjectStore, + keystore: Keystore, + ) -> anyhow::Result { + let started_at = Instant::now(); + let wrapper = + SchedulerWitnessGenerator::get_artifacts(&metadata.recursion_tip_job_id, object_store) + .await?; + let recursion_tip_proof = match wrapper { + FriProofWrapper::Base(_) => Err(anyhow::anyhow!( + "Expected only recursive proofs for scheduler l1 batch {}, got Base", + metadata.l1_batch_number + )), + FriProofWrapper::Recursive(recursive_proof) => Ok(recursive_proof.into_inner()), + }?; + 
WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::Scheduler.into()] + .observe(started_at.elapsed()); + + let started_at = Instant::now(); + let node_vk = keystore + .load_recursive_layer_verification_key( + ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, + ) + .context("get_recursive_layer_vk_for_circuit_type()")?; + let SchedulerPartialInputWrapper(mut scheduler_witness) = + object_store.get(metadata.l1_batch_number).await?; + + let recursion_tip_vk = keystore + .load_recursive_layer_verification_key( + ZkSyncRecursionLayerStorageType::RecursionTipCircuit as u8, + ) + .context("get_recursion_tip_vk()")?; + scheduler_witness.proof_witnesses = vec![recursion_tip_proof].into(); + + let leaf_vk_commits = get_leaf_vk_params(&keystore).context("get_leaf_vk_params()")?; + let leaf_layer_parameters = leaf_vk_commits + .iter() + .map(|el| el.1.clone()) + .collect::<Vec<_>>() + .try_into() + .unwrap(); + + WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::Scheduler.into()] + .observe(started_at.elapsed()); + + Ok(SchedulerWitnessGeneratorJob { + block_number: metadata.l1_batch_number, + scheduler_witness, + node_vk, + leaf_layer_parameters, + recursion_tip_vk, + }) + } } diff --git a/prover/crates/bin/witness_generator/src/utils.rs b/prover/crates/bin/witness_generator/src/utils.rs index 3ea2b539773..8524bdae9ff 100644 --- a/prover/crates/bin/witness_generator/src/utils.rs +++ b/prover/crates/bin/witness_generator/src/utils.rs @@ -204,28 +204,6 @@ pub async fn save_recursive_layer_prover_input_artifacts( ids_and_urls } -#[tracing::instrument( - skip_all, - fields(l1_batch = %block_number, circuit_id = %circuit_id) -)] -pub async fn save_node_aggregations_artifacts( - block_number: L1BatchNumber, - circuit_id: u8, - depth: u16, - aggregations: Vec<(u64, RecursionQueueSimulator<GoldilocksField>)>, - object_store: &dyn ObjectStore, -) -> String { - let key = AggregationsKey { - block_number, - circuit_id, - depth, - }; - object_store - .put(key, &AggregationWrapper(aggregations)) - .await - .unwrap() -} - #[tracing::instrument(skip_all)] pub async fn load_proofs_for_job_ids( job_ids: &[u32], diff --git a/prover/crates/bin/witness_generator/src/witness_generator.rs b/prover/crates/bin/witness_generator/src/witness_generator.rs new file mode 100644 index 00000000000..eb9200d7950 --- /dev/null +++ b/prover/crates/bin/witness_generator/src/witness_generator.rs @@ -0,0 +1,25 @@ +use std::{sync::Arc, time::Instant}; + +use async_trait::async_trait; +use zksync_object_store::ObjectStore; +use zksync_prover_keystore::keystore::Keystore; + +#[async_trait] +pub trait WitnessGenerator { + type Job: Send + 'static; + type Metadata; + type Artifacts; + + async fn process_job( + job: Self::Job, + object_store: Arc<dyn ObjectStore>, + max_circuits_in_flight: Option<usize>, + started_at: Instant, + ) -> anyhow::Result<Self::Artifacts>; + + async fn prepare_job( + metadata: Self::Metadata, + object_store: &dyn ObjectStore, + keystore: Keystore, + ) -> anyhow::Result<Self::Job>; +} diff --git a/prover/crates/bin/witness_generator/tests/basic_test.rs b/prover/crates/bin/witness_generator/tests/basic_test.rs index 3323e3c681e..379ddc3a4eb 100644 --- a/prover/crates/bin/witness_generator/tests/basic_test.rs +++ b/prover/crates/bin/witness_generator/tests/basic_test.rs @@ -15,9 +15,9 @@ use zksync_types::{ L1BatchNumber, }; use zksync_witness_generator::{ - 
leaf_aggregation::LeafAggregationWitnessGenerator, + node_aggregation::NodeAggregationWitnessGenerator, utils::AggregationWrapper, + witness_generator::WitnessGenerator, }; fn compare_serialized(expected: &T, actual: &T) { @@ -52,17 +52,22 @@ async fn test_leaf_witness_gen() { .unwrap(); let keystore = Keystore::locate(); - let job = prepare_leaf_aggregation_job(leaf_aggregation_job_metadata, &*object_store, keystore) - .await - .unwrap(); + let job = LeafAggregationWitnessGenerator::prepare_job( + leaf_aggregation_job_metadata, + &*object_store, + keystore, + ) + .await + .unwrap(); - let artifacts = LeafAggregationWitnessGenerator::process_job_impl( + let artifacts = LeafAggregationWitnessGenerator::process_job( job, - Instant::now(), object_store.clone(), - 500, + Some(500), + Instant::now(), ) - .await; + .await + .unwrap(); let aggregations = AggregationWrapper(artifacts.aggregations); @@ -142,18 +147,23 @@ async fn test_node_witness_gen() { }; let keystore = Keystore::locate(); - let job = - node_aggregation::prepare_job(node_aggregation_job_metadata, &*object_store, keystore) - .await - .unwrap(); + let job = NodeAggregationWitnessGenerator::prepare_job( + node_aggregation_job_metadata, + &*object_store, + keystore, + ) + .await + .unwrap(); - let artifacts = NodeAggregationWitnessGenerator::process_job_impl( + let artifacts = NodeAggregationWitnessGenerator::process_job( job, - Instant::now(), object_store.clone(), - 500, + Some(500), + Instant::now(), ) - .await; + .await + .unwrap(); + let aggregations = AggregationWrapper(artifacts.next_aggregations); let expected_results_object_store_config = ObjectStoreConfig { diff --git a/prover/crates/bin/witness_vector_generator/src/generator.rs b/prover/crates/bin/witness_vector_generator/src/generator.rs index 6695905c07e..646dd8ffda7 100644 --- a/prover/crates/bin/witness_vector_generator/src/generator.rs +++ b/prover/crates/bin/witness_vector_generator/src/generator.rs @@ -70,7 +70,7 @@ impl WitnessVectorGenerator { keystore: &Keystore, ) -> anyhow::Result { let finalization_hints = keystore - .load_finalization_hints(job.setup_data_key.clone()) + .load_finalization_hints(job.setup_data_key) .context("get_finalization_hints()")?; let cs = match job.circuit_wrapper.clone() { CircuitWrapper::Base(base_circuit) => { diff --git a/prover/crates/lib/keystore/Cargo.toml b/prover/crates/lib/keystore/Cargo.toml index 617030754f8..4d9addc26bc 100644 --- a/prover/crates/lib/keystore/Cargo.toml +++ b/prover/crates/lib/keystore/Cargo.toml @@ -27,6 +27,8 @@ once_cell.workspace = true md5.workspace = true sha3.workspace = true hex.workspace = true +tokio.workspace = true +futures = { workspace = true, features = ["compat"] } [features] default = [] diff --git a/prover/crates/lib/keystore/src/keystore.rs b/prover/crates/lib/keystore/src/keystore.rs index 28ce989287c..6225943e3cd 100644 --- a/prover/crates/lib/keystore/src/keystore.rs +++ b/prover/crates/lib/keystore/src/keystore.rs @@ -1,7 +1,9 @@ use std::{ + collections::HashMap, fs::{self, File}, io::Read, path::{Path, PathBuf}, + sync::Arc, }; use anyhow::Context as _; @@ -14,7 +16,7 @@ use circuit_definitions::{ }, zkevm_circuits::scheduler::aux::BaseLayerCircuitType, }; -use serde::{Deserialize, Serialize}; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; use zkevm_test_harness::data_source::{in_memory_data_source::InMemoryDataSource, SetupDataSource}; use zksync_basic_types::basic_fri_types::AggregationRound; use zksync_prover_fri_types::ProverServiceDataKey; @@ -24,6 +26,7 @@ use 
zksync_utils::env::Workspace; use crate::GoldilocksGpuProverSetupData; use crate::{GoldilocksProverSetupData, VkCommitments}; +#[derive(Debug, Clone, Copy)] pub enum ProverServiceDataType { VerificationKey, SetupData, @@ -209,7 +212,7 @@ impl Keystore { key: ProverServiceDataKey, hint: &FinalizationHintsForProver, ) -> anyhow::Result<()> { - let filepath = self.get_file_path(key.clone(), ProverServiceDataType::FinalizationHints); + let filepath = self.get_file_path(key, ProverServiceDataType::FinalizationHints); tracing::info!("saving finalization hints for {:?} to: {:?}", key, filepath); let serialized = @@ -267,7 +270,7 @@ impl Keystore { &self, key: ProverServiceDataKey, ) -> anyhow::Result { - let filepath = self.get_file_path(key.clone(), ProverServiceDataType::SetupData); + let filepath = self.get_file_path(key, ProverServiceDataType::SetupData); let mut file = File::open(filepath.clone()) .with_context(|| format!("Failed reading setup-data from path: {filepath:?}"))?; @@ -286,7 +289,7 @@ impl Keystore { &self, key: ProverServiceDataKey, ) -> anyhow::Result { - let filepath = self.get_file_path(key.clone(), ProverServiceDataType::SetupData); + let filepath = self.get_file_path(key, ProverServiceDataType::SetupData); let mut file = File::open(filepath.clone()) .with_context(|| format!("Failed reading setup-data from path: {filepath:?}"))?; @@ -301,7 +304,7 @@ impl Keystore { } pub fn is_setup_data_present(&self, key: &ProverServiceDataKey) -> bool { - Path::new(&self.get_file_path(key.clone(), ProverServiceDataType::SetupData)).exists() + Path::new(&self.get_file_path(*key, ProverServiceDataType::SetupData)).exists() } pub fn save_setup_data_for_circuit_type( &self, key: ProverServiceDataKey, serialized_setup_data: &Vec<u8>, ) -> anyhow::Result<()> { - let filepath = self.get_file_path(key.clone(), ProverServiceDataType::SetupData); + let filepath = self.get_file_path(key, ProverServiceDataType::SetupData); tracing::info!("saving {:?} setup data to: {:?}", key, filepath); std::fs::write(filepath.clone(), serialized_setup_data) .with_context(|| format!("Failed saving setup-data at path: {filepath:?}")) @@ -465,4 +468,49 @@ impl Keystore { pub fn save_commitments(&self, commitments: &VkCommitments) -> anyhow::Result<()> { Self::save_json_pretty(self.get_base_path().join("commitments.json"), &commitments) } + + /// Asynchronously loads the mapping of all circuits to setup keys, if successful + pub async fn load_all_setup_key_mapping( + &self, + ) -> anyhow::Result<HashMap<ProverServiceDataKey, Arc<GoldilocksGpuProverSetupData>>> { + self.load_key_mapping(ProverServiceDataType::SetupData) + .await + } + + /// Asynchronously loads the mapping of all circuits to finalization hints, if successful + pub async fn load_all_finalization_hints_mapping( + &self, + ) -> anyhow::Result<HashMap<ProverServiceDataKey, Arc<FinalizationHintsForProver>>> { + self.load_key_mapping(ProverServiceDataType::FinalizationHints) + .await + } + + /// Async function that loads a mapping from disk. + /// Whilst IO is not parallelizable, ser/de is. + async fn load_key_mapping<T: DeserializeOwned + Send + 'static>( + &self, + data_type: ProverServiceDataType, + ) -> anyhow::Result<HashMap<ProverServiceDataKey, Arc<T>>> { + let mut mapping: HashMap<ProverServiceDataKey, Arc<T>> = HashMap::new(); + + // Load each file in parallel. Note that FS access is not necessarily parallel, but + // deserialization is. For larger files, it makes a big difference. + // Note: `collect` is important, because iterators are lazy, and otherwise we won't actually + // spawn threads. 
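
The comment above is the crux of `load_key_mapping`: `spawn_blocking` tasks only start once the lazy iterator is driven, so the eager `collect()` kicks off all deserialization on the blocking pool before any task is awaited. Below is a standalone sketch of that spawn-then-join shape, assuming the `tokio` (with `rt` and `macros` features) and `futures` dependencies that the Cargo.toml hunk above adds; the payload is a toy stand-in for the per-circuit bincode files:

```rust
use std::collections::HashMap;

// Stand-in for `Self::load_bincode_from_file`: pretend deserialization.
fn expensive_deserialize(key: u32) -> anyhow::Result<(u32, Vec<u8>)> {
    Ok((key, vec![0u8; 16]))
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Eagerly spawn one blocking task per key; without `collect()` the map
    // adapter would stay lazy and nothing would run until awaited one by one.
    let handles: Vec<_> = (0u32..4)
        .map(|key| tokio::task::spawn_blocking(move || expensive_deserialize(key)))
        .collect();

    // Each handle yields Result<anyhow::Result<..>, JoinError>, hence `??`.
    let mut mapping = HashMap::new();
    for handle in futures::future::join_all(handles).await {
        let (key, data) = handle??;
        mapping.insert(key, data);
    }
    println!("loaded {} entries", mapping.len());
    Ok(())
}
```
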
+ let handles: Vec<_> = ProverServiceDataKey::all() + .into_iter() + .map(|key| { + let filepath = self.get_file_path(key, data_type); + tokio::task::spawn_blocking(move || { + let data = Self::load_bincode_from_file(filepath)?; + anyhow::Ok((key, Arc::new(data))) + }) + }) + .collect(); + for handle in futures::future::join_all(handles).await { + let (key, setup_data) = handle.context("future loading key panicked")??; + mapping.insert(key, setup_data); + } + Ok(mapping) + } } diff --git a/prover/crates/lib/keystore/src/setup_data_generator.rs b/prover/crates/lib/keystore/src/setup_data_generator.rs index e69184ee936..c4790d67fea 100644 --- a/prover/crates/lib/keystore/src/setup_data_generator.rs +++ b/prover/crates/lib/keystore/src/setup_data_generator.rs @@ -33,7 +33,7 @@ pub fn generate_setup_data_common( let (finalization, vk) = if circuit.is_base_layer() { ( - Some(keystore.load_finalization_hints(circuit.clone())?), + Some(keystore.load_finalization_hints(circuit)?), data_source .get_base_layer_vk(circuit.circuit_id) .unwrap() @@ -41,7 +41,7 @@ pub fn generate_setup_data_common( ) } else { ( - Some(keystore.load_finalization_hints(circuit.clone())?), + Some(keystore.load_finalization_hints(circuit)?), data_source .get_recursion_layer_vk(circuit.circuit_id) .unwrap() @@ -86,7 +86,7 @@ pub trait SetupDataGenerator { ); return Ok("Skipped".to_string()); } - let serialized = self.generate_setup_data(circuit.clone())?; + let serialized = self.generate_setup_data(circuit)?; let digest = md5::compute(&serialized); if !dry_run { @@ -109,7 +109,7 @@ pub trait SetupDataGenerator { .iter() .map(|circuit| { let digest = self - .generate_and_write_setup_data(circuit.clone(), dry_run, recompute_if_missing) + .generate_and_write_setup_data(*circuit, dry_run, recompute_if_missing) .context(circuit.name()) .unwrap(); (circuit.name(), digest) diff --git a/prover/crates/lib/prover_dal/.sqlx/query-1297f0977132185d6bd4501f490f1cdac8b194f09926c133985479c533a651f2.json b/prover/crates/lib/prover_dal/.sqlx/query-1297f0977132185d6bd4501f490f1cdac8b194f09926c133985479c533a651f2.json new file mode 100644 index 00000000000..c99572bcc8e --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-1297f0977132185d6bd4501f490f1cdac8b194f09926c133985479c533a651f2.json @@ -0,0 +1,18 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE prover_jobs_fri\n SET\n status = $1\n WHERE\n l1_batch_number = $2\n AND sequence_number = $3\n AND aggregation_round = $4\n AND circuit_id = $5\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Int8", + "Int4", + "Int2", + "Int2" + ] + }, + "nullable": [] + }, + "hash": "1297f0977132185d6bd4501f490f1cdac8b194f09926c133985479c533a651f2" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-29f7a564a8373f7e44840e8e9e7d0cd5c6b1122c35d7ffdbbba30327ca3fb5a8.json b/prover/crates/lib/prover_dal/.sqlx/query-29f7a564a8373f7e44840e8e9e7d0cd5c6b1122c35d7ffdbbba30327ca3fb5a8.json new file mode 100644 index 00000000000..05163dcfa2e --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-29f7a564a8373f7e44840e8e9e7d0cd5c6b1122c35d7ffdbbba30327ca3fb5a8.json @@ -0,0 +1,38 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n protocol_version AS \"protocol_version!\",\n protocol_version_patch AS \"protocol_version_patch!\",\n COUNT(*) FILTER (\n WHERE\n status = 'queued'\n ) AS queued,\n COUNT(*) FILTER (\n WHERE\n status = 'in_progress'\n ) AS in_progress\n FROM\n prover_jobs_fri\n WHERE\n status IN ('queued', 'in_progress')\n AND protocol_version IS NOT 
NULL\n GROUP BY\n protocol_version,\n protocol_version_patch\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "protocol_version!", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "protocol_version_patch!", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "queued", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "in_progress", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + true, + false, + null, + null + ] + }, + "hash": "29f7a564a8373f7e44840e8e9e7d0cd5c6b1122c35d7ffdbbba30327ca3fb5a8" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-2d1461e068e43fd16714610b383cf8c93630d529ec96e67aac078f18196f61a5.json b/prover/crates/lib/prover_dal/.sqlx/query-2d1461e068e43fd16714610b383cf8c93630d529ec96e67aac078f18196f61a5.json new file mode 100644 index 00000000000..50d121213fb --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-2d1461e068e43fd16714610b383cf8c93630d529ec96e67aac078f18196f61a5.json @@ -0,0 +1,19 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE prover_jobs_fri\n SET\n status = $1,\n attempts = $2\n WHERE\n l1_batch_number = $3\n AND sequence_number = $4\n AND aggregation_round = $5\n AND circuit_id = $6\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Int2", + "Int8", + "Int4", + "Int2", + "Int2" + ] + }, + "nullable": [] + }, + "hash": "2d1461e068e43fd16714610b383cf8c93630d529ec96e67aac078f18196f61a5" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-35a76415cb746d03da31481edc65adefab0bf3abf6853a6d36123c8adcaf813b.json b/prover/crates/lib/prover_dal/.sqlx/query-35a76415cb746d03da31481edc65adefab0bf3abf6853a6d36123c8adcaf813b.json new file mode 100644 index 00000000000..bf8db798e7d --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-35a76415cb746d03da31481edc65adefab0bf3abf6853a6d36123c8adcaf813b.json @@ -0,0 +1,47 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE scheduler_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (\n status = 'in_progress'\n OR status = 'failed'\n )\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "status", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "attempts", + "type_info": "Int2" + }, + { + "ordinal": 3, + "name": "error", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "picked_by", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int2" + ] + }, + "nullable": [ + false, + false, + false, + true, + true + ] + }, + "hash": "35a76415cb746d03da31481edc65adefab0bf3abf6853a6d36123c8adcaf813b" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-3727d5614d2fe2a4d96f880eb72cd48c95ca5b4520dde415a2b5ff32ece47c86.json b/prover/crates/lib/prover_dal/.sqlx/query-3727d5614d2fe2a4d96f880eb72cd48c95ca5b4520dde415a2b5ff32ece47c86.json new file mode 100644 index 00000000000..d7eb6a32b42 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-3727d5614d2fe2a4d96f880eb72cd48c95ca5b4520dde415a2b5ff32ece47c86.json @@ -0,0 +1,47 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE witness_inputs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (\n status = 'in_progress'\n OR status = 'failed'\n )\n RETURNING\n 
l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "status", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "attempts", + "type_info": "Int2" + }, + { + "ordinal": 3, + "name": "error", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "picked_by", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int2" + ] + }, + "nullable": [ + false, + false, + false, + true, + true + ] + }, + "hash": "3727d5614d2fe2a4d96f880eb72cd48c95ca5b4520dde415a2b5ff32ece47c86" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-37ad15f54f4a6f4f79c71a857f3a8d4cc59246dda91b19526e73f27a17c8e3da.json b/prover/crates/lib/prover_dal/.sqlx/query-37ad15f54f4a6f4f79c71a857f3a8d4cc59246dda91b19526e73f27a17c8e3da.json new file mode 100644 index 00000000000..c97fe7f4042 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-37ad15f54f4a6f4f79c71a857f3a8d4cc59246dda91b19526e73f27a17c8e3da.json @@ -0,0 +1,47 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE recursion_tip_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (\n status = 'in_progress'\n OR status = 'failed'\n )\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "status", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "attempts", + "type_info": "Int2" + }, + { + "ordinal": 3, + "name": "error", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "picked_by", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int2" + ] + }, + "nullable": [ + false, + false, + false, + true, + true + ] + }, + "hash": "37ad15f54f4a6f4f79c71a857f3a8d4cc59246dda91b19526e73f27a17c8e3da" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-73266a8526c6adc315900e2e95441976a264759c4060c1a38e466ee2052fc17d.json b/prover/crates/lib/prover_dal/.sqlx/query-73266a8526c6adc315900e2e95441976a264759c4060c1a38e466ee2052fc17d.json new file mode 100644 index 00000000000..f8b141a8dac --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-73266a8526c6adc315900e2e95441976a264759c4060c1a38e466ee2052fc17d.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n recursion_tip_witness_jobs_fri (l1_batch_number, status, number_of_final_node_jobs, created_at, updated_at)\n VALUES\n ($1, 'waiting_for_proofs', 1, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n status = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Text" + ] + }, + "nullable": [] + }, + "hash": "73266a8526c6adc315900e2e95441976a264759c4060c1a38e466ee2052fc17d" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-7d20c0bf35625185c1f6c675aa8fcddbb47c5e9965443f118f8edd7d562734a2.json b/prover/crates/lib/prover_dal/.sqlx/query-7d20c0bf35625185c1f6c675aa8fcddbb47c5e9965443f118f8edd7d562734a2.json new file mode 100644 index 00000000000..140b8f12675 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-7d20c0bf35625185c1f6c675aa8fcddbb47c5e9965443f118f8edd7d562734a2.json @@ -0,0 +1,60 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE prover_jobs_fri\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n updated_at = NOW(),\n 
processing_started_at = NOW(),\n picked_by = $3\n WHERE\n id = (\n SELECT\n id\n FROM\n prover_jobs_fri\n WHERE\n status = 'queued'\n AND protocol_version = $1\n AND protocol_version_patch = $2\n ORDER BY\n l1_batch_number ASC,\n aggregation_round ASC,\n circuit_id ASC,\n id ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n prover_jobs_fri.id,\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.aggregation_round,\n prover_jobs_fri.sequence_number,\n prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "circuit_id", + "type_info": "Int2" + }, + { + "ordinal": 3, + "name": "aggregation_round", + "type_info": "Int2" + }, + { + "ordinal": 4, + "name": "sequence_number", + "type_info": "Int4" + }, + { + "ordinal": 5, + "name": "depth", + "type_info": "Int4" + }, + { + "ordinal": 6, + "name": "is_node_final_proof", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Int4", + "Int4", + "Text" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false + ] + }, + "hash": "7d20c0bf35625185c1f6c675aa8fcddbb47c5e9965443f118f8edd7d562734a2" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-9730c8225ff2cf3111185e81f602a4a98ec63eb942c73ce4448d0957346047cd.json b/prover/crates/lib/prover_dal/.sqlx/query-9730c8225ff2cf3111185e81f602a4a98ec63eb942c73ce4448d0957346047cd.json new file mode 100644 index 00000000000..d23ed8d9fc8 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-9730c8225ff2cf3111185e81f602a4a98ec63eb942c73ce4448d0957346047cd.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET\n status = $1,\n attempts = $2\n WHERE\n l1_batch_number = $3\n AND circuit_id = $4\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Int2", + "Int8", + "Int2" + ] + }, + "nullable": [] + }, + "hash": "9730c8225ff2cf3111185e81f602a4a98ec63eb942c73ce4448d0957346047cd" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-a817f0fec85388b3e2510ce259208a01b63ae4aa03c983c3a52c802d585e5a80.json b/prover/crates/lib/prover_dal/.sqlx/query-a817f0fec85388b3e2510ce259208a01b63ae4aa03c983c3a52c802d585e5a80.json new file mode 100644 index 00000000000..93532150f7f --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-a817f0fec85388b3e2510ce259208a01b63ae4aa03c983c3a52c802d585e5a80.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n proof_compression_jobs_fri (l1_batch_number, status, created_at, updated_at)\n VALUES\n ($1, $2, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n status = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Text" + ] + }, + "nullable": [] + }, + "hash": "a817f0fec85388b3e2510ce259208a01b63ae4aa03c983c3a52c802d585e5a80" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-c8daa62b3835c15fafb3f83deabb5a4672ad50a9de92c84d939ac4c69842e355.json b/prover/crates/lib/prover_dal/.sqlx/query-c8daa62b3835c15fafb3f83deabb5a4672ad50a9de92c84d939ac4c69842e355.json new file mode 100644 index 00000000000..cadc931fa1c --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-c8daa62b3835c15fafb3f83deabb5a4672ad50a9de92c84d939ac4c69842e355.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n 
node_aggregation_witness_jobs_fri (l1_batch_number, circuit_id, status, created_at, updated_at)\n VALUES\n ($1, $2, 'waiting_for_proofs', NOW(), NOW())\n ON CONFLICT (l1_batch_number, circuit_id, depth) DO\n UPDATE\n SET\n status = $3\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int2", + "Text" + ] + }, + "nullable": [] + }, + "hash": "c8daa62b3835c15fafb3f83deabb5a4672ad50a9de92c84d939ac4c69842e355" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-e875dcbbdaed6998dbea45d4eab5d005d8760c4809b7aef902155196873da66e.json b/prover/crates/lib/prover_dal/.sqlx/query-e875dcbbdaed6998dbea45d4eab5d005d8760c4809b7aef902155196873da66e.json new file mode 100644 index 00000000000..4ee9278fe42 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-e875dcbbdaed6998dbea45d4eab5d005d8760c4809b7aef902155196873da66e.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n scheduler_witness_jobs_fri (\n l1_batch_number,\n scheduler_partial_input_blob_url,\n status,\n created_at,\n updated_at\n )\n VALUES\n ($1, '', 'waiting_for_proofs', NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n status = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Text" + ] + }, + "nullable": [] + }, + "hash": "e875dcbbdaed6998dbea45d4eab5d005d8760c4809b7aef902155196873da66e" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-eec29cbff034818f4fb5ec1e6ad38e1010d7389457b3c97e9b238a3a0291a54e.json b/prover/crates/lib/prover_dal/.sqlx/query-eec29cbff034818f4fb5ec1e6ad38e1010d7389457b3c97e9b238a3a0291a54e.json new file mode 100644 index 00000000000..f8e92b1ad66 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-eec29cbff034818f4fb5ec1e6ad38e1010d7389457b3c97e9b238a3a0291a54e.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n leaf_aggregation_witness_jobs_fri (\n l1_batch_number,\n circuit_id,\n status,\n number_of_basic_circuits,\n created_at,\n updated_at\n )\n VALUES\n ($1, $2, 'waiting_for_proofs', 2, NOW(), NOW())\n ON CONFLICT (l1_batch_number, circuit_id) DO\n UPDATE\n SET\n status = $3\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int2", + "Text" + ] + }, + "nullable": [] + }, + "hash": "eec29cbff034818f4fb5ec1e6ad38e1010d7389457b3c97e9b238a3a0291a54e" +} diff --git a/prover/crates/lib/prover_dal/src/cli_test_dal.rs b/prover/crates/lib/prover_dal/src/cli_test_dal.rs index 069fa9c6a41..d0841820337 100644 --- a/prover/crates/lib/prover_dal/src/cli_test_dal.rs +++ b/prover/crates/lib/prover_dal/src/cli_test_dal.rs @@ -20,14 +20,23 @@ impl CliTestDal<'_, '_> { batch_number: L1BatchNumber, sequence_number: usize, ) { - sqlx::query(&format!( - "UPDATE prover_jobs_fri SET status = '{}' - WHERE l1_batch_number = {} - AND sequence_number = {} - AND aggregation_round = {} - AND circuit_id = {}", - status, batch_number.0, sequence_number, aggregation_round, circuit_id, - )) + sqlx::query!( + r#" + UPDATE prover_jobs_fri + SET + status = $1 + WHERE + l1_batch_number = $2 + AND sequence_number = $3 + AND aggregation_round = $4 + AND circuit_id = $5 + "#, + status.to_string(), + batch_number.0 as i64, + sequence_number as i64, + aggregation_round as i16, + circuit_id as i64, + ) .execute(self.storage.conn()) .await .unwrap(); @@ -39,8 +48,8 @@ impl CliTestDal<'_, '_> { batch_number: L1BatchNumber, circuit_id: u8, ) { - sqlx::query(&format!( - " + sqlx::query!( + r#" INSERT INTO leaf_aggregation_witness_jobs_fri ( l1_batch_number, @@ -51,13 
+60,16 @@ impl CliTestDal<'_, '_> { updated_at ) VALUES - ({}, {}, 'waiting_for_proofs', 2, NOW(), NOW()) + ($1, $2, 'waiting_for_proofs', 2, NOW(), NOW()) ON CONFLICT (l1_batch_number, circuit_id) DO UPDATE - SET status = '{}' - ", - batch_number.0, circuit_id, status - )) + SET + status = $3 + "#, + batch_number.0 as i64, + circuit_id as i16, + status.to_string() + ) .execute(self.storage.conn()) .await .unwrap(); @@ -69,48 +81,41 @@ impl CliTestDal<'_, '_> { batch_number: L1BatchNumber, circuit_id: u8, ) { - sqlx::query(&format!( - " + sqlx::query!( + r#" INSERT INTO - node_aggregation_witness_jobs_fri ( - l1_batch_number, - circuit_id, - status, - created_at, - updated_at - ) + node_aggregation_witness_jobs_fri (l1_batch_number, circuit_id, status, created_at, updated_at) VALUES - ({}, {}, 'waiting_for_proofs', NOW(), NOW()) + ($1, $2, 'waiting_for_proofs', NOW(), NOW()) ON CONFLICT (l1_batch_number, circuit_id, depth) DO UPDATE - SET status = '{}' - ", - batch_number.0, circuit_id, status, - )) + SET + status = $3 + "#, + batch_number.0 as i64, + circuit_id as i16, + status.to_string(), + ) .execute(self.storage.conn()) .await .unwrap(); } pub async fn insert_rt_job(&mut self, status: WitnessJobStatus, batch_number: L1BatchNumber) { - sqlx::query(&format!( - " + sqlx::query!( + r#" INSERT INTO - recursion_tip_witness_jobs_fri ( - l1_batch_number, - status, - number_of_final_node_jobs, - created_at, - updated_at - ) + recursion_tip_witness_jobs_fri (l1_batch_number, status, number_of_final_node_jobs, created_at, updated_at) VALUES - ({}, 'waiting_for_proofs',1, NOW(), NOW()) + ($1, 'waiting_for_proofs', 1, NOW(), NOW()) ON CONFLICT (l1_batch_number) DO UPDATE - SET status = '{}' - ", - batch_number.0, status, - )) + SET + status = $2 + "#, + batch_number.0 as i64, + status.to_string(), + ) .execute(self.storage.conn()) .await .unwrap(); @@ -121,8 +126,8 @@ impl CliTestDal<'_, '_> { status: WitnessJobStatus, batch_number: L1BatchNumber, ) { - sqlx::query(&format!( - " + sqlx::query!( + r#" INSERT INTO scheduler_witness_jobs_fri ( l1_batch_number, @@ -132,13 +137,15 @@ impl CliTestDal<'_, '_> { updated_at ) VALUES - ({}, '', 'waiting_for_proofs', NOW(), NOW()) + ($1, '', 'waiting_for_proofs', NOW(), NOW()) ON CONFLICT (l1_batch_number) DO UPDATE - SET status = '{}' - ", - batch_number.0, status, - )) + SET + status = $2 + "#, + batch_number.0 as i64, + status.to_string(), + ) .execute(self.storage.conn()) .await .unwrap(); @@ -149,23 +156,20 @@ impl CliTestDal<'_, '_> { status: ProofCompressionJobStatus, batch_number: L1BatchNumber, ) { - sqlx::query(&format!( - " + sqlx::query!( + r#" INSERT INTO - proof_compression_jobs_fri ( - l1_batch_number, - status, - created_at, - updated_at - ) + proof_compression_jobs_fri (l1_batch_number, status, created_at, updated_at) VALUES - ({}, '{}', NOW(), NOW()) + ($1, $2, NOW(), NOW()) ON CONFLICT (l1_batch_number) DO UPDATE - SET status = '{}' - ", - batch_number.0, status, status, - )) + SET + status = $2 + "#, + batch_number.0 as i64, + status.to_string(), + ) .execute(self.storage.conn()) .await .unwrap(); @@ -180,15 +184,25 @@ impl CliTestDal<'_, '_> { batch_number: L1BatchNumber, sequence_number: usize, ) { - sqlx::query(&format!( - "UPDATE prover_jobs_fri - SET status = '{}', attempts = {} - WHERE l1_batch_number = {} - AND sequence_number = {} - AND aggregation_round = {} - AND circuit_id = {}", - status, attempts, batch_number.0, sequence_number, aggregation_round, circuit_id, - )) + sqlx::query!( + r#" + UPDATE prover_jobs_fri + SET + status 
= $1, + attempts = $2 + WHERE + l1_batch_number = $3 + AND sequence_number = $4 + AND aggregation_round = $5 + AND circuit_id = $6 + "#, + status.to_string(), + attempts as i64, + batch_number.0 as i64, + sequence_number as i64, + aggregation_round as i64, + circuit_id as i16, + ) .execute(self.storage.conn()) .await .unwrap(); @@ -201,13 +215,21 @@ impl CliTestDal<'_, '_> { circuit_id: u8, batch_number: L1BatchNumber, ) { - sqlx::query(&format!( - "UPDATE leaf_aggregation_witness_jobs_fri - SET status = '{}', attempts = {} - WHERE l1_batch_number = {} - AND circuit_id = {}", - status, attempts, batch_number.0, circuit_id, - )) + sqlx::query!( + r#" + UPDATE leaf_aggregation_witness_jobs_fri + SET + status = $1, + attempts = $2 + WHERE + l1_batch_number = $3 + AND circuit_id = $4 + "#, + status.to_string(), + attempts as i64, + batch_number.0 as i64, + circuit_id as i16, + ) .execute(self.storage.conn()) .await .unwrap(); diff --git a/prover/crates/lib/prover_dal/src/fri_prover_dal.rs b/prover/crates/lib/prover_dal/src/fri_prover_dal.rs index 4e68154290d..71d0c11728b 100644 --- a/prover/crates/lib/prover_dal/src/fri_prover_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_prover_dal.rs @@ -6,8 +6,10 @@ use zksync_basic_types::{ AggregationRound, CircuitIdRoundTuple, CircuitProverStatsEntry, ProtocolVersionedCircuitProverStats, }, - protocol_version::{ProtocolSemanticVersion, ProtocolVersionId}, - prover_dal::{FriProverJobMetadata, ProverJobFriInfo, ProverJobStatus, StuckJobs}, + protocol_version::{ProtocolSemanticVersion, ProtocolVersionId, VersionPatch}, + prover_dal::{ + FriProverJobMetadata, JobCountStatistics, ProverJobFriInfo, ProverJobStatus, StuckJobs, + }, L1BatchNumber, }; use zksync_db_connection::{ @@ -49,6 +51,78 @@ impl FriProverDal<'_, '_> { drop(latency); } + /// Retrieves the next prover job to be proven. Called by WVGs. + /// + /// Prover jobs must be thought of as ordered. + /// The prover must prioritize the jobs that move the chain forward the fastest. + /// Current ordering: + /// - pick the lowest batch + /// - within the lowest batch, look at the lowest aggregation level (move up the proof tree) + /// - pick the same type of circuit for as long as possible; this maximizes GPU cache reuse + /// + /// NOTE: Most of this function duplicates `get_next_job()`; the latter will be deleted together with the old prover.
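+    /// Illustration only (hypothetical witness vector generator loop, not part of this diff):
+    ///
+    /// ```ignore
+    /// while let Some(metadata) = prover_dal.get_job(protocol_version, "wvg-1").await {
+    ///     // metadata.id, metadata.block_number, metadata.circuit_id, ...
+    /// }
+    /// ```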
+ pub async fn get_job( + &mut self, + protocol_version: ProtocolSemanticVersion, + picked_by: &str, + ) -> Option { + sqlx::query!( + r#" + UPDATE prover_jobs_fri + SET + status = 'in_progress', + attempts = attempts + 1, + updated_at = NOW(), + processing_started_at = NOW(), + picked_by = $3 + WHERE + id = ( + SELECT + id + FROM + prover_jobs_fri + WHERE + status = 'queued' + AND protocol_version = $1 + AND protocol_version_patch = $2 + ORDER BY + l1_batch_number ASC, + aggregation_round ASC, + circuit_id ASC, + id ASC + LIMIT + 1 + FOR UPDATE + SKIP LOCKED + ) + RETURNING + prover_jobs_fri.id, + prover_jobs_fri.l1_batch_number, + prover_jobs_fri.circuit_id, + prover_jobs_fri.aggregation_round, + prover_jobs_fri.sequence_number, + prover_jobs_fri.depth, + prover_jobs_fri.is_node_final_proof + "#, + protocol_version.minor as i32, + protocol_version.patch.0 as i32, + picked_by, + ) + .fetch_optional(self.storage.conn()) + .await + .expect("failed to get prover job") + .map(|row| FriProverJobMetadata { + id: row.id as u32, + block_number: L1BatchNumber(row.l1_batch_number as u32), + circuit_id: row.circuit_id as u8, + aggregation_round: AggregationRound::try_from(i32::from(row.aggregation_round)) + .unwrap(), + sequence_number: row.sequence_number as usize, + depth: row.depth as u16, + is_node_final_proof: row.is_node_final_proof, + }) + } + pub async fn get_next_job( &mut self, protocol_version: ProtocolSemanticVersion, @@ -445,6 +519,53 @@ impl FriProverDal<'_, '_> { } } + pub async fn get_generic_prover_jobs_stats( + &mut self, + ) -> HashMap { + { + sqlx::query!( + r#" + SELECT + protocol_version AS "protocol_version!", + protocol_version_patch AS "protocol_version_patch!", + COUNT(*) FILTER ( + WHERE + status = 'queued' + ) AS queued, + COUNT(*) FILTER ( + WHERE + status = 'in_progress' + ) AS in_progress + FROM + prover_jobs_fri + WHERE + status IN ('queued', 'in_progress') + AND protocol_version IS NOT NULL + GROUP BY + protocol_version, + protocol_version_patch + "# + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| { + let protocol_semantic_version = ProtocolSemanticVersion::new( + ProtocolVersionId::try_from(row.protocol_version as u16).unwrap(), + VersionPatch(row.protocol_version_patch as u32), + ); + let key = protocol_semantic_version; + let value = JobCountStatistics { + queued: row.queued.unwrap() as usize, + in_progress: row.in_progress.unwrap() as usize, + }; + (key, value) + }) + .collect() + } + } + pub async fn min_unproved_l1_batch_number(&mut self) -> HashMap<(u8, u8), L1BatchNumber> { { sqlx::query!( diff --git a/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs b/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs index 9958527a98b..c7ba0f60ef3 100644 --- a/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs @@ -1378,7 +1378,7 @@ impl FriWitnessGeneratorDal<'_, '_> { pub async fn get_witness_jobs_stats( &mut self, aggregation_round: AggregationRound, - ) -> HashMap<(AggregationRound, ProtocolSemanticVersion), JobCountStatistics> { + ) -> HashMap { let table_name = Self::input_table_name_for(aggregation_round); let sql = format!( r#" @@ -1407,7 +1407,7 @@ impl FriWitnessGeneratorDal<'_, '_> { .unwrap(), VersionPatch(row.get::("protocol_version_patch") as u32), ); - let key = (aggregation_round, protocol_semantic_version); + let key = protocol_semantic_version; let value = JobCountStatistics { queued: row.get::("queued") as usize, 
in_progress: row.get::("in_progress") as usize, @@ -1709,7 +1709,7 @@ impl FriWitnessGeneratorDal<'_, '_> { block_number: L1BatchNumber, max_attempts: u32, ) -> Vec { - let query = format!( + sqlx::query!( r#" UPDATE witness_inputs_fri SET @@ -1717,9 +1717,12 @@ impl FriWitnessGeneratorDal<'_, '_> { updated_at = NOW(), processing_started_at = NOW() WHERE - l1_batch_number = {} - AND attempts >= {} - AND (status = 'in_progress' OR status = 'failed') + l1_batch_number = $1 + AND attempts >= $2 + AND ( + status = 'in_progress' + OR status = 'failed' + ) RETURNING l1_batch_number, status, @@ -1728,22 +1731,21 @@ impl FriWitnessGeneratorDal<'_, '_> { picked_by "#, i64::from(block_number.0), - max_attempts - ); - sqlx::query(&query) - .fetch_all(self.storage.conn()) - .await - .unwrap() - .into_iter() - .map(|row| StuckJobs { - id: row.get::("l1_batch_number") as u64, - status: row.get("status"), - attempts: row.get::("attempts") as u64, - circuit_id: None, - error: row.get("error"), - picked_by: row.get("picked_by"), - }) - .collect() + max_attempts as i64 + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| StuckJobs { + id: row.l1_batch_number as u64, + status: row.status, + attempts: row.attempts as u64, + circuit_id: None, + error: row.error, + picked_by: row.picked_by, + }) + .collect() } pub async fn requeue_stuck_leaf_aggregation_jobs_for_batch( @@ -1777,7 +1779,7 @@ impl FriWitnessGeneratorDal<'_, '_> { block_number: L1BatchNumber, max_attempts: u32, ) -> Vec { - let query = format!( + sqlx::query!( r#" UPDATE recursion_tip_witness_jobs_fri SET @@ -1785,9 +1787,12 @@ impl FriWitnessGeneratorDal<'_, '_> { updated_at = NOW(), processing_started_at = NOW() WHERE - l1_batch_number = {} - AND attempts >= {} - AND (status = 'in_progress' OR status = 'failed') + l1_batch_number = $1 + AND attempts >= $2 + AND ( + status = 'in_progress' + OR status = 'failed' + ) RETURNING l1_batch_number, status, @@ -1796,22 +1801,21 @@ impl FriWitnessGeneratorDal<'_, '_> { picked_by "#, i64::from(block_number.0), - max_attempts - ); - sqlx::query(&query) - .fetch_all(self.storage.conn()) - .await - .unwrap() - .into_iter() - .map(|row| StuckJobs { - id: row.get::("l1_batch_number") as u64, - status: row.get("status"), - attempts: row.get::("attempts") as u64, - circuit_id: None, - error: row.get("error"), - picked_by: row.get("picked_by"), - }) - .collect() + max_attempts as i64 + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| StuckJobs { + id: row.l1_batch_number as u64, + status: row.status, + attempts: row.attempts as u64, + circuit_id: None, + error: row.error, + picked_by: row.picked_by, + }) + .collect() } pub async fn requeue_stuck_scheduler_jobs_for_batch( @@ -1819,7 +1823,7 @@ impl FriWitnessGeneratorDal<'_, '_> { block_number: L1BatchNumber, max_attempts: u32, ) -> Vec { - let query = format!( + sqlx::query!( r#" UPDATE scheduler_witness_jobs_fri SET @@ -1827,9 +1831,12 @@ impl FriWitnessGeneratorDal<'_, '_> { updated_at = NOW(), processing_started_at = NOW() WHERE - l1_batch_number = {} - AND attempts >= {} - AND (status = 'in_progress' OR status = 'failed') + l1_batch_number = $1 + AND attempts >= $2 + AND ( + status = 'in_progress' + OR status = 'failed' + ) RETURNING l1_batch_number, status, @@ -1838,22 +1845,21 @@ impl FriWitnessGeneratorDal<'_, '_> { picked_by "#, i64::from(block_number.0), - max_attempts - ); - sqlx::query(&query) - .fetch_all(self.storage.conn()) - .await - .unwrap() - .into_iter() - .map(|row| StuckJobs 
{ - id: row.get::("l1_batch_number") as u64, - status: row.get("status"), - attempts: row.get::("attempts") as u64, - circuit_id: None, - error: row.get("error"), - picked_by: row.get("picked_by"), - }) - .collect() + max_attempts as i64 + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| StuckJobs { + id: row.l1_batch_number as u64, + status: row.status, + attempts: row.attempts as u64, + circuit_id: None, + error: row.error, + picked_by: row.picked_by, + }) + .collect() } async fn requeue_stuck_jobs_for_batch_in_aggregation_round( diff --git a/prover/crates/lib/prover_fri_types/src/keys.rs b/prover/crates/lib/prover_fri_types/src/keys.rs index 2948fc5f84e..26aa679b4a9 100644 --- a/prover/crates/lib/prover_fri_types/src/keys.rs +++ b/prover/crates/lib/prover_fri_types/src/keys.rs @@ -1,6 +1,8 @@ //! Different key types for object store. -use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; +use zksync_types::{ + basic_fri_types::AggregationRound, prover_dal::FriProverJobMetadata, L1BatchNumber, +}; /// Storage key for a [AggregationWrapper`]. #[derive(Debug, Clone, Copy)] @@ -27,6 +29,18 @@ pub struct FriCircuitKey { pub depth: u16, } +impl From for FriCircuitKey { + fn from(prover_job_metadata: FriProverJobMetadata) -> Self { + FriCircuitKey { + block_number: prover_job_metadata.block_number, + sequence_number: prover_job_metadata.sequence_number, + circuit_id: prover_job_metadata.circuit_id, + aggregation_round: prover_job_metadata.aggregation_round, + depth: prover_job_metadata.depth, + } + } +} + /// Storage key for a [`ZkSyncCircuit`]. #[derive(Debug, Clone, Copy)] pub struct CircuitKey<'a> { diff --git a/prover/crates/lib/prover_fri_types/src/lib.rs b/prover/crates/lib/prover_fri_types/src/lib.rs index c14bc190563..4a8a1b3e406 100644 --- a/prover/crates/lib/prover_fri_types/src/lib.rs +++ b/prover/crates/lib/prover_fri_types/src/lib.rs @@ -1,4 +1,4 @@ -use std::env; +use std::{env, time::Instant}; pub use circuit_definitions; use circuit_definitions::{ @@ -66,7 +66,7 @@ impl StoredObject for CircuitWrapper { serialize_using_bincode!(); } -#[derive(Clone, serde::Serialize, serde::Deserialize)] +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] pub enum FriProofWrapper { Base(ZkSyncBaseLayerProof), Recursive(ZkSyncRecursionLayerProof), @@ -98,6 +98,45 @@ impl WitnessVectorArtifacts { } } +/// This structure exists for the transition period between old prover and new prover. +/// We want the 2 codebases to coexist, without impacting each other. +/// Once old prover is deleted, this struct will be renamed to `WitnessVectorArtifacts`. +pub struct WitnessVectorArtifactsTemp { + pub witness_vector: WitnessVec, + pub prover_job: ProverJob, + pub time: Instant, +} + +impl WitnessVectorArtifactsTemp { + pub fn new( + witness_vector: WitnessVec, + prover_job: ProverJob, + time: Instant, + ) -> Self { + Self { + witness_vector, + prover_job, + time, + } + } +} + +/// Data structure containing the proof generated by the circuit prover. 
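+///
+/// Illustration only (hypothetical call site, not introduced by this diff):
+///
+/// ```ignore
+/// let artifacts = ProverArtifacts::new(block_number, proof_wrapper);
+/// assert_eq!(artifacts.block_number, block_number);
+/// ```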
+#[derive(Debug)] +pub struct ProverArtifacts { + pub block_number: L1BatchNumber, + pub proof_wrapper: FriProofWrapper, +} + +impl ProverArtifacts { + pub fn new(block_number: L1BatchNumber, proof_wrapper: FriProofWrapper) -> Self { + Self { + block_number, + proof_wrapper, + } + } +} + #[derive(Clone, serde::Serialize, serde::Deserialize)] pub struct ProverJob { pub block_number: L1BatchNumber, @@ -122,12 +161,30 @@ impl ProverJob { } } -#[derive(Debug, Clone, Eq, PartialEq, Hash, serde::Serialize, serde::Deserialize)] +#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, serde::Serialize, serde::Deserialize)] pub struct ProverServiceDataKey { pub circuit_id: u8, pub round: AggregationRound, } +impl ProverServiceDataKey { + /// Returns the crypto version of the setup key. + /// + /// Setup key is overloaded in our system. On one hand, it is used as identifier for figuring out which type of proofs are ready. + /// On the other hand, it is also a setup key from prover perspective. + /// The 2 overlap on all aggregation rounds, but NodeAggregation. + /// There's only 1 node key and that belongs to circuit 2. + pub fn crypto_setup_key(self) -> Self { + if let AggregationRound::NodeAggregation = self.round { + return Self { + circuit_id: 2, + round: self.round, + }; + } + self + } +} + fn get_round_for_recursive_circuit_type(circuit_type: u8) -> AggregationRound { match circuit_type { circuit_type if circuit_type == ZkSyncRecursionLayerStorageType::SchedulerCircuit as u8 => { @@ -186,6 +243,12 @@ impl ProverServiceDataKey { } } + pub fn all() -> Vec { + let mut keys = Self::all_boojum(); + keys.push(Self::snark()); + keys + } + pub fn is_base_layer(&self) -> bool { self.round == AggregationRound::BasicCircuits } diff --git a/yarn.lock b/yarn.lock index 2a136c31a3e..a7a02fa940a 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1583,7 +1583,7 @@ chalk "4.1.2" ts-morph "^19.0.0" -"@matterlabs/hardhat-zksync-deploy@^1.3.0": +"@matterlabs/hardhat-zksync-deploy@^1.5.0": version "1.5.0" resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-deploy/-/hardhat-zksync-deploy-1.5.0.tgz#40cb454fb187da4bb354f3acb48762a6657fcb36" integrity sha512-7LAgYYwoKWHeR+3CyWEvA3NKBKtt7ktcr7SX6ZPgbEYqHAdXH02vxJZGwNADtMWpyYm8h+fEQkpPIgErD4NhmA== @@ -1658,7 +1658,7 @@ chalk "4.1.2" dockerode "^3.3.4" -"@matterlabs/hardhat-zksync-solc@^1.0.5", "@matterlabs/hardhat-zksync-solc@^1.1.4", "@matterlabs/hardhat-zksync-solc@^1.2.0", "@matterlabs/hardhat-zksync-solc@^1.2.1": +"@matterlabs/hardhat-zksync-solc@^1.0.5", "@matterlabs/hardhat-zksync-solc@^1.2.0", "@matterlabs/hardhat-zksync-solc@^1.2.1": version "1.2.1" resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-solc/-/hardhat-zksync-solc-1.2.1.tgz#07235fbe3905a144c58cddd3d8bfe8cc1b1824ac" integrity sha512-009FEm1qSYTooamd+T8iylIhpk6zT80RnHd9fqZoCWFM49xR1foegAv76oOMyFMsHuSHDbwkWyTSNDo7U5vAzQ== @@ -1675,6 +1675,23 @@ sinon-chai "^3.7.0" undici "^6.18.2" +"@matterlabs/hardhat-zksync-solc@^1.2.4": + version "1.2.4" + resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-solc/-/hardhat-zksync-solc-1.2.4.tgz#b14a1dbfe751058bf2d79eab747b87c7ca7d2361" + integrity sha512-9Nk95kxOZ9rl26trP/pXDLw5MqFAd0CD8FMTGDvA5HBGk6CL2wg4tS0gmucYz5R4qj09KUYOO4FW4rgd/atcGg== + dependencies: + "@nomiclabs/hardhat-docker" "^2.0.2" + chai "^4.3.4" + chalk "^4.1.2" + debug "^4.3.5" + dockerode "^4.0.2" + fs-extra "^11.2.0" + proper-lockfile "^4.1.2" + semver "^7.6.2" + sinon "^18.0.0" + sinon-chai "^3.7.0" + undici "^6.18.2" + "@matterlabs/hardhat-zksync-verify@^0.4.0": 
version "0.4.0" resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-verify/-/hardhat-zksync-verify-0.4.0.tgz#f812c19950022fc36728f3796f6bdae5633e2fcd" @@ -1705,20 +1722,20 @@ sinon "^18.0.0" sinon-chai "^3.7.0" -"@matterlabs/hardhat-zksync-vyper@^1.0.8": - version "1.0.8" - resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-vyper/-/hardhat-zksync-vyper-1.0.8.tgz#d5bd496715a1e322b0bf3926b4146b4e18ab64ff" - integrity sha512-XR7rbfDuBG5/LZWYfhQTP9gD+U24hSJHDuZ9U55wgIfiQTOxPoztFwEbQNiC39vjT5MjP/Nv8/IDrlEBkaVCgw== +"@matterlabs/hardhat-zksync-vyper@^1.1.0": + version "1.1.0" + resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-vyper/-/hardhat-zksync-vyper-1.1.0.tgz#b3fb304429e88a84b4abc3fe4e5a83b2f5e907bd" + integrity sha512-zDjHPeIuHRpumXiWZUbhoji4UJe09jTDRn4xnxsuVkLH7qLAm0VDFzCXYNMvEuySZSdhbSbekxJsH9Kunc5ycA== dependencies: - "@nomiclabs/hardhat-docker" "^2.0.0" - chai "^4.3.6" - chalk "4.1.2" + "@nomiclabs/hardhat-docker" "^2.0.2" + chai "^4.3.4" + chalk "^4.1.2" dockerode "^4.0.2" - fs-extra "^11.1.1" - semver "^7.5.4" - sinon "^17.0.1" + fs-extra "^11.2.0" + semver "^7.6.2" + sinon "^18.0.0" sinon-chai "^3.7.0" - undici "^5.14.0" + undici "^6.18.2" "@matterlabs/prettier-config@^1.0.3": version "1.0.3" @@ -2066,11 +2083,13 @@ integrity sha512-+Wz0hwmJGSI17B+BhU/qFRZ1l6/xMW82QGXE/Gi+WTmwgJrQefuBs1lIf7hzQ1hLk6hpkvb/zwcNkpVKRYTQYg== "@openzeppelin/contracts-upgradeable-v4@npm:@openzeppelin/contracts-upgradeable@4.9.5", "@openzeppelin/contracts-upgradeable@4.9.5": + name "@openzeppelin/contracts-upgradeable-v4" version "4.9.5" resolved "https://registry.yarnpkg.com/@openzeppelin/contracts-upgradeable/-/contracts-upgradeable-4.9.5.tgz#572b5da102fc9be1d73f34968e0ca56765969812" integrity sha512-f7L1//4sLlflAN7fVzJLoRedrf5Na3Oal5PZfIq55NFcVZ90EpV1q5xOvL4lFvg3MNICSDr2hH0JUBxwlxcoPg== "@openzeppelin/contracts-v4@npm:@openzeppelin/contracts@4.9.5", "@openzeppelin/contracts@4.9.5": + name "@openzeppelin/contracts-v4" version "4.9.5" resolved "https://registry.yarnpkg.com/@openzeppelin/contracts/-/contracts-4.9.5.tgz#1eed23d4844c861a1835b5d33507c1017fa98de8" integrity sha512-ZK+W5mVhRppff9BE6YdR8CC52C8zAvsVAiWhEtQ5+oNxFE6h1WdeWo+FJSF8KKvtxxVYZ7MTP/5KoVpAU3aSWg== @@ -6879,7 +6898,7 @@ jest-each@^29.7.0: jest-util "^29.7.0" pretty-format "^29.7.0" -jest-environment-node@^29.7.0: +jest-environment-node@^29.0.3, jest-environment-node@^29.7.0: version "29.7.0" resolved "https://registry.yarnpkg.com/jest-environment-node/-/jest-environment-node-29.7.0.tgz#0b93e111dda8ec120bc8300e6d1fb9576e164376" integrity sha512-DOSwCRqXirTOyheM+4d5YZOrWcdu0LNZ87ewUoywbcb2XR4wKgqiG8vNeYwhjFMbEkfju7wx2GYH0P2gevGvFw== @@ -7958,7 +7977,14 @@ minimatch@^7.4.3: dependencies: brace-expansion "^2.0.1" -minimatch@^9.0.3, minimatch@^9.0.4: +minimatch@^9.0.3: + version "9.0.4" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.4.tgz#8e49c731d1749cbec05050ee5145147b32496a51" + integrity sha512-KqWh+VchfxcMNRAJjj2tnsSJdNbHsVgnkBhTNrW7AjVo6OvLtxw8zfT9oLw1JSohlFzJ8jCoTgaoXvJ+kHt6fw== + dependencies: + brace-expansion "^2.0.1" + +minimatch@^9.0.4: version "9.0.5" resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.5.tgz#d74f9dd6b57d83d8e98cfb82133b03978bc929e5" integrity sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow== @@ -7977,7 +8003,12 @@ minimist@^1.2.0, minimist@^1.2.5, minimist@^1.2.6, minimist@^1.2.8, minimist@~1. 
resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.8.tgz#c1a464e7693302e082a075cee0c057741ac4772c" integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA== -"minipass@^5.0.0 || ^6.0.2 || ^7.0.0", minipass@^7.1.2: +"minipass@^5.0.0 || ^6.0.2 || ^7.0.0": + version "7.1.1" + resolved "https://registry.yarnpkg.com/minipass/-/minipass-7.1.1.tgz#f7f85aff59aa22f110b20e27692465cf3bf89481" + integrity sha512-UZ7eQ+h8ywIRAW1hIEl2AqdwzJucU/Kp59+8kkZeSvafXhZjul247BvIJjEVFVeON6d7lM46XX1HXCduKAS8VA== + +minipass@^7.1.2: version "7.1.2" resolved "https://registry.yarnpkg.com/minipass/-/minipass-7.1.2.tgz#93a9626ce5e5e66bd4db86849e7515e92340a707" integrity sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw== @@ -10097,6 +10128,7 @@ synckit@^0.9.1: fast-glob "^3.3.2" hardhat "=2.22.2" preprocess "^3.2.0" + zksync-ethers "^5.9.0" table-layout@^1.0.2: version "1.0.2" diff --git a/zk_toolbox/Cargo.lock b/zk_toolbox/Cargo.lock index e45e1b853e4..1bacb22cc95 100644 --- a/zk_toolbox/Cargo.lock +++ b/zk_toolbox/Cargo.lock @@ -684,6 +684,7 @@ dependencies = [ "console", "ethers", "futures", + "git_version_macro", "once_cell", "serde", "serde_json", @@ -724,6 +725,7 @@ dependencies = [ "rand 0.8.5", "serde", "serde_json", + "serde_yaml", "strum", "thiserror", "types", @@ -733,6 +735,7 @@ dependencies = [ "zksync_config", "zksync_protobuf", "zksync_protobuf_config", + "zksync_system_constants", ] [[package]] @@ -1962,6 +1965,13 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" +[[package]] +name = "git_version_macro" +version = "0.1.0" +dependencies = [ + "chrono", +] + [[package]] name = "glob" version = "0.3.1" @@ -4598,6 +4608,7 @@ version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff" dependencies = [ + "hex", "serde", "serde_with_macros", ] @@ -6427,6 +6438,7 @@ dependencies = [ "zksync_config", "zksync_consensus_crypto", "zksync_consensus_roles", + "zksync_system_constants", ] [[package]] @@ -6434,6 +6446,7 @@ name = "zk_supervisor" version = "0.1.0" dependencies = [ "anyhow", + "chrono", "clap", "clap-markdown", "common", @@ -6449,6 +6462,7 @@ dependencies = [ "types", "url", "xshell", + "zksync_basic_types", ] [[package]] @@ -6701,6 +6715,7 @@ dependencies = [ "secp256k1", "serde", "serde_json", + "serde_with", "strum", "thiserror", "tracing", diff --git a/zk_toolbox/Cargo.toml b/zk_toolbox/Cargo.toml index 33309872ea3..b8967466bf8 100644 --- a/zk_toolbox/Cargo.toml +++ b/zk_toolbox/Cargo.toml @@ -5,6 +5,7 @@ members = [ "crates/types", "crates/zk_inception", "crates/zk_supervisor", + "crates/git_version_macro", ] resolver = "2" @@ -25,11 +26,13 @@ keywords = ["zk", "cryptography", "blockchain", "ZKStack", "ZKsync"] common = { path = "crates/common" } config = { path = "crates/config" } types = { path = "crates/types" } +git_version_macro = { path = "crates/git_version_macro" } # ZkSync deps zksync_config = { path = "../core/lib/config" } zksync_protobuf_config = { path = "../core/lib/protobuf_config" } zksync_basic_types = { path = "../core/lib/basic_types" } +zksync_system_constants = { path = "../core/lib/constants" } zksync_consensus_roles = "=0.1.1" zksync_consensus_crypto = "=0.1.1" zksync_protobuf = "=0.1.1" @@ -40,6 +43,7 @@ clap = { version = "4.4", features = ["derive", 
"wrap_help", "string"] } slugify-rs = "0.0.3" cliclack = "0.2.5" console = "0.15.8" +chrono = "0.4.38" ethers = "2.0" futures = "0.3.30" human-panic = "2.0" diff --git a/zk_toolbox/crates/common/Cargo.toml b/zk_toolbox/crates/common/Cargo.toml index 1f6fb6fd9fe..5fdf481bea6 100644 --- a/zk_toolbox/crates/common/Cargo.toml +++ b/zk_toolbox/crates/common/Cargo.toml @@ -29,3 +29,4 @@ url.workspace = true xshell.workspace = true thiserror.workspace = true strum.workspace = true +git_version_macro.workspace = true diff --git a/zk_toolbox/crates/common/src/cmd.rs b/zk_toolbox/crates/common/src/cmd.rs index 7bf0147b69c..130a3b2c100 100644 --- a/zk_toolbox/crates/common/src/cmd.rs +++ b/zk_toolbox/crates/common/src/cmd.rs @@ -147,10 +147,7 @@ impl<'a> Cmd<'a> { fn check_output_status(command_text: &str, output: &std::process::Output) -> CmdResult<()> { if !output.status.success() { logger::new_line(); - logger::error_note( - &format!("Command failed to run: {}", command_text), - &log_output(output), - ); + logger::error_note("Command failed to run", &log_output(output)); return Err(CmdError { stderr: Some(String::from_utf8(output.stderr.clone())?), source: anyhow::anyhow!("Command failed to run: {}", command_text), diff --git a/zk_toolbox/crates/common/src/ethereum.rs b/zk_toolbox/crates/common/src/ethereum.rs index 93cc524568c..4f000ed0fd5 100644 --- a/zk_toolbox/crates/common/src/ethereum.rs +++ b/zk_toolbox/crates/common/src/ethereum.rs @@ -10,7 +10,7 @@ use ethers::{ }; use types::TokenInfo; -use crate::{logger, wallets::Wallet}; +use crate::wallets::Wallet; pub fn create_ethers_client( private_key: H256, @@ -89,35 +89,30 @@ pub async fn mint_token( chain_id: u64, amount: u128, ) -> anyhow::Result<()> { - let client = Arc::new(create_ethers_client( - main_wallet.private_key.unwrap(), - l1_rpc, - Some(chain_id), - )?); + let client = Arc::new( + create_ethers_client(main_wallet.private_key.unwrap(), l1_rpc, Some(chain_id))? + .nonce_manager(main_wallet.address), + ); let contract = TokenContract::new(token_address, client); - // contract + + let mut pending_calls = vec![]; for address in addresses { - if let Err(err) = mint(&contract, address, amount).await { - logger::warn(format!("Failed to mint {err}")) - } + pending_calls.push(contract.mint(address, amount.into())); } - Ok(()) -} + let mut pending_txs = vec![]; + for call in &pending_calls { + pending_txs.push( + call.send() + .await? + // It's safe to set such low number of confirmations and low interval for localhost + .confirmations(3) + .interval(Duration::from_millis(30)), + ); + } + + futures::future::join_all(pending_txs).await; -async fn mint( - contract: &TokenContract, - address: Address, - amount: u128, -) -> anyhow::Result<()> { - contract - .mint(address, amount.into()) - .send() - .await? - // It's safe to set such low number of confirmations and low interval for localhost - .confirmations(1) - .interval(Duration::from_millis(30)) - .await?; Ok(()) } diff --git a/zk_toolbox/crates/common/src/forge.rs b/zk_toolbox/crates/common/src/forge.rs index f00921a0bf2..4421edca663 100644 --- a/zk_toolbox/crates/common/src/forge.rs +++ b/zk_toolbox/crates/common/src/forge.rs @@ -69,6 +69,17 @@ impl ForgeScript { return Ok(res?); } } + + // TODO: This line is very helpful for debugging purposes, + // maybe it makes sense to make it conditionally displayed. 
+ let command = format!( + "forge script {} --legacy {}", + script_path.to_str().unwrap(), + args_no_resume.join(" ") + ); + + println!("Command: {}", command); + let mut cmd = Cmd::new(cmd!( shell, "forge script {script_path} --legacy {args_no_resume...}" @@ -96,6 +107,12 @@ impl ForgeScript { self } + /// Add the sender address to the forge script command. + pub fn with_sender(mut self, address: String) -> Self { + self.args.add_arg(ForgeScriptArg::Sender { address }); + self + } + /// Add the rpc-url flag to the forge script command. pub fn with_rpc_url(mut self, rpc_url: String) -> Self { self.args.add_arg(ForgeScriptArg::RpcUrl { url: rpc_url }); @@ -135,6 +152,7 @@ impl ForgeScript { }); self } + // Do not start the script if balance is not enough pub fn private_key(&self) -> Option { self.args.args.iter().find_map(|a| { @@ -244,6 +262,10 @@ pub enum ForgeScriptArg { }, Verify, Resume, + #[strum(to_string = "sender={address}")] + Sender { + address: String, + }, } /// ForgeScriptArgs is a set of arguments that can be passed to the forge script command. diff --git a/zk_toolbox/crates/common/src/hardhat.rs b/zk_toolbox/crates/common/src/hardhat.rs new file mode 100644 index 00000000000..e15e94be5ad --- /dev/null +++ b/zk_toolbox/crates/common/src/hardhat.rs @@ -0,0 +1,17 @@ +use std::path::Path; + +use xshell::{cmd, Shell}; + +use crate::cmd::Cmd; + +pub fn build_l2_contracts(shell: &Shell, link_to_code: &Path) -> anyhow::Result<()> { + let _dir_guard = shell.push_dir(link_to_code.join("contracts")); + Ok(Cmd::new(cmd!(shell, "yarn l2 build")).run()?) +} + +/// Builds L1 contracts using hardhat. This is a temporary measure, mainly needed to +/// compile the contracts with zksolc (for some reason doing it via foundry took too much time). +pub fn build_l1_contracts(shell: &Shell, link_to_code: &Path) -> anyhow::Result<()> { + let _dir_guard = shell.push_dir(link_to_code.join("contracts")); + Ok(Cmd::new(cmd!(shell, "yarn l1 build")).run()?) +} diff --git a/zk_toolbox/crates/common/src/lib.rs b/zk_toolbox/crates/common/src/lib.rs index 7be4af74070..436d44fef87 100644 --- a/zk_toolbox/crates/common/src/lib.rs +++ b/zk_toolbox/crates/common/src/lib.rs @@ -11,8 +11,11 @@ pub mod external_node; pub mod files; pub mod forge; pub mod git; +pub mod hardhat; pub mod server; +pub mod version; pub mod wallets; +pub mod yaml; pub use prerequisites::{ check_general_prerequisites, check_prerequisites, GCLOUD_PREREQUISITE, GPU_PREREQUISITES, diff --git a/zk_toolbox/crates/common/src/server.rs b/zk_toolbox/crates/common/src/server.rs index 40da1cf8032..7f8c2a90e58 100644 --- a/zk_toolbox/crates/common/src/server.rs +++ b/zk_toolbox/crates/common/src/server.rs @@ -40,6 +40,7 @@ impl Server { general_path: P, secrets_path: P, contracts_path: P, + gateway_contracts_config_path: Option
<P>
, mut additional_args: Vec, ) -> anyhow::Result<()> where @@ -56,6 +57,16 @@ impl Server { let uring = self.uring.then_some("--features=rocksdb/io-uring"); + let (gateway_config_param, gateway_config_path) = + if let Some(gateway_contracts_config_path) = gateway_contracts_config_path { + ( + Some("--gateway-contracts-config-path"), + Some(gateway_contracts_config_path), + ) + } else { + (None, None) + }; + let mut cmd = Cmd::new( cmd!( shell, @@ -65,6 +76,7 @@ impl Server { --config-path {general_path} --secrets-path {secrets_path} --contracts-config-path {contracts_path} + {gateway_config_param...} {gateway_config_path...} " ) .args(additional_args) diff --git a/zk_toolbox/crates/common/src/term/logger.rs b/zk_toolbox/crates/common/src/term/logger.rs index 33a88bd961e..17e518d9ad9 100644 --- a/zk_toolbox/crates/common/src/term/logger.rs +++ b/zk_toolbox/crates/common/src/term/logger.rs @@ -56,10 +56,9 @@ pub fn note(msg: impl Display, content: impl Display) { } pub fn error_note(msg: &str, content: &str) { - let symbol = CliclackTheme.state_symbol(&ThemeState::Submit); - let note = CliclackTheme - .format_note(msg, content) - .replace(&symbol, &CliclackTheme.error_symbol()); + let note = CliclackTheme.format_log(msg, &CliclackTheme.error_symbol()); + term_write(note); + let note = CliclackTheme.format_log(content, &CliclackTheme.error_symbol()); term_write(note); } diff --git a/zk_toolbox/crates/common/src/version.rs b/zk_toolbox/crates/common/src/version.rs new file mode 100644 index 00000000000..43be7a07b7e --- /dev/null +++ b/zk_toolbox/crates/common/src/version.rs @@ -0,0 +1,24 @@ +const GIT_VERSION: &str = git_version_macro::build_git_revision!(); +const GIT_BRANCH: &str = git_version_macro::build_git_branch!(); +const GIT_SUBMODULES: &[(&str, &str)] = git_version_macro::build_git_submodules!(); +const BUILD_TIMESTAMP: &str = git_version_macro::build_timestamp!(); + +/// Returns a multi-line version message that includes: +/// - provided crate version +/// - git revision +/// - git branch +/// - git submodules +/// - build timestamp +pub fn version_message(crate_version: &str) -> String { + let mut version = format!("v{}-{}\n", crate_version, GIT_VERSION); + version.push_str(&format!("Branch: {}\n", GIT_BRANCH)); + #[allow(clippy::const_is_empty)] // Proc-macro generated. + if !GIT_SUBMODULES.is_empty() { + version.push_str("Submodules:\n"); + for (name, rev) in GIT_SUBMODULES { + version.push_str(&format!(" - {}: {}\n", name, rev)); + } + } + version.push_str(&format!("Build timestamp: {}\n", BUILD_TIMESTAMP)); + version +} diff --git a/zk_toolbox/crates/common/src/yaml.rs b/zk_toolbox/crates/common/src/yaml.rs new file mode 100644 index 00000000000..83b59ad6764 --- /dev/null +++ b/zk_toolbox/crates/common/src/yaml.rs @@ -0,0 +1,475 @@ +use anyhow::Context; + +use crate::logger; + +pub(super) const MSG_INVALID_KEY_TYPE_ERR: &str = "Invalid key type"; + +/// Holds the differences between two YAML configurations. +#[derive(Default)] +pub struct ConfigDiff { + /// Fields that have different values between the two configurations + /// This contains the new values + pub differing_values: serde_yaml::Mapping, + + /// Fields that are present in the new configuration but not in the old one. 
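+    /// For example, merging a new config that adds `key5: value5` records
+    /// `key5: value5` here (see `test_merge_yaml_b_has_extra_field_returns_diff` below).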
+    pub new_fields: serde_yaml::Mapping,
+}
+
+impl ConfigDiff {
+    pub fn print(&self, msg: &str, is_warning: bool) {
+        if self.new_fields.is_empty() {
+            return;
+        }
+
+        if is_warning {
+            logger::warn(msg);
+            logger::warn(logger::object_to_string(&self.new_fields));
+        } else {
+            logger::info(msg);
+            logger::info(logger::object_to_string(&self.new_fields));
+        }
+    }
+}
+
+fn merge_yaml_internal(
+    a: &mut serde_yaml::Value,
+    b: serde_yaml::Value,
+    current_key: String,
+    diff: &mut ConfigDiff,
+    override_values: bool,
+) -> anyhow::Result<()> {
+    match (a, b) {
+        (serde_yaml::Value::Mapping(a), serde_yaml::Value::Mapping(b)) => {
+            for (key, value) in b {
+                let k = key.as_str().context(MSG_INVALID_KEY_TYPE_ERR)?.to_string();
+                let current_key = if current_key.is_empty() {
+                    k.clone()
+                } else {
+                    format!("{}.{}", current_key, k)
+                };
+
+                if a.contains_key(&key) {
+                    let a_value = a.get_mut(&key).unwrap();
+                    if value.is_null() && override_values {
+                        a.remove(&key);
+                        diff.differing_values
+                            .insert(current_key.into(), serde_yaml::Value::Null);
+                    } else {
+                        merge_yaml_internal(a_value, value, current_key, diff, override_values)?;
+                    }
+                } else if !value.is_null() {
+                    a.insert(key.clone(), value.clone());
+                    diff.new_fields.insert(current_key.into(), value);
+                } else if override_values {
+                    diff.differing_values
+                        .insert(current_key.into(), serde_yaml::Value::Null);
+                }
+            }
+        }
+        (a, b) => {
+            if a != &b {
+                diff.differing_values.insert(current_key.into(), b.clone());
+                if override_values {
+                    *a = b;
+                }
+            }
+        }
+    }
+    Ok(())
+}
+
+pub fn merge_yaml(
+    a: &mut serde_yaml::Value,
+    b: serde_yaml::Value,
+    override_values: bool,
+) -> anyhow::Result<ConfigDiff> {
+    let mut diff = ConfigDiff::default();
+    merge_yaml_internal(a, b, "".into(), &mut diff, override_values)?;
+    Ok(diff)
+}
+
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn test_merge_yaml_both_are_equal_returns_no_diff() {
+        let mut a = serde_yaml::from_str(
+            r#"
+            key1: value1
+            key2: value2
+            key3:
+              key4: value4
+            "#,
+        )
+        .unwrap();
+        let b: serde_yaml::Value = serde_yaml::from_str(
+            r#"
+            key1: value1
+            key2: value2
+            key3:
+              key4: value4
+            "#,
+        )
+        .unwrap();
+        let expected: serde_yaml::Value = serde_yaml::from_str(
+            r#"
+            key1: value1
+            key2: value2
+            key3:
+              key4: value4
+            "#,
+        )
+        .unwrap();
+        let diff = super::merge_yaml(&mut a, b, false).unwrap();
+        assert!(diff.differing_values.is_empty());
+        assert!(diff.new_fields.is_empty());
+        assert_eq!(a, expected);
+    }
+
+    #[test]
+    fn test_merge_yaml_b_has_extra_field_returns_diff() {
+        let mut a = serde_yaml::from_str(
+            r#"
+            key1: value1
+            key2: value2
+            key3:
+              key4: value4
+            "#,
+        )
+        .unwrap();
+        let b: serde_yaml::Value = serde_yaml::from_str(
+            r#"
+            key1: value1
+            key2: value2
+            key3:
+              key4: value4
+            key5: value5
+            "#,
+        )
+        .unwrap();
+
+        let expected: serde_yaml::Value = serde_yaml::from_str(
+            r#"
+            key1: value1
+            key2: value2
+            key3:
+              key4: value4
+            key5: value5
+            "#,
+        )
+        .unwrap();
+
+        let diff = super::merge_yaml(&mut a, b.clone(), false).unwrap();
+        assert!(diff.differing_values.is_empty());
+        assert_eq!(diff.new_fields.len(), 1);
+        assert_eq!(
+            diff.new_fields.get::<String>("key5".into()).unwrap(),
+            b.clone().get("key5").unwrap()
+        );
+        assert_eq!(a, expected);
+    }
+
+    #[test]
+    fn test_merge_yaml_a_has_extra_field_no_diff() {
+        let mut a = serde_yaml::from_str(
+            r#"
+            key1: value1
+            key2: value2
+            key3:
+              key4: value4
+            key5: value5
+            "#,
+        )
+        .unwrap();
+        let b: serde_yaml::Value = serde_yaml::from_str(
+            r#"
+            key1: value1
+            key2: value2
+            key3:
+              key4: value4
+            "#,
+        )
+        .unwrap();
+
+        let expected: serde_yaml::Value = serde_yaml::from_str(
+            r#"
+            key1: value1
+            key2: value2
+            key3:
+              key4: value4
+            key5: value5
+            "#,
+        )
+        .unwrap();
+
+        let diff = super::merge_yaml(&mut a, b, false).unwrap();
+        assert!(diff.differing_values.is_empty());
+        assert!(diff.new_fields.is_empty());
+        assert_eq!(a, expected);
+    }
+
+    #[test]
+    fn test_merge_yaml_a_has_extra_field_and_b_has_extra_field_returns_diff() {
+        let mut a = serde_yaml::from_str(
+            r#"
+            key1: value1
+            key2: value2
+            key3:
+              key4: value4
+            key5: value5
+            "#,
+        )
+        .unwrap();
+        let b: serde_yaml::Value = serde_yaml::from_str(
+            r#"
+            key1: value1
+            key2: value2
+            key3:
+              key4: value4
+            key6: value6
+            "#,
+        )
+        .unwrap();
+
+        let expected: serde_yaml::Value = serde_yaml::from_str(
+            r#"
+            key1: value1
+            key2: value2
+            key3:
+              key4: value4
+            key5: value5
+            key6: value6
+            "#,
+        )
+        .unwrap();
+
+        let diff = super::merge_yaml(&mut a, b.clone(), false).unwrap();
+        assert_eq!(diff.differing_values.len(), 0);
+        assert_eq!(diff.new_fields.len(), 1);
+        assert_eq!(
+            diff.new_fields.get::<String>("key6".into()).unwrap(),
+            b.clone().get("key6").unwrap()
+        );
+        assert_eq!(a, expected);
+    }
+
+    #[test]
+    fn test_merge_yaml_a_has_different_value_returns_diff() {
+        let mut a = serde_yaml::from_str(
+            r#"
+            key1: value1
+            key2: value2
+            key3:
+              key4: value4
+            "#,
+        )
+        .unwrap();
+        let b: serde_yaml::Value = serde_yaml::from_str(
+            r#"
+            key1: value1
+            key2: value2
+            key3:
+              key4: value5
+            "#,
+        )
+        .unwrap();
+
+        let expected: serde_yaml::Value = serde_yaml::from_str(
+            r#"
+            key1: value1
+            key2: value2
+            key3:
+              key4: value4
+            "#,
+        )
+        .unwrap();
+
+        let diff = super::merge_yaml(&mut a, b.clone(), false).unwrap();
+        assert_eq!(diff.differing_values.len(), 1);
+        assert_eq!(
+            diff.differing_values
+                .get::<String>("key3.key4".into())
+                .unwrap(),
+            b.get("key3").unwrap().get("key4").unwrap()
+        );
+        assert_eq!(a, expected);
+    }
+
+    #[test]
+    fn test_merge_yaml_a_has_different_value_and_b_has_extra_field_returns_diff() {
+        let mut a = serde_yaml::from_str(
+            r#"
+            key1: value1
+            key2: value2
+            key3:
+              key4: value4
+            "#,
+        )
+        .unwrap();
+        let b: serde_yaml::Value = serde_yaml::from_str(
+            r#"
+            key1: value1
+            key2: value2
+            key3:
+              key4: value5
+            key5: value5
+            "#,
+        )
+        .unwrap();
+
+        let expected: serde_yaml::Value = serde_yaml::from_str(
+            r#"
+            key1: value1
+            key2: value2
+            key3:
+              key4: value4
+            key5: value5
+            "#,
+        )
+        .unwrap();
+
+        let diff = super::merge_yaml(&mut a, b.clone(), false).unwrap();
+        assert_eq!(diff.differing_values.len(), 1);
+        assert_eq!(
+            diff.differing_values
+                .get::<String>("key3.key4".into())
+                .unwrap(),
+            b.get("key3").unwrap().get("key4").unwrap()
+        );
+        assert_eq!(diff.new_fields.len(), 1);
+        assert_eq!(
+            diff.new_fields.get::<String>("key5".into()).unwrap(),
+            b.get("key5").unwrap()
+        );
+        assert_eq!(a, expected);
+    }
+
+    #[test]
+    fn test_merge_yaml_override_values() {
+        let mut a = serde_yaml::from_str(
+            r#"
+            key1: value1
+            key2: value2
+            key3:
+              key4: value4
+            "#,
+        )
+        .unwrap();
+        let b: serde_yaml::Value = serde_yaml::from_str(
+            r#"
+            key1: value1
+            key2: value2
+            key3:
+              key4: value5
+            "#,
+        )
+        .unwrap();
+
+        let expected: serde_yaml::Value = serde_yaml::from_str(
+            r#"
+            key1: value1
+            key2: value2
+            key3:
+              key4: value5
+            "#,
+        )
+        .unwrap();
+
+        let diff = super::merge_yaml(&mut a, b.clone(), true).unwrap();
+        assert_eq!(diff.differing_values.len(), 1);
+        assert_eq!(
+            diff.differing_values
+                .get::<String>("key3.key4".into())
+                .unwrap(),
+            b.get("key3").unwrap().get("key4").unwrap()
+        );
+        assert_eq!(a, expected);
+    }
+
+    #[test]
+    fn test_merge_yaml_override_values_with_extra_field() {
+        let mut a = serde_yaml::from_str(
+            r#"
+            key1: value1
+            key2: value2
+            key3:
+              key4: value4
+            "#,
+        )
+        .unwrap();
+        let b: serde_yaml::Value = serde_yaml::from_str(
+            r#"
+            key1: value1
+            key2: value2
+            key3:
+              key4: value5
+            key5: value5
+            "#,
+        )
+        .unwrap();
+
+        let expected: serde_yaml::Value = serde_yaml::from_str(
+            r#"
+            key1: value1
+            key2: value2
+            key3:
+              key4: value5
+            key5: value5
+            "#,
+        )
+        .unwrap();
+
+        let diff = super::merge_yaml(&mut a, b.clone(), true).unwrap();
+        assert_eq!(diff.differing_values.len(), 1);
+        assert_eq!(
+            diff.differing_values
+                .get::<String>("key3.key4".into())
+                .unwrap(),
+            b.get("key3").unwrap().get("key4").unwrap()
+        );
+        assert_eq!(diff.new_fields.len(), 1);
+        assert_eq!(
+            diff.new_fields.get::<String>("key5".into()).unwrap(),
+            b.get("key5").unwrap()
+        );
+        assert_eq!(a, expected);
+    }
+
+    #[test]
+    fn test_override_values_with_null() {
+        let mut a = serde_yaml::from_str(
+            r#"
+            key1: value1
+            key2: value2
+            key3:
+              key4: value4
+            "#,
+        )
+        .unwrap();
+        let b: serde_yaml::Value = serde_yaml::from_str(
+            r#"
+            key1: value1
+            key2: value2
+            key3: null
+            "#,
+        )
+        .unwrap();
+
+        let expected: serde_yaml::Value = serde_yaml::from_str(
+            r#"
+            key1: value1
+            key2: value2
+            "#,
+        )
+        .unwrap();
+
+        let diff = super::merge_yaml(&mut a, b.clone(), true).unwrap();
+        assert_eq!(diff.differing_values.len(), 1);
+        assert_eq!(
+            diff.differing_values
+                .get::<String>("key3".into())
+                .unwrap(),
+            b.get("key3").unwrap()
+        );
+        assert_eq!(a, expected);
+    }
+}
diff --git a/zk_toolbox/crates/config/Cargo.toml b/zk_toolbox/crates/config/Cargo.toml
index 5f1419c7ce9..3e54ef8eafb 100644
--- a/zk_toolbox/crates/config/Cargo.toml
+++ b/zk_toolbox/crates/config/Cargo.toml
@@ -18,6 +18,7 @@ ethers.workspace = true
 rand.workspace = true
 serde.workspace = true
 serde_json.workspace = true
+serde_yaml.workspace = true
 strum.workspace = true
 thiserror.workspace = true
 types.workspace = true
@@ -28,3 +29,4 @@ zksync_protobuf_config.workspace = true
 zksync_protobuf.workspace = true
 zksync_config.workspace = true
 zksync_basic_types.workspace = true
+zksync_system_constants.workspace = true
diff --git a/zk_toolbox/crates/config/src/chain.rs b/zk_toolbox/crates/config/src/chain.rs
index affc8ccc770..7240260b380 100644
--- a/zk_toolbox/crates/config/src/chain.rs
+++ b/zk_toolbox/crates/config/src/chain.rs
@@ -7,6 +7,7 @@ use serde::{Deserialize, Serialize, Serializer};
 use types::{BaseToken, L1BatchCommitmentMode, L1Network, ProverMode, WalletCreation};
 use xshell::Shell;
 use zksync_basic_types::L2ChainId;
+use zksync_config::configs::{gateway::GatewayChainConfig, GatewayConfig};

 use crate::{
     consts::{
@@ -18,7 +19,7 @@ use crate::{
         FileConfigWithDefaultName, ReadConfig, ReadConfigWithBasePath, SaveConfig,
         SaveConfigWithBasePath, ZkToolboxConfig,
     },
-    ContractsConfig, GeneralConfig, GenesisConfig, SecretsConfig, WalletsConfig,
+    ContractsConfig, GeneralConfig, GenesisConfig, SecretsConfig, WalletsConfig, GATEWAY_FILE,
 };

 /// Chain configuration file. This file is created in the chain
@@ -105,6 +106,14 @@ impl ChainConfig {
         SecretsConfig::read_with_base_path(self.get_shell(), &self.configs)
     }

+    pub fn get_gateway_config(&self) -> anyhow::Result<GatewayConfig> {
+        GatewayConfig::read_with_base_path(self.get_shell(), &self.configs)
+    }
+
+    pub fn get_gateway_chain_config(&self) -> anyhow::Result<GatewayChainConfig> {
+        GatewayChainConfig::read_with_base_path(self.get_shell(), &self.configs)
+    }
+
     pub fn path_to_general_config(&self) -> PathBuf {
         self.configs.join(GENERAL_FILE)
     }
@@ -125,6 +134,10 @@ impl ChainConfig {
         self.configs.join(SECRETS_FILE)
     }

+    pub fn path_to_gateway_config(&self) -> PathBuf {
+        self.configs.join(GATEWAY_FILE)
+    }
+
     pub fn save_general_config(&self, general_config: &GeneralConfig) -> anyhow::Result<()> {
         general_config.save_with_base_path(self.get_shell(), &self.configs)
     }
diff --git a/zk_toolbox/crates/config/src/consts.rs b/zk_toolbox/crates/config/src/consts.rs
index 80b204cc619..4323e3166e9 100644
--- a/zk_toolbox/crates/config/src/consts.rs
+++ b/zk_toolbox/crates/config/src/consts.rs
@@ -20,6 +20,10 @@ pub(crate) const INITIAL_DEPLOYMENT_FILE: &str = "initial_deployments.yaml";
 pub(crate) const ERC20_DEPLOYMENT_FILE: &str = "erc20_deployments.yaml";
 /// Name of the contracts file
 pub const CONTRACTS_FILE: &str = "contracts.yaml";
+/// Name of the gateway contracts file
+pub const GATEWAY_FILE: &str = "gateway.yaml";
+/// Name of the gateway chain file
+pub const GATEWAY_CHAIN_FILE: &str = "gateway_chain.yaml";
 /// Main repository for the ZKsync project
 pub const ZKSYNC_ERA_GIT_REPO: &str = "https://github.com/matter-labs/zksync-era";
 /// Name of the docker-compose file inside zksync repository
diff --git a/zk_toolbox/crates/config/src/contracts.rs b/zk_toolbox/crates/config/src/contracts.rs
index 0d4b1c7b1f8..1d43e086f7c 100644
--- a/zk_toolbox/crates/config/src/contracts.rs
+++ b/zk_toolbox/crates/config/src/contracts.rs
@@ -1,5 +1,6 @@
 use ethers::types::{Address, H256};
 use serde::{Deserialize, Serialize};
+use zksync_system_constants::{L2_ASSET_ROUTER_ADDRESS, L2_NATIVE_TOKEN_VAULT_ADDRESS};

 use crate::{
     consts::CONTRACTS_FILE,
@@ -7,6 +8,7 @@ use crate::{
         deploy_ecosystem::output::DeployL1Output,
         deploy_l2_contracts::output::{
             ConsensusRegistryOutput, DefaultL2UpgradeOutput, InitializeBridgeOutput,
+            Multicall3Output,
         },
         register_chain::output::RegisterChainOutput,
     },
@@ -21,6 +23,9 @@ pub struct ContractsConfig {
     pub bridges: BridgesContracts,
     pub l1: L1Contracts,
     pub l2: L2Contracts,
+    // TODO: maybe move these guys to L1
+    pub user_facing_bridgehub: Address,
+    pub user_facing_diamond_proxy: Address,
     #[serde(flatten)]
     pub other: serde_json::Value,
 }
@@ -37,6 +42,10 @@ impl ContractsConfig {
             .deployed_addresses
             .bridges
             .shared_bridge_proxy_addr;
+        self.bridges.l1_nullifier_addr = deploy_l1_output
+            .deployed_addresses
+            .bridges
+            .l1_nullifier_proxy_addr;
         self.ecosystem_contracts.bridgehub_proxy_addr = deploy_l1_output
             .deployed_addresses
             .bridgehub
@@ -48,6 +57,14 @@ impl ContractsConfig {
         self.ecosystem_contracts.transparent_proxy_admin_addr = deploy_l1_output
             .deployed_addresses
             .transparent_proxy_admin_addr;
+        self.ecosystem_contracts.stm_deployment_tracker_proxy_addr = deploy_l1_output
+            .deployed_addresses
+            .bridgehub
+            .ctm_deployment_tracker_proxy_addr;
+        self.ecosystem_contracts.force_deployments_data = deploy_l1_output
+            .contracts_config
+            .force_deployments_data
+            .clone();
         self.l1.default_upgrade_addr = deploy_l1_output
             .deployed_addresses
             .state_transition
self.l1.multicall3_addr = deploy_l1_output.multicall3_addr; self.ecosystem_contracts.validator_timelock_addr = deploy_l1_output.deployed_addresses.validator_timelock_addr; + self.ecosystem_contracts.native_token_vault_addr = + deploy_l1_output.deployed_addresses.native_token_vault_addr; self.l1.verifier_addr = deploy_l1_output .deployed_addresses .state_transition @@ -69,24 +88,51 @@ impl ContractsConfig { self.ecosystem_contracts .diamond_cut_data .clone_from(&deploy_l1_output.contracts_config.diamond_cut_data); + self.l1.rollup_l1_da_validator_addr = deploy_l1_output + .deployed_addresses + .rollup_l1_da_validator_addr; + self.l1.validium_l1_da_validator_addr = deploy_l1_output + .deployed_addresses + .validium_l1_da_validator_addr; self.l1.chain_admin_addr = deploy_l1_output.deployed_addresses.chain_admin; + + self.user_facing_bridgehub = deploy_l1_output + .deployed_addresses + .bridgehub + .bridgehub_proxy_addr; + self.user_facing_diamond_proxy = deploy_l1_output + .deployed_addresses + .state_transition + .diamond_proxy_addr; } pub fn set_chain_contracts(&mut self, register_chain_output: &RegisterChainOutput) { self.l1.diamond_proxy_addr = register_chain_output.diamond_proxy_addr; self.l1.governance_addr = register_chain_output.governance_addr; self.l1.chain_admin_addr = register_chain_output.chain_admin_addr; + self.l1.access_control_restriction_addr = + register_chain_output.access_control_restriction_addr; + self.l1.chain_proxy_admin_addr = register_chain_output.chain_proxy_admin_addr; + self.l2.l2_legacy_shared_bridge_addr = register_chain_output.l2_legacy_shared_bridge_addr; + + self.user_facing_diamond_proxy = register_chain_output.diamond_proxy_addr; } pub fn set_l2_shared_bridge( &mut self, initialize_bridges_output: &InitializeBridgeOutput, ) -> anyhow::Result<()> { - self.bridges.shared.l2_address = Some(initialize_bridges_output.l2_shared_bridge_proxy); - self.bridges.erc20.l2_address = Some(initialize_bridges_output.l2_shared_bridge_proxy); + self.bridges.shared.l2_address = Some(L2_ASSET_ROUTER_ADDRESS); + self.bridges.erc20.l2_address = Some(L2_ASSET_ROUTER_ADDRESS); + self.l2.l2_native_token_vault_proxy_addr = L2_NATIVE_TOKEN_VAULT_ADDRESS; + self.l2.l2_da_validator_addr = initialize_bridges_output.l2_da_validator_address; Ok(()) } + pub fn set_transaction_filterer(&mut self, transaction_filterer_addr: Address) { + self.l1.transaction_filterer_addr = transaction_filterer_addr; + } + pub fn set_consensus_registry( &mut self, consensus_registry_output: &ConsensusRegistryOutput, @@ -102,6 +148,11 @@ impl ContractsConfig { self.l2.default_l2_upgrader = default_upgrade_output.l2_default_upgrader; Ok(()) } + + pub fn set_multicall3(&mut self, multicall3_output: &Multicall3Output) -> anyhow::Result<()> { + self.l2.multicall3 = Some(multicall3_output.multicall3); + Ok(()) + } } impl FileConfigWithDefaultName for ContractsConfig { @@ -115,8 +166,11 @@ pub struct EcosystemContracts { pub bridgehub_proxy_addr: Address, pub state_transition_proxy_addr: Address, pub transparent_proxy_admin_addr: Address, + pub stm_deployment_tracker_proxy_addr: Address, pub validator_timelock_addr: Address, pub diamond_cut_data: String, + pub force_deployments_data: String, + pub native_token_vault_addr: Address, } impl ZkToolboxConfig for EcosystemContracts {} @@ -125,6 +179,7 @@ impl ZkToolboxConfig for EcosystemContracts {} pub struct BridgesContracts { pub erc20: BridgeContractsDefinition, pub shared: BridgeContractsDefinition, + pub l1_nullifier_addr: Address, } #[derive(Debug, Serialize, 
Deserialize, Clone, Default)]
@@ -141,15 +196,24 @@ pub struct L1Contracts {
     pub governance_addr: Address,
     #[serde(default)]
     pub chain_admin_addr: Address,
+    pub access_control_restriction_addr: Address,
+    pub chain_proxy_admin_addr: Address,
     pub multicall3_addr: Address,
     pub verifier_addr: Address,
     pub validator_timelock_addr: Address,
     pub base_token_addr: Address,
+    pub rollup_l1_da_validator_addr: Address,
+    pub validium_l1_da_validator_addr: Address,
+    pub transaction_filterer_addr: Address,
 }

 #[derive(Debug, Serialize, Deserialize, Clone, Default)]
 pub struct L2Contracts {
     pub testnet_paymaster_addr: Address,
     pub default_l2_upgrader: Address,
+    pub l2_da_validator_addr: Address,
+    pub l2_native_token_vault_proxy_addr: Address,
+    pub l2_legacy_shared_bridge_addr: Address,
     pub consensus_registry: Option<Address>,
+    pub multicall3: Option<Address>,
 }
diff --git a/zk_toolbox/crates/config/src/ecosystem.rs b/zk_toolbox/crates/config/src/ecosystem.rs
index 7ff65d4612d..0fe6e9389cf 100644
--- a/zk_toolbox/crates/config/src/ecosystem.rs
+++ b/zk_toolbox/crates/config/src/ecosystem.rs
@@ -208,7 +208,7 @@ impl EcosystemConfig {
         ContractsConfig::read(self.get_shell(), self.config.join(CONTRACTS_FILE))
     }

-    pub fn path_to_foundry(&self) -> PathBuf {
+    pub fn path_to_l1_foundry(&self) -> PathBuf {
         self.link_to_code.join(L1_CONTRACTS_FOUNDRY)
     }

diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs b/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs
index 30ec0eeb9c4..21c67cde5f2 100644
--- a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs
+++ b/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs
@@ -11,7 +11,7 @@ use zksync_basic_types::L2ChainId;
 use crate::{
     consts::INITIAL_DEPLOYMENT_FILE,
     traits::{FileConfigWithDefaultName, ZkToolboxConfig},
-    ContractsConfig, GenesisConfig, WalletsConfig,
+    ContractsConfig, GenesisConfig, WalletsConfig, ERC20_DEPLOYMENT_FILE,
 };

 #[derive(Debug, Deserialize, Serialize, Clone)]
@@ -69,7 +69,7 @@ pub struct Erc20DeploymentConfig {
 }

 impl FileConfigWithDefaultName for Erc20DeploymentConfig {
-    const FILE_NAME: &'static str = INITIAL_DEPLOYMENT_FILE;
+    const FILE_NAME: &'static str = ERC20_DEPLOYMENT_FILE;
 }

 impl ZkToolboxConfig for Erc20DeploymentConfig {}
diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs b/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs
index 7f35cf0357c..afda8d30988 100644
--- a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs
+++ b/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs
@@ -16,28 +16,11 @@ pub struct DeployL1Output {
     pub era_chain_id: u32,
     pub l1_chain_id: u32,
     pub multicall3_addr: Address,
-    pub owner_addr: Address,
+    pub owner_address: Address,
     pub contracts_config: DeployL1ContractsConfigOutput,
     pub deployed_addresses: DeployL1DeployedAddressesOutput,
 }

-impl ZkToolboxConfig for DeployL1Output {}
-
-#[derive(Debug, Deserialize, Serialize, Clone)]
-pub struct DeployL1ContractsConfigOutput {
-    pub diamond_init_max_l2_gas_per_batch: u64,
-    pub diamond_init_batch_overhead_l1_gas: u64,
-    pub diamond_init_max_pubdata_per_batch: u64,
-    pub diamond_init_minimal_l2_gas_price: u64,
-    pub diamond_init_priority_tx_max_pubdata: u64,
-    pub diamond_init_pubdata_pricing_mode: u64,
-    pub priority_tx_max_gas_limit: u64,
-    pub recursion_circuits_set_vks_hash: H256,
-    pub recursion_leaf_level_vk_hash: H256,
-    pub recursion_node_level_vk_hash: H256,
-    pub diamond_cut_data: String,
-}
-
 #[derive(Debug, Deserialize, Serialize, Clone)]
 pub struct DeployL1DeployedAddressesOutput {
     pub blob_versioned_hash_retriever_addr: Address,
@@ -45,15 +28,31 @@ pub struct DeployL1DeployedAddressesOutput {
     pub transparent_proxy_admin_addr: Address,
     pub validator_timelock_addr: Address,
     pub chain_admin: Address,
+    pub access_control_restriction_addr: Address,
     pub bridgehub: L1BridgehubOutput,
     pub bridges: L1BridgesOutput,
     pub state_transition: L1StateTransitionOutput,
+    pub rollup_l1_da_validator_addr: Address,
+    pub validium_l1_da_validator_addr: Address,
+    pub native_token_vault_addr: Address,
+}
+
+impl ZkToolboxConfig for DeployL1Output {}
+
+#[derive(Debug, Deserialize, Serialize, Clone)]
+pub struct DeployL1ContractsConfigOutput {
+    pub diamond_cut_data: String,
+    pub force_deployments_data: String,
 }
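+
+// Note: these `*Output` structs mirror the TOML files written by the forge deploy
+// scripts and are read back via the `ReadConfig` trait; a hypothetical read looks
+// like `DeployL1Output::read(shell, script_params.output(&link_to_code))`, with
+// `script_params` standing in for the ecosystem deployment `ForgeScriptParams`.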
#[derive(Debug, Deserialize, Serialize, Clone)] pub struct L1BridgehubOutput { pub bridgehub_implementation_addr: Address, pub bridgehub_proxy_addr: Address, + pub ctm_deployment_tracker_proxy_addr: Address, + pub ctm_deployment_tracker_implementation_addr: Address, + pub message_root_proxy_addr: Address, + pub message_root_implementation_addr: Address, } #[derive(Debug, Deserialize, Serialize, Clone)] @@ -62,21 +61,23 @@ pub struct L1BridgesOutput { pub erc20_bridge_proxy_addr: Address, pub shared_bridge_implementation_addr: Address, pub shared_bridge_proxy_addr: Address, + pub l1_nullifier_implementation_addr: Address, + pub l1_nullifier_proxy_addr: Address, } #[derive(Debug, Deserialize, Serialize, Clone)] pub struct L1StateTransitionOutput { + pub state_transition_proxy_addr: Address, + pub state_transition_implementation_addr: Address, + pub verifier_addr: Address, pub admin_facet_addr: Address, - pub default_upgrade_addr: Address, - pub diamond_init_addr: Address, - pub diamond_proxy_addr: Address, + pub mailbox_facet_addr: Address, pub executor_facet_addr: Address, - pub genesis_upgrade_addr: Address, pub getters_facet_addr: Address, - pub mailbox_facet_addr: Address, - pub state_transition_implementation_addr: Address, - pub state_transition_proxy_addr: Address, - pub verifier_addr: Address, + pub diamond_init_addr: Address, + pub genesis_upgrade_addr: Address, + pub default_upgrade_addr: Address, + pub diamond_proxy_addr: Address, } #[derive(Debug, Deserialize, Serialize, Clone)] diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_gateway_ctm/input.rs b/zk_toolbox/crates/config/src/forge_interface/deploy_gateway_ctm/input.rs new file mode 100644 index 00000000000..88db31b7563 --- /dev/null +++ b/zk_toolbox/crates/config/src/forge_interface/deploy_gateway_ctm/input.rs @@ -0,0 +1,116 @@ +use ethers::abi::Address; +use serde::{Deserialize, Serialize}; +use types::ProverMode; +use zksync_basic_types::{H256, U256}; +use zksync_config::GenesisConfig; + +use crate::{ + forge_interface::deploy_ecosystem::input::InitialDeploymentConfig, traits::ZkToolboxConfig, + ChainConfig, ContractsConfig, EcosystemConfig, +}; + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct DeployGatewayCTMInput { + bridgehub_proxy_addr: Address, + ctm_deployment_tracker_proxy_addr: Address, + native_token_vault_addr: Address, + chain_type_manager_proxy_addr: Address, + shared_bridge_proxy_addr: Address, + governance: Address, + + chain_chain_id: U256, + era_chain_id: U256, + l1_chain_id: U256, + + testnet_verifier: bool, + + recursion_node_level_vk_hash: H256, + recursion_leaf_level_vk_hash: H256, + recursion_circuits_set_vks_hash: H256, + + diamond_init_pubdata_pricing_mode: u64, + diamond_init_batch_overhead_l1_gas: u64, + diamond_init_max_pubdata_per_batch: u64, + diamond_init_max_l2_gas_per_batch: u64, + diamond_init_priority_tx_max_pubdata: u64, + diamond_init_minimal_l2_gas_price: u64, + + bootloader_hash: H256, + default_aa_hash: H256, + + priority_tx_max_gas_limit: u64, + + genesis_root: H256, + genesis_rollup_leaf_index: u64, + genesis_batch_commitment: H256, + + latest_protocol_version: U256, + + force_deployments_data: String, +} + +impl ZkToolboxConfig for DeployGatewayCTMInput {} + +impl DeployGatewayCTMInput { + pub fn new( + chain_config: &ChainConfig, + ecosystem_config: &EcosystemConfig, + genesis_config: &GenesisConfig, + contracts_config: &ContractsConfig, + initial_deployment_config: &InitialDeploymentConfig, + ) -> Self { + Self { + bridgehub_proxy_addr: 
contracts_config.ecosystem_contracts.bridgehub_proxy_addr,
+            ctm_deployment_tracker_proxy_addr: contracts_config
+                .ecosystem_contracts
+                .stm_deployment_tracker_proxy_addr,
+            native_token_vault_addr: contracts_config.ecosystem_contracts.native_token_vault_addr,
+            chain_type_manager_proxy_addr: contracts_config
+                .ecosystem_contracts
+                .state_transition_proxy_addr,
+            shared_bridge_proxy_addr: contracts_config.bridges.shared.l1_address,
+            governance: contracts_config.l1.governance_addr,
+
+            chain_chain_id: U256::from(chain_config.chain_id.0),
+            era_chain_id: U256::from(ecosystem_config.era_chain_id.0),
+            l1_chain_id: U256::from(ecosystem_config.l1_network.chain_id()),
+
+            // TODO: import it similarly to the DeployL1 config?
+            testnet_verifier: ecosystem_config.prover_version == ProverMode::NoProofs,
+
+            // TODO: we should store it in the ecosystem config somehow and reuse it here
+            recursion_node_level_vk_hash: H256::zero(),
+            recursion_leaf_level_vk_hash: H256::zero(),
+            recursion_circuits_set_vks_hash: H256::zero(),
+
+            diamond_init_pubdata_pricing_mode: initial_deployment_config
+                .diamond_init_pubdata_pricing_mode,
+            diamond_init_batch_overhead_l1_gas: initial_deployment_config
+                .diamond_init_batch_overhead_l1_gas,
+            diamond_init_max_pubdata_per_batch: initial_deployment_config
+                .diamond_init_max_pubdata_per_batch,
+            diamond_init_max_l2_gas_per_batch: initial_deployment_config
+                .diamond_init_max_l2_gas_per_batch,
+            diamond_init_priority_tx_max_pubdata: initial_deployment_config
+                .diamond_init_priority_tx_max_pubdata,
+            diamond_init_minimal_l2_gas_price: initial_deployment_config
+                .diamond_init_minimal_l2_gas_price,
+
+            bootloader_hash: genesis_config.bootloader_hash.unwrap(),
+            default_aa_hash: genesis_config.default_aa_hash.unwrap(),
+
+            priority_tx_max_gas_limit: initial_deployment_config.priority_tx_max_gas_limit,
+
+            genesis_root: genesis_config.genesis_root_hash.unwrap(),
+            genesis_rollup_leaf_index: genesis_config.rollup_last_leaf_index.unwrap(),
+            genesis_batch_commitment: genesis_config.genesis_commitment.unwrap(),
+
+            latest_protocol_version: genesis_config.protocol_version.unwrap().pack(),
+
+            force_deployments_data: contracts_config
+                .ecosystem_contracts
+                .force_deployments_data
+                .clone(),
+        }
+    }
+}
diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_gateway_ctm/mod.rs b/zk_toolbox/crates/config/src/forge_interface/deploy_gateway_ctm/mod.rs
new file mode 100644
index 00000000000..7d1a54844d0
--- /dev/null
+++ b/zk_toolbox/crates/config/src/forge_interface/deploy_gateway_ctm/mod.rs
@@ -0,0 +1,2 @@
+pub mod input;
+pub mod output;
diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_gateway_ctm/output.rs b/zk_toolbox/crates/config/src/forge_interface/deploy_gateway_ctm/output.rs
new file mode 100644
index 00000000000..7d2a2cadb2c
--- /dev/null
+++ b/zk_toolbox/crates/config/src/forge_interface/deploy_gateway_ctm/output.rs
@@ -0,0 +1,32 @@
+use ethers::abi::Address;
+use serde::{Deserialize, Serialize};
+use zksync_basic_types::web3::Bytes;
+
+use crate::traits::ZkToolboxConfig;
+
+#[derive(Debug, Serialize, Deserialize, Clone)]
+pub struct DeployGatewayCTMOutput {
+    pub gateway_state_transition: StateTransitionDeployedAddresses,
+    pub multicall3_addr: Address,
+    pub validium_da_validator: Address,
+    pub relayed_sl_da_validator: Address,
+    pub diamond_cut_data: Bytes,
+}
+
+impl ZkToolboxConfig for DeployGatewayCTMOutput {}
+
+#[derive(Debug, Serialize, Deserialize, Clone)]
+pub struct StateTransitionDeployedAddresses {
+    pub chain_type_manager_proxy_addr: Address,
+    pub chain_type_manager_implementation_addr: Address,
+    pub verifier_addr: Address,
+    pub admin_facet_addr: Address,
+    pub mailbox_facet_addr: Address,
+    pub executor_facet_addr: Address,
+    pub getters_facet_addr: Address,
+    pub diamond_init_addr: Address,
+    pub genesis_upgrade_addr: Address,
+    pub default_upgrade_addr: Address,
+    pub validator_timelock_addr: Address,
+    // The `diamond_proxy` field is removed as indicated by the TODO comment in the Solidity struct.
+}
diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/input.rs b/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/input.rs
index b20b58f99c5..68b637c2d52 100644
--- a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/input.rs
+++ b/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/input.rs
@@ -1,8 +1,9 @@
 use ethers::types::Address;
 use serde::{Deserialize, Serialize};
+use types::L1BatchCommitmentMode;
 use zksync_basic_types::L2ChainId;

-use crate::{traits::ZkToolboxConfig, ChainConfig};
+use crate::{traits::ZkToolboxConfig, ChainConfig, ContractsConfig};

 impl ZkToolboxConfig for DeployL2ContractsInput {}

@@ -16,20 +17,30 @@ pub struct DeployL2ContractsInput {
     pub bridgehub: Address,
     pub governance: Address,
     pub erc20_bridge: Address,
+    pub validium_mode: bool,
     pub consensus_registry_owner: Address,
 }

 impl DeployL2ContractsInput {
-    pub fn new(chain_config: &ChainConfig, era_chain_id: L2ChainId) -> anyhow::Result<Self> {
+    pub fn new(
+        chain_config: &ChainConfig,
+        contracts_config: &ContractsConfig,
+        era_chain_id: L2ChainId,
+    ) -> anyhow::Result<Self> {
         let contracts = chain_config.get_contracts_config()?;
         let wallets = chain_config.get_wallets_config()?;
+
+        let validium_mode =
+            chain_config.l1_batch_commit_data_generator_mode == L1BatchCommitmentMode::Validium;
+
         Ok(Self {
             era_chain_id,
             chain_id: chain_config.chain_id,
             l1_shared_bridge: contracts.bridges.shared.l1_address,
             bridgehub: contracts.ecosystem_contracts.bridgehub_proxy_addr,
-            governance: wallets.governor.address,
+            governance: contracts_config.l1.governance_addr,
             erc20_bridge: contracts.bridges.erc20.l1_address,
+            validium_mode,
             consensus_registry_owner: wallets.governor.address,
         })
     }
diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs b/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs
index 860e7e293f9..623eb9d4d65 100644
--- a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs
+++ b/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs
@@ -6,11 +6,11 @@ use crate::traits::ZkToolboxConfig;
 impl ZkToolboxConfig for InitializeBridgeOutput {}
 impl ZkToolboxConfig for DefaultL2UpgradeOutput {}
 impl ZkToolboxConfig for ConsensusRegistryOutput {}
+impl ZkToolboxConfig for Multicall3Output {}

 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct InitializeBridgeOutput {
-    pub l2_shared_bridge_implementation: Address,
-    pub l2_shared_bridge_proxy: Address,
+    pub l2_da_validator_address: Address,
 }

 #[derive(Debug, Clone, Serialize, Deserialize)]
@@ -23,3 +23,8 @@ pub struct ConsensusRegistryOutput {
     pub consensus_registry_implementation: Address,
     pub consensus_registry_proxy: Address,
 }
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Multicall3Output {
+    pub multicall3: Address,
+}
diff --git a/zk_toolbox/crates/config/src/forge_interface/gateway_preparation/input.rs b/zk_toolbox/crates/config/src/forge_interface/gateway_preparation/input.rs
new file mode 100644
index 00000000000..99465b07d73
--- /dev/null
+++ b/zk_toolbox/crates/config/src/forge_interface/gateway_preparation/input.rs
@@ -0,0 +1,48 @@
+use serde::{Deserialize, Serialize};
+use zksync_basic_types::{web3::Bytes, Address};
+use zksync_config::configs::GatewayConfig;
+
+use crate::{traits::ZkToolboxConfig, ChainConfig, ContractsConfig};
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct GatewayPreparationConfig {
+    pub bridgehub_proxy_addr: Address,
+    pub ctm_deployment_tracker_proxy_addr: Address,
+    pub chain_type_manager_proxy_addr: Address,
+    pub shared_bridge_proxy_addr: Address,
+    pub governance: Address,
+    pub chain_chain_id: u64, // Assuming the chain ID fits into u64; use U256 for full uint256 support
+    pub gateway_diamond_cut_data: Bytes,
+    pub chain_proxy_admin: Address,
+    pub chain_admin: Address,
+    pub access_control_restriction: Address,
+}
+impl ZkToolboxConfig for GatewayPreparationConfig {}
+
+impl GatewayPreparationConfig {
+    pub fn new(
+        chain_config: &ChainConfig,
+        chain_contracts_config: &ContractsConfig,
+        ecosystem_contracts_config: &ContractsConfig,
+        gateway_config: &GatewayConfig,
+    ) -> anyhow::Result<Self> {
+        let contracts = chain_config.get_contracts_config()?;
+
+        Ok(Self {
+            bridgehub_proxy_addr: contracts.ecosystem_contracts.bridgehub_proxy_addr,
+            chain_chain_id: chain_config.chain_id.0,
+            ctm_deployment_tracker_proxy_addr: contracts
+                .ecosystem_contracts
+                .stm_deployment_tracker_proxy_addr,
+            chain_type_manager_proxy_addr: contracts
+                .ecosystem_contracts
+                .state_transition_proxy_addr,
+            shared_bridge_proxy_addr: contracts.bridges.shared.l1_address,
+            governance: ecosystem_contracts_config.l1.governance_addr,
+            gateway_diamond_cut_data: gateway_config.diamond_cut_data.clone(),
+            chain_proxy_admin: chain_contracts_config.l1.chain_proxy_admin_addr,
+            chain_admin: chain_contracts_config.l1.chain_admin_addr,
+            access_control_restriction: chain_contracts_config.l1.access_control_restriction_addr,
+        })
+    }
+}
diff --git a/zk_toolbox/crates/config/src/forge_interface/gateway_preparation/mod.rs b/zk_toolbox/crates/config/src/forge_interface/gateway_preparation/mod.rs
new file mode 100644
index 00000000000..7d1a54844d0
--- /dev/null
+++ b/zk_toolbox/crates/config/src/forge_interface/gateway_preparation/mod.rs
@@ -0,0 +1,2 @@
+pub mod input;
+pub mod output;
diff --git a/zk_toolbox/crates/config/src/forge_interface/gateway_preparation/output.rs b/zk_toolbox/crates/config/src/forge_interface/gateway_preparation/output.rs
new file mode 100644
index 00000000000..72373eebc53
--- /dev/null
+++ b/zk_toolbox/crates/config/src/forge_interface/gateway_preparation/output.rs
@@ -0,0 +1,13 @@
+use serde::{Deserialize, Serialize};
+use zksync_basic_types::{Address, H256};
+
+use crate::traits::ZkToolboxConfig;
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct GatewayPreparationOutput {
+    pub governance_l2_tx_hash: H256,
+    pub gateway_transaction_filterer_implementation: Address,
+    pub gateway_transaction_filterer_proxy: Address,
+}
+
+impl ZkToolboxConfig for GatewayPreparationOutput {}
diff --git a/zk_toolbox/crates/config/src/forge_interface/mod.rs b/zk_toolbox/crates/config/src/forge_interface/mod.rs
index c7033c45ed2..1959ee4c3de 100644
--- a/zk_toolbox/crates/config/src/forge_interface/mod.rs
+++ b/zk_toolbox/crates/config/src/forge_interface/mod.rs
@@ -1,6 +1,8 @@
 pub mod accept_ownership;
 pub mod deploy_ecosystem;
+pub mod deploy_gateway_ctm;
 pub mod deploy_l2_contracts;
+pub mod gateway_preparation;
 pub mod paymaster;
 pub mod register_chain;
 pub mod script_params;
diff --git a/zk_toolbox/crates/config/src/forge_interface/register_chain/input.rs b/zk_toolbox/crates/config/src/forge_interface/register_chain/input.rs
index 29494ba5d8f..bb6d61c6f8d 100644
--- a/zk_toolbox/crates/config/src/forge_interface/register_chain/input.rs
+++ b/zk_toolbox/crates/config/src/forge_interface/register_chain/input.rs
@@ -6,34 +6,43 @@ use zksync_basic_types::L2ChainId;

 use crate::{traits::ZkToolboxConfig, ChainConfig, ContractsConfig};

+#[derive(Debug, Deserialize, Serialize, Clone)]
+pub struct RegisterChainL1Config {
+    contracts_config: Contracts,
+    deployed_addresses: DeployedAddresses,
+    chain: ChainL1Config,
+    owner_address: Address,
+}
+
 #[derive(Debug, Deserialize, Serialize, Clone)]
 struct Bridgehub {
     bridgehub_proxy_addr: Address,
 }

+#[derive(Debug, Deserialize, Serialize, Clone)]
+struct Bridges {
+    shared_bridge_proxy_addr: Address,
+    l1_nullifier_proxy_addr: Address,
+}
+
 #[derive(Debug, Deserialize, Serialize, Clone)]
 struct StateTransition {
-    state_transition_proxy_addr: Address,
+    chain_type_manager_proxy_addr: Address,
 }

 #[derive(Debug, Deserialize, Serialize, Clone)]
 struct DeployedAddresses {
     state_transition: StateTransition,
     bridgehub: Bridgehub,
+    bridges: Bridges,
     validator_timelock_addr: Address,
+    native_token_vault_addr: Address,
 }

 #[derive(Debug, Deserialize, Serialize, Clone)]
 struct Contracts {
     diamond_cut_data: String,
-}
-
-#[derive(Debug, Deserialize, Serialize, Clone)]
-pub struct RegisterChainL1Config {
-    contracts_config: Contracts,
-    deployed_addresses: DeployedAddresses,
-    chain: ChainL1Config,
-    owner_address: Address,
+    force_deployments_data: String,
 }

 #[derive(Debug, Deserialize, Serialize, Clone)]
@@ -54,25 +63,33 @@ impl ZkToolboxConfig for RegisterChainL1Config {}

 impl RegisterChainL1Config {
     pub fn new(chain_config: &ChainConfig, contracts: &ContractsConfig) -> anyhow::Result<Self> {
-        let genesis_config = chain_config.get_genesis_config()?;
         let wallets_config = chain_config.get_wallets_config()?;

         Ok(Self {
             contracts_config: Contracts {
                 diamond_cut_data: contracts.ecosystem_contracts.diamond_cut_data.clone(),
+                force_deployments_data: contracts
+                    .ecosystem_contracts
+                    .force_deployments_data
+                    .clone(),
             },
             deployed_addresses: DeployedAddresses {
                 state_transition: StateTransition {
-                    state_transition_proxy_addr: contracts
+                    chain_type_manager_proxy_addr: contracts
                         .ecosystem_contracts
                         .state_transition_proxy_addr,
                 },
                 bridgehub: Bridgehub {
                     bridgehub_proxy_addr: contracts.ecosystem_contracts.bridgehub_proxy_addr,
                 },
+                bridges: Bridges {
+                    shared_bridge_proxy_addr: contracts.bridges.shared.l1_address,
+                    l1_nullifier_proxy_addr: contracts.bridges.l1_nullifier_addr,
+                },
                 validator_timelock_addr: contracts.ecosystem_contracts.validator_timelock_addr,
+                native_token_vault_addr: contracts.ecosystem_contracts.native_token_vault_addr,
             },
             chain: ChainL1Config {
-                chain_chain_id: genesis_config.l2_chain_id,
+                chain_chain_id: chain_config.chain_id,
                 base_token_gas_price_multiplier_nominator: chain_config.base_token.nominator,
                 base_token_gas_price_multiplier_denominator: chain_config.base_token.denominator,
                 base_token_addr: chain_config.base_token.address,
diff --git a/zk_toolbox/crates/config/src/forge_interface/register_chain/output.rs b/zk_toolbox/crates/config/src/forge_interface/register_chain/output.rs
index f9521b16328..2281e8fc2d5 100644
--- a/zk_toolbox/crates/config/src/forge_interface/register_chain/output.rs
+++ b/zk_toolbox/crates/config/src/forge_interface/register_chain/output.rs
@@ -8,6 +8,9 @@ pub struct RegisterChainOutput {
     pub diamond_proxy_addr: Address,
     pub governance_addr: Address,
     pub chain_admin_addr: Address,
+    pub l2_legacy_shared_bridge_addr: Address,
+    pub access_control_restriction_addr: Address,
+    pub chain_proxy_admin_addr: Address,
 }

 impl ZkToolboxConfig for RegisterChainOutput {}
diff --git a/zk_toolbox/crates/config/src/forge_interface/script_params.rs b/zk_toolbox/crates/config/src/forge_interface/script_params.rs
index e7e21ad132b..f05b1abfb37 100644
--- a/zk_toolbox/crates/config/src/forge_interface/script_params.rs
+++ b/zk_toolbox/crates/config/src/forge_interface/script_params.rs
@@ -39,9 +39,9 @@ pub const DEPLOY_L2_CONTRACTS_SCRIPT_PARAMS: ForgeScriptParams = ForgeScriptPara
 };

 pub const REGISTER_CHAIN_SCRIPT_PARAMS: ForgeScriptParams = ForgeScriptParams {
-    input: "script-config/register-hyperchain.toml",
-    output: "script-out/output-register-hyperchain.toml",
-    script_path: "deploy-scripts/RegisterHyperchain.s.sol",
+    input: "script-config/register-zk-chain.toml",
+    output: "script-out/output-register-zk-chain.toml",
+    script_path: "deploy-scripts/RegisterZKChain.s.sol",
 };

 pub const DEPLOY_ERC20_SCRIPT_PARAMS: ForgeScriptParams = ForgeScriptParams {
@@ -67,3 +67,15 @@ pub const SETUP_LEGACY_BRIDGE: ForgeScriptParams = ForgeScriptParams {
     output: "script-out/setup-legacy-bridge.toml",
     script_path: "deploy-scripts/dev/SetupLegacyBridge.s.sol",
 };
+
+pub const DEPLOY_GATEWAY_CTM: ForgeScriptParams = ForgeScriptParams {
+    input: "script-config/config-deploy-gateway-ctm.toml",
+    output: "script-out/output-deploy-gateway-ctm.toml",
+    script_path: "deploy-scripts/GatewayCTMFromL1.s.sol",
+};
+
+pub const GATEWAY_PREPARATION: ForgeScriptParams = ForgeScriptParams {
+    input: "script-config/gateway-preparation-l1.toml",
+    output: "script-out/output-gateway-preparation-l1.toml",
+    script_path: "deploy-scripts/GatewayPreparation.s.sol",
+};
diff --git a/zk_toolbox/crates/config/src/gateway.rs b/zk_toolbox/crates/config/src/gateway.rs
new file mode 100644
index 00000000000..2e21589cb9b
--- /dev/null
+++ b/zk_toolbox/crates/config/src/gateway.rs
@@ -0,0 +1,45 @@
+use zksync_config::configs::{gateway::GatewayChainConfig, GatewayConfig};
+
+use crate::{
+    forge_interface::deploy_gateway_ctm::output::DeployGatewayCTMOutput,
+    traits::{FileConfigWithDefaultName, ZkToolboxConfig},
+    GATEWAY_CHAIN_FILE, GATEWAY_FILE,
+};
+
+impl FileConfigWithDefaultName for GatewayConfig {
+    const FILE_NAME: &'static str = GATEWAY_FILE;
+}
+
+impl ZkToolboxConfig for GatewayConfig {}
+
+impl From<DeployGatewayCTMOutput> for GatewayConfig {
+    fn from(output: DeployGatewayCTMOutput) -> Self {
+        GatewayConfig {
+            state_transition_proxy_addr: output
+                .gateway_state_transition
+                .chain_type_manager_proxy_addr,
+            state_transition_implementation_addr: output
+                .gateway_state_transition
+                .chain_type_manager_implementation_addr,
+            verifier_addr: output.gateway_state_transition.verifier_addr,
+            admin_facet_addr: output.gateway_state_transition.admin_facet_addr,
+            mailbox_facet_addr: output.gateway_state_transition.mailbox_facet_addr,
+            executor_facet_addr: output.gateway_state_transition.executor_facet_addr,
+            getters_facet_addr: output.gateway_state_transition.getters_facet_addr,
+            diamond_init_addr: output.gateway_state_transition.diamond_init_addr,
+            genesis_upgrade_addr: output.gateway_state_transition.genesis_upgrade_addr,
+            default_upgrade_addr: output.gateway_state_transition.default_upgrade_addr,
+            multicall3_addr: output.multicall3_addr,
+            diamond_cut_data: output.diamond_cut_data,
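+            // Note: "CTM" (chain type manager) and "STM" (state transition manager)
+            // name the same contract family; the config field names still mix both.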
validator_timelock_addr: output.gateway_state_transition.validator_timelock_addr, + relayed_sl_da_validator: output.relayed_sl_da_validator, + validium_da_validator: output.validium_da_validator, + } + } +} + +impl FileConfigWithDefaultName for GatewayChainConfig { + const FILE_NAME: &'static str = GATEWAY_CHAIN_FILE; +} + +impl ZkToolboxConfig for GatewayChainConfig {} diff --git a/zk_toolbox/crates/config/src/general.rs b/zk_toolbox/crates/config/src/general.rs index 6498beb0f53..87eb3a7eb19 100644 --- a/zk_toolbox/crates/config/src/general.rs +++ b/zk_toolbox/crates/config/src/general.rs @@ -1,6 +1,7 @@ use std::path::{Path, PathBuf}; use anyhow::Context; +use common::yaml::merge_yaml; use url::Url; use xshell::Shell; pub use zksync_config::configs::GeneralConfig; @@ -10,7 +11,7 @@ use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; use crate::{ consts::GENERAL_FILE, traits::{ConfigWithL2RpcUrl, FileConfigWithDefaultName, ReadConfig, SaveConfig}, - DEFAULT_CONSENSUS_PORT, + ChainConfig, DEFAULT_CONSENSUS_PORT, }; pub struct RocksDbs { @@ -174,6 +175,15 @@ pub fn update_ports(config: &mut GeneralConfig, ports_config: &PortsConfig) -> a Ok(()) } +pub fn override_config(shell: &Shell, path: PathBuf, chain: &ChainConfig) -> anyhow::Result<()> { + let chain_config_path = chain.path_to_general_config(); + let override_config = serde_yaml::from_str(&shell.read_file(path)?)?; + let mut chain_config = serde_yaml::from_str(&shell.read_file(chain_config_path.clone())?)?; + merge_yaml(&mut chain_config, override_config, true)?; + shell.write_file(chain_config_path, serde_yaml::to_string(&chain_config)?)?; + Ok(()) +} + fn update_port_in_url(http_url: &mut String, port: u16) -> anyhow::Result<()> { let mut http_url_url = Url::parse(http_url)?; if let Err(()) = http_url_url.set_port(Some(port)) { diff --git a/zk_toolbox/crates/config/src/lib.rs b/zk_toolbox/crates/config/src/lib.rs index 1a7c5bf1d7e..53ac423b823 100644 --- a/zk_toolbox/crates/config/src/lib.rs +++ b/zk_toolbox/crates/config/src/lib.rs @@ -18,6 +18,7 @@ mod consts; mod contracts; mod ecosystem; mod file_config; +mod gateway; mod general; mod genesis; mod manipulations; diff --git a/zk_toolbox/crates/git_version_macro/Cargo.toml b/zk_toolbox/crates/git_version_macro/Cargo.toml new file mode 100644 index 00000000000..eb70b450a4c --- /dev/null +++ b/zk_toolbox/crates/git_version_macro/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "git_version_macro" +edition = "2021" +description = "Procedural macro to generate metainformation about build in compile time" +version.workspace = true +homepage.workspace = true +license.workspace = true +authors.workspace = true +exclude.workspace = true +repository.workspace = true +keywords.workspace = true + +[lib] +proc-macro = true + +[dependencies] +chrono.workspace = true diff --git a/zk_toolbox/crates/git_version_macro/src/lib.rs b/zk_toolbox/crates/git_version_macro/src/lib.rs new file mode 100644 index 00000000000..34b83efce19 --- /dev/null +++ b/zk_toolbox/crates/git_version_macro/src/lib.rs @@ -0,0 +1,81 @@ +extern crate proc_macro; +use std::{process::Command, str::FromStr}; + +use proc_macro::TokenStream; + +/// Outputs the current date and time as a string literal. +/// Can be used to include the build timestamp in the binary. +#[proc_macro] +pub fn build_timestamp(_item: TokenStream) -> TokenStream { + let now = chrono::Local::now().format("%Y-%m-%d %H:%M:%S").to_string(); + encode_as_str(&now) +} + +/// Outputs the current git branch as a string literal. 
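+/// For example: `const GIT_BRANCH: &str = git_version_macro::build_git_branch!();`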
+#[proc_macro]
+pub fn build_git_branch(_item: TokenStream) -> TokenStream {
+    let out = run_cmd("git", &["rev-parse", "--abbrev-ref", "HEAD"]);
+    encode_as_str(&out)
+}
+
+/// Outputs the current git commit hash as a string literal.
+#[proc_macro]
+pub fn build_git_revision(_item: TokenStream) -> TokenStream {
+    let out = run_cmd("git", &["rev-parse", "--short", "HEAD"]);
+    encode_as_str(&out)
+}
+
+/// Creates a slice of `&[(&str, &str)]` tuples that correspond to
+/// the submodule name -> revision.
+/// Results in an empty list if there are no submodules or if
+/// the command fails.
+#[proc_macro]
+pub fn build_git_submodules(_item: TokenStream) -> TokenStream {
+    let Some(out) = run_cmd_opt("git", &["submodule", "status"]) else {
+        return TokenStream::from_str("&[]").unwrap();
+    };
+    let submodules = out
+        .lines()
+        .filter_map(|line| {
+            let parts: Vec<&str> = line.split_whitespace().collect();
+            // Index 0 is commit hash, index 1 is the path to the folder, and there
+            // may be some metainformation after that.
+            if parts.len() >= 2 {
+                let folder_name = parts[1].split('/').last().unwrap_or(parts[1]);
+                Some((folder_name, parts[0]))
+            } else {
+                None
+            }
+        })
+        .collect::<Vec<_>>();
+    let submodules = submodules
+        .iter()
+        .map(|(name, rev)| format!("(\"{}\", \"{}\")", name, rev))
+        .collect::<Vec<_>>()
+        .join(", ");
+    TokenStream::from_str(format!("&[{}]", submodules).as_str())
+        .unwrap_or_else(|_| panic!("Unable to encode submodules: {}", submodules))
+}
+
+/// Runs the command and returns its trimmed output, or `"unknown"` on failure.
+fn run_cmd(cmd: &str, args: &[&str]) -> String {
+    run_cmd_opt(cmd, args).unwrap_or("unknown".to_string())
+}
+
+/// Tries to run the command, only returns `Some` if the command
+/// succeeded and the output was valid utf8.
+fn run_cmd_opt(cmd: &str, args: &[&str]) -> Option<String> {
+    let output = Command::new(cmd).args(args).output().ok()?;
+    if output.status.success() {
+        String::from_utf8(output.stdout)
+            .ok()
+            .map(|s| s.trim().to_string())
+    } else {
+        None
+    }
+}
+
+/// Encodes string as a literal.
+fn encode_as_str(s: &str) -> TokenStream {
+    TokenStream::from_str(format!("\"{}\"", s).as_str())
+        .unwrap_or_else(|_| panic!("Unable to encode string: {}", s))
+}
diff --git a/zk_toolbox/crates/zk_inception/Cargo.toml b/zk_toolbox/crates/zk_inception/Cargo.toml
index 61983d59e6e..6b5d17c760c 100644
--- a/zk_toolbox/crates/zk_inception/Cargo.toml
+++ b/zk_toolbox/crates/zk_inception/Cargo.toml
@@ -31,6 +31,7 @@ toml.workspace = true
 url.workspace = true
 thiserror.workspace = true
 zksync_config.workspace = true
+zksync_system_constants.workspace = true
 slugify-rs.workspace = true
 zksync_basic_types.workspace = true
 clap-markdown.workspace = true
diff --git a/zk_toolbox/crates/zk_inception/src/accept_ownership.rs b/zk_toolbox/crates/zk_inception/src/accept_ownership.rs
index d2bab928374..a41246e1de0 100644
--- a/zk_toolbox/crates/zk_inception/src/accept_ownership.rs
+++ b/zk_toolbox/crates/zk_inception/src/accept_ownership.rs
@@ -21,7 +21,7 @@ lazy_static! {
     parse_abi(&[
         "function governanceAcceptOwner(address governor, address target) public",
         "function chainAdminAcceptAdmin(address admin, address target) public",
-        "function chainSetTokenMultiplierSetter(address chainAdmin, address target) public"
+        "function setDAValidatorPair(address chainAdmin, address target, address l1DaValidator, address l2DaValidator) public"
     ])
     .unwrap(),
 );
@@ -45,7 +45,7 @@ pub async fn accept_admin(
     let calldata = ACCEPT_ADMIN
         .encode("chainAdminAcceptAdmin", (admin, target_address))
         .unwrap();
-    let foundry_contracts_path = ecosystem_config.path_to_foundry();
+    let foundry_contracts_path = ecosystem_config.path_to_l1_foundry();
     let forge = Forge::new(&foundry_contracts_path)
         .script(
             &ACCEPT_GOVERNANCE_SCRIPT_PARAMS.script(),
@@ -74,7 +74,47 @@ pub async fn accept_owner(
     let calldata = ACCEPT_ADMIN
         .encode("governanceAcceptOwner", (governor_contract, target_address))
         .unwrap();
-    let foundry_contracts_path = ecosystem_config.path_to_foundry();
+    let foundry_contracts_path = ecosystem_config.path_to_l1_foundry();
+    let forge = Forge::new(&foundry_contracts_path)
+        .script(
+            &ACCEPT_GOVERNANCE_SCRIPT_PARAMS.script(),
+            forge_args.clone(),
+        )
+        .with_ffi()
+        .with_rpc_url(l1_rpc_url)
+        .with_broadcast()
+        .with_calldata(&calldata);
+    accept_ownership(shell, governor, forge).await
+}
+
+#[allow(clippy::too_many_arguments)]
+pub async fn set_da_validator_pair(
+    shell: &Shell,
+    ecosystem_config: &EcosystemConfig,
+    chain_admin_addr: Address,
+    governor: Option<H256>,
+    diamond_proxy_address: Address,
+    l1_da_validator_address: Address,
+    l2_da_validator_address: Address,
+    forge_args: &ForgeScriptArgs,
+    l1_rpc_url: String,
+) -> anyhow::Result<()> {
+    // resume doesn't properly work here.
+    let mut forge_args = forge_args.clone();
+    forge_args.resume = false;
+
+    let calldata = ACCEPT_ADMIN
+        .encode(
+            "setDAValidatorPair",
+            (
+                chain_admin_addr,
+                diamond_proxy_address,
+                l1_da_validator_address,
+                l2_da_validator_address,
+            ),
+        )
+        .unwrap();
+    let foundry_contracts_path = ecosystem_config.path_to_l1_foundry();
     let forge = Forge::new(&foundry_contracts_path)
         .script(
             &ACCEPT_GOVERNANCE_SCRIPT_PARAMS.script(),
diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/build_transactions.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/args/build_transactions.rs
new file mode 100644
index 00000000000..793bea487f7
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/chain/args/build_transactions.rs
@@ -0,0 +1,60 @@
+use std::path::PathBuf;
+
+use clap::Parser;
+use common::{config::global_config, forge::ForgeScriptArgs, Prompt};
+use serde::{Deserialize, Serialize};
+use url::Url;
+
+use crate::{
+    consts::DEFAULT_UNSIGNED_TRANSACTIONS_DIR,
+    defaults::LOCAL_RPC_URL,
+    messages::{MSG_L1_RPC_URL_HELP, MSG_L1_RPC_URL_INVALID_ERR, MSG_L1_RPC_URL_PROMPT},
+};
+
+const CHAIN_SUBDIR: &str = "chain";
+
+#[derive(Debug, Clone, Serialize, Deserialize, Parser)]
+pub struct BuildTransactionsArgs {
+    /// Output directory for the generated files.
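+    /// When omitted, defaults to `DEFAULT_UNSIGNED_TRANSACTIONS_DIR/chain/<chain name>`
+    /// (see `fill_values_with_prompt` below).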
+    #[arg(long, short)]
+    pub out: Option<PathBuf>,
+    /// All ethereum environment related arguments
+    #[clap(flatten)]
+    #[serde(flatten)]
+    pub forge_args: ForgeScriptArgs,
+    #[clap(long, help = MSG_L1_RPC_URL_HELP)]
+    pub l1_rpc_url: Option<String>,
+}
+
+impl BuildTransactionsArgs {
+    pub fn fill_values_with_prompt(self, default_chain: String) -> BuildTransactionsArgsFinal {
+        let chain_name = global_config().chain_name.clone();
+
+        let l1_rpc_url = self.l1_rpc_url.unwrap_or_else(|| {
+            Prompt::new(MSG_L1_RPC_URL_PROMPT)
+                .default(LOCAL_RPC_URL)
+                .validate_with(|val: &String| -> Result<(), String> {
+                    Url::parse(val)
+                        .map(|_| ())
+                        .map_err(|_| MSG_L1_RPC_URL_INVALID_ERR.to_string())
+                })
+                .ask()
+        });
+
+        BuildTransactionsArgsFinal {
+            out: self
+                .out
+                .unwrap_or(PathBuf::from(DEFAULT_UNSIGNED_TRANSACTIONS_DIR).join(CHAIN_SUBDIR))
+                .join(chain_name.unwrap_or(default_chain)),
+            forge_args: self.forge_args,
+            l1_rpc_url,
+        }
+    }
+}
+
+#[derive(Debug, Serialize, Deserialize, Clone)]
+pub struct BuildTransactionsArgsFinal {
+    pub out: PathBuf,
+    pub forge_args: ForgeScriptArgs,
+    pub l1_rpc_url: String,
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/args/mod.rs
index 08f39a90a84..f2a5f6b8be1 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/chain/args/mod.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/chain/args/mod.rs
@@ -1,3 +1,4 @@
+pub mod build_transactions;
 pub mod create;
 pub mod genesis;
 pub mod init;
diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/build_transactions.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/build_transactions.rs
new file mode 100644
index 00000000000..68cb7a9a074
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/chain/build_transactions.rs
@@ -0,0 +1,90 @@
+use anyhow::Context;
+use common::{config::global_config, git, logger, spinner::Spinner};
+use config::{
+    copy_configs, traits::SaveConfigWithBasePath, update_from_chain_config, EcosystemConfig,
+};
+use ethers::utils::hex::ToHex;
+use xshell::Shell;
+
+use super::common::register_chain;
+use crate::{
+    commands::chain::args::build_transactions::BuildTransactionsArgs,
+    messages::{
+        MSG_BUILDING_CHAIN_REGISTRATION_TXNS_SPINNER, MSG_CHAIN_NOT_FOUND_ERR,
+        MSG_CHAIN_TRANSACTIONS_BUILT, MSG_CHAIN_TXN_MISSING_CONTRACT_CONFIG,
+        MSG_CHAIN_TXN_OUT_PATH_INVALID_ERR, MSG_PREPARING_CONFIG_SPINNER, MSG_SELECTED_CONFIG,
+        MSG_WRITING_OUTPUT_FILES_SPINNER,
+    },
+};
+
+const REGISTER_CHAIN_TXNS_FILE_SRC: &str =
+    "contracts/l1-contracts/broadcast/RegisterHyperchain.s.sol/9/dry-run/run-latest.json";
+const REGISTER_CHAIN_TXNS_FILE_DST: &str = "register-hyperchain-txns.json";
+
+const SCRIPT_CONFIG_FILE_SRC: &str =
+    "contracts/l1-contracts/script-config/register-hyperchain.toml";
+const SCRIPT_CONFIG_FILE_DST: &str = "register-hyperchain.toml";
+
+pub(crate) async fn run(args: BuildTransactionsArgs, shell: &Shell) -> anyhow::Result<()> {
+    let config = EcosystemConfig::from_file(shell)?;
+    let chain_name = global_config().chain_name.clone();
+    let chain_config = config
+        .load_chain(chain_name)
+        .context(MSG_CHAIN_NOT_FOUND_ERR)?;
+
+    let args = args.fill_values_with_prompt(config.default_chain.clone());
+
+    git::submodule_update(shell, config.link_to_code.clone())?;
+
+    let spinner = Spinner::new(MSG_PREPARING_CONFIG_SPINNER);
+    copy_configs(shell, &config.link_to_code, &chain_config.configs)?;
+
+    logger::note(MSG_SELECTED_CONFIG, logger::object_to_string(&chain_config));
+
+    let mut genesis_config = chain_config.get_genesis_config()?;
+    update_from_chain_config(&mut genesis_config, &chain_config);
+
+    // Copy ecosystem contracts
+    let mut contracts_config = config
+        .get_contracts_config()
+        .context(MSG_CHAIN_TXN_MISSING_CONTRACT_CONFIG)?;
+    contracts_config.l1.base_token_addr = chain_config.base_token.address;
+    spinner.finish();
+
+    let spinner = Spinner::new(MSG_BUILDING_CHAIN_REGISTRATION_TXNS_SPINNER);
+    let governor: String = config.get_wallets()?.governor.address.encode_hex_upper();
+
+    register_chain(
+        shell,
+        args.forge_args.clone(),
+        &config,
+        &chain_config,
+        &mut contracts_config,
+        args.l1_rpc_url.clone(),
+        Some(governor),
+        false,
+    )
+    .await?;
+
+    contracts_config.save_with_base_path(shell, &args.out)?;
+    spinner.finish();
+
+    let spinner = Spinner::new(MSG_WRITING_OUTPUT_FILES_SPINNER);
+    shell
+        .create_dir(&args.out)
+        .context(MSG_CHAIN_TXN_OUT_PATH_INVALID_ERR)?;
+
+    shell.copy_file(
+        config.link_to_code.join(REGISTER_CHAIN_TXNS_FILE_SRC),
+        args.out.join(REGISTER_CHAIN_TXNS_FILE_DST),
+    )?;
+
+    shell.copy_file(
+        config.link_to_code.join(SCRIPT_CONFIG_FILE_SRC),
+        args.out.join(SCRIPT_CONFIG_FILE_DST),
+    )?;
+    spinner.finish();
+
+    logger::success(MSG_CHAIN_TRANSACTIONS_BUILT);
+    Ok(())
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/common.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/common.rs
new file mode 100644
index 00000000000..6886f0d55f0
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/chain/common.rs
@@ -0,0 +1,125 @@
+use common::{
+    forge::{Forge, ForgeScriptArgs},
+    spinner::Spinner,
+};
+use config::{
+    forge_interface::{
+        register_chain::{input::RegisterChainL1Config, output::RegisterChainOutput},
+        script_params::REGISTER_CHAIN_SCRIPT_PARAMS,
+    },
+    traits::{ReadConfig, SaveConfig},
+    ChainConfig, ContractsConfig, EcosystemConfig,
+};
+use types::{BaseToken, L1Network, WalletCreation};
+use xshell::Shell;
+
+use crate::{
+    consts::AMOUNT_FOR_DISTRIBUTION_TO_WALLETS,
+    messages::{MSG_DISTRIBUTING_ETH_SPINNER, MSG_MINT_BASE_TOKEN_SPINNER},
+    utils::forge::{check_the_balance, fill_forge_private_key},
+};
+
+#[allow(clippy::too_many_arguments)]
+pub async fn register_chain(
+    shell: &Shell,
+    forge_args: ForgeScriptArgs,
+    config: &EcosystemConfig,
+    chain_config: &ChainConfig,
+    contracts: &mut ContractsConfig,
+    l1_rpc_url: String,
+    sender: Option<String>,
+    broadcast: bool,
+) -> anyhow::Result<()> {
+    let deploy_config_path = REGISTER_CHAIN_SCRIPT_PARAMS.input(&config.link_to_code);
+
+    let deploy_config = RegisterChainL1Config::new(chain_config, contracts)?;
+    deploy_config.save(shell, deploy_config_path)?;
+
+    let mut forge = Forge::new(&config.path_to_l1_foundry())
+        .script(&REGISTER_CHAIN_SCRIPT_PARAMS.script(), forge_args.clone())
+        .with_ffi()
+        .with_rpc_url(l1_rpc_url);
+
+    if broadcast {
+        forge = forge.with_broadcast();
+    }
+
+    if let Some(address) = sender {
+        forge = forge.with_sender(address);
+    } else {
+        forge = fill_forge_private_key(forge, config.get_wallets()?.governor_private_key())?;
+        check_the_balance(&forge).await?;
+    }
+
+    forge.run(shell)?;
+
+    let register_chain_output = RegisterChainOutput::read(
+        shell,
+        REGISTER_CHAIN_SCRIPT_PARAMS.output(&chain_config.link_to_code),
+    )?;
+    contracts.set_chain_contracts(&register_chain_output);
+    Ok(())
+}
+
+// Distribute eth to the chain wallets for localhost environment
+pub async fn distribute_eth(
+    ecosystem_config: &EcosystemConfig,
+    chain_config: &ChainConfig,
+    l1_rpc_url: String,
+) -> anyhow::Result<()> {
+    if
chain_config.wallet_creation == WalletCreation::Localhost + && ecosystem_config.l1_network == L1Network::Localhost + { + let spinner = Spinner::new(MSG_DISTRIBUTING_ETH_SPINNER); + let wallets = ecosystem_config.get_wallets()?; + let chain_wallets = chain_config.get_wallets_config()?; + let mut addresses = vec![ + chain_wallets.operator.address, + chain_wallets.blob_operator.address, + chain_wallets.governor.address, + ]; + if let Some(deployer) = chain_wallets.deployer { + addresses.push(deployer.address) + } + common::ethereum::distribute_eth( + wallets.operator, + addresses, + l1_rpc_url, + ecosystem_config.l1_network.chain_id(), + AMOUNT_FOR_DISTRIBUTION_TO_WALLETS, + ) + .await?; + spinner.finish(); + } + Ok(()) +} + +pub async fn mint_base_token( + ecosystem_config: &EcosystemConfig, + chain_config: &ChainConfig, + l1_rpc_url: String, +) -> anyhow::Result<()> { + if chain_config.wallet_creation == WalletCreation::Localhost + && ecosystem_config.l1_network == L1Network::Localhost + && chain_config.base_token != BaseToken::eth() + { + let spinner = Spinner::new(MSG_MINT_BASE_TOKEN_SPINNER); + let wallets = ecosystem_config.get_wallets()?; + let chain_wallets = chain_config.get_wallets_config()?; + let base_token = &chain_config.base_token; + let addresses = vec![wallets.governor.address, chain_wallets.governor.address]; + let amount = AMOUNT_FOR_DISTRIBUTION_TO_WALLETS * base_token.nominator as u128 + / base_token.denominator as u128; + common::ethereum::mint_token( + wallets.operator, + base_token.address, + addresses, + l1_rpc_url, + ecosystem_config.l1_network.chain_id(), + amount, + ) + .await?; + spinner.finish(); + } + Ok(()) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/convert_to_gateway.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/convert_to_gateway.rs new file mode 100644 index 00000000000..5e3021ba0f5 --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/convert_to_gateway.rs @@ -0,0 +1,270 @@ +use anyhow::Context; +use common::{ + config::global_config, + forge::{Forge, ForgeScriptArgs}, +}; +use config::{ + forge_interface::{ + deploy_ecosystem::input::InitialDeploymentConfig, + deploy_gateway_ctm::{input::DeployGatewayCTMInput, output::DeployGatewayCTMOutput}, + gateway_preparation::{input::GatewayPreparationConfig, output::GatewayPreparationOutput}, + script_params::{DEPLOY_GATEWAY_CTM, GATEWAY_PREPARATION}, + }, + traits::{ReadConfig, SaveConfig, SaveConfigWithBasePath}, + ChainConfig, EcosystemConfig, GenesisConfig, +}; +use ethers::{abi::parse_abi, contract::BaseContract, types::Bytes, utils::hex}; +use lazy_static::lazy_static; +use xshell::Shell; +use zksync_basic_types::H256; +use zksync_config::configs::GatewayConfig; + +use crate::{ + messages::{MSG_CHAIN_NOT_INITIALIZED, MSG_L1_SECRETS_MUST_BE_PRESENTED}, + utils::forge::{check_the_balance, fill_forge_private_key}, +}; + +lazy_static! 
{ + static ref GATEWAY_PREPARATION_INTERFACE: BaseContract = BaseContract::from( + parse_abi(&[ + "function governanceRegisterGateway() public", + "function deployAndSetGatewayTransactionFilterer() public", + "function governanceWhitelistGatewayCTM(address gatewaySTMAddress, bytes32 governanceOperationSalt) public", + "function governanceSetCTMAssetHandler(bytes32 governanceOperationSalt)", + "function registerAssetIdInBridgehub(address gatewaySTMAddress, bytes32 governanceOperationSalt)", + ]) + .unwrap(), + ); +} + +pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { + let chain_name = global_config().chain_name.clone(); + let ecosystem_config = EcosystemConfig::from_file(shell)?; + let chain_config = ecosystem_config + .load_chain(chain_name) + .context(MSG_CHAIN_NOT_INITIALIZED)?; + let l1_url = chain_config + .get_secrets_config()? + .l1 + .context(MSG_L1_SECRETS_MUST_BE_PRESENTED)? + .l1_rpc_url + .expose_str() + .to_string(); + let mut chain_contracts_config = chain_config.get_contracts_config()?; + let chain_genesis_config = chain_config.get_genesis_config()?; + + // First, deploy the gateway contracts + let gateway_config = deploy_gateway_ctm( + shell, + args.clone(), + &ecosystem_config, + &chain_config, + &chain_genesis_config, + &ecosystem_config.get_initial_deployment_config().unwrap(), + l1_url.clone(), + ) + .await?; + + let gateway_preparation_config_path = GATEWAY_PREPARATION.input(&chain_config.link_to_code); + let preparation_config = GatewayPreparationConfig::new( + &chain_config, + &chain_contracts_config, + &ecosystem_config.get_contracts_config()?, + &gateway_config, + )?; + preparation_config.save(shell, gateway_preparation_config_path)?; + + gateway_governance_whitelisting( + shell, + args.clone(), + &ecosystem_config, + &chain_config, + gateway_config, + l1_url.clone(), + ) + .await?; + + let output = call_script( + shell, + args, + &GATEWAY_PREPARATION_INTERFACE + .encode("deployAndSetGatewayTransactionFilterer", ()) + .unwrap(), + &ecosystem_config, + &chain_config, + chain_config.get_wallets_config()?.governor_private_key(), + l1_url, + ) + .await?; + + chain_contracts_config.set_transaction_filterer(output.gateway_transaction_filterer_proxy); + + chain_contracts_config.save_with_base_path(shell, chain_config.configs)?; + + Ok(()) +} + +async fn deploy_gateway_ctm( + shell: &Shell, + forge_args: ForgeScriptArgs, + config: &EcosystemConfig, + chain_config: &ChainConfig, + chain_genesis_config: &GenesisConfig, + initial_deployment_config: &InitialDeploymentConfig, + l1_rpc_url: String, +) -> anyhow::Result<GatewayConfig> { + let contracts_config = chain_config.get_contracts_config()?; + let deploy_config_path = DEPLOY_GATEWAY_CTM.input(&config.link_to_code); + + let deploy_config = DeployGatewayCTMInput::new( + chain_config, + config, + chain_genesis_config, + &contracts_config, + initial_deployment_config, + ); + deploy_config.save(shell, deploy_config_path)?; + + let mut forge = Forge::new(&config.path_to_l1_foundry()) + .script(&DEPLOY_GATEWAY_CTM.script(), forge_args.clone()) + .with_ffi() + .with_rpc_url(l1_rpc_url) + .with_broadcast(); + + // Governor private key should not be needed for this script + forge = fill_forge_private_key(forge, config.get_wallets()?.deployer_private_key())?; + check_the_balance(&forge).await?; + forge.run(shell)?; + + let register_chain_output = + DeployGatewayCTMOutput::read(shell, DEPLOY_GATEWAY_CTM.output(&chain_config.link_to_code))?; + + let gateway_config: GatewayConfig = register_chain_output.into();
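+ // Persist the gateway config so follow-up commands (e.g. `migrate-to-gateway`, which reads it back via `get_gateway_config`) can pick it up. +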
gateway_config.save_with_base_path(shell, chain_config.configs.clone())?; + + Ok(gateway_config) +} + +async fn gateway_governance_whitelisting( + shell: &Shell, + forge_args: ForgeScriptArgs, + config: &EcosystemConfig, + chain_config: &ChainConfig, + gateway_config: GatewayConfig, + l1_rpc_url: String, +) -> anyhow::Result<()> { + let hash = call_script( + shell, + forge_args.clone(), + &GATEWAY_PREPARATION_INTERFACE + .encode("governanceRegisterGateway", ()) + .unwrap(), + config, + chain_config, + config.get_wallets()?.governor_private_key(), + l1_rpc_url.clone(), + ) + .await? + .governance_l2_tx_hash; + + println!( + "Gateway registered as a settlement layer with L2 hash: {}", + hash + ); + + let hash = call_script( + shell, + forge_args.clone(), + &GATEWAY_PREPARATION_INTERFACE + .encode( + "governanceWhitelistGatewayCTM", + (gateway_config.state_transition_proxy_addr, H256::random()), + ) + .unwrap(), + config, + chain_config, + config.get_wallets()?.governor_private_key(), + l1_rpc_url.clone(), + ) + .await? + .governance_l2_tx_hash; + + // The L2 tx may or may not fail depending on whether it was executed previously, so we only log the hash. + println!( + "Gateway STM whitelisted L2 hash: {}", + hex::encode(hash.as_bytes()) + ); + + let hash = call_script( + shell, + forge_args.clone(), + &GATEWAY_PREPARATION_INTERFACE + .encode("governanceSetCTMAssetHandler", H256::random()) + .unwrap(), + config, + chain_config, + config.get_wallets()?.governor_private_key(), + l1_rpc_url.clone(), + ) + .await? + .governance_l2_tx_hash; + + // The L2 tx may or may not fail depending on whether it was executed previously, so we only log the hash. + println!( + "Gateway STM asset handler is set L2 hash: {}", + hex::encode(hash.as_bytes()) + ); + + let hash = call_script( + shell, + forge_args.clone(), + &GATEWAY_PREPARATION_INTERFACE + .encode( + "registerAssetIdInBridgehub", + (gateway_config.state_transition_proxy_addr, H256::random()), + ) + .unwrap(), + config, + chain_config, + config.get_wallets()?.governor_private_key(), + l1_rpc_url.clone(), + ) + .await? + .governance_l2_tx_hash; + + // The L2 tx may or may not fail depending on whether it was executed previously, so we only log the hash. + println!( + "Asset Id is registered in L2 bridgehub. L2 hash: {}", + hex::encode(hash.as_bytes()) + ); +
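+ // At this point the gateway CTM is whitelisted, its asset handler is set, and its asset id is registered in the L2 bridgehub.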
+ Ok(()) +} + +async fn call_script( + shell: &Shell, + forge_args: ForgeScriptArgs, + data: &Bytes, + config: &EcosystemConfig, + chain_config: &ChainConfig, + private_key: Option<H256>, + l1_rpc_url: String, +) -> anyhow::Result<GatewayPreparationOutput> { + let mut forge = Forge::new(&config.path_to_l1_foundry()) + .script(&GATEWAY_PREPARATION.script(), forge_args.clone()) + .with_ffi() + .with_rpc_url(l1_rpc_url) + .with_broadcast() + .with_calldata(data); + + // Governor private key is required for this script + forge = fill_forge_private_key(forge, private_key)?; + check_the_balance(&forge).await?; + forge.run(shell)?; + + GatewayPreparationOutput::read( + shell, + GATEWAY_PREPARATION.output(&chain_config.link_to_code), + ) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs index 7545ec2ec26..50b1324bf5d 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs @@ -2,23 +2,26 @@ use std::path::Path; use anyhow::Context; use common::{ - cmd::Cmd, config::global_config, forge::{Forge, ForgeScriptArgs}, + hardhat::build_l2_contracts, spinner::Spinner, }; use config::{ forge_interface::{ deploy_l2_contracts::{ input::DeployL2ContractsInput, - output::{ConsensusRegistryOutput, DefaultL2UpgradeOutput, InitializeBridgeOutput}, + output::{ + ConsensusRegistryOutput, DefaultL2UpgradeOutput, InitializeBridgeOutput, + Multicall3Output, + }, }, script_params::DEPLOY_L2_CONTRACTS_SCRIPT_PARAMS, }, traits::{ReadConfig, SaveConfig, SaveConfigWithBasePath}, ChainConfig, ContractsConfig, EcosystemConfig, }; -use xshell::{cmd, Shell}; +use xshell::Shell; use crate::{ messages::{ @@ -205,6 +208,7 @@ pub async fn deploy_l2_contracts( contracts_config.set_l2_shared_bridge(&InitializeBridgeOutput::read(shell, out)?)?; contracts_config.set_default_l2_upgrade(&DefaultL2UpgradeOutput::read(shell, out)?)?; contracts_config.set_consensus_registry(&ConsensusRegistryOutput::read(shell, out)?)?; + contracts_config.set_multicall3(&Multicall3Output::read(shell, out)?)?; Ok(()) }, ) @@ -218,7 +222,13 @@ async fn call_forge( forge_args: ForgeScriptArgs, signature: Option<&str>, ) -> anyhow::Result<()> { - let input = DeployL2ContractsInput::new(chain_config, ecosystem_config.era_chain_id)?; + let input = DeployL2ContractsInput::new( + chain_config, + &ecosystem_config + .get_contracts_config() + .expect("contracts config"), + ecosystem_config.era_chain_id, + )?; let foundry_contracts_path = chain_config.path_to_foundry(); let secrets = chain_config.get_secrets_config()?; input.save( @@ -255,8 +265,3 @@ async fn call_forge( forge.run(shell)?; Ok(()) } - -fn build_l2_contracts(shell: &Shell, link_to_code: &Path) -> anyhow::Result<()> { - let _dir_guard = shell.push_dir(link_to_code.join("contracts")); - Ok(Cmd::new(cmd!(shell, "yarn l2 build")).run()?)
-} diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs index 81ac457cd88..58c199189bd 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs @@ -2,7 +2,6 @@ use anyhow::Context; use common::{ config::global_config, forge::{Forge, ForgeScriptArgs}, - spinner::Spinner, }; use config::{ forge_interface::{ @@ -15,9 +14,7 @@ use config::{ use xshell::Shell; use crate::{ - messages::{ - MSG_CHAIN_NOT_INITIALIZED, MSG_DEPLOYING_PAYMASTER, MSG_L1_SECRETS_MUST_BE_PRESENTED, - }, + messages::{MSG_CHAIN_NOT_INITIALIZED, MSG_L1_SECRETS_MUST_BE_PRESENTED}, utils::forge::{check_the_balance, fill_forge_private_key}, }; @@ -28,7 +25,7 @@ pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { .load_chain(chain_name) .context(MSG_CHAIN_NOT_INITIALIZED)?; let mut contracts = chain_config.get_contracts_config()?; - deploy_paymaster(shell, &chain_config, &mut contracts, args).await?; + deploy_paymaster(shell, &chain_config, &mut contracts, args, None, true).await?; contracts.save_with_base_path(shell, chain_config.configs) } @@ -37,6 +34,8 @@ pub async fn deploy_paymaster( chain_config: &ChainConfig, contracts_config: &mut ContractsConfig, forge_args: ForgeScriptArgs, + sender: Option<String>, + broadcast: bool, ) -> anyhow::Result<()> { + let input = DeployPaymasterInput::new(chain_config)?; let foundry_contracts_path = chain_config.path_to_foundry(); @@ -56,18 +55,23 @@ pub async fn deploy_paymaster( .l1_rpc_url .expose_str() .to_string(), - ) - .with_broadcast(); + ); - forge = fill_forge_private_key( - forge, - chain_config.get_wallets_config()?.governor_private_key(), - )?; + if let Some(address) = sender { + forge = forge.with_sender(address); + } else { + forge = fill_forge_private_key( + forge, + chain_config.get_wallets_config()?.governor_private_key(), + )?; + } + + if broadcast { + forge = forge.with_broadcast(); + check_the_balance(&forge).await?; + } - let spinner = Spinner::new(MSG_DEPLOYING_PAYMASTER); - check_the_balance(&forge).await?; forge.run(shell)?; - spinner.finish(); let output = DeployPaymasterOutput::read( shell, diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs index bfa3f94916b..d57dae4a370 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs @@ -9,7 +9,7 @@ use common::{ spinner::Spinner, }; use config::{ - set_databases, set_file_artifacts, set_rocks_db_config, + override_config, set_databases, set_file_artifacts, set_rocks_db_config, traits::{FileConfigWithDefaultName, SaveConfigWithBasePath}, ChainConfig, ContractsConfig, EcosystemConfig, FileArtifacts, GeneralConfig, GenesisConfig, SecretsConfig, WalletsConfig, @@ -17,12 +17,14 @@ use config::{ use types::ProverMode; use xshell::Shell; use zksync_basic_types::commitment::L1BatchCommitmentMode; -use zksync_config::configs::eth_sender::{ProofSendingMode, PubdataSendingMode}; use super::args::genesis::GenesisArgsFinal; use crate::{ commands::chain::args::genesis::GenesisArgs, - consts::{PROVER_MIGRATIONS, SERVER_MIGRATIONS}, + consts::{ + PATH_TO_ONLY_REAL_PROOFS_OVERRIDE_CONFIG, PATH_TO_VALIDIUM_OVERRIDE_CONFIG, + PROVER_MIGRATIONS, SERVER_MIGRATIONS, + }, messages::{ MSG_CHAIN_NOT_INITIALIZED, MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR,
MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR, MSG_FAILED_TO_RUN_SERVER_ERR, @@ -55,41 +57,31 @@ pub async fn genesis( ) -> anyhow::Result<()> { shell.create_dir(&config.rocks_db_path)?; + let link_to_code = config.link_to_code.clone(); let rocks_db = recreate_rocksdb_dirs(shell, &config.rocks_db_path, RocksDBDirOption::Main) .context(MSG_RECREATE_ROCKS_DB_ERRROR)?; let mut general = config.get_general_config()?; let file_artifacts = FileArtifacts::new(config.artifacts.clone()); set_rocks_db_config(&mut general, rocks_db)?; set_file_artifacts(&mut general, file_artifacts); + general.save_with_base_path(shell, &config.configs)?; + if config.prover_version != ProverMode::NoProofs { - general - .eth - .as_mut() - .context("eth")? - .sender - .as_mut() - .context("sender")? - .proof_sending_mode = ProofSendingMode::OnlyRealProofs; + override_config( + shell, + link_to_code.join(PATH_TO_ONLY_REAL_PROOFS_OVERRIDE_CONFIG), + config, + )?; } if config.l1_batch_commit_data_generator_mode == L1BatchCommitmentMode::Validium { - general - .eth - .as_mut() - .context("eth")? - .sender - .as_mut() - .context("sender")? - .pubdata_sending_mode = PubdataSendingMode::Custom; - general - .state_keeper_config - .as_mut() - .context("state_keeper_config")? - .pubdata_overhead_part = 0.0; + override_config( + shell, + link_to_code.join(PATH_TO_VALIDIUM_OVERRIDE_CONFIG), + config, + )?; } - general.save_with_base_path(shell, &config.configs)?; - let mut secrets = config.get_secrets_config()?; set_databases(&mut secrets, &args.server_db, &args.prover_db)?; secrets.save_with_base_path(shell, &config.configs)?; @@ -178,6 +170,7 @@ fn run_server_genesis(chain_config: &ChainConfig, shell: &Shell) -> anyhow::Resu GeneralConfig::get_path_with_base_path(&chain_config.configs), SecretsConfig::get_path_with_base_path(&chain_config.configs), ContractsConfig::get_path_with_base_path(&chain_config.configs), + None, vec![], ) .context(MSG_FAILED_TO_RUN_SERVER_ERR) diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs index 734e5e54863..46f88509a4d 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs @@ -1,26 +1,15 @@ use anyhow::{bail, Context}; -use common::{ - config::global_config, - forge::{Forge, ForgeScriptArgs}, - git, logger, - spinner::Spinner, -}; +use common::{config::global_config, git, logger, spinner::Spinner}; use config::{ - copy_configs, - forge_interface::{ - register_chain::{input::RegisterChainL1Config, output::RegisterChainOutput}, - script_params::REGISTER_CHAIN_SCRIPT_PARAMS, - }, - ports_config, set_l1_rpc_url, - traits::{ReadConfig, SaveConfig, SaveConfigWithBasePath}, - update_from_chain_config, update_ports, ChainConfig, ContractsConfig, EcosystemConfig, - GeneralConfig, + copy_configs, ports_config, set_l1_rpc_url, traits::SaveConfigWithBasePath, + update_from_chain_config, update_ports, ChainConfig, EcosystemConfig, GeneralConfig, }; -use types::{BaseToken, L1Network, WalletCreation}; +use types::{BaseToken, L1BatchCommitmentMode}; use xshell::Shell; +use super::common::{distribute_eth, mint_base_token, register_chain}; use crate::{ - accept_ownership::accept_admin, + accept_ownership::{accept_admin, set_da_validator_pair}, commands::{ chain::{ args::init::{InitArgs, InitArgsFinal}, @@ -31,18 +20,14 @@ use crate::{ }, portal::update_portal_config, }, - consts::AMOUNT_FOR_DISTRIBUTION_TO_WALLETS, messages::{ msg_initializing_chain, 
MSG_ACCEPTING_ADMIN_SPINNER, MSG_CHAIN_INITIALIZED, - MSG_CHAIN_NOT_FOUND_ERR, MSG_DISTRIBUTING_ETH_SPINNER, MSG_GENESIS_DATABASE_ERR, - MSG_MINT_BASE_TOKEN_SPINNER, MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR, MSG_PORTS_CONFIG_ERR, + MSG_CHAIN_NOT_FOUND_ERR, MSG_DA_PAIR_REGISTRATION_SPINNER, MSG_DEPLOYING_PAYMASTER, + MSG_GENESIS_DATABASE_ERR, MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR, MSG_PORTS_CONFIG_ERR, MSG_REGISTERING_CHAIN_SPINNER, MSG_SELECTED_CONFIG, MSG_UPDATING_TOKEN_MULTIPLIER_SETTER_SPINNER, MSG_WALLET_TOKEN_MULTIPLIER_SETTER_NOT_FOUND, }, - utils::{ - consensus::{generate_consensus_keys, get_consensus_config, get_consensus_secrets}, - forge::{check_the_balance, fill_forge_private_key}, - }, + utils::consensus::{generate_consensus_keys, get_consensus_config, get_consensus_secrets}, }; pub(crate) async fn run(args: InitArgs, shell: &Shell) -> anyhow::Result<()> { @@ -106,6 +91,8 @@ pub async fn init( chain_config, &mut contracts_config, init_args.l1_rpc_url.clone(), + None, + true, ) .await?; contracts_config.save_with_base_path(shell, &chain_config.configs)?; @@ -125,11 +112,13 @@ pub async fn init( if chain_config.base_token != BaseToken::eth() { let spinner = Spinner::new(MSG_UPDATING_TOKEN_MULTIPLIER_SETTER_SPINNER); + let chain_contracts = chain_config.get_contracts_config()?; set_token_multiplier_setter( shell, ecosystem_config, chain_config.get_wallets_config()?.governor_private_key(), - contracts_config.l1.chain_admin_addr, + chain_contracts.l1.access_control_restriction_addr, + chain_contracts.l1.diamond_proxy_addr, chain_config .get_wallets_config() .unwrap() @@ -153,6 +142,30 @@ pub async fn init( .await?; contracts_config.save_with_base_path(shell, &chain_config.configs)?; + let validium_mode = + chain_config.l1_batch_commit_data_generator_mode == L1BatchCommitmentMode::Validium; + + let l1_da_validator_addr = if validium_mode { + contracts_config.l1.validium_l1_da_validator_addr + } else { + contracts_config.l1.rollup_l1_da_validator_addr + }; + + let spinner = Spinner::new(MSG_DA_PAIR_REGISTRATION_SPINNER); + set_da_validator_pair( + shell, + ecosystem_config, + contracts_config.l1.chain_admin_addr, + chain_config.get_wallets_config()?.governor_private_key(), + contracts_config.l1.diamond_proxy_addr, + l1_da_validator_addr, + contracts_config.l2.l2_da_validator_addr, + &init_args.forge_args.clone(), + init_args.l1_rpc_url.clone(), + ) + .await?; + spinner.finish(); + if let Some(true) = chain_config.legacy_bridge { setup_legacy_bridge( shell, @@ -165,14 +178,18 @@ pub async fn init( } if init_args.deploy_paymaster { + let spinner = Spinner::new(MSG_DEPLOYING_PAYMASTER); deploy_paymaster::deploy_paymaster( shell, chain_config, &mut contracts_config, init_args.forge_args.clone(), + None, + true, ) .await?; contracts_config.save_with_base_path(shell, &chain_config.configs)?; + spinner.finish(); } genesis(init_args.genesis_args.clone(), shell, chain_config) @@ -186,100 +203,6 @@ pub async fn init( Ok(()) } -async fn register_chain( - shell: &Shell, - forge_args: ForgeScriptArgs, - config: &EcosystemConfig, - chain_config: &ChainConfig, - contracts: &mut ContractsConfig, - l1_rpc_url: String, -) -> anyhow::Result<()> { - let deploy_config_path = REGISTER_CHAIN_SCRIPT_PARAMS.input(&config.link_to_code); - - let deploy_config = RegisterChainL1Config::new(chain_config, contracts)?; - deploy_config.save(shell, deploy_config_path)?; - - let mut forge = Forge::new(&config.path_to_foundry()) - .script(&REGISTER_CHAIN_SCRIPT_PARAMS.script(), forge_args.clone()) - .with_ffi() -
.with_rpc_url(l1_rpc_url) - .with_broadcast(); - - forge = fill_forge_private_key(forge, config.get_wallets()?.governor_private_key())?; - check_the_balance(&forge).await?; - forge.run(shell)?; - - let register_chain_output = RegisterChainOutput::read( - shell, - REGISTER_CHAIN_SCRIPT_PARAMS.output(&chain_config.link_to_code), - )?; - contracts.set_chain_contracts(&register_chain_output); - Ok(()) -} - -// Distribute eth to the chain wallets for localhost environment -pub async fn distribute_eth( - ecosystem_config: &EcosystemConfig, - chain_config: &ChainConfig, - l1_rpc_url: String, -) -> anyhow::Result<()> { - if chain_config.wallet_creation == WalletCreation::Localhost - && ecosystem_config.l1_network == L1Network::Localhost - { - let spinner = Spinner::new(MSG_DISTRIBUTING_ETH_SPINNER); - let wallets = ecosystem_config.get_wallets()?; - let chain_wallets = chain_config.get_wallets_config()?; - let mut addresses = vec![ - chain_wallets.operator.address, - chain_wallets.blob_operator.address, - chain_wallets.governor.address, - ]; - if let Some(deployer) = chain_wallets.deployer { - addresses.push(deployer.address) - } - common::ethereum::distribute_eth( - wallets.operator, - addresses, - l1_rpc_url, - ecosystem_config.l1_network.chain_id(), - AMOUNT_FOR_DISTRIBUTION_TO_WALLETS, - ) - .await?; - spinner.finish(); - } - Ok(()) -} - -pub async fn mint_base_token( - ecosystem_config: &EcosystemConfig, - chain_config: &ChainConfig, - l1_rpc_url: String, -) -> anyhow::Result<()> { - if chain_config.wallet_creation == WalletCreation::Localhost - && ecosystem_config.l1_network == L1Network::Localhost - && chain_config.base_token != BaseToken::eth() - { - let spinner = Spinner::new(MSG_MINT_BASE_TOKEN_SPINNER); - let wallets = ecosystem_config.get_wallets()?; - let chain_wallets = chain_config.get_wallets_config()?; - let base_token = &chain_config.base_token; - let addresses = vec![wallets.governor.address, chain_wallets.governor.address]; - let amount = AMOUNT_FOR_DISTRIBUTION_TO_WALLETS * base_token.nominator as u128 - / base_token.denominator as u128; - common::ethereum::mint_token( - wallets.operator, - base_token.address, - addresses, - l1_rpc_url, - ecosystem_config.l1_network.chain_id(), - amount, - ) - .await?; - spinner.finish(); - } - Ok(()) -} - fn apply_port_offset(port_offset: u16, general_config: &mut GeneralConfig) -> anyhow::Result<()> { let Some(mut ports_config) = ports_config(general_config) else { bail!("Missing ports config"); diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/migrate_to_gateway.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/migrate_to_gateway.rs new file mode 100644 index 00000000000..7d8ad2ce6b7 --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/migrate_to_gateway.rs @@ -0,0 +1,446 @@ +use anyhow::Context; +use clap::Parser; +use common::{ + config::global_config, + forge::{Forge, ForgeScriptArgs}, +}; +use config::{ + forge_interface::{ + gateway_preparation::{input::GatewayPreparationConfig, output::GatewayPreparationOutput}, + script_params::GATEWAY_PREPARATION, + }, + traits::{ReadConfig, SaveConfig, SaveConfigWithBasePath}, + EcosystemConfig, +}; +use ethers::{ + abi::parse_abi, + contract::BaseContract, + providers::{Http, Middleware, Provider}, + types::Bytes, + utils::hex, +}; +use lazy_static::lazy_static; +use serde::{Deserialize, Serialize}; +use types::L1BatchCommitmentMode; +use xshell::Shell; +use zksync_basic_types::{settlement::SettlementMode, Address, H256, U256, U64}; +use
zksync_config::configs::{eth_sender::PubdataSendingMode, gateway::GatewayChainConfig}; +use zksync_system_constants::L2_BRIDGEHUB_ADDRESS; + +use crate::{ + messages::{MSG_CHAIN_NOT_INITIALIZED, MSG_L1_SECRETS_MUST_BE_PRESENTED}, + utils::forge::{check_the_balance, fill_forge_private_key}, +}; + +#[derive(Debug, Serialize, Deserialize, Parser)] +pub struct MigrateToGatewayArgs { + /// All ethereum environment related arguments + #[clap(flatten)] + #[serde(flatten)] + pub forge_args: ForgeScriptArgs, + + #[clap(long)] + pub gateway_chain_name: String, +} + +// TODO: use a different script here (i.e. make it have a different file) +lazy_static! { + static ref GATEWAY_PREPARATION_INTERFACE: BaseContract = BaseContract::from( + parse_abi(&[ + "function migrateChainToGateway(address chainAdmin,address accessControlRestriction,uint256 chainId) public", + "function setDAValidatorPair(address chainAdmin,address accessControlRestriction,uint256 chainId,address l1DAValidator,address l2DAValidator,address chainDiamondProxyOnGateway)", + "function supplyGatewayWallet(address addr, uint256 amount) public", + "function enableValidator(address chainAdmin,address accessControlRestriction,uint256 chainId,address validatorAddress,address gatewayValidatorTimelock) public", + "function grantWhitelist(address filtererProxy, address[] memory addr) public" + ]) + .unwrap(), + ); + + static ref BRIDGEHUB_INTERFACE: BaseContract = BaseContract::from( + parse_abi(&[ + "function getHyperchain(uint256 chainId) public returns (address)" + ]) + .unwrap(), + ); +} + +pub async fn run(args: MigrateToGatewayArgs, shell: &Shell) -> anyhow::Result<()> { + let ecosystem_config = EcosystemConfig::from_file(shell)?; + + let chain_name = global_config().chain_name.clone(); + let chain_config = ecosystem_config + .load_chain(chain_name) + .context(MSG_CHAIN_NOT_INITIALIZED)?; + + let gateway_chain_config = ecosystem_config + .load_chain(Some(args.gateway_chain_name.clone())) + .context("Gateway not present")?; + let gateway_chain_id = gateway_chain_config.chain_id.0; + let gateway_gateway_config = gateway_chain_config + .get_gateway_config() + .context("Gateway config not present")?; + + let l1_url = chain_config + .get_secrets_config()? + .l1 + .context(MSG_L1_SECRETS_MUST_BE_PRESENTED)? + .l1_rpc_url + .expose_str() + .to_string(); + + let genesis_config = chain_config.get_genesis_config()?; + + // First, prepare the gateway-side configuration + + let preparation_config_path = GATEWAY_PREPARATION.input(&ecosystem_config.link_to_code); + let preparation_config = GatewayPreparationConfig::new( + &gateway_chain_config, + &gateway_chain_config.get_contracts_config()?, + &ecosystem_config.get_contracts_config()?, + &gateway_gateway_config, + )?; + preparation_config.save(shell, preparation_config_path)?; + + let chain_contracts_config = chain_config.get_contracts_config().unwrap(); + let chain_admin_addr = chain_contracts_config.l1.chain_admin_addr; + let chain_access_control_restriction = + chain_contracts_config.l1.access_control_restriction_addr; + + println!("Whitelisting the chain's addresses..."); + call_script( + shell, + args.forge_args.clone(), + &GATEWAY_PREPARATION_INTERFACE + .encode( + "grantWhitelist", + ( + gateway_chain_config + .get_contracts_config()? + .l1 + .transaction_filterer_addr, + vec![ + chain_config.get_wallets_config()?.governor.address, + chain_config.get_contracts_config()?.l1.chain_admin_addr, + ], + ), + ) + .unwrap(), + &ecosystem_config, + gateway_chain_config + .get_wallets_config()? + .governor_private_key(), + l1_url.clone(), + ) + .await?;
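+ + // Now migrate the chain itself; this emits a governance L2 transaction whose completion is polled below.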
+ println!("Migrating the chain..."); + + let hash = call_script( + shell, + args.forge_args.clone(), + &GATEWAY_PREPARATION_INTERFACE + .encode( + "migrateChainToGateway", + ( + chain_admin_addr, + chain_access_control_restriction, + U256::from(chain_config.chain_id.0), + ), + ) + .unwrap(), + &ecosystem_config, + chain_config.get_wallets_config()?.governor_private_key(), + l1_url.clone(), + ) + .await?; + + let gateway_provider = Provider::<Http>::try_from( + gateway_chain_config + .get_general_config() + .unwrap() + .api_config + .unwrap() + .web3_json_rpc + .http_url, + )?; + + if hash == H256::zero() { + println!("Chain already migrated!"); + } else { + println!("Migration started! Migration hash: {}", hex::encode(hash)); + await_for_tx_to_complete(&gateway_provider, hash).await?; + } + + // After the migration is done, there are a few things left to do: + // Let's grab the new diamond proxy address + + // TODO: maybe move to using a precalculated address, just like for EN + let chain_id = U256::from(chain_config.chain_id.0); + let contract = BRIDGEHUB_INTERFACE + .clone() + .into_contract(L2_BRIDGEHUB_ADDRESS, gateway_provider); + + let method = contract.method::<U256, Address>("getHyperchain", chain_id)?; + + let new_diamond_proxy_address = method.call().await?; + + println!( + "New diamond proxy address: {}", + hex::encode(new_diamond_proxy_address.as_bytes()) + ); + + let chain_contracts_config = chain_config.get_contracts_config().unwrap(); + + let is_rollup = matches!( + genesis_config.l1_batch_commit_data_generator_mode, + L1BatchCommitmentMode::Rollup + ); + + let gateway_da_validator_address = if is_rollup { + gateway_gateway_config.relayed_sl_da_validator + } else { + gateway_gateway_config.validium_da_validator + }; + + println!("Setting DA validator pair..."); + let hash = call_script( + shell, + args.forge_args.clone(), + &GATEWAY_PREPARATION_INTERFACE + .encode( + "setDAValidatorPair", + ( + chain_admin_addr, + chain_access_control_restriction, + U256::from(chain_config.chain_id.0), + gateway_da_validator_address, + chain_contracts_config.l2.l2_da_validator_addr, + new_diamond_proxy_address, + ), + ) + .unwrap(), + &ecosystem_config, + chain_config.get_wallets_config()?.governor_private_key(), + l1_url.clone(), + ) + .await?; + println!( + "DA validator pair set! Hash: {}", + hex::encode(hash.as_bytes()) + ); + + let chain_wallets_config = chain_config.get_wallets_config().unwrap(); + + println!("Enabling validators..."); + let hash = call_script( + shell, + args.forge_args.clone(), + &GATEWAY_PREPARATION_INTERFACE + .encode( + "enableValidator", + ( + chain_admin_addr, + chain_access_control_restriction, + U256::from(chain_config.chain_id.0), + chain_wallets_config.blob_operator.address, + gateway_gateway_config.validator_timelock_addr, + ), + ) + .unwrap(), + &ecosystem_config, + chain_config.get_wallets_config()?.governor_private_key(), + l1_url.clone(), + ) + .await?; + println!( + "blob_operator enabled! Hash: {}", + hex::encode(hash.as_bytes()) + ); + + let hash = call_script( + shell, + args.forge_args.clone(), + &GATEWAY_PREPARATION_INTERFACE + .encode( + "supplyGatewayWallet", + ( + chain_wallets_config.blob_operator.address, + U256::from_dec_str("10000000000000000000").unwrap(), + ), + ) + .unwrap(), + &ecosystem_config, + chain_config.get_wallets_config()?.governor_private_key(), + l1_url.clone(), + ) + .await?; + println!( + "blob_operator supplied with 10 ETH! Hash: {}", + hex::encode(hash.as_bytes()) + );
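+ + // Repeat the same enable-and-fund sequence for the main operator wallet.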
+ let hash = call_script( + shell, + args.forge_args.clone(), + &GATEWAY_PREPARATION_INTERFACE + .encode( + "enableValidator", + ( + chain_admin_addr, + chain_access_control_restriction, + U256::from(chain_config.chain_id.0), + chain_wallets_config.operator.address, + gateway_gateway_config.validator_timelock_addr, + ), + ) + .unwrap(), + &ecosystem_config, + chain_config.get_wallets_config()?.governor_private_key(), + l1_url.clone(), + ) + .await?; + println!("operator enabled! Hash: {}", hex::encode(hash.as_bytes())); + + let hash = call_script( + shell, + args.forge_args.clone(), + &GATEWAY_PREPARATION_INTERFACE + .encode( + "supplyGatewayWallet", + ( + chain_wallets_config.operator.address, + U256::from_dec_str("10000000000000000000").unwrap(), + ), + ) + .unwrap(), + &ecosystem_config, + chain_config.get_wallets_config()?.governor_private_key(), + l1_url.clone(), + ) + .await?; + println!( + "operator supplied with 10 ETH! Hash: {}", + hex::encode(hash.as_bytes()) + ); + + let gateway_url = gateway_chain_config + .get_general_config() + .unwrap() + .api_config + .unwrap() + .web3_json_rpc + .http_url + .clone(); + + let mut chain_secrets_config = chain_config.get_secrets_config().unwrap(); + chain_secrets_config.l1.as_mut().unwrap().gateway_url = + Some(url::Url::parse(&gateway_url).unwrap().into()); + chain_secrets_config.save_with_base_path(shell, chain_config.configs.clone())?; + + let gateway_chain_config = GatewayChainConfig::from_gateway_and_chain_data( + &gateway_gateway_config, + new_diamond_proxy_address, + // TODO: for now we do not use a normal chain admin + Address::zero(), + ); + gateway_chain_config.save_with_base_path(shell, chain_config.configs.clone())?; + + let mut general_config = chain_config.get_general_config().unwrap(); + + let eth_config = general_config.eth.as_mut().context("eth")?; + let api_config = general_config.api_config.as_mut().context("api config")?; + let state_keeper = general_config + .state_keeper_config + .as_mut() + .context("state_keeper")?; + + eth_config + .gas_adjuster + .as_mut() + .expect("gas_adjuster") + .settlement_mode = SettlementMode::Gateway; + if is_rollup { + // For rollups, a new type of commitment should be used, + // but not for validium. + eth_config + .sender + .as_mut() + .expect("sender") + .pubdata_sending_mode = PubdataSendingMode::RelayedL2Calldata; + } + eth_config + .sender + .as_mut() + .context("sender")? + .wait_confirmations = Some(0); + // FIXME: do we need to move the following to be u64? + eth_config + .sender + .as_mut() + .expect("sender") + .max_aggregated_tx_gas = 4294967295; + api_config.web3_json_rpc.settlement_layer_url = Some(gateway_url); + // We need to ensure that this value is lower than the one used with blobs. + state_keeper.max_pubdata_per_batch = 120_000; + + general_config.save_with_base_path(shell, chain_config.configs.clone())?; + let mut chain_genesis_config = chain_config.get_genesis_config().unwrap(); + chain_genesis_config.sl_chain_id = Some(gateway_chain_id.into()); + chain_genesis_config.save_with_base_path(shell, chain_config.configs.clone())?; + + Ok(()) +} + +async fn await_for_tx_to_complete( + gateway_provider: &Provider<Http>, + hash: H256, +) -> anyhow::Result<()> { + println!("Waiting for transaction to complete..."); + while gateway_provider + .get_transaction_receipt(hash) + .await?
+ .is_none() + { + tokio::time::sleep(tokio::time::Duration::from_millis(200)).await; + } + + // We do not handle network errors + let receipt = gateway_provider + .get_transaction_receipt(hash) + .await? + .unwrap(); + + if receipt.status == Some(U64::from(1)) { + println!("Transaction completed successfully!"); + } else { + panic!("Transaction failed!"); + } + + Ok(()) +} + +async fn call_script( + shell: &Shell, + forge_args: ForgeScriptArgs, + data: &Bytes, + config: &EcosystemConfig, + private_key: Option<H256>, + l1_rpc_url: String, +) -> anyhow::Result<H256> { + let mut forge = Forge::new(&config.path_to_l1_foundry()) + .script(&GATEWAY_PREPARATION.script(), forge_args.clone()) + .with_ffi() + .with_rpc_url(l1_rpc_url) + .with_broadcast() + .with_calldata(data); + + // Governor private key is required for this script + forge = fill_forge_private_key(forge, private_key)?; + check_the_balance(&forge).await?; + forge.run(shell)?; + + let gateway_preparation_script_output = + GatewayPreparationOutput::read(shell, GATEWAY_PREPARATION.output(&config.link_to_code))?; + + Ok(gateway_preparation_script_output.governance_l2_tx_hash) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs index 61a164c1655..e56fb5cf612 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs @@ -1,7 +1,9 @@ +use ::common::forge::ForgeScriptArgs; +use args::build_transactions::BuildTransactionsArgs; pub(crate) use args::create::ChainCreateArgsFinal; use clap::Subcommand; -use common::forge::ForgeScriptArgs; pub(crate) use create::create_chain_inner; +use migrate_to_gateway::MigrateToGatewayArgs; use xshell::Shell; use crate::commands::chain::{ @@ -10,11 +12,15 @@ use crate::commands::chain::{ }; pub(crate) mod args; +mod build_transactions; +mod common; +mod convert_to_gateway; mod create; pub mod deploy_l2_contracts; pub mod deploy_paymaster; pub mod genesis; pub(crate) mod init; +mod migrate_to_gateway; mod set_token_multiplier_setter; mod setup_legacy_bridge; @@ -22,6 +28,8 @@ mod setup_legacy_bridge; pub enum ChainCommands { /// Create a new chain, setting the necessary configurations for later initialization Create(ChainCreateArgs), + /// Create unsigned transactions for chain deployment + BuildTransactions(BuildTransactionsArgs), /// Initialize chain, deploying necessary contracts and performing on-chain operations Init(InitArgs), /// Run server genesis @@ -42,12 +50,17 @@ pub enum ChainCommands { DeployPaymaster(ForgeScriptArgs), /// Update Token Multiplier Setter address on L1 UpdateTokenMultiplierSetter(ForgeScriptArgs), + /// Prepare chain to be an eligible gateway + ConvertToGateway(ForgeScriptArgs), + /// Migrate chain to gateway + MigrateToGateway(MigrateToGatewayArgs), } pub(crate) async fn run(shell: &Shell, args: ChainCommands) -> anyhow::Result<()> { match args { ChainCommands::Create(args) => create::run(args, shell), ChainCommands::Init(args) => init::run(args, shell).await, + ChainCommands::BuildTransactions(args) => build_transactions::run(args, shell).await, ChainCommands::Genesis(args) => genesis::run(args, shell).await, ChainCommands::DeployL2Contracts(args) => { deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::All).await @@ -65,5 +78,7 @@ pub(crate) async fn run(shell: &Shell, args: ChainCommands) -> anyhow::Result<() ChainCommands::UpdateTokenMultiplierSetter(args) => { set_token_multiplier_setter::run(args, shell).await }
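+ // Gateway lifecycle subcommands: convert one chain into a gateway first, then migrate other chains onto it. +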
ChainCommands::ConvertToGateway(args) => convert_to_gateway::run(args, shell).await, + ChainCommands::MigrateToGateway(args) => migrate_to_gateway::run(args, shell).await, } } diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs index f92391c22f4..4d012479dfb 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs @@ -23,7 +23,7 @@ use crate::{ lazy_static! { static ref SET_TOKEN_MULTIPLIER_SETTER: BaseContract = BaseContract::from( parse_abi(&[ - "function chainSetTokenMultiplierSetter(address chainAdmin, address target) public" + "function chainSetTokenMultiplierSetter(address accessControlRestriction, address diamondProxyAddress, address setter) public" ]) .unwrap(), ); @@ -43,8 +43,8 @@ pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { .l1_rpc_url .expose_str() .to_string(); - let token_multiplier_setter_address = ecosystem_config - .get_wallets() + let token_multiplier_setter_address = chain_config + .get_wallets_config() .context(MSG_WALLETS_CONFIG_MUST_BE_PRESENT)? .token_multiplier_setter .context(MSG_WALLET_TOKEN_MULTIPLIER_SETTER_NOT_FOUND)? @@ -55,7 +55,8 @@ pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { shell, &ecosystem_config, chain_config.get_wallets_config()?.governor_private_key(), - contracts_config.l1.chain_admin_addr, + contracts_config.l1.access_control_restriction_addr, + contracts_config.l1.diamond_proxy_addr, token_multiplier_setter_address, &args.clone(), l1_url, @@ -71,12 +72,14 @@ pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { Ok(()) } +#[allow(clippy::too_many_arguments)] pub async fn set_token_multiplier_setter( shell: &Shell, ecosystem_config: &EcosystemConfig, governor: Option, - chain_admin_address: Address, - target_address: Address, + access_control_restriction_address: Address, + diamond_proxy_address: Address, + new_setter_address: Address, forge_args: &ForgeScriptArgs, l1_rpc_url: String, ) -> anyhow::Result<()> { @@ -89,10 +92,14 @@ pub async fn set_token_multiplier_setter( let calldata = SET_TOKEN_MULTIPLIER_SETTER .encode( "chainSetTokenMultiplierSetter", - (chain_admin_address, target_address), + ( + access_control_restriction_address, + diamond_proxy_address, + new_setter_address, + ), ) .unwrap(); - let foundry_contracts_path = ecosystem_config.path_to_foundry(); + let foundry_contracts_path = ecosystem_config.path_to_l1_foundry(); let forge = Forge::new(&foundry_contracts_path) .script( &ACCEPT_GOVERNANCE_SCRIPT_PARAMS.script(), diff --git a/zk_toolbox/crates/zk_inception/src/commands/containers.rs b/zk_toolbox/crates/zk_inception/src/commands/containers.rs index 81d7970df83..9c11cc2e3ef 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/containers.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/containers.rs @@ -6,9 +6,13 @@ use config::{EcosystemConfig, DOCKER_COMPOSE_FILE, ERA_OBSERVABILITY_COMPOSE_FIL use xshell::Shell; use super::args::ContainersArgs; -use crate::messages::{ - MSG_CONTAINERS_STARTED, MSG_FAILED_TO_FIND_ECOSYSTEM_ERR, MSG_RETRY_START_CONTAINERS_PROMPT, - MSG_STARTING_CONTAINERS, MSG_STARTING_DOCKER_CONTAINERS_SPINNER, +use crate::{ + commands::ecosystem::setup_observability, + messages::{ + MSG_CONTAINERS_STARTED, MSG_FAILED_TO_FIND_ECOSYSTEM_ERR, + 
MSG_RETRY_START_CONTAINERS_PROMPT, MSG_STARTING_CONTAINERS, + MSG_STARTING_DOCKER_CONTAINERS_SPINNER, + }, }; pub fn run(shell: &Shell, args: ContainersArgs) -> anyhow::Result<()> { @@ -20,6 +24,10 @@ pub fn run(shell: &Shell, args: ContainersArgs) -> anyhow::Result<()> { logger::info(MSG_STARTING_CONTAINERS); let spinner = Spinner::new(MSG_STARTING_DOCKER_CONTAINERS_SPINNER); + if args.observability { + setup_observability::run(shell)?; + } + start_containers(shell, args.observability)?; spinner.finish(); diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/init.rs b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/init.rs index c74e4a4f765..7ba7d3cb40c 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/init.rs @@ -5,12 +5,13 @@ use xshell::Shell; use super::releases::{get_releases_with_arch, Arch, Version}; use crate::messages::{ - MSG_ARCH_NOT_SUPPORTED_ERR, MSG_FETCHING_VYPER_RELEASES_SPINNER, - MSG_FETCHING_ZKSOLC_RELEASES_SPINNER, MSG_FETCHING_ZKVYPER_RELEASES_SPINNER, - MSG_FETCH_SOLC_RELEASES_SPINNER, MSG_GET_SOLC_RELEASES_ERR, MSG_GET_VYPER_RELEASES_ERR, - MSG_GET_ZKSOLC_RELEASES_ERR, MSG_GET_ZKVYPER_RELEASES_ERR, MSG_NO_VERSION_FOUND_ERR, - MSG_OS_NOT_SUPPORTED_ERR, MSG_SOLC_VERSION_PROMPT, MSG_VYPER_VERSION_PROMPT, - MSG_ZKSOLC_VERSION_PROMPT, MSG_ZKVYPER_VERSION_PROMPT, + MSG_ARCH_NOT_SUPPORTED_ERR, MSG_ERA_VM_SOLC_VERSION_PROMPT, + MSG_FETCHING_VYPER_RELEASES_SPINNER, MSG_FETCHING_ZKSOLC_RELEASES_SPINNER, + MSG_FETCHING_ZKVYPER_RELEASES_SPINNER, MSG_FETCH_ERA_VM_SOLC_RELEASES_SPINNER, + MSG_FETCH_SOLC_RELEASES_SPINNER, MSG_GET_ERA_VM_SOLC_RELEASES_ERR, MSG_GET_SOLC_RELEASES_ERR, + MSG_GET_VYPER_RELEASES_ERR, MSG_GET_ZKSOLC_RELEASES_ERR, MSG_GET_ZKVYPER_RELEASES_ERR, + MSG_NO_VERSION_FOUND_ERR, MSG_OS_NOT_SUPPORTED_ERR, MSG_SOLC_VERSION_PROMPT, + MSG_VYPER_VERSION_PROMPT, MSG_ZKSOLC_VERSION_PROMPT, MSG_ZKVYPER_VERSION_PROMPT, }; #[derive(Debug, Clone, Parser, Default)] @@ -24,9 +25,15 @@ pub struct InitContractVerifierArgs { /// Version of solc to install #[clap(long)] pub solc_version: Option<String>, + /// Version of era vm solc to install + #[clap(long)] + pub era_vm_solc_version: Option<String>, /// Version of vyper to install #[clap(long)] pub vyper_version: Option<String>, + /// Install only provided compilers + #[clap(long, default_missing_value = "true")] + pub only: bool, } #[derive(Debug, Clone)] @@ -34,6 +41,7 @@ pub struct InitContractVerifierArgsFinal { pub zksolc_releases: Vec<Version>, pub zkvyper_releases: Vec<Version>, pub solc_releases: Vec<Version>, + pub era_vm_solc_releases: Vec<Version>, pub vyper_releases: Vec<Version>, } @@ -68,6 +76,14 @@ impl InitContractVerifierArgs { ) .context(MSG_GET_SOLC_RELEASES_ERR)?; + let era_vm_solc_releases = get_releases_with_arch( + shell, + "matter-labs/era-solidity", + arch, + MSG_FETCH_ERA_VM_SOLC_RELEASES_SPINNER, + ) + .context(MSG_GET_ERA_VM_SOLC_RELEASES_ERR)?; + let vyper_releases = get_releases_with_arch( shell, "vyperlang/vyper", @@ -81,33 +97,42 @@ impl InitContractVerifierArgs { zksolc_releases.clone(), MSG_ZKSOLC_VERSION_PROMPT, )?; - let zksolc_releases = get_releases_above_version(zksolc_releases, zksolc_version)?; + let zksolc_releases = get_final_releases(zksolc_releases, zksolc_version, self.only)?; let zkvyper_version = select_min_version( self.zkvyper_version, zkvyper_releases.clone(), MSG_ZKVYPER_VERSION_PROMPT, )?; - let zkvyper_releases = get_releases_above_version(zkvyper_releases, zkvyper_version)?; + let
zkvyper_releases = get_final_releases(zkvyper_releases, zkvyper_version, self.only)?; let solc_version = select_min_version( self.solc_version, solc_releases.clone(), MSG_SOLC_VERSION_PROMPT, )?; - let solc_releases = get_releases_above_version(solc_releases, solc_version)?; + let solc_releases = get_final_releases(solc_releases, solc_version, self.only)?; + + let era_vm_solc_version = select_min_version( + self.era_vm_solc_version, + era_vm_solc_releases.clone(), + MSG_ERA_VM_SOLC_VERSION_PROMPT, + )?; + let era_vm_solc_releases = + get_final_releases(era_vm_solc_releases, era_vm_solc_version, self.only)?; let vyper_version = select_min_version( self.vyper_version, vyper_releases.clone(), MSG_VYPER_VERSION_PROMPT, )?; - let vyper_releases = get_releases_above_version(vyper_releases, vyper_version)?; + let vyper_releases = get_final_releases(vyper_releases, vyper_version, self.only)?; Ok(InitContractVerifierArgsFinal { zksolc_releases, zkvyper_releases, solc_releases, + era_vm_solc_releases, vyper_releases, }) } @@ -156,14 +181,20 @@ fn select_min_version( Ok(selected) } -fn get_releases_above_version( +fn get_final_releases( releases: Vec<Version>, version: Version, + only: bool, ) -> anyhow::Result<Vec<Version>> { let pos = releases .iter() .position(|r| r.version == version.version) .context(MSG_NO_VERSION_FOUND_ERR)?; - Ok(releases[..=pos].to_vec()) + let result = if only { + vec![releases[pos].clone()] + } else { + releases[..=pos].to_vec() + }; + Ok(result) } diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/init.rs b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/init.rs index 5fd482ae5ff..f376a0d36ec 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/init.rs @@ -36,6 +36,14 @@ pub(crate) async fn run(shell: &Shell, args: InitContractVerifierArgs) -> anyhow "solc", )?; + download_binaries( + shell, + args.era_vm_solc_releases, + get_era_vm_solc_path, + &link_to_code, + "solc", + )?; + download_binaries( shell, args.vyper_releases, @@ -105,3 +113,9 @@ fn get_vyper_path(link_to_code: &Path, version: &str) -> PathBuf { fn get_solc_path(link_to_code: &Path, version: &str) -> PathBuf { link_to_code.join("etc/solc-bin/").join(version) } + +fn get_era_vm_solc_path(link_to_code: &Path, version: &str) -> PathBuf { + link_to_code + .join("etc/solc-bin/") + .join(format!("zkVM-{version}")) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/build_transactions.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/build_transactions.rs new file mode 100644 index 00000000000..697fa518b6e --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/build_transactions.rs @@ -0,0 +1,68 @@ +use std::{path::PathBuf, str::FromStr}; + +use clap::Parser; +use common::{forge::ForgeScriptArgs, Prompt}; +use serde::{Deserialize, Serialize}; +use url::Url; +use zksync_basic_types::H160; + +use crate::{ + consts::DEFAULT_UNSIGNED_TRANSACTIONS_DIR, + defaults::LOCAL_RPC_URL, + messages::{ + MSG_L1_RPC_URL_HELP, MSG_L1_RPC_URL_INVALID_ERR, MSG_L1_RPC_URL_PROMPT, + MSG_SENDER_ADDRESS_PROMPT, + }, +}; + +#[derive(Debug, Clone, Serialize, Deserialize, Parser)] +pub struct BuildTransactionsArgs { + /// Address of the transaction sender. + #[clap(long)] + pub sender: Option<String>, + #[clap(long, help = MSG_L1_RPC_URL_HELP)] + pub l1_rpc_url: Option<String>, + /// Output directory for the generated files.
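+ /// Defaults to `DEFAULT_UNSIGNED_TRANSACTIONS_DIR` when omitted.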
+ #[arg(long, short)] + pub out: Option<PathBuf>, + #[clap(flatten)] + #[serde(flatten)] + pub forge_args: ForgeScriptArgs, +} + +impl BuildTransactionsArgs { + pub fn fill_values_with_prompt(self) -> BuildTransactionsFinal { + let sender = self.sender.unwrap_or_else(|| { + Prompt::new(MSG_SENDER_ADDRESS_PROMPT) + .validate_with(|val: &String| -> Result<(), String> { + H160::from_str(val).map_or_else(|err| Err(err.to_string()), |_| Ok(())) + }) + .ask() + }); + + let l1_rpc_url = self.l1_rpc_url.unwrap_or_else(|| { + Prompt::new(MSG_L1_RPC_URL_PROMPT) + .default(LOCAL_RPC_URL) + .validate_with(|val: &String| -> Result<(), String> { + Url::parse(val) + .map(|_| ()) + .map_err(|_| MSG_L1_RPC_URL_INVALID_ERR.to_string()) + }) + .ask() + }); + BuildTransactionsFinal { + sender, + out: self.out.unwrap_or(DEFAULT_UNSIGNED_TRANSACTIONS_DIR.into()), + forge_args: self.forge_args.clone(), + l1_rpc_url, + } + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct BuildTransactionsFinal { + pub sender: String, + pub out: PathBuf, + pub forge_args: ForgeScriptArgs, + pub l1_rpc_url: String, +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/mod.rs index 8a6048a8643..c25eebda3d6 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/mod.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/mod.rs @@ -1,3 +1,4 @@ +pub mod build_transactions; pub mod change_default; pub mod create; pub mod init; diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/build_transactions.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/build_transactions.rs new file mode 100644 index 00000000000..ff713236097 --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/build_transactions.rs @@ -0,0 +1,79 @@ +use anyhow::Context; +use common::{git, logger, spinner::Spinner}; +use config::{traits::SaveConfigWithBasePath, EcosystemConfig}; +use xshell::Shell; + +use super::{ + args::build_transactions::BuildTransactionsArgs, + common::deploy_l1, + create_configs::create_initial_deployments_config, + utils::{build_system_contracts, install_yarn_dependencies}, +}; +use crate::messages::{ + MSG_BUILDING_ECOSYSTEM, MSG_BUILDING_ECOSYSTEM_CONTRACTS_SPINNER, MSG_ECOSYSTEM_TXN_OUTRO, + MSG_ECOSYSTEM_TXN_OUT_PATH_INVALID_ERR, MSG_INTALLING_DEPS_SPINNER, + MSG_WRITING_OUTPUT_FILES_SPINNER, +}; + +const DEPLOY_TRANSACTIONS_FILE_SRC: &str = + "contracts/l1-contracts/broadcast/DeployL1.s.sol/9/dry-run/run-latest.json"; +const DEPLOY_TRANSACTIONS_FILE_DST: &str = "deploy-l1-txns.json"; + +const SCRIPT_CONFIG_FILE_SRC: &str = "contracts/l1-contracts/script-config/config-deploy-l1.toml"; +const SCRIPT_CONFIG_FILE_DST: &str = "config-deploy-l1.toml"; + +pub async fn run(args: BuildTransactionsArgs, shell: &Shell) -> anyhow::Result<()> { + let args = args.fill_values_with_prompt(); + let ecosystem_config = EcosystemConfig::from_file(shell)?; + + git::submodule_update(shell, ecosystem_config.link_to_code.clone())?; + + let initial_deployment_config = match ecosystem_config.get_initial_deployment_config() { + Ok(config) => config, + Err(_) => create_initial_deployments_config(shell, &ecosystem_config.config)?, + }; + + logger::info(MSG_BUILDING_ECOSYSTEM); + + let spinner = Spinner::new(MSG_INTALLING_DEPS_SPINNER); + install_yarn_dependencies(shell, &ecosystem_config.link_to_code)?; + build_system_contracts(shell, &ecosystem_config.link_to_code)?; + spinner.finish(); + + let spinner =
Spinner::new(MSG_BUILDING_ECOSYSTEM_CONTRACTS_SPINNER); + let contracts_config = deploy_l1( + shell, + &args.forge_args, + &ecosystem_config, + &initial_deployment_config, + &args.l1_rpc_url, + Some(args.sender), + false, + ) + .await?; + + contracts_config.save_with_base_path(shell, &args.out)?; + spinner.finish(); + + let spinner = Spinner::new(MSG_WRITING_OUTPUT_FILES_SPINNER); + shell + .create_dir(&args.out) + .context(MSG_ECOSYSTEM_TXN_OUT_PATH_INVALID_ERR)?; + + shell.copy_file( + ecosystem_config + .link_to_code + .join(DEPLOY_TRANSACTIONS_FILE_SRC), + args.out.join(DEPLOY_TRANSACTIONS_FILE_DST), + )?; + + shell.copy_file( + ecosystem_config.link_to_code.join(SCRIPT_CONFIG_FILE_SRC), + args.out.join(SCRIPT_CONFIG_FILE_DST), + )?; + spinner.finish(); + + logger::outro(MSG_ECOSYSTEM_TXN_OUTRO); + + Ok(()) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/common.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/common.rs new file mode 100644 index 00000000000..dbd487bff3c --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/common.rs @@ -0,0 +1,75 @@ +use anyhow::Context; +use common::forge::{Forge, ForgeScriptArgs}; +use config::{ + forge_interface::{ + deploy_ecosystem::{ + input::{DeployL1Config, InitialDeploymentConfig}, + output::DeployL1Output, + }, + script_params::DEPLOY_ECOSYSTEM_SCRIPT_PARAMS, + }, + traits::{ReadConfig, ReadConfigWithBasePath, SaveConfig}, + ContractsConfig, EcosystemConfig, GenesisConfig, +}; +use types::{L1Network, ProverMode}; +use xshell::Shell; + +use crate::utils::forge::{check_the_balance, fill_forge_private_key}; + +pub async fn deploy_l1( + shell: &Shell, + forge_args: &ForgeScriptArgs, + config: &EcosystemConfig, + initial_deployment_config: &InitialDeploymentConfig, + l1_rpc_url: &str, + sender: Option<String>, + broadcast: bool, +) -> anyhow::Result<ContractsConfig> { + let deploy_config_path = DEPLOY_ECOSYSTEM_SCRIPT_PARAMS.input(&config.link_to_code); + let default_genesis_config = + GenesisConfig::read_with_base_path(shell, config.get_default_configs_path()) + .context("failed to read default genesis config")?; + + let wallets_config = config.get_wallets()?; + // For deploying ecosystem we only need genesis batch params + let deploy_config = DeployL1Config::new( + &default_genesis_config, + &wallets_config, + initial_deployment_config, + config.era_chain_id, + config.prover_version == ProverMode::NoProofs, + ); + deploy_config.save(shell, deploy_config_path)?; + + let mut forge = Forge::new(&config.path_to_l1_foundry()) + .script(&DEPLOY_ECOSYSTEM_SCRIPT_PARAMS.script(), forge_args.clone()) + .with_ffi() + .with_rpc_url(l1_rpc_url.to_string()); + + if config.l1_network == L1Network::Localhost { + // It's a kludge for reth, which doesn't behave properly with a large amount of txs + forge = forge.with_slow(); + } + + if let Some(address) = sender { + forge = forge.with_sender(address); + } else { + forge = fill_forge_private_key(forge, wallets_config.deployer_private_key())?; + } + + if broadcast { + forge = forge.with_broadcast(); + check_the_balance(&forge).await?; + } + + forge.run(shell)?; + + let script_output = DeployL1Output::read( + shell, + DEPLOY_ECOSYSTEM_SCRIPT_PARAMS.output(&config.link_to_code), + )?; + let mut contracts_config = ContractsConfig::default(); + contracts_config.update_from_l1_output(&script_output); + + Ok(contracts_config) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs index 7d34437ef2d..146160d2132 100644 ---
a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs @@ -1,39 +1,34 @@ -use std::{ - path::{Path, PathBuf}, - str::FromStr, -}; +use std::{path::PathBuf, str::FromStr}; use anyhow::Context; use common::{ - cmd::Cmd, config::global_config, forge::{Forge, ForgeScriptArgs}, - git, logger, + git, + hardhat::{build_l1_contracts, build_l2_contracts}, + logger, spinner::Spinner, Prompt, }; use config::{ forge_interface::{ deploy_ecosystem::{ - input::{ - DeployErc20Config, DeployL1Config, Erc20DeploymentConfig, InitialDeploymentConfig, - }, - output::{DeployL1Output, ERC20Tokens}, + input::{DeployErc20Config, Erc20DeploymentConfig, InitialDeploymentConfig}, + output::ERC20Tokens, }, - script_params::{DEPLOY_ECOSYSTEM_SCRIPT_PARAMS, DEPLOY_ERC20_SCRIPT_PARAMS}, - }, - traits::{ - FileConfigWithDefaultName, ReadConfig, ReadConfigWithBasePath, SaveConfig, - SaveConfigWithBasePath, + script_params::DEPLOY_ERC20_SCRIPT_PARAMS, }, - ContractsConfig, EcosystemConfig, GenesisConfig, + traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig, SaveConfigWithBasePath}, + ContractsConfig, EcosystemConfig, }; -use types::{L1Network, ProverMode}; -use xshell::{cmd, Shell}; +use types::L1Network; +use xshell::Shell; use super::{ args::init::{EcosystemArgsFinal, EcosystemInitArgs, EcosystemInitArgsFinal}, + common::deploy_l1, setup_observability, + utils::{build_da_contracts, build_system_contracts, install_yarn_dependencies}, }; use crate::{ accept_ownership::{accept_admin, accept_owner}, @@ -144,7 +139,10 @@ async fn init( ) -> anyhow::Result<ContractsConfig> { let spinner = Spinner::new(MSG_INTALLING_DEPS_SPINNER); install_yarn_dependencies(shell, &ecosystem_config.link_to_code)?; + build_da_contracts(shell, &ecosystem_config.link_to_code)?; + build_l1_contracts(shell, &ecosystem_config.link_to_code)?; build_system_contracts(shell, &ecosystem_config.link_to_code)?; + build_l2_contracts(shell, &ecosystem_config.link_to_code)?; spinner.finish(); let contracts = deploy_ecosystem( @@ -180,7 +178,7 @@ async fn deploy_erc20( ) .save(shell, deploy_config_path)?; - let mut forge = Forge::new(&ecosystem_config.path_to_foundry()) + let mut forge = Forge::new(&ecosystem_config.path_to_l1_foundry()) .script(&DEPLOY_ERC20_SCRIPT_PARAMS.script(), forge_args.clone()) .with_ffi() .with_rpc_url(l1_rpc_url) @@ -280,47 +278,19 @@ async fn deploy_ecosystem_inner( initial_deployment_config: &InitialDeploymentConfig, l1_rpc_url: String, ) -> anyhow::Result<ContractsConfig> { - let deploy_config_path = DEPLOY_ECOSYSTEM_SCRIPT_PARAMS.input(&config.link_to_code); - - let default_genesis_config = - GenesisConfig::read_with_base_path(shell, config.get_default_configs_path()) - .context("Context")?; - - let wallets_config = config.get_wallets()?; - // For deploying ecosystem we only need genesis batch params - let deploy_config = DeployL1Config::new( - &default_genesis_config, - &wallets_config, - initial_deployment_config, - config.era_chain_id, - config.prover_version == ProverMode::NoProofs, - ); - deploy_config.save(shell, deploy_config_path)?; - - let mut forge = Forge::new(&config.path_to_foundry()) - .script(&DEPLOY_ECOSYSTEM_SCRIPT_PARAMS.script(), forge_args.clone()) - .with_ffi() - .with_rpc_url(l1_rpc_url.clone()) - .with_broadcast(); - - if config.l1_network == L1Network::Localhost { - // It's a kludge for reth, just because it doesn't behave properly with large amount of txs - forge = forge.with_slow(); - } - - forge = fill_forge_private_key(forge,
wallets_config.deployer_private_key())?; - let spinner = Spinner::new(MSG_DEPLOYING_ECOSYSTEM_CONTRACTS_SPINNER); - check_the_balance(&forge).await?; - forge.run(shell)?; + let contracts_config = deploy_l1( + shell, + &forge_args, + config, + initial_deployment_config, + &l1_rpc_url, + None, + true, + ) + .await?; spinner.finish(); - let script_output = DeployL1Output::read( - shell, - DEPLOY_ECOSYSTEM_SCRIPT_PARAMS.output(&config.link_to_code), - )?; - let mut contracts_config = ContractsConfig::default(); - contracts_config.update_from_l1_output(&script_output); accept_owner( shell, config, @@ -354,21 +324,26 @@ async fn deploy_ecosystem_inner( ) .await?; - accept_admin( + // Note that there is no admin in the L1 asset router, so we do + // need to accept it + + accept_owner( shell, config, - contracts_config.l1.chain_admin_addr, + contracts_config.l1.governance_addr, config.get_wallets()?.governor_private_key(), - contracts_config.bridges.shared.l1_address, + contracts_config + .ecosystem_contracts + .state_transition_proxy_addr, &forge_args, l1_rpc_url.clone(), ) .await?; - accept_owner( + accept_admin( shell, config, - contracts_config.l1.governance_addr, + contracts_config.l1.chain_admin_addr, config.get_wallets()?.governor_private_key(), contracts_config .ecosystem_contracts @@ -378,14 +353,14 @@ async fn deploy_ecosystem_inner( ) .await?; - accept_admin( + accept_owner( shell, config, - contracts_config.l1.chain_admin_addr, + contracts_config.l1.governance_addr, config.get_wallets()?.governor_private_key(), contracts_config .ecosystem_contracts - .state_transition_proxy_addr, + .stm_deployment_tracker_proxy_addr, &forge_args, l1_rpc_url.clone(), ) @@ -393,13 +368,3 @@ async fn deploy_ecosystem_inner( Ok(contracts_config) } - -fn install_yarn_dependencies(shell: &Shell, link_to_code: &Path) -> anyhow::Result<()> { - let _dir_guard = shell.push_dir(link_to_code); - Ok(Cmd::new(cmd!(shell, "yarn install")).run()?) -} - -fn build_system_contracts(shell: &Shell, link_to_code: &Path) -> anyhow::Result<()> { - let _dir_guard = shell.push_dir(link_to_code.join("contracts")); - Ok(Cmd::new(cmd!(shell, "yarn sc build")).run()?)
-} diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/mod.rs index cb5195ccf93..3f4aa7565e1 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/mod.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/mod.rs @@ -1,3 +1,4 @@ +use args::build_transactions::BuildTransactionsArgs; use clap::Subcommand; use xshell::Shell; @@ -6,11 +7,14 @@ use crate::commands::ecosystem::args::{ }; mod args; +pub(crate) mod build_transactions; mod change_default; +mod common; mod create; pub mod create_configs; pub(crate) mod init; -mod setup_observability; +pub(crate) mod setup_observability; +mod utils; #[derive(Subcommand, Debug)] #[allow(clippy::large_enum_variant)] @@ -18,6 +22,8 @@ pub enum EcosystemCommands { /// Create a new ecosystem and chain, /// setting necessary configurations for later initialization Create(EcosystemCreateArgs), + /// Create transactions to build ecosystem contracts + BuildTransactions(BuildTransactionsArgs), /// Initialize ecosystem and chain, /// deploying necessary contracts and performing on-chain operations Init(EcosystemInitArgs), @@ -33,6 +39,7 @@ pub enum EcosystemCommands { pub(crate) async fn run(shell: &Shell, args: EcosystemCommands) -> anyhow::Result<()> { match args { EcosystemCommands::Create(args) => create::run(args, shell), + EcosystemCommands::BuildTransactions(args) => build_transactions::run(args, shell).await, EcosystemCommands::Init(args) => init::run(args, shell).await, EcosystemCommands::ChangeDefaultChain(args) => change_default::run(args, shell), EcosystemCommands::SetupObservability => setup_observability::run(shell), diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/utils.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/utils.rs new file mode 100644 index 00000000000..1403d737164 --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/utils.rs @@ -0,0 +1,19 @@ +use std::path::Path; + +use common::cmd::Cmd; +use xshell::{cmd, Shell}; + +pub(super) fn install_yarn_dependencies(shell: &Shell, link_to_code: &Path) -> anyhow::Result<()> { + let _dir_guard = shell.push_dir(link_to_code); + Ok(Cmd::new(cmd!(shell, "yarn install")).run()?) +} + +pub(super) fn build_system_contracts(shell: &Shell, link_to_code: &Path) -> anyhow::Result<()> { + let _dir_guard = shell.push_dir(link_to_code.join("contracts")); + Ok(Cmd::new(cmd!(shell, "yarn sc build")).run()?) +} + +pub(super) fn build_da_contracts(shell: &Shell, link_to_code: &Path) -> anyhow::Result<()> { + let _dir_guard = shell.push_dir(link_to_code.join("contracts")); + Ok(Cmd::new(cmd!(shell, "yarn da build:foundry")).run()?) 
+} diff --git a/zk_toolbox/crates/zk_inception/src/commands/server.rs b/zk_toolbox/crates/zk_inception/src/commands/server.rs index f96bc3aeebc..334609e93b0 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/server.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/server.rs @@ -9,6 +9,7 @@ use config::{ GeneralConfig, GenesisConfig, SecretsConfig, WalletsConfig, }; use xshell::Shell; +use zksync_config::configs::gateway::GatewayChainConfig; use crate::{ commands::args::RunServerArgs, @@ -51,6 +52,12 @@ fn run_server( } else { ServerMode::Normal }; + + let gateway_contracts = chain_config + .get_gateway_chain_config() + .ok() + .map(|_| GatewayChainConfig::get_path_with_base_path(&chain_config.configs)); + server .run( shell, @@ -60,6 +67,7 @@ fn run_server( GeneralConfig::get_path_with_base_path(&chain_config.configs), SecretsConfig::get_path_with_base_path(&chain_config.configs), ContractsConfig::get_path_with_base_path(&chain_config.configs), + gateway_contracts, vec![], ) .context(MSG_FAILED_TO_RUN_SERVER_ERR) diff --git a/zk_toolbox/crates/zk_inception/src/commands/update.rs b/zk_toolbox/crates/zk_inception/src/commands/update.rs index a05ecbe62e0..c140c3a4e9c 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/update.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/update.rs @@ -1,7 +1,11 @@ use std::path::Path; use anyhow::{Context, Ok}; -use common::{git, logger, spinner::Spinner}; +use common::{ + git, logger, + spinner::Spinner, + yaml::{merge_yaml, ConfigDiff}, +}; use config::{ ChainConfig, EcosystemConfig, CONTRACTS_FILE, EN_CONFIG_FILE, ERA_OBSERBAVILITY_DIR, GENERAL_FILE, GENESIS_FILE, SECRETS_FILE, @@ -12,38 +16,11 @@ use super::args::UpdateArgs; use crate::messages::{ msg_diff_contracts_config, msg_diff_genesis_config, msg_diff_secrets, msg_updating_chain, MSG_CHAIN_NOT_FOUND_ERR, MSG_DIFF_EN_CONFIG, MSG_DIFF_EN_GENERAL_CONFIG, - MSG_DIFF_GENERAL_CONFIG, MSG_INVALID_KEY_TYPE_ERR, MSG_PULLING_ZKSYNC_CODE_SPINNER, + MSG_DIFF_GENERAL_CONFIG, MSG_PULLING_ZKSYNC_CODE_SPINNER, MSG_UPDATING_ERA_OBSERVABILITY_SPINNER, MSG_UPDATING_SUBMODULES_SPINNER, MSG_UPDATING_ZKSYNC, MSG_ZKSYNC_UPDATED, }; -/// Holds the differences between two YAML configurations. -#[derive(Default)] -struct ConfigDiff { - /// Fields that have different values between the two configurations - /// This contains the new values - pub differing_values: serde_yaml::Mapping, - - /// Fields that are present in the new configuration but not in the old one. 
- pub new_fields: serde_yaml::Mapping, -} - -impl ConfigDiff { - fn print(&self, msg: &str, is_warning: bool) { - if self.new_fields.is_empty() { - return; - } - - if is_warning { - logger::warn(msg); - logger::warn(logger::object_to_string(&self.new_fields)); - } else { - logger::info(msg); - logger::info(logger::object_to_string(&self.new_fields)); - } - } -} - pub fn run(shell: &Shell, args: UpdateArgs) -> anyhow::Result<()> { logger::info(MSG_UPDATING_ZKSYNC); let ecosystem = EcosystemConfig::from_file(shell)?; @@ -127,7 +104,7 @@ fn update_config( ) -> anyhow::Result<()> { let original_config = serde_yaml::from_str(&shell.read_file(original_config_path)?)?; let mut chain_config = serde_yaml::from_str(&shell.read_file(chain_config_path)?)?; - let diff = merge_yaml(&mut chain_config, original_config)?; + let diff = merge_yaml(&mut chain_config, original_config, false)?; if save_config { save_updated_config(&shell, chain_config, chain_config_path, diff, msg)?; } else { @@ -202,298 +179,3 @@ fn update_chain( Ok(()) } - -fn merge_yaml_internal( - a: &mut serde_yaml::Value, - b: serde_yaml::Value, - current_key: String, - diff: &mut ConfigDiff, -) -> anyhow::Result<()> { - match (a, b) { - (serde_yaml::Value::Mapping(a), serde_yaml::Value::Mapping(b)) => { - for (key, value) in b { - let k = key.as_str().context(MSG_INVALID_KEY_TYPE_ERR)?.to_string(); - let current_key = if current_key.is_empty() { - k.clone() - } else { - format!("{}.{}", current_key, k) - }; - - if a.contains_key(&key) { - merge_yaml_internal(a.get_mut(&key).unwrap(), value, current_key, diff)?; - } else { - a.insert(key.clone(), value.clone()); - diff.new_fields.insert(current_key.into(), value); - } - } - } - (a, b) => { - if a != &b { - diff.differing_values.insert(current_key.into(), b); - } - } - } - Ok(()) -} - -fn merge_yaml(a: &mut serde_yaml::Value, b: serde_yaml::Value) -> anyhow::Result<ConfigDiff> { - let mut diff = ConfigDiff::default(); - merge_yaml_internal(a, b, "".into(), &mut diff)?; - Ok(diff) -} - -#[cfg(test)] -mod tests { - #[test] - fn test_merge_yaml_both_are_equal_returns_no_diff() { - let mut a = serde_yaml::from_str( - r#" - key1: value1 - key2: value2 - key3: - key4: value4 - "#, - ) - .unwrap(); - let b: serde_yaml::Value = serde_yaml::from_str( - r#" - key1: value1 - key2: value2 - key3: - key4: value4 - "#, - ) - .unwrap(); - let expected: serde_yaml::Value = serde_yaml::from_str( - r#" - key1: value1 - key2: value2 - key3: - key4: value4 - "#, - ) - .unwrap(); - let diff = super::merge_yaml(&mut a, b).unwrap(); - assert!(diff.differing_values.is_empty()); - assert!(diff.new_fields.is_empty()); - assert_eq!(a, expected); - } - - #[test] - fn test_merge_yaml_b_has_extra_field_returns_diff() { - let mut a = serde_yaml::from_str( - r#" - key1: value1 - key2: value2 - key3: - key4: value4 - "#, - ) - .unwrap(); - let b: serde_yaml::Value = serde_yaml::from_str( - r#" - key1: value1 - key2: value2 - key3: - key4: value4 - key5: value5 - "#, - ) - .unwrap(); - - let expected: serde_yaml::Value = serde_yaml::from_str( - r#" - key1: value1 - key2: value2 - key3: - key4: value4 - key5: value5 - "#, - ) - .unwrap(); - - let diff = super::merge_yaml(&mut a, b.clone()).unwrap(); - assert!(diff.differing_values.is_empty()); - assert_eq!(diff.new_fields.len(), 1); - assert_eq!( - diff.new_fields.get::<serde_yaml::Value>("key5".into()).unwrap(), - b.clone().get("key5").unwrap() - ); - assert_eq!(a, expected); - } - - #[test] - fn test_merge_yaml_a_has_extra_field_no_diff() { - let mut a = serde_yaml::from_str( - r#" - key1: value1 -
key2: value2 - key3: - key4: value4 - key5: value5 - "#, - ) - .unwrap(); - let b: serde_yaml::Value = serde_yaml::from_str( - r#" - key1: value1 - key2: value2 - key3: - key4: value4 - "#, - ) - .unwrap(); - - let expected: serde_yaml::Value = serde_yaml::from_str( - r#" - key1: value1 - key2: value2 - key3: - key4: value4 - key5: value5 - "#, - ) - .unwrap(); - - let diff = super::merge_yaml(&mut a, b).unwrap(); - assert!(diff.differing_values.is_empty()); - assert!(diff.new_fields.is_empty()); - assert_eq!(a, expected); - } - - #[test] - fn test_merge_yaml_a_has_extra_field_and_b_has_extra_field_returns_diff() { - let mut a = serde_yaml::from_str( - r#" - key1: value1 - key2: value2 - key3: - key4: value4 - key5: value5 - "#, - ) - .unwrap(); - let b: serde_yaml::Value = serde_yaml::from_str( - r#" - key1: value1 - key2: value2 - key3: - key4: value4 - key6: value6 - "#, - ) - .unwrap(); - - let expected: serde_yaml::Value = serde_yaml::from_str( - r#" - key1: value1 - key2: value2 - key3: - key4: value4 - key5: value5 - key6: value6 - "#, - ) - .unwrap(); - - let diff = super::merge_yaml(&mut a, b.clone()).unwrap(); - assert_eq!(diff.differing_values.len(), 0); - assert_eq!(diff.new_fields.len(), 1); - assert_eq!( - diff.new_fields.get::<serde_yaml::Value>("key6".into()).unwrap(), - b.clone().get("key6").unwrap() - ); - assert_eq!(a, expected); - } - - #[test] - fn test_merge_yaml_a_has_different_value_returns_diff() { - let mut a = serde_yaml::from_str( - r#" - key1: value1 - key2: value2 - key3: - key4: value4 - "#, - ) - .unwrap(); - let b: serde_yaml::Value = serde_yaml::from_str( - r#" - key1: value1 - key2: value2 - key3: - key4: value5 - "#, - ) - .unwrap(); - - let expected: serde_yaml::Value = serde_yaml::from_str( - r#" - key1: value1 - key2: value2 - key3: - key4: value4 - "#, - ) - .unwrap(); - - let diff = super::merge_yaml(&mut a, b.clone()).unwrap(); - assert_eq!(diff.differing_values.len(), 1); - assert_eq!( - diff.differing_values - .get::<serde_yaml::Value>("key3.key4".into()) - .unwrap(), - b.get("key3").unwrap().get("key4").unwrap() - ); - assert_eq!(a, expected); - } - - #[test] - fn test_merge_yaml_a_has_different_value_and_b_has_extra_field_returns_diff() { - let mut a = serde_yaml::from_str( - r#" - key1: value1 - key2: value2 - key3: - key4: value4 - "#, - ) - .unwrap(); - let b: serde_yaml::Value = serde_yaml::from_str( - r#" - key1: value1 - key2: value2 - key3: - key4: value5 - key5: value5 - "#, - ) - .unwrap(); - - let expected: serde_yaml::Value = serde_yaml::from_str( - r#" - key1: value1 - key2: value2 - key3: - key4: value4 - key5: value5 - "#, - ) - .unwrap(); - - let diff = super::merge_yaml(&mut a, b.clone()).unwrap(); - assert_eq!(diff.differing_values.len(), 1); - assert_eq!( - diff.differing_values - .get::<serde_yaml::Value>("key3.key4".into()) - .unwrap(), - b.get("key3").unwrap().get("key4").unwrap() - ); - assert_eq!(diff.new_fields.len(), 1); - assert_eq!( - diff.new_fields.get::<serde_yaml::Value>("key5".into()).unwrap(), - b.get("key5").unwrap() - ); - assert_eq!(a, expected); - } -} diff --git a/zk_toolbox/crates/zk_inception/src/consts.rs b/zk_toolbox/crates/zk_inception/src/consts.rs index 22e570a5439..9f81847e333 100644 --- a/zk_toolbox/crates/zk_inception/src/consts.rs +++ b/zk_toolbox/crates/zk_inception/src/consts.rs @@ -8,6 +8,7 @@ pub const PROVER_MIGRATIONS: &str = "prover/crates/lib/prover_dal/migrations"; pub const PROVER_STORE_MAX_RETRIES: u16 = 10; pub const DEFAULT_CREDENTIALS_FILE: &str = "~/.config/gcloud/application_default_credentials.json"; pub const DEFAULT_PROOF_STORE_DIR: &str = "artifacts";
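+/// Default output directory, joined onto the ecosystem's `link_to_code`, for the unsigned
+/// transaction files produced by the `build-transactions` commands (see `send-transactions` below).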
+pub const DEFAULT_UNSIGNED_TRANSACTIONS_DIR: &str = "transactions"; pub const BELLMAN_CUDA_DIR: &str = "era-bellman-cuda"; pub const L2_BASE_TOKEN_ADDRESS: &str = "0x000000000000000000000000000000000000800A"; @@ -53,3 +54,7 @@ pub const WITNESS_VECTOR_GENERATOR_BINARY_NAME: &str = "zksync_witness_vector_ge pub const PROVER_BINARY_NAME: &str = "zksync_prover_fri"; pub const COMPRESSOR_BINARY_NAME: &str = "zksync_proof_fri_compressor"; pub const PROVER_JOB_MONITOR_BINARY_NAME: &str = "zksync_prover_job_monitor"; + +pub const PATH_TO_ONLY_REAL_PROOFS_OVERRIDE_CONFIG: &str = + "etc/env/file_based/overrides/only_real_proofs.yaml"; +pub const PATH_TO_VALIDIUM_OVERRIDE_CONFIG: &str = "etc/env/file_based/overrides/validium.yaml"; diff --git a/zk_toolbox/crates/zk_inception/src/main.rs b/zk_toolbox/crates/zk_inception/src/main.rs index f6f7d83dede..f1ca348df38 100644 --- a/zk_toolbox/crates/zk_inception/src/main.rs +++ b/zk_toolbox/crates/zk_inception/src/main.rs @@ -8,6 +8,7 @@ use common::{ config::{global_config, init_global_config, GlobalConfig}, error::log_error, init_prompt_theme, logger, + version::version_message, }; use config::EcosystemConfig; use xshell::Shell; @@ -26,7 +27,10 @@ mod messages; mod utils; #[derive(Parser, Debug)] -#[command(version, about)] +#[command( + version = version_message(env!("CARGO_PKG_VERSION")), + about +)] struct Inception { #[command(subcommand)] command: InceptionSubcommands, @@ -86,13 +90,16 @@ struct InceptionGlobalArgs { async fn main() -> anyhow::Result<()> { human_panic::setup_panic!(); + // We must parse arguments before printing the intro, because some autogenerated + // Clap commands (like `--version`) would look odd otherwise. + let inception_args = Inception::parse(); + init_prompt_theme(); logger::new_empty_line(); logger::intro(); let shell = Shell::new().unwrap(); - let inception_args = Inception::parse(); init_global_config_inner(&shell, &inception_args.global)?; diff --git a/zk_toolbox/crates/zk_inception/src/messages.rs b/zk_toolbox/crates/zk_inception/src/messages.rs index c5b77f63eba..aca0ab6967d 100644 --- a/zk_toolbox/crates/zk_inception/src/messages.rs +++ b/zk_toolbox/crates/zk_inception/src/messages.rs @@ -75,11 +75,13 @@ pub(super) const MSG_DISTRIBUTING_ETH_SPINNER: &str = "Distributing eth..."; pub(super) const MSG_MINT_BASE_TOKEN_SPINNER: &str = "Minting base token to the governance addresses..."; pub(super) const MSG_INTALLING_DEPS_SPINNER: &str = "Installing and building dependencies..."; +pub(super) const MSG_PREPARING_CONFIG_SPINNER: &str = "Preparing config files..."; pub(super) const MSG_DEPLOYING_ERC20_SPINNER: &str = "Deploying ERC20 contracts..."; pub(super) const MSG_DEPLOYING_ECOSYSTEM_CONTRACTS_SPINNER: &str = "Deploying ecosystem contracts..."; pub(super) const MSG_REGISTERING_CHAIN_SPINNER: &str = "Registering chain..."; pub(super) const MSG_ACCEPTING_ADMIN_SPINNER: &str = "Accepting admin..."; +pub(super) const MSG_DA_PAIR_REGISTRATION_SPINNER: &str = "Registering DA pair..."; pub(super) const MSG_UPDATING_TOKEN_MULTIPLIER_SETTER_SPINNER: &str = "Updating token multiplier setter..."; pub(super) const MSG_TOKEN_MULTIPLIER_SETTER_UPDATED_TO: &str = @@ -121,6 +123,14 @@ pub(super) fn msg_chain_load_err(chain_name: &str) -> String { format!("Failed to load chain config for {chain_name}") } +/// Build ecosystem transactions related messages +pub(super) const MSG_SENDER_ADDRESS_PROMPT: &str = "What is the address of the transaction sender?"; +pub(super) const MSG_BUILDING_ECOSYSTEM: &str = "Building ecosystem
transactions"; +pub(super) const MSG_BUILDING_ECOSYSTEM_CONTRACTS_SPINNER: &str = "Building ecosystem contracts..."; +pub(super) const MSG_WRITING_OUTPUT_FILES_SPINNER: &str = "Writing output files..."; +pub(super) const MSG_ECOSYSTEM_TXN_OUTRO: &str = "Transactions successfully built"; +pub(super) const MSG_ECOSYSTEM_TXN_OUT_PATH_INVALID_ERR: &str = "Invalid path"; + /// Chain create related messages pub(super) const MSG_PROVER_MODE_HELP: &str = "Prover options"; pub(super) const MSG_CHAIN_ID_HELP: &str = "Chain ID"; @@ -177,6 +187,7 @@ pub(super) const MSG_INITIALIZING_SERVER_DATABASE: &str = "Initializing server d pub(super) const MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR: &str = "Failed to drop server database"; pub(super) const MSG_INITIALIZING_PROVER_DATABASE: &str = "Initializing prover database"; pub(super) const MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR: &str = "Failed to drop prover database"; + /// Chain update related messages pub(super) const MSG_WALLETS_CONFIG_MUST_BE_PRESENT: &str = "Wallets configuration must be present"; @@ -218,6 +229,14 @@ pub(super) const MSG_DEPLOYING_L2_CONTRACT_SPINNER: &str = "Deploying l2 contrac /// Chain deploy paymaster related messages pub(super) const MSG_DEPLOYING_PAYMASTER: &str = "Deploying paymaster"; +/// Chain build related messages +pub(super) const MSG_BUILDING_CHAIN_REGISTRATION_TXNS_SPINNER: &str = + "Building chain registration transactions..."; +pub(super) const MSG_CHAIN_TXN_OUT_PATH_INVALID_ERR: &str = "Invalid path"; +pub(super) const MSG_CHAIN_TXN_MISSING_CONTRACT_CONFIG: &str = + "Missing contract.yaml, please be sure to run this command within initialized ecosystem"; +pub(super) const MSG_CHAIN_TRANSACTIONS_BUILT: &str = "Chain transactions successfully built"; + /// Run server related messages pub(super) const MSG_SERVER_COMPONENTS_HELP: &str = "Components of server to run"; pub(super) const MSG_ENABLE_CONSENSUS_HELP: &str = "Enable consensus"; @@ -396,10 +415,12 @@ pub(super) const MSG_GET_ZKSOLC_RELEASES_ERR: &str = "Failed to get zksolc relea pub(super) const MSG_FETCHING_ZKSOLC_RELEASES_SPINNER: &str = "Fetching zksolc releases..."; pub(super) const MSG_FETCHING_ZKVYPER_RELEASES_SPINNER: &str = "Fetching zkvyper releases..."; pub(super) const MSG_FETCH_SOLC_RELEASES_SPINNER: &str = "Fetching solc releases..."; +pub(super) const MSG_FETCH_ERA_VM_SOLC_RELEASES_SPINNER: &str = "Fetching era vm solc releases..."; pub(super) const MSG_FETCHING_VYPER_RELEASES_SPINNER: &str = "Fetching vyper releases..."; pub(super) const MSG_ZKSOLC_VERSION_PROMPT: &str = "Select the minimal zksolc version:"; pub(super) const MSG_ZKVYPER_VERSION_PROMPT: &str = "Select the minimal zkvyper version:"; pub(super) const MSG_SOLC_VERSION_PROMPT: &str = "Select the minimal solc version:"; +pub(super) const MSG_ERA_VM_SOLC_VERSION_PROMPT: &str = "Select the minimal era vm solc version:"; pub(super) const MSG_VYPER_VERSION_PROMPT: &str = "Select the minimal vyper version:"; pub(super) const MSG_NO_RELEASES_FOUND_ERR: &str = "No releases found for current architecture"; pub(super) const MSG_NO_VERSION_FOUND_ERR: &str = "No version found"; @@ -407,6 +428,7 @@ pub(super) const MSG_ARCH_NOT_SUPPORTED_ERR: &str = "Architecture not supported" pub(super) const MSG_OS_NOT_SUPPORTED_ERR: &str = "OS not supported"; pub(super) const MSG_GET_VYPER_RELEASES_ERR: &str = "Failed to get vyper releases"; pub(super) const MSG_GET_SOLC_RELEASES_ERR: &str = "Failed to get solc releases"; +pub(super) const MSG_GET_ERA_VM_SOLC_RELEASES_ERR: &str = "Failed to get era vm solc 
releases"; pub(super) const MSG_GET_ZKVYPER_RELEASES_ERR: &str = "Failed to get zkvyper releases"; pub(super) fn msg_binary_already_exists(name: &str, version: &str) -> String { @@ -433,7 +455,6 @@ pub(super) const MSG_DIFF_EN_CONFIG: &str = "Added the following fields to the external node config:"; pub(super) const MSG_DIFF_EN_GENERAL_CONFIG: &str = "Added the following fields to the external node generalconfig:"; -pub(super) const MSG_INVALID_KEY_TYPE_ERR: &str = "Invalid key type"; pub(super) const MSG_UPDATING_ERA_OBSERVABILITY_SPINNER: &str = "Updating era observability..."; pub(super) fn msg_diff_genesis_config(chain: &str) -> String { diff --git a/zk_toolbox/crates/zk_inception/src/utils/consensus.rs b/zk_toolbox/crates/zk_inception/src/utils/consensus.rs index 06848334a6e..0b24bbe5cdc 100644 --- a/zk_toolbox/crates/zk_inception/src/utils/consensus.rs +++ b/zk_toolbox/crates/zk_inception/src/utils/consensus.rs @@ -95,6 +95,7 @@ pub fn get_genesis_specs( attesters: vec![attester], leader, registry_address: None, + seed_peers: [].into(), } } diff --git a/zk_toolbox/crates/zk_supervisor/Cargo.toml b/zk_toolbox/crates/zk_supervisor/Cargo.toml index d9c5c2196fa..4c450a73639 100644 --- a/zk_toolbox/crates/zk_supervisor/Cargo.toml +++ b/zk_toolbox/crates/zk_supervisor/Cargo.toml @@ -15,6 +15,7 @@ anyhow.workspace = true clap.workspace = true common.workspace = true config.workspace = true +chrono.workspace = true ethers.workspace = true human-panic.workspace = true strum.workspace = true @@ -27,3 +28,4 @@ clap-markdown.workspace = true futures.workspace = true types.workspace = true serde_yaml.workspace = true +zksync_basic_types.workspace = true diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/clean/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/clean/mod.rs index 92c8a0f1086..06ee1347ea4 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/clean/mod.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/clean/mod.rs @@ -42,7 +42,8 @@ pub fn containers(shell: &Shell) -> anyhow::Result<()> { } pub fn contracts(shell: &Shell, ecosystem_config: &EcosystemConfig) -> anyhow::Result<()> { - let path_to_foundry = ecosystem_config.path_to_foundry(); + let path_to_foundry = ecosystem_config.path_to_l1_foundry(); + let contracts_path = ecosystem_config.link_to_code.join("contracts"); logger::info(MSG_CONTRACTS_CLEANING); shell .remove_path(path_to_foundry.join("broadcast")) @@ -62,18 +63,35 @@ pub fn contracts(shell: &Shell, ecosystem_config: &EcosystemConfig) -> anyhow::R shell .remove_path(path_to_foundry.join("typechain")) .context("typechain")?; + shell + .remove_path(contracts_path.join("l2-contracts/cache-forge")) + .context("l2-contracts/cache-forge")?; + shell + .remove_path(contracts_path.join("l2-contracts/zkout")) + .context("l2-contracts/zkout")?; + shell + .remove_path(contracts_path.join("system-contracts/cache-forge")) + .context("system-contracts/cache-forge")?; + shell + .remove_path(contracts_path.join("system-contracts/zkout")) + .context("system-contracts/zkout")?; + shell + .remove_path(contracts_path.join("system-contracts/contracts-preprocessed")) + .context("system-contracts/contracts-preprocessed")?; shell .remove_path(path_to_foundry.join("script-config")) .context("remove script-config")?; shell .create_dir(path_to_foundry.join("script-config")) .context("create script-config")?; + shell.write_file(path_to_foundry.join("script-config/.gitkeep"), "")?; shell .remove_path(path_to_foundry.join("script-out")) .context("remove script-out")?; shell 
.create_dir(path_to_foundry.join("script-out")) .context("create script-out")?; + shell.write_file(path_to_foundry.join("script-out/.gitkeep"), "")?; logger::info(MSG_CONTRACTS_CLEANING_FINISHED); Ok(()) } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/config_writer.rs b/zk_toolbox/crates/zk_supervisor/src/commands/config_writer.rs new file mode 100644 index 00000000000..3adecb36d06 --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/commands/config_writer.rs @@ -0,0 +1,35 @@ +use anyhow::Context; +use clap::Parser; +use common::{config::global_config, logger, Prompt}; +use config::{override_config, EcosystemConfig}; +use xshell::Shell; + +use crate::messages::{ + msg_overriding_config, MSG_CHAIN_NOT_FOUND_ERR, MSG_OVERRIDE_CONFIG_PATH_HELP, + MSG_OVERRIDE_SUCCESS, MSG_OVERRRIDE_CONFIG_PATH_PROMPT, +}; + +#[derive(Debug, Parser)] +pub struct ConfigWriterArgs { + #[clap(long, short, help = MSG_OVERRIDE_CONFIG_PATH_HELP)] + pub path: Option<String>, +} + +impl ConfigWriterArgs { + pub fn get_config_path(self) -> String { + self.path + .unwrap_or_else(|| Prompt::new(MSG_OVERRRIDE_CONFIG_PATH_PROMPT).ask()) + } +} + +pub fn run(shell: &Shell, args: ConfigWriterArgs) -> anyhow::Result<()> { + let path = args.get_config_path().into(); + let ecosystem = EcosystemConfig::from_file(shell)?; + let chain = ecosystem + .load_chain(global_config().chain_name.clone()) + .context(MSG_CHAIN_NOT_FOUND_ERR)?; + logger::step(msg_overriding_config(chain.name.clone())); + override_config(shell, path, &chain)?; + logger::outro(MSG_OVERRIDE_SUCCESS); + Ok(()) +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/contracts.rs b/zk_toolbox/crates/zk_supervisor/src/commands/contracts.rs index 1238d7a87a0..b0f129f7dde 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/contracts.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/contracts.rs @@ -7,8 +7,9 @@ use xshell::{cmd, Shell}; use crate::messages::{ MSG_BUILDING_CONTRACTS, MSG_BUILDING_CONTRACTS_SUCCESS, MSG_BUILDING_L1_CONTRACTS_SPINNER, - MSG_BUILDING_L2_CONTRACTS_SPINNER, MSG_BUILDING_SYSTEM_CONTRACTS_SPINNER, - MSG_BUILDING_TEST_CONTRACTS_SPINNER, MSG_BUILD_L1_CONTRACTS_HELP, MSG_BUILD_L2_CONTRACTS_HELP, + MSG_BUILDING_L1_DA_CONTRACTS_SPINNER, MSG_BUILDING_L2_CONTRACTS_SPINNER, + MSG_BUILDING_SYSTEM_CONTRACTS_SPINNER, MSG_BUILDING_TEST_CONTRACTS_SPINNER, + MSG_BUILD_L1_CONTRACTS_HELP, MSG_BUILD_L1_DA_CONTRACTS_HELP, MSG_BUILD_L2_CONTRACTS_HELP, MSG_BUILD_SYSTEM_CONTRACTS_HELP, MSG_BUILD_TEST_CONTRACTS_HELP, MSG_CONTRACTS_DEPS_SPINNER, MSG_NOTHING_TO_BUILD_MSG, }; @@ -17,6 +18,8 @@ pub struct ContractsArgs { #[clap(long, alias = "l1", help = MSG_BUILD_L1_CONTRACTS_HELP, default_missing_value = "true", num_args = 0..=1)] pub l1_contracts: Option<bool>, + #[clap(long, alias = "l1-da", help = MSG_BUILD_L1_DA_CONTRACTS_HELP, default_missing_value = "true", num_args = 0..=1)] + pub l1_da_contracts: Option<bool>, #[clap(long, alias = "l2", help = MSG_BUILD_L2_CONTRACTS_HELP, default_missing_value = "true", num_args = 0..=1)] pub l2_contracts: Option<bool>, #[clap(long, alias = "sc", help = MSG_BUILD_SYSTEM_CONTRACTS_HELP, default_missing_value = "true", num_args = 0..=1)] @@ -31,9 +34,11 @@ impl ContractsArgs { && self.l2_contracts.is_none() && self.system_contracts.is_none() && self.test_contracts.is_none() + && self.l1_da_contracts.is_none() { return vec![ ContractType::L1, + ContractType::L1DA, ContractType::L2, ContractType::SystemContracts, ContractType::TestContracts, @@ -45,6 +50,9 @@ impl ContractsArgs { if
self.l1_contracts.unwrap_or(false) { contracts.push(ContractType::L1); } + if self.l1_da_contracts.unwrap_or(false) { + contracts.push(ContractType::L1DA); + } if self.l2_contracts.unwrap_or(false) { contracts.push(ContractType::L2); } @@ -62,6 +70,7 @@ impl ContractsArgs { #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum ContractType { L1, + L1DA, L2, SystemContracts, TestContracts, @@ -78,10 +87,15 @@ impl ContractBuilder { fn new(ecosystem: &EcosystemConfig, contract_type: ContractType) -> Self { match contract_type { ContractType::L1 => Self { - dir: ecosystem.path_to_foundry(), + dir: ecosystem.path_to_l1_foundry(), cmd: "forge build".to_string(), msg: MSG_BUILDING_L1_CONTRACTS_SPINNER.to_string(), }, + ContractType::L1DA => Self { + dir: ecosystem.link_to_code.join("contracts/da-contracts"), + cmd: "forge build".to_string(), + msg: MSG_BUILDING_L1_DA_CONTRACTS_SPINNER.to_string(), + }, ContractType::L2 => Self { dir: ecosystem.link_to_code.clone(), cmd: "yarn l2-contracts build".to_string(), diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs index 875f2982c95..d3cb99f1e34 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs @@ -1,9 +1,11 @@ pub mod clean; +pub mod config_writer; pub mod contracts; pub mod database; pub mod fmt; pub mod lint; pub(crate) mod lint_utils; pub mod prover; +pub mod send_transactions; pub mod snapshot; pub mod test; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/args/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/args/mod.rs new file mode 100644 index 00000000000..e3d4f220ff2 --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/args/mod.rs @@ -0,0 +1,69 @@ +use std::path::PathBuf; + +use clap::Parser; +use common::Prompt; +use url::Url; + +use crate::{ + defaults::LOCAL_RPC_URL, + messages::{ + MSG_INVALID_L1_RPC_URL_ERR, MSG_PROMPT_L1_RPC_URL, MSG_PROMPT_SECRET_KEY, + MSG_PROMPT_TRANSACTION_FILE, + }, +}; + +const DEFAULT_TRANSACTION_CONFIRMATIONS: usize = 2; + +#[derive(Debug, Parser)] +pub struct SendTransactionsArgs { + #[clap(long)] + pub file: Option<PathBuf>, + #[clap(long)] + pub private_key: Option<String>, + #[clap(long)] + pub l1_rpc_url: Option<String>, + #[clap(long)] + pub confirmations: Option<usize>, +} + +#[derive(Debug)] +pub struct SendTransactionsArgsFinal { + pub file: PathBuf, + pub private_key: String, + pub l1_rpc_url: String, + pub confirmations: usize, +} + +impl SendTransactionsArgs { + pub fn fill_values_with_prompt(self) -> SendTransactionsArgsFinal { + let file = self + .file + .unwrap_or_else(|| Prompt::new(MSG_PROMPT_TRANSACTION_FILE).ask()); + + let private_key = self + .private_key + .unwrap_or_else(|| Prompt::new(MSG_PROMPT_SECRET_KEY).ask()); + + let l1_rpc_url = self.l1_rpc_url.unwrap_or_else(|| { + Prompt::new(MSG_PROMPT_L1_RPC_URL) + .default(LOCAL_RPC_URL) + .validate_with(|val: &String| -> Result<(), String> { + Url::parse(val) + .map(|_| ()) + .map_err(|_| MSG_INVALID_L1_RPC_URL_ERR.to_string()) + }) + .ask() + }); + + let confirmations = self + .confirmations + .unwrap_or(DEFAULT_TRANSACTION_CONFIRMATIONS); + + SendTransactionsArgsFinal { + file, + private_key, + l1_rpc_url, + confirmations, + } + } +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/mod.rs new file mode 100644 index 00000000000..79d8efc600e --- /dev/null +++
b/zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/mod.rs @@ -0,0 +1,132 @@ +use std::{ + fs::{File, OpenOptions}, + io::{Read, Write}, + ops::Add, + path::PathBuf, + time::Duration, +}; + +use anyhow::Context; +use args::SendTransactionsArgs; +use chrono::Local; +use common::{ethereum::create_ethers_client, logger}; +use config::EcosystemConfig; +use ethers::{abi::Bytes, providers::Middleware, types::TransactionRequest, utils::hex}; +use serde::Deserialize; +use tokio::time::sleep; +use xshell::Shell; +use zksync_basic_types::{H160, U256}; + +use crate::{ + consts::DEFAULT_UNSIGNED_TRANSACTIONS_DIR, + messages::{ + msg_send_txns_outro, MSG_FAILED_TO_SEND_TXN_ERR, MSG_UNABLE_TO_OPEN_FILE_ERR, + MSG_UNABLE_TO_READ_FILE_ERR, MSG_UNABLE_TO_READ_PARSE_JSON_ERR, + MSG_UNABLE_TO_WRITE_FILE_ERR, + }, +}; + +pub mod args; + +const MAX_ATTEMPTS: u32 = 3; + +#[derive(Deserialize)] +struct Transaction { + from: String, + gas: String, + input: String, +} + +#[derive(Deserialize)] +struct Txn { + #[serde(rename = "contractAddress")] + contract_address: String, + transaction: Transaction, +} + +#[derive(Deserialize)] +struct Txns { + transactions: Vec, +} + +pub async fn run(shell: &Shell, args: SendTransactionsArgs) -> anyhow::Result<()> { + let args = args.fill_values_with_prompt(); + + let ecosystem_config = EcosystemConfig::from_file(shell)?; + let chain_id = ecosystem_config.l1_network.chain_id(); + + // Read the JSON file + let mut file = File::open(args.file).context(MSG_UNABLE_TO_OPEN_FILE_ERR)?; + let mut data = String::new(); + file.read_to_string(&mut data) + .context(MSG_UNABLE_TO_READ_FILE_ERR)?; + + // Parse the JSON file + let txns: Txns = serde_json::from_str(&data).context(MSG_UNABLE_TO_READ_PARSE_JSON_ERR)?; + + let timestamp = Local::now().format("%Y%m%d_%H%M%S").to_string(); + let log_file = ecosystem_config + .link_to_code + .join(DEFAULT_UNSIGNED_TRANSACTIONS_DIR) + .join(format!("{}_receipt.log", timestamp)); + + let client = create_ethers_client(args.private_key.parse()?, args.l1_rpc_url, Some(chain_id))?; + let mut nonce = client.get_transaction_count(client.address(), None).await?; + let gas_price = client.get_gas_price().await?; + + for txn in txns.transactions { + let to: H160 = txn.contract_address.parse()?; + let from: H160 = txn.transaction.from.parse()?; + let gas_limit: U256 = txn.transaction.gas.parse()?; + let input_data: Bytes = hex::decode(txn.transaction.input)?; + + let tx = TransactionRequest::new() + .to(to) + .from(from) + .gas(gas_limit) + .gas_price(gas_price) + .nonce(nonce) + .data(input_data) + .chain_id(chain_id); + + nonce = nonce.add(1); + + let mut attempts = 0; + let receipt = loop { + attempts += 1; + match client + .send_transaction(tx.clone(), None) + .await? 
+ .confirmations(args.confirmations) + .interval(Duration::from_millis(30)) + .await + { + Ok(receipt) => break receipt, + Err(e) if attempts < MAX_ATTEMPTS => { + logger::info(format!("Attempt {} failed: {:?}", attempts, e).as_str()); + sleep(Duration::from_secs(1)).await; + continue; + } + Err(e) => return Err(e).context(MSG_FAILED_TO_SEND_TXN_ERR)?, + } + }; + + log_receipt(&log_file, format!("{:?}", receipt).as_str())?; + } + + logger::outro(msg_send_txns_outro(log_file.to_string_lossy().as_ref())); + + Ok(()) +} + +fn log_receipt(path: &PathBuf, receipt: &str) -> anyhow::Result<()> { + let mut file = OpenOptions::new() + .append(true) + .create(true) + .open(path) + .context(MSG_UNABLE_TO_OPEN_FILE_ERR)?; + + writeln!(file, "{}", receipt).context(MSG_UNABLE_TO_WRITE_FILE_ERR)?; + + Ok(()) +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/db.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/db.rs new file mode 100644 index 00000000000..a08b0404605 --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/db.rs @@ -0,0 +1,34 @@ +use std::path::Path; + +use common::{cmd::Cmd, db::wait_for_db, logger}; +use xshell::{cmd, Shell}; + +use crate::{commands::database, dals::Dal, messages::MSG_RESETTING_TEST_DATABASES}; + +pub async fn reset_test_databases( + shell: &Shell, + link_to_code: &Path, + dals: Vec<Dal>, +) -> anyhow::Result<()> { + logger::info(MSG_RESETTING_TEST_DATABASES); + + Cmd::new(cmd!( + shell, + "docker compose -f docker-compose-unit-tests.yml down" + )) + .run()?; + Cmd::new(cmd!( + shell, + "docker compose -f docker-compose-unit-tests.yml up -d" + )) + .run()?; + + for dal in dals { + let mut url = dal.url.clone(); + url.set_path(""); + wait_for_db(&url, 3).await?; + database::reset::reset_database(shell, link_to_code, dal.clone()).await?; + } + + Ok(()) +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs index fb3e1436acc..6a91a4579d7 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs @@ -42,7 +42,7 @@ pub async fn run(shell: &Shell, args: IntegrationArgs) -> anyhow::Result<()> { let test_pattern = args.test_pattern; let mut command = cmd!( shell, - "yarn jest --forceExit --testTimeout 120000 -t {test_pattern...}" + "yarn jest --forceExit --testTimeout 350000 -t {test_pattern...}" ) .env("CHAIN_NAME", ecosystem_config.current_chain()) .env("MASTER_WALLET_PK", wallets.get_test_pk(&chain_config)?); diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs index 712e2f75eef..7d2af71ae9c 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs @@ -13,6 +13,7 @@ use crate::messages::{ mod args; mod build; +mod db; mod integration; mod l1_contracts; mod loadtest; @@ -57,7 +58,7 @@ pub async fn run(shell: &Shell, args: TestCommands) -> anyhow::Result<()> { TestCommands::Build => build::run(shell), TestCommands::Rust(args) => rust::run(shell, args).await, TestCommands::L1Contracts => l1_contracts::run(shell), - TestCommands::Prover => prover::run(shell), + TestCommands::Prover => prover::run(shell).await, TestCommands::Wallet => wallet::run(shell), TestCommands::Loadtest => loadtest::run(shell), } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs
b/zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs index 4e9c4fc2528..f48b359a935 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs @@ -1,15 +1,29 @@ +use std::str::FromStr; + use common::{cmd::Cmd, logger}; use config::EcosystemConfig; +use url::Url; use xshell::{cmd, Shell}; -use crate::messages::MSG_PROVER_TEST_SUCCESS; +use crate::{ + commands::test::db::reset_test_databases, + dals::{Dal, PROVER_DAL_PATH}, + defaults::TEST_DATABASE_PROVER_URL, + messages::MSG_PROVER_TEST_SUCCESS, +}; -pub fn run(shell: &Shell) -> anyhow::Result<()> { +pub async fn run(shell: &Shell) -> anyhow::Result<()> { let ecosystem = EcosystemConfig::from_file(shell)?; - let _dir_guard = shell.push_dir(ecosystem.link_to_code.join("prover")); + let dals = vec![Dal { + url: Url::from_str(TEST_DATABASE_PROVER_URL)?, + path: PROVER_DAL_PATH.to_string(), + }]; + reset_test_databases(shell, &ecosystem.link_to_code, dals).await?; + let _dir_guard = shell.push_dir(ecosystem.link_to_code.join("prover")); Cmd::new(cmd!(shell, "cargo test --release --workspace --locked")) .with_force_run() + .env("TEST_DATABASE_PROVER_URL", TEST_DATABASE_PROVER_URL) .run()?; logger::outro(MSG_PROVER_TEST_SUCCESS); diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs index 3ac331becc9..fdee03fe63e 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs @@ -1,19 +1,19 @@ -use std::{path::Path, str::FromStr}; +use std::str::FromStr; use anyhow::Context; -use common::{cmd::Cmd, db::wait_for_db, logger}; +use common::{cmd::Cmd, logger}; use config::EcosystemConfig; use url::Url; use xshell::{cmd, Shell}; use super::args::rust::RustArgs; use crate::{ - commands::database, + commands::test::db::reset_test_databases, dals::{Dal, CORE_DAL_PATH, PROVER_DAL_PATH}, defaults::{TEST_DATABASE_PROVER_URL, TEST_DATABASE_SERVER_URL}, messages::{ - MSG_CHAIN_NOT_FOUND_ERR, MSG_POSTGRES_CONFIG_NOT_FOUND_ERR, MSG_RESETTING_TEST_DATABASES, - MSG_UNIT_TESTS_RUN_SUCCESS, MSG_USING_CARGO_NEXTEST, + MSG_CHAIN_NOT_FOUND_ERR, MSG_POSTGRES_CONFIG_NOT_FOUND_ERR, MSG_UNIT_TESTS_RUN_SUCCESS, + MSG_USING_CARGO_NEXTEST, }, }; @@ -78,31 +78,3 @@ pub async fn run(shell: &Shell, args: RustArgs) -> anyhow::Result<()> { logger::outro(MSG_UNIT_TESTS_RUN_SUCCESS); Ok(()) } - -async fn reset_test_databases( - shell: &Shell, - link_to_code: &Path, - dals: Vec<Dal>, -) -> anyhow::Result<()> { - logger::info(MSG_RESETTING_TEST_DATABASES); - - Cmd::new(cmd!( - shell, - "docker compose -f docker-compose-unit-tests.yml down" - )) - .run()?; - Cmd::new(cmd!( - shell, - "docker compose -f docker-compose-unit-tests.yml up -d" - )) - .run()?; - - for dal in dals { - let mut url = dal.url.clone(); - url.set_path(""); - wait_for_db(&url, 3).await?; - database::reset::reset_database(shell, link_to_code, dal.clone()).await?; - } - - Ok(()) -} diff --git a/zk_toolbox/crates/zk_supervisor/src/consts.rs b/zk_toolbox/crates/zk_supervisor/src/consts.rs new file mode 100644 index 00000000000..66f00c7553b --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/consts.rs @@ -0,0 +1 @@ +pub const DEFAULT_UNSIGNED_TRANSACTIONS_DIR: &str = "transactions"; diff --git a/zk_toolbox/crates/zk_supervisor/src/defaults.rs b/zk_toolbox/crates/zk_supervisor/src/defaults.rs index f4bae739c2d..d9325402f53 100644 --- a/zk_toolbox/crates/zk_supervisor/src/defaults.rs +++
b/zk_toolbox/crates/zk_supervisor/src/defaults.rs @@ -2,3 +2,4 @@ pub const TEST_DATABASE_SERVER_URL: &str = "postgres://postgres:notsecurepassword@localhost:5433/zksync_local_test"; pub const TEST_DATABASE_PROVER_URL: &str = "postgres://postgres:notsecurepassword@localhost:5433/prover_local_test"; +pub const LOCAL_RPC_URL: &str = "http://127.0.0.1:8545"; diff --git a/zk_toolbox/crates/zk_supervisor/src/main.rs b/zk_toolbox/crates/zk_supervisor/src/main.rs index 32aefa7fcad..242affd8a71 100644 --- a/zk_toolbox/crates/zk_supervisor/src/main.rs +++ b/zk_toolbox/crates/zk_supervisor/src/main.rs @@ -1,6 +1,7 @@ use clap::{Parser, Subcommand}; use commands::{ - contracts::ContractsArgs, database::DatabaseCommands, lint::LintArgs, prover::ProverCommands, + config_writer::ConfigWriterArgs, contracts::ContractsArgs, database::DatabaseCommands, + lint::LintArgs, prover::ProverCommands, send_transactions::args::SendTransactionsArgs, snapshot::SnapshotCommands, test::TestCommands, }; use common::{ @@ -8,24 +9,30 @@ use common::{ config::{global_config, init_global_config, GlobalConfig}, error::log_error, init_prompt_theme, logger, + version::version_message, }; use config::EcosystemConfig; use messages::{ - msg_global_chain_does_not_exist, MSG_CONTRACTS_ABOUT, MSG_PROVER_VERSION_ABOUT, - MSG_SUBCOMMAND_CLEAN, MSG_SUBCOMMAND_DATABASE_ABOUT, MSG_SUBCOMMAND_FMT_ABOUT, - MSG_SUBCOMMAND_LINT_ABOUT, MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT, MSG_SUBCOMMAND_TESTS_ABOUT, + msg_global_chain_does_not_exist, MSG_CONFIG_WRITER_ABOUT, MSG_CONTRACTS_ABOUT, + MSG_PROVER_VERSION_ABOUT, MSG_SEND_TXNS_ABOUT, MSG_SUBCOMMAND_CLEAN, + MSG_SUBCOMMAND_DATABASE_ABOUT, MSG_SUBCOMMAND_FMT_ABOUT, MSG_SUBCOMMAND_LINT_ABOUT, + MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT, MSG_SUBCOMMAND_TESTS_ABOUT, }; use xshell::Shell; use crate::commands::{clean::CleanCommands, fmt::FmtArgs}; mod commands; +mod consts; mod dals; mod defaults; mod messages; #[derive(Parser, Debug)] -#[command(version, about)] +#[command( + version = version_message(env!("CARGO_PKG_VERSION")), + about +)] struct Supervisor { #[command(subcommand)] command: SupervisorSubcommands, @@ -53,6 +60,10 @@ enum SupervisorSubcommands { Prover(ProverCommands), #[command(about = MSG_CONTRACTS_ABOUT)] Contracts(ContractsArgs), + #[command(about = MSG_CONFIG_WRITER_ABOUT, alias = "o")] + ConfigWriter(ConfigWriterArgs), + #[command(about = MSG_SEND_TXNS_ABOUT)] + SendTransactions(SendTransactionsArgs), } #[derive(Parser, Debug)] @@ -73,14 +84,16 @@ struct SupervisorGlobalArgs { async fn main() -> anyhow::Result<()> { human_panic::setup_panic!(); + // We must parse arguments before printing the intro, because some autogenerated + // Clap commands (like `--version`) would look odd otherwise.
+ let args = Supervisor::parse(); + init_prompt_theme(); logger::new_empty_line(); logger::intro(); let shell = Shell::new().unwrap(); - let args = Supervisor::parse(); - init_global_config_inner(&shell, &args.global)?; if !global_config().ignore_prerequisites { @@ -111,6 +124,10 @@ async fn run_subcommand(args: Supervisor, shell: &Shell) -> anyhow::Result<()> { SupervisorSubcommands::Fmt(args) => commands::fmt::run(shell.clone(), args).await?, SupervisorSubcommands::Prover(command) => commands::prover::run(shell, command).await?, SupervisorSubcommands::Contracts(args) => commands::contracts::run(shell, args)?, + SupervisorSubcommands::ConfigWriter(args) => commands::config_writer::run(shell, args)?, + SupervisorSubcommands::SendTransactions(args) => { + commands::send_transactions::run(shell, args).await? + } } Ok(()) } diff --git a/zk_toolbox/crates/zk_supervisor/src/messages.rs b/zk_toolbox/crates/zk_supervisor/src/messages.rs index 311a6e11c32..269f9ecc102 100644 --- a/zk_toolbox/crates/zk_supervisor/src/messages.rs +++ b/zk_toolbox/crates/zk_supervisor/src/messages.rs @@ -14,6 +14,7 @@ pub(super) const MSG_SUBCOMMAND_TESTS_ABOUT: &str = "Run tests"; pub(super) const MSG_SUBCOMMAND_CLEAN: &str = "Clean artifacts"; pub(super) const MSG_SUBCOMMAND_LINT_ABOUT: &str = "Lint code"; pub(super) const MSG_CONTRACTS_ABOUT: &str = "Build contracts"; +pub(super) const MSG_CONFIG_WRITER_ABOUT: &str = "Overwrite general config"; pub(super) const MSG_SUBCOMMAND_FMT_ABOUT: &str = "Format code"; @@ -112,10 +113,12 @@ pub(super) const MSG_BUILDING_CONTRACTS: &str = "Building contracts"; pub(super) const MSG_CONTRACTS_DEPS_SPINNER: &str = "Installing dependencies.."; pub(super) const MSG_BUILDING_L2_CONTRACTS_SPINNER: &str = "Building L2 contracts.."; pub(super) const MSG_BUILDING_L1_CONTRACTS_SPINNER: &str = "Building L1 contracts.."; +pub(super) const MSG_BUILDING_L1_DA_CONTRACTS_SPINNER: &str = "Building L1 DA contracts.."; pub(super) const MSG_BUILDING_SYSTEM_CONTRACTS_SPINNER: &str = "Building system contracts.."; pub(super) const MSG_BUILDING_TEST_CONTRACTS_SPINNER: &str = "Building test contracts.."; pub(super) const MSG_BUILDING_CONTRACTS_SUCCESS: &str = "Contracts built successfully"; pub(super) const MSG_BUILD_L1_CONTRACTS_HELP: &str = "Build L1 contracts"; +pub(super) const MSG_BUILD_L1_DA_CONTRACTS_HELP: &str = "Build L1 DA contracts"; pub(super) const MSG_BUILD_L2_CONTRACTS_HELP: &str = "Build L2 contracts"; pub(super) const MSG_BUILD_SYSTEM_CONTRACTS_HELP: &str = "Build system contracts"; pub(super) const MSG_BUILD_TEST_CONTRACTS_HELP: &str = "Build test contracts"; @@ -202,3 +205,28 @@ pub(super) const MSG_DESERIALIZE_TEST_WALLETS_ERR: &str = "Impossible to deseria pub(super) const MSG_WALLETS_TEST_SUCCESS: &str = "Wallets test success"; pub(super) const MSG_LOADTEST_ABOUT: &str = "Run loadtest"; + +pub(super) const MSG_OVERRIDE_CONFIG_PATH_HELP: &str = "Path to the config file to override"; +pub(super) const MSG_OVERRRIDE_CONFIG_PATH_PROMPT: &str = + "Provide path to the config file to override"; +pub(super) const MSG_OVERRIDE_SUCCESS: &str = "Config was overridden successfully"; + +pub(super) fn msg_overriding_config(chain: String) -> String { + format!("Overriding general config for chain {}", chain) +} + +// Send transactions related messages +pub(super) const MSG_SEND_TXNS_ABOUT: &str = "Send transactions from file"; +pub(super) const MSG_PROMPT_TRANSACTION_FILE: &str = "Path to transactions file"; +pub(super) const MSG_PROMPT_SECRET_KEY: &str = "Secret key of the sender"; 
+pub(super) const MSG_PROMPT_L1_RPC_URL: &str = "L1 RPC URL"; +pub(super) fn msg_send_txns_outro(log_file: &str) -> String { + format!("Transaction receipts logged to: {}", log_file) +} + +pub(super) const MSG_UNABLE_TO_OPEN_FILE_ERR: &str = "Unable to open file"; +pub(super) const MSG_UNABLE_TO_READ_FILE_ERR: &str = "Unable to read file"; +pub(super) const MSG_UNABLE_TO_WRITE_FILE_ERR: &str = "Unable to write data to file"; +pub(super) const MSG_UNABLE_TO_READ_PARSE_JSON_ERR: &str = "Unable to parse JSON"; +pub(super) const MSG_FAILED_TO_SEND_TXN_ERR: &str = "Failed to send transaction"; +pub(super) const MSG_INVALID_L1_RPC_URL_ERR: &str = "Invalid L1 RPC URL";
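
For reference, a minimal sketch of the unsigned-transactions file consumed by the new `zk_supervisor send-transactions` command, inferred solely from the `Txns`/`Txn`/`Transaction` structs introduced in this diff. The addresses and gas values below are illustrative placeholders, not real deployment data, and any extra fields present in a real file (e.g. in the output that `build-transactions` copies from the Foundry scripts) are simply ignored by serde's default behavior. Assumes `serde` (with the derive feature), `serde_json`, and `anyhow` as dependencies.

// Standalone sketch (not part of the patch above).
use serde::Deserialize;

#[derive(Deserialize)]
struct Transaction {
    from: String,
    gas: String,
    input: String,
}

#[derive(Deserialize)]
struct Txn {
    // The JSON key is camelCase, matching the rename in send_transactions/mod.rs.
    #[serde(rename = "contractAddress")]
    contract_address: String,
    transaction: Transaction,
}

#[derive(Deserialize)]
struct Txns {
    transactions: Vec<Txn>,
}

fn main() -> anyhow::Result<()> {
    // Placeholder input mirroring the minimal shape `send-transactions` needs.
    let data = r#"{
        "transactions": [
            {
                "contractAddress": "0x0000000000000000000000000000000000000000",
                "transaction": {
                    "from": "0x0000000000000000000000000000000000000001",
                    "gas": "0x5208",
                    "input": "0x"
                }
            }
        ]
    }"#;

    let txns: Txns = serde_json::from_str(data)?;
    // Each entry maps onto one `TransactionRequest` in the command's sending loop.
    assert_eq!(txns.transactions.len(), 1);
    assert_eq!(txns.transactions[0].transaction.gas, "0x5208");
    assert_eq!(txns.transactions[0].contract_address.len(), 42);
    println!(
        "from={} input={}",
        txns.transactions[0].transaction.from, txns.transactions[0].transaction.input
    );
    Ok(())
}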