From 9b63817020e8264accf8dff8845e12b3afcc85da Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mat=C3=ADas=20Ignacio=20Gonz=C3=A1lez?=
Date: Tue, 24 Sep 2024 04:40:52 -0300
Subject: [PATCH 01/16] feat(zk_toolbox): Add options for selective installation of zk_inception and zk_supervisor (#2934)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

- Introduced `--inception` and `--supervisor` options to allow users to specify which binaries to install.
- Removed the `ZKUP_SKIP_ZK_SUPERVISOR` option.

## Why ❔

Make it clear what to install, focusing on what to include rather than what to exclude

---
 zk_toolbox/zkup/README.md | 11 ++++++----
 zk_toolbox/zkup/zkup      | 46 +++++++++++++++++++++++++--------------
 2 files changed, 37 insertions(+), 20 deletions(-)

diff --git a/zk_toolbox/zkup/README.md b/zk_toolbox/zkup/README.md
index 002ca46b5f6..d6e3e634688 100644
--- a/zk_toolbox/zkup/README.md
+++ b/zk_toolbox/zkup/README.md
@@ -39,8 +39,11 @@ The `zkup` script provides various options for installing `zk_toolbox`:
 - `-v, --version `
   Git tag to use when installing from a repository. Ignored if `--branch` or `--commit` is provided.
 
-- `--skip-zk-supervisor`
-  Skip the installation of the `zk_supervisor` binary.
+- `--inception`
+  Installs `zk_inception` from the repository. By default, `zkup` installs `zk_inception` and `zk_supervisor`.
+
+- `--supervisor`
+  Installs `zk_supervisor` from the repository.
 
 ### Local Installation
 
@@ -66,8 +69,8 @@ different repository, branch, commit, or version using the respective options. I
 zkup --repo matter-labs/zksync-era --version 0.1.1
 ```
 
-**Install from a local path, skipping `zk_supervisor`:**
+**Install from a local path, only installing `zk_inception`:**
 
 ```bash
-zkup --path /path/to/local/zk_toolbox --skip-zk-supervisor
+zkup --path /path/to/local/zk_toolbox --inception
 ```
diff --git a/zk_toolbox/zkup/zkup b/zk_toolbox/zkup/zkup
index 16637c35e6a..e6ca1748738 100755
--- a/zk_toolbox/zkup/zkup
+++ b/zk_toolbox/zkup/zkup
@@ -5,10 +5,11 @@ BASE_DIR=${XDG_CONFIG_HOME:-$HOME}
 ZKT_DIR=${ZKT_DIR:-"$BASE_DIR/.zkt"}
 ZKT_BIN_DIR="$ZKT_DIR/bin"
 
-ZKUP_SKIP_ZK_SUPERVISOR=0
+ZKUP_INSTALL_SUPERVISOR=0
+ZKUP_INSTALL_INCEPTION=0
 ZKUP_ALIAS=0
 
-BINS=(zk_inception zk_supervisor)
+BINS=()
 
 main() {
   parse_args "$@"
@@ -18,6 +19,8 @@ main() {
   check_prerequisites
   mkdir -p "$ZKT_BIN_DIR"
 
+  set_bins
+
   if [ -n "$ZKUP_PATH" ]; then
     install_local
   else
@@ -84,7 +87,8 @@ parse_args() {
       shift
       ZKUP_VERSION=$1
       ;;
-    --skip-zk-supervisor) ZKUP_SKIP_ZK_SUPERVISOR=1 ;;
+    --inception) ZKUP_INSTALL_INCEPTION=1 ;;
+    --supervisor) ZKUP_INSTALL_SUPERVISOR=1 ;;
     -a | --alias) ZKUP_ALIAS=1 ;;
     -h | --help)
       usage
@@ -113,15 +117,31 @@ Options:
   -c, --commit   Git commit hash to use when installing from a repository. Ignored if --branch or --version is provided.
   -v, --version   Git tag to use when installing from a repository. Ignored if --branch or --commit is provided.
   -a, --alias   Create aliases zki and zks for zk_inception and zk_supervisor binaries.
-  --skip-zk-supervisor   Skip installation of the zk_supervisor binary.
+  --inception   Installs the zk_inception binary. Default is to install both zk_inception and zk_supervisor binaries.
+  --supervisor   Installs the zk_supervisor binary. Default is to install both zk_inception and zk_supervisor binaries.
   -h, --help   Show this help message and exit.
Examples:
   $(basename "$0") --repo matter-labs/zksync-era --version 0.1.1
-  $(basename "$0") --path /path/to/local/zk_toolbox --skip-zk-supervisor
+  $(basename "$0") --path /path/to/local/zk_toolbox --inception
EOF
}

+set_bins() {
+  if [ $ZKUP_INSTALL_INCEPTION -eq 1 ]; then
+    BINS+=(zk_inception)
+  fi
+
+  if [ $ZKUP_INSTALL_SUPERVISOR -eq 1 ]; then
+    BINS+=(zk_supervisor)
+  fi
+
+  # Installs both binaries if no option is provided
+  if [ ${#BINS[@]} -eq 0 ]; then
+    BINS=(zk_inception zk_supervisor)
+  fi
+}
+
 install_local() {
   if [ ! -d "$ZKUP_PATH/zk_toolbox" ]; then
     err "Path $ZKUP_PATH does not contain zk_toolbox"
@@ -135,10 +155,6 @@ install_local() {
   say "Installing zk_toolbox from $ZKUP_PATH"
   ensure cd "$ZKUP_PATH"/zk_toolbox
 
-  if [ $ZKUP_SKIP_ZK_SUPERVISOR -eq 1 ]; then
-    BINS=(zk_inception)
-  fi
-
   for bin in "${BINS[@]}"; do
     say "Installing $bin"
     ensure cargo install --root $ZKT_DIR --path ./crates/$bin --force
@@ -154,10 +170,6 @@ install_from_repo() {
 
   say "Installing zk_toolbox from $ZKUP_REPO"
 
-  if [ $ZKUP_SKIP_ZK_SUPERVISOR -eq 1 ]; then
-    BINS=(zk_inception)
-  fi
-
   if [ -n "$ZKUP_VERSION" ]; then
     if [ -n "$ZKUP_COMMIT" ] || [ -n "$ZKUP_BRANCH" ]; then
       warn "Ignoring --commit and --branch arguments when installing by version"
@@ -176,10 +188,12 @@ install_from_repo() {
 }
 
 create_alias() {
-  say "Creating alias 'zki' for zk_inception"
-  ensure ln -sf "$ZKT_BIN_DIR/zk_inception" "$ZKT_BIN_DIR/zki"
+  if [[ "${BINS[@]}" =~ "zk_inception" ]]; then
+    say "Creating alias 'zki' for zk_inception"
+    ensure ln -sf "$ZKT_BIN_DIR/zk_inception" "$ZKT_BIN_DIR/zki"
+  fi
 
-  if [ $ZKUP_SKIP_ZK_SUPERVISOR -eq 0 ]; then
+  if [[ "${BINS[@]}" =~ "zk_supervisor" ]]; then
     say "Creating alias 'zks' for zk_supervisor"
     ensure ln -sf "$ZKT_BIN_DIR/zk_supervisor" "$ZKT_BIN_DIR/zks"
   fi

From 70266373f67986406e23f0015469e7a85697a900 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mat=C3=ADas=20Ignacio=20Gonz=C3=A1lez?=
Date: Tue, 24 Sep 2024 06:17:54 -0300
Subject: [PATCH 02/16] feat(zk_toolbox): Add zk toolbox unit tests to zks test (#2935)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

Add zk toolbox unit tests to zks test

---
 zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs
index fdee03fe63e..7011e0f0f87 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs
@@ -75,6 +75,12 @@ pub async fn run(shell: &Shell, args: RustArgs) -> anyhow::Result<()> {
         .env("TEST_PROVER_DATABASE_URL", test_prover_url);
     cmd.run()?;
 
+    // Run unit tests for zk_toolbox
+    let _dir_guard = shell.push_dir(link_to_code.join("zk_toolbox"));
+    Cmd::new(cmd!(shell, "cargo nextest run --release"))
+        .with_force_run()
+        .run()?;
+
     logger::outro(MSG_UNIT_TESTS_RUN_SUCCESS);
     Ok(())
 }

From 1095ae7bf9df8fc3f191c1d35dd584bebbc5e59f Mon Sep 17 00:00:00 2001
From: EmilLuta
Date: Tue, 24 Sep 2024 13:13:11 +0200
Subject: [PATCH 03/16] feat(circuit-prover): Add circuit_prover Dockerfile and build rules (#2939)

This will enable us to build images for circuit proving (new version).
Not tested.
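For local experimentation, a build along these lines should work (a minimal sketch: the image tag is illustrative, and `CUDA_ARCH=89` is simply the Dockerfile's default):

```bash
# Hypothetical local build from the repo root; adjust CUDA_ARCH to the target GPU.
docker build -f docker/circuit-prover-gpu/Dockerfile \
  --build-arg CUDA_ARCH=89 \
  -t circuit-prover-gpu:local .
```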
--------- Co-authored-by: Yury Akudovich --- .github/workflows/build-docker-from-tag.yml | 4 +- ...ri-gpu-gar-and-circuit-prover-gpu-gar.yml} | 46 +++++++++++---- .github/workflows/build-prover-template.yml | 1 + .github/workflows/release-test-stage.yml | 4 +- docker/circuit-prover-gpu-gar/Dockerfile | 15 +++++ docker/circuit-prover-gpu/Dockerfile | 57 +++++++++++++++++++ infrastructure/zk/src/docker.ts | 4 +- 7 files changed, 114 insertions(+), 17 deletions(-) rename .github/workflows/{build-prover-fri-gpu-gar.yml => build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml} (65%) create mode 100644 docker/circuit-prover-gpu-gar/Dockerfile create mode 100644 docker/circuit-prover-gpu/Dockerfile diff --git a/.github/workflows/build-docker-from-tag.yml b/.github/workflows/build-docker-from-tag.yml index 72d428b2def..0141bd82565 100644 --- a/.github/workflows/build-docker-from-tag.yml +++ b/.github/workflows/build-docker-from-tag.yml @@ -108,10 +108,10 @@ jobs: DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} - build-gar-prover-fri-gpu: + build-gar-prover-fri-gpu-and-circuit-prover-gpu-gar: name: Build GAR prover FRI GPU needs: [ setup, build-push-prover-images ] - uses: ./.github/workflows/build-prover-fri-gpu-gar.yml + uses: ./.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml if: contains(github.ref_name, 'prover') with: setup_keys_id: ${{ needs.setup.outputs.prover_fri_gpu_key_id }} diff --git a/.github/workflows/build-prover-fri-gpu-gar.yml b/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml similarity index 65% rename from .github/workflows/build-prover-fri-gpu-gar.yml rename to .github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml index c0ea060b07e..031677cf9b6 100644 --- a/.github/workflows/build-prover-fri-gpu-gar.yml +++ b/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml @@ -1,4 +1,4 @@ -name: Build Prover FRI GPU with builtin setup data +name: Build Prover FRI GPU & Circuit Prover GPU with builtin setup data on: workflow_call: @@ -17,7 +17,7 @@ on: type: string jobs: - build-gar-prover-fri-gpu: + build: name: Build prover FRI GPU GAR runs-on: [matterlabs-ci-runner-high-performance] steps: @@ -39,7 +39,15 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 - - name: Build and push + - name: Login to Asia GAR + run: | + gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://asia-docker.pkg.dev + + - name: Login to Europe GAR + run: | + gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://europe-docker.pkg.dev + + - name: Build and push prover-gpu-fri-gar uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 with: context: docker/prover-gpu-fri-gar @@ -49,22 +57,36 @@ jobs: tags: | us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} - - name: Login to Asia GAR + - name: Build and push prover-gpu-fri-gar to Asia GAR run: | - gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken 
--password-stdin https://asia-docker.pkg.dev + docker buildx imagetools create \ + --tag asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} - - name: Build and push to Asia GAR + - name: Build and push prover-gpu-fri-gar to Europe GAR run: | docker buildx imagetools create \ - --tag asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ + --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} - - name: Login to Europe GAR + - name: Build and push circuit-prover-gpu-gar + uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 + with: + context: docker/circuit-prover-gpu-gar + build-args: | + PROVER_IMAGE=${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} + push: true + tags: | + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/circuit-prover-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} + + - name: Build and push circuit-prover-gpu-gar to Asia GAR run: | - gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://europe-docker.pkg.dev + docker buildx imagetools create \ + --tag asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/circuit-prover-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/circuit-prover-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} - - name: Build and push to Europe GAR + - name: Build and push circuit-prover-gpu-gar to Europe GAR run: | docker buildx imagetools create \ - --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} + --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/circuit-prover-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/circuit-prover-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} diff --git a/.github/workflows/build-prover-template.yml b/.github/workflows/build-prover-template.yml index 4f3cad7f1d0..d6ec61114c7 100644 --- a/.github/workflows/build-prover-template.yml +++ b/.github/workflows/build-prover-template.yml @@ -52,6 +52,7 @@ jobs: - witness-generator - prover-gpu-fri - witness-vector-generator + - circuit-prover-gpu - prover-fri-gateway - prover-job-monitor - proof-fri-gpu-compressor diff --git a/.github/workflows/release-test-stage.yml b/.github/workflows/release-test-stage.yml index 81a5a06f0ea..988426d0cb6 100644 --- a/.github/workflows/release-test-stage.yml +++ b/.github/workflows/release-test-stage.yml @@ -120,10 +120,10 @@ jobs: DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} - 
build-gar-prover-fri-gpu: + build-gar-prover-fri-gpu-and-circuit-prover-gpu-gar: name: Build GAR prover FRI GPU needs: [ setup, build-push-prover-images ] - uses: ./.github/workflows/build-prover-fri-gpu-gar.yml + uses: ./.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true' with: setup_keys_id: ${{ needs.setup.outputs.prover_fri_gpu_key_id }} diff --git a/docker/circuit-prover-gpu-gar/Dockerfile b/docker/circuit-prover-gpu-gar/Dockerfile new file mode 100644 index 00000000000..3dfc6bdf9ad --- /dev/null +++ b/docker/circuit-prover-gpu-gar/Dockerfile @@ -0,0 +1,15 @@ +ARG PROVER_IMAGE +FROM us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/circuit-prover-gpu:2.0-$PROVER_IMAGE as prover + +FROM nvidia/cuda:12.2.0-runtime-ubuntu22.04 as app + +# HACK copying to root is the only way to make Docker layer caching work for these files for some reason +COPY *.bin / + +RUN apt-get update && apt-get install -y libpq5 ca-certificates openssl && rm -rf /var/lib/apt/lists/* + +# copy finalization hints required for assembly generation +COPY --from=prover prover/data/keys/ /prover/data/keys/ +COPY --from=prover /usr/bin/zksync_circuit_prover /usr/bin/ + +ENTRYPOINT ["zksync_circuit_prover"] diff --git a/docker/circuit-prover-gpu/Dockerfile b/docker/circuit-prover-gpu/Dockerfile new file mode 100644 index 00000000000..8e193e20589 --- /dev/null +++ b/docker/circuit-prover-gpu/Dockerfile @@ -0,0 +1,57 @@ +FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 AS builder + +ARG DEBIAN_FRONTEND=noninteractive + +ARG CUDA_ARCH=89 +ENV CUDAARCHS=${CUDA_ARCH} + +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} + +RUN apt-get update && apt-get install -y curl clang openssl libssl-dev gcc g++ \ + pkg-config build-essential libclang-dev && \ + rm -rf /var/lib/apt/lists/* + +ENV RUSTUP_HOME=/usr/local/rustup \ + CARGO_HOME=/usr/local/cargo \ + PATH=/usr/local/cargo/bin:$PATH + +RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ + rustup install nightly-2024-08-01 && \ + rustup default nightly-2024-08-01 + +RUN curl -Lo cmake-3.24.2-linux-x86_64.sh https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.sh && \ + chmod +x cmake-3.24.2-linux-x86_64.sh && \ + ./cmake-3.24.2-linux-x86_64.sh --skip-license --prefix=/usr/local + +# install sccache +RUN curl -Lo sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz https://github.com/mozilla/sccache/releases/download/v0.8.1/sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz && \ + tar -xzf sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz && \ + cp sccache-v0.8.1-x86_64-unknown-linux-musl/sccache /usr/local/sbin/ && \ + rm -rf sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz && \ + rm -rf sccache-v0.8.1-x86_64-unknown-linux-musl && \ + chmod +x /usr/local/sbin/sccache + +WORKDIR /usr/src/zksync +COPY . . 
+ +RUN cd prover && cargo build --release --bin zksync_circuit_prover + +FROM nvidia/cuda:12.2.0-runtime-ubuntu22.04 + +RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* + +# copy finalization hints required for assembly generation +COPY prover/data/keys/ /prover/data/keys/ + +COPY --from=builder /usr/src/zksync/prover/target/release/zksync_circuit_prover /usr/bin/ + +ENTRYPOINT ["zksync_circuit_prover"] diff --git a/infrastructure/zk/src/docker.ts b/infrastructure/zk/src/docker.ts index 035061a8ed0..063777a671b 100644 --- a/infrastructure/zk/src/docker.ts +++ b/infrastructure/zk/src/docker.ts @@ -11,6 +11,7 @@ const IMAGES = [ 'witness-generator', 'prover-gpu-fri', 'witness-vector-generator', + 'circuit-prover-gpu', 'prover-fri-gateway', 'prover-job-monitor', 'proof-fri-gpu-compressor', @@ -87,7 +88,8 @@ function defaultTagList(image: string, imageTagSha: string, imageTagShaTS: strin 'prover-fri-gateway', 'prover-gpu-fri', 'witness-generator', - 'witness-vector-generator' + 'witness-vector-generator', + 'circuit-prover-gpu' ].includes(image) ) { tagList.push(`2.0-${protocolVersionTag}-${imageTagShaTS}`, `${protocolVersionTag}-${imageTagShaTS}`); From 72e22184788d080ea53441007f176be4475982aa Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Tue, 24 Sep 2024 15:12:13 +0300 Subject: [PATCH 04/16] feat(contract-verifier): add new compilers (#2947) --- docker/contract-verifier/Dockerfile | 2 +- docker/contract-verifier/install-all-solc.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index 7fcc695bf70..7943dae835a 100644 --- a/docker/contract-verifier/Dockerfile +++ b/docker/contract-verifier/Dockerfile @@ -47,7 +47,7 @@ RUN mkdir -p /etc/zksolc-bin/vm-1.5.0-a167aa3 && \ chmod +x /etc/zksolc-bin/vm-1.5.0-a167aa3/zksolc # install zksolc 1.5.x -RUN for VERSION in $(seq -f "v1.5.%g" 0 3); do \ +RUN for VERSION in $(seq -f "v1.5.%g" 0 4); do \ mkdir -p /etc/zksolc-bin/$VERSION && \ wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-$VERSION -O /etc/zksolc-bin/$VERSION/zksolc && \ chmod +x /etc/zksolc-bin/$VERSION/zksolc; \ diff --git a/docker/contract-verifier/install-all-solc.sh b/docker/contract-verifier/install-all-solc.sh index bc7cec143cc..4fe992f8357 100755 --- a/docker/contract-verifier/install-all-solc.sh +++ b/docker/contract-verifier/install-all-solc.sh @@ -26,7 +26,7 @@ done # Download zkVM solc list=( "0.8.25-1.0.0" "0.8.24-1.0.0" "0.8.23-1.0.0" "0.8.22-1.0.0" "0.8.21-1.0.0" "0.8.20-1.0.0" "0.8.19-1.0.0" "0.8.18-1.0.0" "0.8.17-1.0.0" "0.8.16-1.0.0" "0.8.15-1.0.0" "0.8.14-1.0.0" "0.8.13-1.0.0" "0.8.12-1.0.0" "0.8.11-1.0.0" "0.8.10-1.0.0" "0.8.9-1.0.0" "0.8.8-1.0.0" "0.8.7-1.0.0" "0.8.6-1.0.0" "0.8.5-1.0.0" "0.8.4-1.0.0" "0.8.3-1.0.0" "0.8.2-1.0.0" "0.8.1-1.0.0" "0.8.0-1.0.0" "0.7.6-1.0.0" "0.7.5-1.0.0" "0.7.4-1.0.0" "0.7.3-1.0.0" "0.7.2-1.0.0" "0.7.1-1.0.0" "0.7.0-1.0.0" "0.6.12-1.0.0" "0.6.11-1.0.0" "0.6.10-1.0.0" "0.6.9-1.0.0" "0.6.8-1.0.0" "0.6.7-1.0.0" "0.6.6-1.0.0" "0.6.5-1.0.0" "0.6.4-1.0.0" "0.6.3-1.0.0" "0.6.2-1.0.0" "0.6.1-1.0.0" "0.6.0-1.0.0" "0.5.17-1.0.0" "0.5.16-1.0.0" "0.5.15-1.0.0" "0.5.14-1.0.0" "0.5.13-1.0.0" "0.5.12-1.0.0" "0.5.11-1.0.0" "0.5.10-1.0.0" "0.5.9-1.0.0" "0.5.8-1.0.0" "0.5.7-1.0.0" "0.5.6-1.0.0" "0.5.5-1.0.0" "0.5.4-1.0.0" "0.5.3-1.0.0" "0.5.2-1.0.0" "0.5.1-1.0.0" "0.5.0-1.0.0" "0.4.26-1.0.0" "0.4.25-1.0.0" "0.4.24-1.0.0" 
"0.4.23-1.0.0" "0.4.22-1.0.0" "0.4.21-1.0.0" "0.4.20-1.0.0" "0.4.19-1.0.0" "0.4.18-1.0.0" "0.4.17-1.0.0" "0.4.16-1.0.0" "0.4.15-1.0.0" "0.4.14-1.0.0" "0.4.13-1.0.0" "0.4.12-1.0.0" - "0.8.26-1.0.1" "0.8.25-1.0.1" "0.8.24-1.0.1" "0.8.23-1.0.1" "0.8.22-1.0.1" "0.8.21-1.0.1" "0.8.20-1.0.1" "0.8.19-1.0.1" "0.8.18-1.0.1" "0.8.17-1.0.1" "0.8.16-1.0.1" "0.8.15-1.0.1" "0.8.14-1.0.1" "0.8.13-1.0.1" "0.8.12-1.0.1" "0.8.11-1.0.1" "0.8.10-1.0.1" "0.8.9-1.0.1" "0.8.8-1.0.1" "0.8.7-1.0.1" "0.8.6-1.0.1" "0.8.5-1.0.1" "0.8.4-1.0.1" "0.8.3-1.0.1" "0.8.2-1.0.1" "0.8.1-1.0.1" "0.8.0-1.0.1" "0.7.6-1.0.1" "0.7.5-1.0.1" "0.7.4-1.0.1" "0.7.3-1.0.1" "0.7.2-1.0.1" "0.7.1-1.0.1" "0.7.0-1.0.1" "0.6.12-1.0.1" "0.6.11-1.0.1" "0.6.10-1.0.1" "0.6.9-1.0.1" "0.6.8-1.0.1" "0.6.7-1.0.1" "0.6.6-1.0.1" "0.6.5-1.0.1" "0.6.4-1.0.1" "0.6.3-1.0.1" "0.6.2-1.0.1" "0.6.1-1.0.1" "0.6.0-1.0.1" "0.5.17-1.0.1" "0.5.16-1.0.1" "0.5.15-1.0.1" "0.5.14-1.0.1" "0.5.13-1.0.1" "0.5.12-1.0.1" "0.5.11-1.0.1" "0.5.10-1.0.1" "0.5.9-1.0.1" "0.5.8-1.0.1" "0.5.7-1.0.1" "0.5.6-1.0.1" "0.5.5-1.0.1" "0.5.4-1.0.1" "0.5.3-1.0.1" "0.5.2-1.0.1" "0.5.1-1.0.1" "0.5.0-1.0.1" "0.4.26-1.0.1" "0.4.25-1.0.1" "0.4.24-1.0.1" "0.4.23-1.0.1" "0.4.22-1.0.1" "0.4.21-1.0.1" "0.4.20-1.0.1" "0.4.19-1.0.1" "0.4.18-1.0.1" "0.4.17-1.0.1" "0.4.16-1.0.1" "0.4.15-1.0.1" "0.4.14-1.0.1" "0.4.13-1.0.1" "0.4.12-1.0.1" + "0.8.27-1.0.1" "0.8.26-1.0.1" "0.8.25-1.0.1" "0.8.24-1.0.1" "0.8.23-1.0.1" "0.8.22-1.0.1" "0.8.21-1.0.1" "0.8.20-1.0.1" "0.8.19-1.0.1" "0.8.18-1.0.1" "0.8.17-1.0.1" "0.8.16-1.0.1" "0.8.15-1.0.1" "0.8.14-1.0.1" "0.8.13-1.0.1" "0.8.12-1.0.1" "0.8.11-1.0.1" "0.8.10-1.0.1" "0.8.9-1.0.1" "0.8.8-1.0.1" "0.8.7-1.0.1" "0.8.6-1.0.1" "0.8.5-1.0.1" "0.8.4-1.0.1" "0.8.3-1.0.1" "0.8.2-1.0.1" "0.8.1-1.0.1" "0.8.0-1.0.1" "0.7.6-1.0.1" "0.7.5-1.0.1" "0.7.4-1.0.1" "0.7.3-1.0.1" "0.7.2-1.0.1" "0.7.1-1.0.1" "0.7.0-1.0.1" "0.6.12-1.0.1" "0.6.11-1.0.1" "0.6.10-1.0.1" "0.6.9-1.0.1" "0.6.8-1.0.1" "0.6.7-1.0.1" "0.6.6-1.0.1" "0.6.5-1.0.1" "0.6.4-1.0.1" "0.6.3-1.0.1" "0.6.2-1.0.1" "0.6.1-1.0.1" "0.6.0-1.0.1" "0.5.17-1.0.1" "0.5.16-1.0.1" "0.5.15-1.0.1" "0.5.14-1.0.1" "0.5.13-1.0.1" "0.5.12-1.0.1" "0.5.11-1.0.1" "0.5.10-1.0.1" "0.5.9-1.0.1" "0.5.8-1.0.1" "0.5.7-1.0.1" "0.5.6-1.0.1" "0.5.5-1.0.1" "0.5.4-1.0.1" "0.5.3-1.0.1" "0.5.2-1.0.1" "0.5.1-1.0.1" "0.5.0-1.0.1" "0.4.26-1.0.1" "0.4.25-1.0.1" "0.4.24-1.0.1" "0.4.23-1.0.1" "0.4.22-1.0.1" "0.4.21-1.0.1" "0.4.20-1.0.1" "0.4.19-1.0.1" "0.4.18-1.0.1" "0.4.17-1.0.1" "0.4.16-1.0.1" "0.4.15-1.0.1" "0.4.14-1.0.1" "0.4.13-1.0.1" "0.4.12-1.0.1" ) for version in ${list[@]}; do From 934048091023f7f2b1dbbdedb0c0b793e6fe32cf Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Tue, 24 Sep 2024 18:07:00 +0200 Subject: [PATCH 05/16] ci: Copy setup_keys to docker/circuit-prover-gpu-gar (#2951) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Copy setup keys to be available for docker/circuit-prover-gpu-gar. ## Why ❔ To build circuit-prover-gpu image with keys. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
---
 .../build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml b/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml
index 031677cf9b6..b92fb8e8111 100644
--- a/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml
+++ b/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml
@@ -28,6 +28,7 @@ jobs:
       - name: Download Setup data
         run: |
           gsutil -m rsync -r gs://matterlabs-setup-data-us/${{ inputs.setup_keys_id }} docker/prover-gpu-fri-gar
+          cp -v docker/prover-gpu-fri-gar/*.bin docker/circuit-prover-gpu-gar/
 
       - name: Login to us-central1 GAR
         run: |

From 6313c7de05e09a9f997356613a72d6b52c24154a Mon Sep 17 00:00:00 2001
From: mtzsky
Date: Wed, 25 Sep 2024 08:39:13 +0200
Subject: [PATCH 06/16] docs: fix invalid links to "namespaces/eth.rs" and "execution_sandbox.rs" (#2945)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

This PR fixes two invalid links in the docs, which were broken by a refactor (refactor: Split the rest of the zksync_core (https://github.com/matter-labs/zksync-era/pull/1940))

## Why ❔

Unclarity in the documentation

## Checklist

- [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [x] Documentation comments have been added / updated.
- [ ] Code has been formatted via `zk fmt` and `zk lint`.

---
 docs/guides/advanced/05_how_call_works.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/guides/advanced/05_how_call_works.md b/docs/guides/advanced/05_how_call_works.md
index 7f283cf8e0c..5b9458ddce8 100644
--- a/docs/guides/advanced/05_how_call_works.md
+++ b/docs/guides/advanced/05_how_call_works.md
@@ -110,10 +110,10 @@ In this article, we covered the 'life of a call' from the RPC to the inner worki
   https://github.com/matter-labs/zksync-era/blob/edd48fc37bdd58f9f9d85e27d684c01ef2cac8ae/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/eth.rs
   'namespaces RPC api'
 [namespaces_rpc_impl]:
-  https://github.com/matter-labs/zksync-era/blob/main/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs#L94
+  https://github.com/matter-labs/zksync-era/blob/main/core/node/api_server/src/web3/namespaces/eth.rs
   'namespaces RPC implementation'
 [execution_sandbox]:
-  https://github.com/matter-labs/zksync-era/blob/main/core/lib/zksync_core/src/api_server/execution_sandbox/execute.rs
+  https://github.com/matter-labs/zksync-era/blob/main/core/node/api_server/src/execution_sandbox/execute.rs
   'execution sandbox'
 [vm_code]:
   https://github.com/matter-labs/zksync-era/blob/ccd13ce88ff52c3135d794c6f92bec3b16f2210f/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs#L108

From 2edd8a2c1f4ea30b39e608fdb399bf1566c7a637 Mon Sep 17 00:00:00 2001
From: EmilLuta
Date: Wed, 25 Sep 2024 11:56:11 +0200
Subject: [PATCH 07/16] chore(prover): Fixes broken non-GPU setup (#2953)

Previously, this was done via a compile flag. This was deemed poor taste, and we've moved to a dedicated env var - `ZKSYNC_USE_CUDA_STUBS`. This is wired deep within the crypto dependencies. This PR updates the dependencies, adds documentation on how to work with the flag, and updates all workflows (fixing the ones broken since the RUSTFLAGS days).
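As a sketch of the intended workflow on a machine without CUDA (mirroring the docs change below):

```bash
# Assumed usage: the env var replaces the old RUSTFLAGS='--cfg=no_cuda' approach.
export ZKSYNC_USE_CUDA_STUBS=true
cd prover && cargo build --release
```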
--------- Co-authored-by: Igor Aleksanov --- .github/workflows/build-docker-from-tag.yml | 2 +- .../build-witness-generator-template.yml | 1 + .github/workflows/ci-common-reusable.yml | 2 +- .github/workflows/ci-core-lint-reusable.yml | 2 +- .github/workflows/ci-prover-reusable.yml | 2 +- .github/workflows/ci.yml | 2 +- .github/workflows/release-test-stage.yml | 2 +- docker/witness-generator/Dockerfile | 3 +- docker/witness-vector-generator/Dockerfile | 3 +- docs/guides/external-node/00_quick_start.md | 3 +- docs/guides/setup-dev.md | 23 ++++++++------- prover/Cargo.lock | 28 +++++++++---------- prover/Cargo.toml | 4 +-- 13 files changed, 40 insertions(+), 37 deletions(-) diff --git a/.github/workflows/build-docker-from-tag.yml b/.github/workflows/build-docker-from-tag.yml index 0141bd82565..206e15bd195 100644 --- a/.github/workflows/build-docker-from-tag.yml +++ b/.github/workflows/build-docker-from-tag.yml @@ -103,7 +103,7 @@ jobs: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }}-avx512 ERA_BELLMAN_CUDA_RELEASE: ${{ vars.ERA_BELLMAN_CUDA_RELEASE }} CUDA_ARCH: "60;70;75;80;89" - WITNESS_GENERATOR_RUST_FLAGS: "-Ctarget_feature=+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl --cfg=no_cuda" + WITNESS_GENERATOR_RUST_FLAGS: "-Ctarget_feature=+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl" secrets: DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/build-witness-generator-template.yml b/.github/workflows/build-witness-generator-template.yml index 9c29297460d..33d78b3cf2f 100644 --- a/.github/workflows/build-witness-generator-template.yml +++ b/.github/workflows/build-witness-generator-template.yml @@ -51,6 +51,7 @@ jobs: ERA_BELLMAN_CUDA_RELEASE: ${{ inputs.ERA_BELLMAN_CUDA_RELEASE }} CUDA_ARCH: ${{ inputs.CUDA_ARCH }} WITNESS_GENERATOR_RUST_FLAGS: ${{ inputs.WITNESS_GENERATOR_RUST_FLAGS }} + ZKSYNC_USE_CUDA_STUBS: true runs-on: [ matterlabs-ci-runner-c3d ] strategy: matrix: diff --git a/.github/workflows/ci-common-reusable.yml b/.github/workflows/ci-common-reusable.yml index 3d28df592e9..2f51229aeaf 100644 --- a/.github/workflows/ci-common-reusable.yml +++ b/.github/workflows/ci-common-reusable.yml @@ -22,7 +22,7 @@ jobs: echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env echo "RUSTC_WRAPPER=sccache" >> .env - echo "RUSTFLAGS=--cfg=no_cuda" >> .env + echo "ZKSYNC_USE_CUDA_STUBS=true" >> .env - name: Start services run: | diff --git a/.github/workflows/ci-core-lint-reusable.yml b/.github/workflows/ci-core-lint-reusable.yml index 85e4be3ff5e..6d0785fe46f 100644 --- a/.github/workflows/ci-core-lint-reusable.yml +++ b/.github/workflows/ci-core-lint-reusable.yml @@ -19,7 +19,7 @@ jobs: echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env echo "RUSTC_WRAPPER=sccache" >> .env - echo "RUSTFLAGS=--cfg=no_cuda" >> .env + echo "ZKSYNC_USE_CUDA_STUBS=true" >> .env echo "prover_url=postgres://postgres:notsecurepassword@localhost:5432/zksync_local_prover" >> $GITHUB_ENV echo "core_url=postgres://postgres:notsecurepassword@localhost:5432/zksync_local" >> $GITHUB_ENV diff --git a/.github/workflows/ci-prover-reusable.yml b/.github/workflows/ci-prover-reusable.yml index 6fa987b1cec..3f842b23488 100644 --- a/.github/workflows/ci-prover-reusable.yml +++ b/.github/workflows/ci-prover-reusable.yml @@ -57,7 +57,7 @@ jobs: echo 
"SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env echo "RUSTC_WRAPPER=sccache" >> .env - echo "RUSTFLAGS=--cfg=no_cuda" >> .env + echo "ZKSYNC_USE_CUDA_STUBS=true" >> .env - name: Start services run: | diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d8c22895601..0a27a719aeb 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -166,7 +166,7 @@ jobs: with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }}-avx512 action: "build" - WITNESS_GENERATOR_RUST_FLAGS: "-Ctarget_feature=+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl --cfg=no_cuda" + WITNESS_GENERATOR_RUST_FLAGS: "-Ctarget_feature=+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl" secrets: DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/release-test-stage.yml b/.github/workflows/release-test-stage.yml index 988426d0cb6..11a844fdd2b 100644 --- a/.github/workflows/release-test-stage.yml +++ b/.github/workflows/release-test-stage.yml @@ -115,7 +115,7 @@ jobs: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }}-avx512 ERA_BELLMAN_CUDA_RELEASE: ${{ vars.ERA_BELLMAN_CUDA_RELEASE }} CUDA_ARCH: "60;70;75;80;89" - WITNESS_GENERATOR_RUST_FLAGS: "-Ctarget_feature=+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl --cfg=no_cuda" + WITNESS_GENERATOR_RUST_FLAGS: "-Ctarget_feature=+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl " secrets: DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/docker/witness-generator/Dockerfile b/docker/witness-generator/Dockerfile index 06d836c9fa5..29e22698771 100644 --- a/docker/witness-generator/Dockerfile +++ b/docker/witness-generator/Dockerfile @@ -1,8 +1,9 @@ FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder ARG DEBIAN_FRONTEND=noninteractive -ARG RUST_FLAGS="--cfg=no_cuda" +ARG RUST_FLAGS="" ENV RUSTFLAGS=${RUST_FLAGS} +ENV ZKSYNC_USE_CUDA_STUBS=true # set of args for use of sccache ARG SCCACHE_GCS_BUCKET="" diff --git a/docker/witness-vector-generator/Dockerfile b/docker/witness-vector-generator/Dockerfile index eb46b459c69..b305f89b001 100644 --- a/docker/witness-vector-generator/Dockerfile +++ b/docker/witness-vector-generator/Dockerfile @@ -1,8 +1,7 @@ FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder ARG DEBIAN_FRONTEND=noninteractive -ARG RUST_FLAGS="--cfg=no_cuda" -ENV RUSTFLAGS=${RUST_FLAGS} +ENV ZKSYNC_USE_CUDA_STUBS=true # set of args for use of sccache ARG SCCACHE_GCS_BUCKET="" diff --git a/docs/guides/external-node/00_quick_start.md b/docs/guides/external-node/00_quick_start.md index 75d8ba89151..287a4d2d47c 100644 --- a/docs/guides/external-node/00_quick_start.md +++ b/docs/guides/external-node/00_quick_start.md @@ -34,8 +34,7 @@ cd docker-compose-examples docker compose --file testnet-external-node-docker-compose.yml down --volumes ``` -You can see the status of the node (after recovery) in -[local grafana dashboard](http://localhost:3000/dashboards). +You can see the status of the node (after recovery) in [local grafana dashboard](http://localhost:3000/dashboards). Those commands start ZKsync node locally inside docker. 
diff --git a/docs/guides/setup-dev.md b/docs/guides/setup-dev.md
index 7781e65e5bf..4eef211cd3d 100644
--- a/docs/guides/setup-dev.md
+++ b/docs/guides/setup-dev.md
@@ -49,9 +49,9 @@ cargo install sqlx-cli --version 0.8.1
 curl -L https://foundry.paradigm.xyz | bash
 foundryup --branch master
 
-# Non GPU setup, can be skipped if the machine has a GPU configured for provers
-echo "export RUSTFLAGS='--cfg=no_cuda'" >> ~/.bashrc
-
+# Non-CUDA (GPU) setup, can be skipped if the machine has CUDA installed for provers
+# Don't do that if you intend to run provers on your machine. Check the prover docs for a setup instead.
+echo "export ZKSYNC_USE_CUDA_STUBS=true" >> ~/.bashrc
 # You will need to reload your `*rc` file here
 
 # Clone the repo to the desired location
@@ -243,20 +243,23 @@ commands related to deployment, you can pass flags for Foundry integration.
 
 ## Non-GPU setup
 
-Circuit Prover requires a GPU (& CUDA bindings) to run. If you still want to be able to build everything locally on
-non-GPU setup, you'll need to change your rustflags.
+Circuit Prover requires CUDA bindings to run. If you still want to be able to build everything locally on a non-CUDA
+setup, you'll need to use CUDA stubs.
 
 For a single run, it's enough to export it on the shell:
 
 ```
-export RUSTFLAGS='--cfg=no_cuda'
+export ZKSYNC_USE_CUDA_STUBS=true
 ```
 
-For persistent runs, you can either echo it in your ~/.rc file (discouraged), or configure it for your taste in
-`config.toml`.
+For persistent runs, you can echo it in your ~/.rc file:
+
+```
+echo "export ZKSYNC_USE_CUDA_STUBS=true" >> ~/.rc
+```
 
-For project level configuration, edit `/path/to/zksync/.cargo/config.toml`. For global cargo setup,
-`~/.cargo/config.toml`. Add the following:
+Note that the same can be achieved with RUSTFLAGS (discouraged). The flag is `--cfg=no_cuda`. You can either set
+RUSTFLAGS as an env var, or pass it in `config.toml` (either project level or global).
The config would need the following: ```toml [build] diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 88c0d1114fc..1abec8d0c1a 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -650,9 +650,9 @@ dependencies = [ [[package]] name = "boojum-cuda" -version = "0.150.7" +version = "0.150.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac7735446f2263e8d12435fc4d5a02c7727838eaffc7c518a961b3e839fb59e7" +checksum = "04f9a6d958dd58a0899737e5a1fc6597aefcf7980bf8be5be5329e701cbd45ca" dependencies = [ "boojum", "cmake", @@ -1690,9 +1690,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "era_cudart" -version = "0.150.7" +version = "0.150.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f76aa50bd291b43ad56fb7da3e63c4c3cecb3c7e19db76c8097856371bc0d84a" +checksum = "51f0d6e329b2c11d134c3140951209be968ef316ed64ddde75640eaed7f10264" dependencies = [ "bitflags 2.6.0", "era_cudart_sys", @@ -1701,9 +1701,9 @@ dependencies = [ [[package]] name = "era_cudart_sys" -version = "0.150.7" +version = "0.150.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7d2db304df6b72141d45b140ec6df68ecd2300a7ab27de18b3e0e3af38c9776" +checksum = "060e8186234c7a281021fb95614e06e94e1fc7ab78938360a5c27af0f8fc6105" dependencies = [ "serde_json", ] @@ -5313,9 +5313,9 @@ checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" [[package]] name = "shivini" -version = "0.150.7" +version = "0.150.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f11e6942c89861aecb72261f8220800a1b69b8a5463c07c24df75b81fd809b0" +checksum = "ebb6d928451f0779f14da02ee9d51d4bde560328edc6471f0d5c5c11954345c4" dependencies = [ "bincode", "blake2 0.10.6", @@ -7279,9 +7279,9 @@ dependencies = [ [[package]] name = "zksync-gpu-ffi" -version = "0.150.7" +version = "0.150.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aecd7f624185b785e9d8457986ac34685d478e2baa78417d51b102b7d0fa27fd" +checksum = "86511b3957adfe415ecdbd1ee01c51aa3ca131a607e61ca024976312f613b0f9" dependencies = [ "bindgen 0.59.2", "cmake", @@ -7295,9 +7295,9 @@ dependencies = [ [[package]] name = "zksync-gpu-prover" -version = "0.150.7" +version = "0.150.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a089b11fcdbd37065acaf427545cb50b87e6712951a10f3761b3d370e4b8f9bc" +checksum = "9e4c00f2db603d1b696bc2e9d822bb4c087050de5b65559067fc2232786cbc93" dependencies = [ "bit-vec", "cfg-if", @@ -7312,9 +7312,9 @@ dependencies = [ [[package]] name = "zksync-wrapper-prover" -version = "0.150.7" +version = "0.150.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc764c21d4ae15c5bc2c07c14c814c5e3ba8d194ddcca543b8cec95456031832" +checksum = "d58df1ec10e0d5eb58563bb01abda5ed185c9b9621502e361848ca40eb7868ac" dependencies = [ "circuit_definitions", "zkevm_test_harness", diff --git a/prover/Cargo.toml b/prover/Cargo.toml index b21ad800afa..e95bae3d4c1 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -63,8 +63,8 @@ circuit_sequencer_api = "=0.150.5" zkevm_test_harness = "=0.150.5" # GPU proving dependencies -wrapper_prover = { package = "zksync-wrapper-prover", version = "=0.150.7" } -shivini = "=0.150.7" +wrapper_prover = { package = "zksync-wrapper-prover", version = "=0.150.9" } +shivini = "=0.150.9" # Core workspace dependencies zksync_multivm = { path = "../core/lib/multivm", version = "0.1.0" } From 
1fdbb304a8cdc739bcdc0087dfd4e34560d5dff2 Mon Sep 17 00:00:00 2001
From: EmilLuta
Date: Wed, 25 Sep 2024 13:04:40 +0200
Subject: [PATCH 08/16] chore(prover-gateway): Speed up polling (#2957)

16 minutes is just ridiculous. This should've been done a long time ago, but has been avoided due to other priorities and always put on the back-burner/forgotten. Today we change it.

---
 etc/env/base/fri_prover_gateway.toml | 2 +-
 etc/env/file_based/general.yaml      | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/etc/env/base/fri_prover_gateway.toml b/etc/env/base/fri_prover_gateway.toml
index 8974298a57c..f77b5d59258 100644
--- a/etc/env/base/fri_prover_gateway.toml
+++ b/etc/env/base/fri_prover_gateway.toml
@@ -1,6 +1,6 @@
 [fri_prover_gateway]
 api_url="http://127.0.0.1:3320"
-api_poll_duration_secs=1000
+api_poll_duration_secs=15
 prometheus_listener_port=3314
 prometheus_pushgateway_url="http://127.0.0.1:9091"
 prometheus_push_interval_ms=100
diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml
index 6a36f65c97c..cdf02175458 100644
--- a/etc/env/file_based/general.yaml
+++ b/etc/env/file_based/general.yaml
@@ -172,7 +172,7 @@ data_handler:
   tee_support: true
 prover_gateway:
   api_url: http://127.0.0.1:3320
-  api_poll_duration_secs: 1000
+  api_poll_duration_secs: 15
   prometheus_listener_port: 3310
   prometheus_pushgateway_url: http://127.0.0.1:9091
   prometheus_push_interval_ms: 100

From 218646aa1c56200f4ffee99b7f83366e2689354f Mon Sep 17 00:00:00 2001
From: Danil
Date: Wed, 25 Sep 2024 13:44:48 +0200
Subject: [PATCH 09/16] fix(api): Return correct flat call tracer (#2917)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

There was an inconsistency between call tracer results.

## Why ❔

## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [x] Code has been formatted via `zk fmt` and `zk lint`.
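For illustration, the flat tracer is selected via the tracer config; a hedged sketch of such a request follows (the endpoint, port, and block number are placeholders):

```bash
# Hypothetical JSON-RPC request; "flatCallTracer" and "onlyTopCall" follow the
# camelCase serialization of the SupportedTracers / CallTracerConfig types below.
curl -s -X POST -H 'Content-Type: application/json' \
  --data '{"jsonrpc":"2.0","id":1,"method":"debug_traceBlockByNumber","params":["latest",{"tracer":"flatCallTracer","tracerConfig":{"onlyTopCall":false}}]}' \
  http://localhost:3050
```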
--------- Signed-off-by: Danil --- ...46bd1b7b9269375f11f050099cb6d3c1427aa.json | 34 +++ ...c783b3710ee47f88edce1b17c2b8fa21dadd3.json | 22 -- ...355e6aec05ba4af297a03169e3122a67ae53e.json | 28 +++ ...0fb28594859564a0f888eae748ad1f9fcede5.json | 22 -- core/lib/dal/src/blocks_web3_dal.rs | 13 +- .../lib/dal/src/models/storage_transaction.rs | 42 ++-- core/lib/dal/src/transactions_dal.rs | 24 +- core/lib/types/src/api/mod.rs | 80 +++++- core/lib/types/src/debug_flat_call.rs | 235 +----------------- core/lib/web3_decl/src/namespaces/debug.rs | 18 +- .../backend_jsonrpsee/namespaces/debug.rs | 21 +- .../api_server/src/web3/namespaces/debug.rs | 172 ++++++++++--- core/node/api_server/src/web3/tests/debug.rs | 44 +++- core/node/api_server/src/web3/tests/vm.rs | 22 +- .../ts-integration/tests/api/debug.test.ts | 6 +- prover/crates/bin/prover_fri/src/utils.rs | 2 +- ...f1cdac8b194f09926c133985479c533a651f2.json | 18 ++ ...0d001cdf5bc7ba988b742571ec90a938434e3.json | 17 -- ...0cd5c6b1122c35d7ffdbbba30327ca3fb5a8.json} | 4 +- ...cf8c93630d529ec96e67aac078f18196f61a5.json | 19 ++ ...adefab0bf3abf6853a6d36123c8adcaf813b.json} | 4 +- ...d48c95ca5b4520dde415a2b5ff32ece47c86.json} | 4 +- ...8d4cc59246dda91b19526e73f27a17c8e3da.json} | 4 +- ...a468057599be1e6c6c96a947c33df53a68224.json | 15 -- ...2e61157bf58aec70903623afc9da24d46a336.json | 16 -- ...1b931c0d8dbc6835dfac20107ea7412ce9fbb.json | 15 -- ...9a67936d572f8046d3a1c7a4f100ff209d81d.json | 18 -- ...41976a264759c4060c1a38e466ee2052fc17d.json | 15 ++ ...2a4a98ec63eb942c73ce4448d0957346047cd.json | 17 ++ ...08a01b63ae4aa03c983c3a52c802d585e5a80.json | 15 ++ ...700302981be0afef31a8864613484f8521f9e.json | 19 -- ...b5a4672ad50a9de92c84d939ac4c69842e355.json | 16 ++ ...d005d8760c4809b7aef902155196873da66e.json} | 4 +- ...8e1010d7389457b3c97e9b238a3a0291a54e.json} | 4 +- .../crates/lib/prover_dal/src/cli_test_dal.rs | 99 ++++---- .../lib/prover_dal/src/fri_prover_dal.rs | 10 +- .../src/fri_witness_generator_dal.rs | 15 +- yarn.lock | 2 +- 38 files changed, 576 insertions(+), 559 deletions(-) create mode 100644 core/lib/dal/.sqlx/query-6171f2d631f69dba52cd913742a46bd1b7b9269375f11f050099cb6d3c1427aa.json delete mode 100644 core/lib/dal/.sqlx/query-b98e3790de305017c8fa5fba4c0c783b3710ee47f88edce1b17c2b8fa21dadd3.json create mode 100644 core/lib/dal/.sqlx/query-bdfd7e9d4462ac9cf6f91fced84355e6aec05ba4af297a03169e3122a67ae53e.json delete mode 100644 core/lib/dal/.sqlx/query-c37432fabd092fa235fc70e11430fb28594859564a0f888eae748ad1f9fcede5.json create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-1297f0977132185d6bd4501f490f1cdac8b194f09926c133985479c533a651f2.json delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-1926cf432237684de2383179a6d0d001cdf5bc7ba988b742571ec90a938434e3.json rename prover/crates/lib/prover_dal/.sqlx/{query-97adb49780c9edde6a3cfda09dadbd694e1781e013247d090a280a1f894de464.json => query-29f7a564a8373f7e44840e8e9e7d0cd5c6b1122c35d7ffdbbba30327ca3fb5a8.json} (53%) create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-2d1461e068e43fd16714610b383cf8c93630d529ec96e67aac078f18196f61a5.json rename prover/crates/lib/prover_dal/.sqlx/{query-75c1affbca0901edd5d0e2f12ef4d935674a5aff2f34421d753b4d1a9dea5b12.json => query-35a76415cb746d03da31481edc65adefab0bf3abf6853a6d36123c8adcaf813b.json} (70%) rename prover/crates/lib/prover_dal/.sqlx/{query-548414f8148740c991c345e5fd46ea738d209eb07e7a6bcbdb33e25b3347a08c.json => query-3727d5614d2fe2a4d96f880eb72cd48c95ca5b4520dde415a2b5ff32ece47c86.json} (70%) rename 
prover/crates/lib/prover_dal/.sqlx/{query-c19fc4c8e4b3a3ef4f9c0f4c22ed68c598eada8e60938a8e4b5cd32b53f5a574.json => query-37ad15f54f4a6f4f79c71a857f3a8d4cc59246dda91b19526e73f27a17c8e3da.json} (69%) delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-39f60c638d445c5dbf23e01fd89a468057599be1e6c6c96a947c33df53a68224.json delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-3a9ffd4d88f2cfac22835aac2512e61157bf58aec70903623afc9da24d46a336.json delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-3bb8fbd9e83703887e0a3c196031b931c0d8dbc6835dfac20107ea7412ce9fbb.json delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-434f7cb51a7d22948cd26e962679a67936d572f8046d3a1c7a4f100ff209d81d.json create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-73266a8526c6adc315900e2e95441976a264759c4060c1a38e466ee2052fc17d.json create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-9730c8225ff2cf3111185e81f602a4a98ec63eb942c73ce4448d0957346047cd.json create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-a817f0fec85388b3e2510ce259208a01b63ae4aa03c983c3a52c802d585e5a80.json delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-aabcfa9005b8e1d84cfa083a47a700302981be0afef31a8864613484f8521f9e.json create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-c8daa62b3835c15fafb3f83deabb5a4672ad50a9de92c84d939ac4c69842e355.json rename prover/crates/lib/prover_dal/.sqlx/{query-63cf7038e6c48af8ed9afc7d6ea07edd87cb16a79c13e7d4291d99736e51d3b9.json => query-e875dcbbdaed6998dbea45d4eab5d005d8760c4809b7aef902155196873da66e.json} (82%) rename prover/crates/lib/prover_dal/.sqlx/{query-0eac6f7b2d799059328584029b437891598dc79b5ed11258b2c90c3f282929ad.json => query-eec29cbff034818f4fb5ec1e6ad38e1010d7389457b3c97e9b238a3a0291a54e.json} (78%) diff --git a/core/lib/dal/.sqlx/query-6171f2d631f69dba52cd913742a46bd1b7b9269375f11f050099cb6d3c1427aa.json b/core/lib/dal/.sqlx/query-6171f2d631f69dba52cd913742a46bd1b7b9269375f11f050099cb6d3c1427aa.json new file mode 100644 index 00000000000..84ff845b0d0 --- /dev/null +++ b/core/lib/dal/.sqlx/query-6171f2d631f69dba52cd913742a46bd1b7b9269375f11f050099cb6d3c1427aa.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n transactions.hash AS tx_hash,\n transactions.index_in_block AS tx_index_in_block,\n call_trace\n FROM\n call_traces\n INNER JOIN transactions ON tx_hash = transactions.hash\n WHERE\n transactions.miniblock_number = $1\n ORDER BY\n transactions.index_in_block\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "tx_hash", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "tx_index_in_block", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "call_trace", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false, + true, + false + ] + }, + "hash": "6171f2d631f69dba52cd913742a46bd1b7b9269375f11f050099cb6d3c1427aa" +} diff --git a/core/lib/dal/.sqlx/query-b98e3790de305017c8fa5fba4c0c783b3710ee47f88edce1b17c2b8fa21dadd3.json b/core/lib/dal/.sqlx/query-b98e3790de305017c8fa5fba4c0c783b3710ee47f88edce1b17c2b8fa21dadd3.json deleted file mode 100644 index 81981683e89..00000000000 --- a/core/lib/dal/.sqlx/query-b98e3790de305017c8fa5fba4c0c783b3710ee47f88edce1b17c2b8fa21dadd3.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_version\n FROM\n transactions\n INNER JOIN miniblocks ON transactions.miniblock_number = miniblocks.number\n WHERE\n transactions.hash = $1\n ", - "describe": { - "columns": [ - { - 
"ordinal": 0, - "name": "protocol_version", - "type_info": "Int4" - } - ], - "parameters": { - "Left": [ - "Bytea" - ] - }, - "nullable": [ - true - ] - }, - "hash": "b98e3790de305017c8fa5fba4c0c783b3710ee47f88edce1b17c2b8fa21dadd3" -} diff --git a/core/lib/dal/.sqlx/query-bdfd7e9d4462ac9cf6f91fced84355e6aec05ba4af297a03169e3122a67ae53e.json b/core/lib/dal/.sqlx/query-bdfd7e9d4462ac9cf6f91fced84355e6aec05ba4af297a03169e3122a67ae53e.json new file mode 100644 index 00000000000..0b1f56ef9f3 --- /dev/null +++ b/core/lib/dal/.sqlx/query-bdfd7e9d4462ac9cf6f91fced84355e6aec05ba4af297a03169e3122a67ae53e.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n protocol_version,\n index_in_block\n FROM\n transactions\n INNER JOIN miniblocks ON transactions.miniblock_number = miniblocks.number\n WHERE\n transactions.hash = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "protocol_version", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "index_in_block", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Bytea" + ] + }, + "nullable": [ + true, + true + ] + }, + "hash": "bdfd7e9d4462ac9cf6f91fced84355e6aec05ba4af297a03169e3122a67ae53e" +} diff --git a/core/lib/dal/.sqlx/query-c37432fabd092fa235fc70e11430fb28594859564a0f888eae748ad1f9fcede5.json b/core/lib/dal/.sqlx/query-c37432fabd092fa235fc70e11430fb28594859564a0f888eae748ad1f9fcede5.json deleted file mode 100644 index 906cd108140..00000000000 --- a/core/lib/dal/.sqlx/query-c37432fabd092fa235fc70e11430fb28594859564a0f888eae748ad1f9fcede5.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n call_trace\n FROM\n call_traces\n INNER JOIN transactions ON tx_hash = transactions.hash\n WHERE\n transactions.miniblock_number = $1\n ORDER BY\n transactions.index_in_block\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "call_trace", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "c37432fabd092fa235fc70e11430fb28594859564a0f888eae748ad1f9fcede5" -} diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index 36a4acc0a6d..3d17a919a07 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -527,7 +527,7 @@ impl BlocksWeb3Dal<'_, '_> { pub async fn get_traces_for_l2_block( &mut self, block_number: L2BlockNumber, - ) -> DalResult> { + ) -> DalResult> { let protocol_version = sqlx::query!( r#" SELECT @@ -554,6 +554,8 @@ impl BlocksWeb3Dal<'_, '_> { CallTrace, r#" SELECT + transactions.hash AS tx_hash, + transactions.index_in_block AS tx_index_in_block, call_trace FROM call_traces @@ -570,7 +572,11 @@ impl BlocksWeb3Dal<'_, '_> { .fetch_all(self.storage) .await? 
.into_iter() - .map(|call_trace| call_trace.into_call(protocol_version)) + .map(|call_trace| { + let hash = H256::from_slice(&call_trace.tx_hash); + let index = call_trace.tx_index_in_block.unwrap_or_default() as usize; + (call_trace.into_call(protocol_version), hash, index) + }) .collect()) } @@ -1084,8 +1090,9 @@ mod tests { .await .unwrap(); assert_eq!(traces.len(), 2); - for (trace, tx_result) in traces.iter().zip(&tx_results) { + for ((trace, hash, _index), tx_result) in traces.iter().zip(&tx_results) { let expected_trace = tx_result.call_trace().unwrap(); + assert_eq!(&tx_result.hash, hash); assert_eq!(*trace, expected_trace); } } diff --git a/core/lib/dal/src/models/storage_transaction.rs b/core/lib/dal/src/models/storage_transaction.rs index 9f67e9025e0..bb219ee1d61 100644 --- a/core/lib/dal/src/models/storage_transaction.rs +++ b/core/lib/dal/src/models/storage_transaction.rs @@ -561,32 +561,38 @@ impl StorageApiTransaction { #[derive(Debug, Clone, sqlx::FromRow)] pub(crate) struct CallTrace { pub call_trace: Vec, + pub tx_hash: Vec, + pub tx_index_in_block: Option, } impl CallTrace { pub(crate) fn into_call(self, protocol_version: ProtocolVersionId) -> Call { - if protocol_version.is_pre_1_5_0() { - if let Ok(legacy_call_trace) = bincode::deserialize::(&self.call_trace) { - legacy_call_trace.into() - } else { - let legacy_mixed_call_trace = - bincode::deserialize::(&self.call_trace) - .expect("Failed to deserialize call trace"); - legacy_mixed_call_trace.into() - } - } else { - bincode::deserialize(&self.call_trace).unwrap() - } + parse_call_trace(&self.call_trace, protocol_version) } +} - pub(crate) fn from_call(call: Call, protocol_version: ProtocolVersionId) -> Self { - let call_trace = if protocol_version.is_pre_1_5_0() { - bincode::serialize(&LegacyCall::try_from(call).unwrap()) +pub(crate) fn parse_call_trace(call_trace: &[u8], protocol_version: ProtocolVersionId) -> Call { + if protocol_version.is_pre_1_5_0() { + if let Ok(legacy_call_trace) = bincode::deserialize::(call_trace) { + legacy_call_trace.into() } else { - bincode::serialize(&call) + let legacy_mixed_call_trace = bincode::deserialize::(call_trace) + .expect("Failed to deserialize call trace"); + legacy_mixed_call_trace.into() } - .unwrap(); + } else { + bincode::deserialize(call_trace).unwrap() + } +} - Self { call_trace } +pub(crate) fn serialize_call_into_bytes( + call: Call, + protocol_version: ProtocolVersionId, +) -> Vec { + if protocol_version.is_pre_1_5_0() { + bincode::serialize(&LegacyCall::try_from(call).unwrap()) + } else { + bincode::serialize(&call) } + .unwrap() } diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index 0a72289b48a..408837d699e 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -20,7 +20,9 @@ use zksync_vm_interface::{ }; use crate::{ - models::storage_transaction::{CallTrace, StorageTransaction}, + models::storage_transaction::{ + parse_call_trace, serialize_call_into_bytes, StorageTransaction, + }, Core, CoreDal, }; @@ -521,8 +523,7 @@ impl TransactionsDal<'_, '_> { let mut bytea_call_traces = Vec::with_capacity(transactions.len()); for tx_res in transactions { if let Some(call_trace) = tx_res.call_trace() { - bytea_call_traces - .push(CallTrace::from_call(call_trace, protocol_version).call_trace); + bytea_call_traces.push(serialize_call_into_bytes(call_trace, protocol_version)); call_traces_tx_hashes.push(tx_res.hash.as_bytes()); } } @@ -2112,11 +2113,12 @@ impl TransactionsDal<'_, '_> { Ok(data) 
} - pub async fn get_call_trace(&mut self, tx_hash: H256) -> DalResult> { + pub async fn get_call_trace(&mut self, tx_hash: H256) -> DalResult> { let row = sqlx::query!( r#" SELECT - protocol_version + protocol_version, + index_in_block FROM transactions INNER JOIN miniblocks ON transactions.miniblock_number = miniblocks.number @@ -2139,8 +2141,7 @@ impl TransactionsDal<'_, '_> { .map(|v| (v as u16).try_into().unwrap()) .unwrap_or_else(ProtocolVersionId::last_potentially_undefined); - Ok(sqlx::query_as!( - CallTrace, + Ok(sqlx::query!( r#" SELECT call_trace @@ -2155,7 +2156,12 @@ impl TransactionsDal<'_, '_> { .with_arg("tx_hash", &tx_hash) .fetch_optional(self.storage) .await? - .map(|call_trace| call_trace.into_call(protocol_version))) + .map(|call_trace| { + ( + parse_call_trace(&call_trace.call_trace, protocol_version), + row.index_in_block.unwrap_or_default() as usize, + ) + })) } pub(crate) async fn get_tx_by_hash(&mut self, hash: H256) -> DalResult> { @@ -2227,7 +2233,7 @@ mod tests { .await .unwrap(); - let call_trace = conn + let (call_trace, _) = conn .transactions_dal() .get_call_trace(tx_hash) .await diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs index f648204ca55..432b6c309c1 100644 --- a/core/lib/types/src/api/mod.rs +++ b/core/lib/types/src/api/mod.rs @@ -13,7 +13,10 @@ use zksync_contracts::BaseSystemContractsHashes; pub use crate::transaction_request::{ Eip712Meta, SerializationTransactionError, TransactionRequest, }; -use crate::{protocol_version::L1VerifierConfig, Address, L2BlockNumber, ProtocolVersionId}; +use crate::{ + debug_flat_call::DebugCallFlat, protocol_version::L1VerifierConfig, Address, L2BlockNumber, + ProtocolVersionId, +}; pub mod en; pub mod state_override; @@ -602,6 +605,7 @@ pub struct ResultDebugCall { } #[derive(Debug, Default, Serialize, Deserialize, Clone, PartialEq)] +#[serde(rename_all = "camelCase")] pub enum DebugCallType { #[default] Call, @@ -701,19 +705,20 @@ impl ProtocolVersion { } } -#[derive(Debug, Serialize, Deserialize, Clone)] +#[derive(Debug, Serialize, Deserialize, Clone, Copy)] #[serde(rename_all = "camelCase")] pub enum SupportedTracers { CallTracer, + FlatCallTracer, } -#[derive(Debug, Serialize, Deserialize, Clone, Default)] +#[derive(Debug, Serialize, Deserialize, Clone, Default, Copy)] #[serde(rename_all = "camelCase")] pub struct CallTracerConfig { pub only_top_call: bool, } -#[derive(Debug, Serialize, Deserialize, Clone)] +#[derive(Debug, Serialize, Deserialize, Clone, Copy)] #[serde(rename_all = "camelCase")] pub struct TracerConfig { pub tracer: SupportedTracers, @@ -721,6 +726,17 @@ pub struct TracerConfig { pub tracer_config: CallTracerConfig, } +impl Default for TracerConfig { + fn default() -> Self { + TracerConfig { + tracer: SupportedTracers::CallTracer, + tracer_config: CallTracerConfig { + only_top_call: false, + }, + } + } +} + #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub enum BlockStatus { @@ -728,6 +744,62 @@ pub enum BlockStatus { Verified, } +/// Result tracers need to have a nested result field for compatibility. 
+/// structs: one for block tracing and one for transaction and call tracing.
+#[derive(Debug, Serialize, Deserialize, Clone)]
+#[serde(untagged)]
+pub enum CallTracerBlockResult {
+    CallTrace(Vec<ResultDebugCall>),
+    FlatCallTrace(Vec<DebugCallFlat>),
+}
+
+impl CallTracerBlockResult {
+    pub fn unwrap_flatten(self) -> Vec<DebugCallFlat> {
+        match self {
+            Self::CallTrace(_) => {
+                panic!("Result is a CallTrace")
+            }
+            Self::FlatCallTrace(a) => a,
+        }
+    }
+
+    pub fn unwrap_default(self) -> Vec<ResultDebugCall> {
+        match self {
+            Self::CallTrace(a) => a,
+            Self::FlatCallTrace(_) => {
+                panic!("Result is a FlatCallTrace")
+            }
+        }
+    }
+}
+
+#[derive(Debug, Serialize, Deserialize, Clone)]
+#[serde(untagged)]
+pub enum CallTracerResult {
+    CallTrace(DebugCall),
+    FlatCallTrace(Vec<DebugCallFlat>),
+}
+
+impl CallTracerResult {
+    pub fn unwrap_flat(self) -> Vec<DebugCallFlat> {
+        match self {
+            Self::CallTrace(_) => {
+                panic!("Result is a CallTrace")
+            }
+            Self::FlatCallTrace(a) => a,
+        }
+    }
+
+    pub fn unwrap_default(self) -> DebugCall {
+        match self {
+            Self::CallTrace(a) => a,
+            Self::FlatCallTrace(_) => {
+                panic!("Result is a FlatCallTrace")
+            }
+        }
+    }
+}
+
 #[derive(Debug, Clone, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct BlockDetailsBase {
diff --git a/core/lib/types/src/debug_flat_call.rs b/core/lib/types/src/debug_flat_call.rs
index b5c0d79c857..89a008b5fb5 100644
--- a/core/lib/types/src/debug_flat_call.rs
+++ b/core/lib/types/src/debug_flat_call.rs
@@ -1,26 +1,24 @@
 use serde::{Deserialize, Serialize};
 use zksync_basic_types::{web3::Bytes, U256};
 
-use crate::{
-    api::{DebugCall, DebugCallType, ResultDebugCall},
-    Address,
-};
+use crate::{api::DebugCallType, Address, H256};
 
 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct DebugCallFlat {
     pub action: Action,
-    pub result: CallResult,
+    pub result: Option<CallResult>,
     pub subtraces: usize,
-    pub traceaddress: Vec<usize>,
-    pub error: Option<String>,
-    pub revert_reason: Option<String>,
+    pub trace_address: Vec<usize>,
+    pub transaction_position: usize,
+    pub transaction_hash: H256,
+    pub r#type: DebugCallType,
 }
 
 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct Action {
-    pub r#type: DebugCallType,
+    pub call_type: DebugCallType,
     pub from: Address,
     pub to: Address,
     pub gas: U256,
@@ -34,222 +32,3 @@ pub struct CallResult {
     pub output: Bytes,
     pub gas_used: U256,
 }
-
-pub fn flatten_debug_calls(calls: Vec<ResultDebugCall>) -> Vec<DebugCallFlat> {
-    let mut flattened_calls = Vec::new();
-    for (index, result_debug_call) in calls.into_iter().enumerate() {
-        let mut trace_address = vec![index]; // Initialize the trace addressees with the index of the top-level call
-        flatten_call_recursive(
-            &result_debug_call.result,
-            &mut flattened_calls,
-            &mut trace_address,
-        );
-    }
-    flattened_calls
-}
-
-fn flatten_call_recursive(
-    call: &DebugCall,
-    flattened_calls: &mut Vec<DebugCallFlat>,
-    trace_address: &mut Vec<usize>,
-) {
-    let flat_call = DebugCallFlat {
-        action: Action {
-            r#type: call.r#type.clone(),
-            from: call.from,
-            to: call.to,
-            gas: call.gas,
-            value: call.value,
-            input: call.input.clone(),
-        },
-        result: CallResult {
-            output: call.output.clone(),
-            gas_used: call.gas_used,
-        },
-        subtraces: call.calls.len(),
-        traceaddress: trace_address.clone(), // Clone the current trace address
-        error: call.error.clone(),
-        revert_reason: call.revert_reason.clone(),
-    };
-    flattened_calls.push(flat_call);
-
-    // Process nested calls
-    for (index, nested_call) in call.calls.iter().enumerate() {
-        trace_address.push(index); // Update trace addressees for the nested call
-        flatten_call_recursive(nested_call, flattened_calls, trace_address);
-        trace_address.pop(); // Reset trace addressees after processing the nested call (prevent to keep filling the vector)
-    }
-}
-
-#[cfg(test)]
-mod test {
-    use super::*;
-    use crate::{
-        api::{DebugCall, DebugCallType, ResultDebugCall},
-        Address, BOOTLOADER_ADDRESS,
-    };
-
-    #[test]
-    fn test_flatten_debug_call() {
-        let result_debug_trace: Vec<ResultDebugCall> = [1, 1]
-            .map(|_| ResultDebugCall {
-                result: new_testing_debug_call(),
-            })
-            .into();
-
-        let debug_call_flat = flatten_debug_calls(result_debug_trace);
-        let expected_debug_call_flat = expected_flat_trace();
-        assert_eq!(debug_call_flat, expected_debug_call_flat);
-    }
-
-    fn new_testing_debug_call() -> DebugCall {
-        DebugCall {
-            r#type: DebugCallType::Call,
-            from: Address::zero(),
-            to: BOOTLOADER_ADDRESS,
-            gas: 1000.into(),
-            gas_used: 1000.into(),
-            value: 0.into(),
-            output: vec![].into(),
-            input: vec![].into(),
-            error: None,
-            revert_reason: None,
-            calls: new_testing_trace(),
-        }
-    }
-
-    fn new_testing_trace() -> Vec<DebugCall> {
-        let first_call_trace = DebugCall {
-            from: Address::zero(),
-            to: Address::zero(),
-            gas: 100.into(),
-            gas_used: 42.into(),
-            ..DebugCall::default()
-        };
-        let second_call_trace = DebugCall {
-            from: Address::zero(),
-            to: Address::zero(),
-            value: 123.into(),
-            gas: 58.into(),
-            gas_used: 10.into(),
-            input: Bytes(b"input".to_vec()),
-            output: Bytes(b"output".to_vec()),
-            ..DebugCall::default()
-        };
-        [first_call_trace, second_call_trace].into()
-    }
-
-    fn expected_flat_trace() -> Vec<DebugCallFlat> {
-        [
-            DebugCallFlat {
-                action: Action {
-                    r#type: DebugCallType::Call,
-                    from: Address::zero(),
-                    to: BOOTLOADER_ADDRESS,
-                    gas: 1000.into(),
-                    value: 0.into(),
-                    input: vec![].into(),
-                },
-                result: CallResult {
-                    output: vec![].into(),
-                    gas_used: 1000.into(),
-                },
-                subtraces: 2,
-                traceaddress: [0].into(),
-                error: None,
-                revert_reason: None,
-            },
-            DebugCallFlat {
-                action: Action {
-                    r#type: DebugCallType::Call,
-                    from: Address::zero(),
-                    to: Address::zero(),
-                    gas: 100.into(),
-                    value: 0.into(),
-                    input: vec![].into(),
-                },
-                result: CallResult {
-                    output: vec![].into(),
-                    gas_used: 42.into(),
-                },
-                subtraces: 0,
-                traceaddress: [0, 0].into(),
-                error: None,
-                revert_reason: None,
-            },
-            DebugCallFlat {
-                action: Action {
-                    r#type: DebugCallType::Call,
-                    from: Address::zero(),
-                    to: Address::zero(),
-                    gas: 58.into(),
-                    value: 123.into(),
-                    input: b"input".to_vec().into(),
-                },
-                result: CallResult {
-                    output: b"output".to_vec().into(),
-                    gas_used: 10.into(),
-                },
-                subtraces: 0,
-                traceaddress: [0, 1].into(),
-                error: None,
-                revert_reason: None,
-            },
-            DebugCallFlat {
-                action: Action {
-                    r#type: DebugCallType::Call,
-                    from: Address::zero(),
-                    to: BOOTLOADER_ADDRESS,
-                    gas: 1000.into(),
-                    value: 0.into(),
-                    input: vec![].into(),
-                },
-                result: CallResult {
-                    output: vec![].into(),
-                    gas_used: 1000.into(),
-                },
-                subtraces: 2,
-                traceaddress: [1].into(),
-                error: None,
-                revert_reason: None,
-            },
-            DebugCallFlat {
-                action: Action {
-                    r#type: DebugCallType::Call,
-                    from: Address::zero(),
-                    to: Address::zero(),
-                    gas: 100.into(),
-                    value: 0.into(),
-                    input: vec![].into(),
-                },
-                result: CallResult {
-                    output: vec![].into(),
-                    gas_used: 42.into(),
-                },
-                subtraces: 0,
-                traceaddress: [1, 0].into(),
-                error: None,
-                revert_reason: None,
-            },
-            DebugCallFlat {
-                action: Action {
-                    r#type: DebugCallType::Call,
-                    from: Address::zero(),
-                    to: Address::zero(),
-                    gas: 58.into(),
-                    value: 123.into(),
-                    input: b"input".to_vec().into(),
-                },
-                result: CallResult {
-                    output: b"output".to_vec().into(),
-                    gas_used: 10.into(),
-                },
-                subtraces: 0,
-                traceaddress: [1, 1].into(),
-                error: None,
-                revert_reason: None,
-            },
-        ]
-        .into()
-    }
-}
diff --git a/core/lib/web3_decl/src/namespaces/debug.rs b/core/lib/web3_decl/src/namespaces/debug.rs
index 1fbe3237104..8ca5622e95d 100644
--- a/core/lib/web3_decl/src/namespaces/debug.rs
+++ b/core/lib/web3_decl/src/namespaces/debug.rs
@@ -2,8 +2,7 @@ use jsonrpsee::core::RpcResult;
 use jsonrpsee::proc_macros::rpc;
 use zksync_types::{
-    api::{BlockId, BlockNumber, DebugCall, ResultDebugCall, TracerConfig},
-    debug_flat_call::DebugCallFlat,
+    api::{BlockId, BlockNumber, CallTracerBlockResult, CallTracerResult, TracerConfig},
     transaction_request::CallRequest,
 };
 
@@ -26,21 +25,14 @@ pub trait DebugNamespace {
         &self,
         block: BlockNumber,
         options: Option<TracerConfig>,
-    ) -> RpcResult<Vec<ResultDebugCall>>;
-
-    #[method(name = "traceBlockByNumber.callFlatTracer")]
-    async fn trace_block_by_number_flat(
-        &self,
-        block: BlockNumber,
-        options: Option<TracerConfig>,
-    ) -> RpcResult<Vec<DebugCallFlat>>;
+    ) -> RpcResult<CallTracerBlockResult>;
 
     #[method(name = "traceBlockByHash")]
     async fn trace_block_by_hash(
         &self,
         hash: H256,
         options: Option<TracerConfig>,
-    ) -> RpcResult<Vec<ResultDebugCall>>;
+    ) -> RpcResult<CallTracerBlockResult>;
 
     #[method(name = "traceCall")]
     async fn trace_call(
         &self,
         request: CallRequest,
         block: Option<BlockId>,
         options: Option<TracerConfig>,
-    ) -> RpcResult<DebugCall>;
+    ) -> RpcResult<CallTracerResult>;
 
     #[method(name = "traceTransaction")]
     async fn trace_transaction(
         &self,
         tx_hash: H256,
         options: Option<TracerConfig>,
-    ) -> RpcResult<Option<DebugCall>>;
+    ) -> RpcResult<Option<CallTracerResult>>;
 }
diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/debug.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/debug.rs
index 726beae2cc9..50981a2b284 100644
--- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/debug.rs
+++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/debug.rs
@@ -1,6 +1,5 @@
 use zksync_types::{
-    api::{BlockId, BlockNumber, DebugCall, ResultDebugCall, TracerConfig},
-    debug_flat_call::DebugCallFlat,
+    api::{BlockId, BlockNumber, CallTracerBlockResult, CallTracerResult, TracerConfig},
     transaction_request::CallRequest,
     H256,
 };
@@ -17,27 +16,17 @@ impl DebugNamespaceServer for DebugNamespace {
         &self,
         block: BlockNumber,
         options: Option<TracerConfig>,
-    ) -> RpcResult<Vec<ResultDebugCall>> {
+    ) -> RpcResult<CallTracerBlockResult> {
         self.debug_trace_block_impl(BlockId::Number(block), options)
             .await
             .map_err(|err| self.current_method().map_err(err))
     }
 
-    async fn trace_block_by_number_flat(
-        &self,
-        block: BlockNumber,
-        options: Option<TracerConfig>,
-    ) -> RpcResult<Vec<DebugCallFlat>> {
-        self.debug_trace_block_flat_impl(BlockId::Number(block), options)
-            .await
-            .map_err(|err| self.current_method().map_err(err))
-    }
-
     async fn trace_block_by_hash(
         &self,
         hash: H256,
         options: Option<TracerConfig>,
-    ) -> RpcResult<Vec<ResultDebugCall>> {
+    ) -> RpcResult<CallTracerBlockResult> {
         self.debug_trace_block_impl(BlockId::Hash(hash), options)
             .await
             .map_err(|err| self.current_method().map_err(err))
@@ -48,7 +37,7 @@ impl DebugNamespaceServer for DebugNamespace {
         request: CallRequest,
         block: Option<BlockId>,
         options: Option<TracerConfig>,
-    ) -> RpcResult<DebugCall> {
+    ) -> RpcResult<CallTracerResult> {
         self.debug_trace_call_impl(request, block, options)
             .await
             .map_err(|err| self.current_method().map_err(err))
@@ -58,7 +47,7 @@ impl DebugNamespaceServer for DebugNamespace {
         &self,
         tx_hash: H256,
         options: Option<TracerConfig>,
-    ) -> RpcResult<Option<DebugCall>> {
+    ) -> RpcResult<Option<CallTracerResult>> {
         self.debug_trace_transaction_impl(tx_hash, options)
             .await
             .map_err(|err| self.current_method().map_err(err))
diff --git a/core/node/api_server/src/web3/namespaces/debug.rs b/core/node/api_server/src/web3/namespaces/debug.rs
index 2c6c70f6faa..68c7951cee4 100644
--- a/core/node/api_server/src/web3/namespaces/debug.rs
+++ b/core/node/api_server/src/web3/namespaces/debug.rs
@@ -3,8 +3,11 @@ use zksync_dal::{CoreDal, DalError};
 use zksync_multivm::interface::{Call, CallType, ExecutionResult, OneshotTracingParams};
 use zksync_system_constants::MAX_ENCODED_TX_SIZE;
 use zksync_types::{
-    api::{BlockId, BlockNumber, DebugCall, DebugCallType, ResultDebugCall, TracerConfig},
-    debug_flat_call::{flatten_debug_calls, DebugCallFlat},
+    api::{
+        BlockId, BlockNumber, CallTracerBlockResult, CallTracerResult, DebugCall, DebugCallType,
+        ResultDebugCall, SupportedTracers, TracerConfig,
+    },
+    debug_flat_call::{Action, CallResult, DebugCallFlat},
     fee_model::BatchFeeInput,
     l2::L2Tx,
     transaction_request::CallRequest,
@@ -42,13 +45,39 @@ impl DebugNamespace {
         })
     }
 
-    pub(crate) fn map_call(call: Call, only_top_call: bool) -> DebugCall {
+    pub(crate) fn map_call(
+        call: Call,
+        index: usize,
+        transaction_hash: H256,
+        tracer_option: TracerConfig,
+    ) -> CallTracerResult {
+        match tracer_option.tracer {
+            SupportedTracers::CallTracer => CallTracerResult::CallTrace(Self::map_default_call(
+                call,
+                tracer_option.tracer_config.only_top_call,
+            )),
+            SupportedTracers::FlatCallTracer => {
+                let mut calls = vec![];
+                let mut traces = vec![index];
+                Self::flatten_call(
+                    call,
+                    &mut calls,
+                    &mut traces,
+                    tracer_option.tracer_config.only_top_call,
+                    index,
+                    transaction_hash,
+                );
+                CallTracerResult::FlatCallTrace(calls)
+            }
+        }
+    }
+
+    pub(crate) fn map_default_call(call: Call, only_top_call: bool) -> DebugCall {
         let calls = if only_top_call {
             vec![]
         } else {
             call.calls
                 .into_iter()
-                .map(|call| Self::map_call(call, false))
+                .map(|call| Self::map_default_call(call, false))
                 .collect()
         };
         let debug_type = match call.r#type {
@@ -71,6 +100,63 @@ impl DebugNamespace {
         }
     }
 
+    fn flatten_call(
+        call: Call,
+        calls: &mut Vec<DebugCallFlat>,
+        trace_address: &mut Vec<usize>,
+        only_top_call: bool,
+        transaction_position: usize,
+        transaction_hash: H256,
+    ) {
+        let subtraces = call.calls.len();
+        let debug_type = match call.r#type {
+            CallType::Call(_) => DebugCallType::Call,
+            CallType::Create => DebugCallType::Create,
+            CallType::NearCall => unreachable!("We have to filter out near calls before"),
+        };
+
+        let result = if call.error.is_none() {
+            Some(CallResult {
+                output: web3::Bytes::from(call.output),
+                gas_used: U256::from(call.gas_used),
+            })
+        } else {
+            None
+        };
+
+        calls.push(DebugCallFlat {
+            action: Action {
+                call_type: debug_type,
+                from: call.from,
+                to: call.to,
+                gas: U256::from(call.gas),
+                value: call.value,
+                input: web3::Bytes::from(call.input),
+            },
+            result,
+            subtraces,
+            trace_address: trace_address.clone(), // Clone the current trace address
+            transaction_position,
+            transaction_hash,
+            r#type: DebugCallType::Call,
+        });
+
+        if !only_top_call {
+            for (number, call) in call.calls.into_iter().enumerate() {
+                trace_address.push(number);
+                Self::flatten_call(
+                    call,
+                    calls,
+                    trace_address,
+                    false,
+                    transaction_position,
+                    transaction_hash,
+                );
+                trace_address.pop();
+            }
+        }
+    }
+
     pub(crate) fn current_method(&self) -> &MethodTracer {
         &self.state.current_method
     }
@@ -79,16 +165,13 @@ impl DebugNamespace {
         &self,
         block_id: BlockId,
         options: Option<TracerConfig>,
-    ) -> Result<Vec<ResultDebugCall>, Web3Error> {
+    ) -> Result<CallTracerBlockResult, Web3Error> {
         self.current_method().set_block_id(block_id);
         if matches!(block_id, BlockId::Number(BlockNumber::Pending)) {
             // See `EthNamespace::get_block_impl()` for an explanation why this check is needed.
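// Editor's note: a worked example, not part of this patch. `flatten_call` above
// emits one `DebugCallFlat` per frame in depth-first order. For a transaction at
// index 2 in its block whose top-level call has two nested calls, the second of
// which has one nested call of its own, the emitted `trace_address` values are:
//
//     [2]        top-level call       (subtraces = 2)
//     [2, 0]     first nested call    (subtraces = 0)
//     [2, 1]     second nested call   (subtraces = 1)
//     [2, 1, 0]  its nested call      (subtraces = 0)
//
// That is, each `trace_address` is the path of child indices down from the
// top-level call, with the transaction's index in the block as the first element.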
-            return Ok(vec![]);
+            return Ok(CallTracerBlockResult::CallTrace(vec![]));
         }
 
-        let only_top_call = options
-            .map(|options| options.tracer_config.only_top_call)
-            .unwrap_or(false);
         let mut connection = self.state.acquire_connection().await?;
         let block_number = self.state.resolve_block(&mut connection, block_id).await?;
         self.current_method()
@@ -99,41 +182,55 @@ impl DebugNamespace {
             .get_traces_for_l2_block(block_number)
             .await
             .map_err(DalError::generalize)?;
-        let call_trace = call_traces
-            .into_iter()
-            .map(|call_trace| {
-                let result = Self::map_call(call_trace, only_top_call);
-                ResultDebugCall { result }
-            })
-            .collect();
-        Ok(call_trace)
-    }
 
-    pub async fn debug_trace_block_flat_impl(
-        &self,
-        block_id: BlockId,
-        options: Option<TracerConfig>,
-    ) -> Result<Vec<DebugCallFlat>, Web3Error> {
-        let call_trace = self.debug_trace_block_impl(block_id, options).await?;
-        let call_trace_flat = flatten_debug_calls(call_trace);
-        Ok(call_trace_flat)
+        let options = options.unwrap_or_default();
+        let result = match options.tracer {
+            SupportedTracers::CallTracer => CallTracerBlockResult::CallTrace(
+                call_traces
+                    .into_iter()
+                    .map(|(call, _, _)| ResultDebugCall {
+                        result: Self::map_default_call(call, options.tracer_config.only_top_call),
+                    })
+                    .collect(),
+            ),
+            SupportedTracers::FlatCallTracer => {
+                let mut flat_calls = vec![];
+                for (call, tx_hash, tx_index) in call_traces {
+                    let mut traces = vec![tx_index];
+                    Self::flatten_call(
+                        call,
+                        &mut flat_calls,
+                        &mut traces,
+                        options.tracer_config.only_top_call,
+                        tx_index,
+                        tx_hash,
+                    );
+                }
+                CallTracerBlockResult::FlatCallTrace(flat_calls)
+            }
+        };
+        Ok(result)
     }
 
     pub async fn debug_trace_transaction_impl(
         &self,
         tx_hash: H256,
         options: Option<TracerConfig>,
-    ) -> Result<Option<DebugCall>, Web3Error> {
-        let only_top_call = options
-            .map(|options| options.tracer_config.only_top_call)
-            .unwrap_or(false);
+    ) -> Result<Option<CallTracerResult>, Web3Error> {
        let mut connection = self.state.acquire_connection().await?;
         let call_trace = connection
             .transactions_dal()
             .get_call_trace(tx_hash)
             .await
             .map_err(DalError::generalize)?;
-        Ok(call_trace.map(|call_trace| Self::map_call(call_trace, only_top_call)))
+        Ok(call_trace.map(|(call_trace, index_in_block)| {
+            Self::map_call(
+                call_trace,
+                index_in_block,
+                tx_hash,
+                options.unwrap_or_default(),
+            )
+        }))
     }
 
     pub async fn debug_trace_call_impl(
@@ -141,13 +238,11 @@ impl DebugNamespace {
         mut request: CallRequest,
         block_id: Option<BlockId>,
         options: Option<TracerConfig>,
-    ) -> Result<DebugCall, Web3Error> {
+    ) -> Result<CallTracerResult, Web3Error> {
         let block_id = block_id.unwrap_or(BlockId::Number(BlockNumber::Pending));
         self.current_method().set_block_id(block_id);
 
-        let only_top_call = options
-            .map(|options| options.tracer_config.only_top_call)
-            .unwrap_or(false);
+        let options = options.unwrap_or_default();
 
         let mut connection = self.state.acquire_connection().await?;
         let block_args = self
@@ -182,7 +277,7 @@ impl DebugNamespace {
 
         // We don't need properly trace if we only need top call
         let tracing_params = OneshotTracingParams {
-            trace_calls: !only_top_call,
+            trace_calls: !options.tracer_config.only_top_call,
         };
 
         let connection = self.state.acquire_connection().await?;
@@ -212,7 +307,8 @@ impl DebugNamespace {
                 ))
             }
         };
-
+        // It's a call request, so it's safe to keep the hash zero
+        let hash = H256::zero();
         let call = Call::new_high_level(
             call.common_data.fee.gas_limit.as_u64(),
             result.vm.statistics.gas_used,
@@ -222,6 +318,6 @@ impl DebugNamespace {
             revert_reason,
             result.call_traces,
         );
-        Ok(Self::map_call(call, false))
+        Ok(Self::map_call(call, 0, hash, options))
     }
 }
diff --git a/core/node/api_server/src/web3/tests/debug.rs
b/core/node/api_server/src/web3/tests/debug.rs index 76496b42cad..7711570c3c5 100644 --- a/core/node/api_server/src/web3/tests/debug.rs +++ b/core/node/api_server/src/web3/tests/debug.rs @@ -1,7 +1,10 @@ //! Tests for the `debug` Web3 namespace. use zksync_multivm::interface::{Call, TransactionExecutionResult}; -use zksync_types::BOOTLOADER_ADDRESS; +use zksync_types::{ + api::{CallTracerConfig, SupportedTracers, TracerConfig}, + BOOTLOADER_ADDRESS, +}; use zksync_web3_decl::{ client::{DynClient, L2}, namespaces::DebugNamespaceClient, @@ -58,18 +61,19 @@ impl HttpTest for TraceBlockTest { let block_traces = match block_id { api::BlockId::Number(number) => client.trace_block_by_number(number, None).await?, api::BlockId::Hash(hash) => client.trace_block_by_hash(hash, None).await?, - }; + } + .unwrap_default(); assert_eq!(block_traces.len(), tx_results.len()); // equals to the number of transactions in the block for (trace, tx_result) in block_traces.iter().zip(&tx_results) { - let api::ResultDebugCall { result } = trace; + let result = &trace.result; assert_eq!(result.from, Address::zero()); assert_eq!(result.to, BOOTLOADER_ADDRESS); assert_eq!(result.gas, tx_result.transaction.gas_limit()); let expected_calls: Vec<_> = tx_result .call_traces .iter() - .map(|call| DebugNamespace::map_call(call.clone(), false)) + .map(|call| DebugNamespace::map_default_call(call.clone(), false)) .collect(); assert_eq!(result.calls, expected_calls); } @@ -122,7 +126,18 @@ impl HttpTest for TraceBlockFlatTest { for block_id in block_ids { if let api::BlockId::Number(number) = block_id { - let block_traces = client.trace_block_by_number_flat(number, None).await?; + let block_traces = client + .trace_block_by_number( + number, + Some(TracerConfig { + tracer: SupportedTracers::FlatCallTracer, + tracer_config: CallTracerConfig { + only_top_call: false, + }, + }), + ) + .await? + .unwrap_flatten(); // A transaction with 2 nested calls will convert into 3 Flattened calls. // Also in this test, all tx have the same # of nested calls @@ -133,10 +148,10 @@ impl HttpTest for TraceBlockFlatTest { // First tx has 2 nested calls, thus 2 sub-traces assert_eq!(block_traces[0].subtraces, 2); - assert_eq!(block_traces[0].traceaddress, [0]); + assert_eq!(block_traces[0].trace_address, [0]); // Second flat-call (fist nested call) do not have nested calls assert_eq!(block_traces[1].subtraces, 0); - assert_eq!(block_traces[1].traceaddress, [0, 0]); + assert_eq!(block_traces[1].trace_address, [0, 0]); let top_level_call_indexes = [0, 3, 6]; let top_level_traces = top_level_call_indexes @@ -157,7 +172,15 @@ impl HttpTest for TraceBlockFlatTest { let missing_block_number = api::BlockNumber::from(*self.0 + 100); let error = client - .trace_block_by_number_flat(missing_block_number, None) + .trace_block_by_number( + missing_block_number, + Some(TracerConfig { + tracer: SupportedTracers::FlatCallTracer, + tracer_config: CallTracerConfig { + only_top_call: false, + }, + }), + ) .await .unwrap_err(); if let ClientError::Call(error) = error { @@ -198,13 +221,14 @@ impl HttpTest for TraceTransactionTest { let expected_calls: Vec<_> = tx_results[0] .call_traces .iter() - .map(|call| DebugNamespace::map_call(call.clone(), false)) + .map(|call| DebugNamespace::map_default_call(call.clone(), false)) .collect(); let result = client .trace_transaction(tx_results[0].hash, None) .await? - .context("no transaction traces")?; + .context("no transaction traces")? 
+ .unwrap_default(); assert_eq!(result.from, Address::zero()); assert_eq!(result.to, BOOTLOADER_ADDRESS); assert_eq!(result.gas, tx_results[0].transaction.gas_limit()); diff --git a/core/node/api_server/src/web3/tests/vm.rs b/core/node/api_server/src/web3/tests/vm.rs index 9bdcf115930..b8f74370303 100644 --- a/core/node/api_server/src/web3/tests/vm.rs +++ b/core/node/api_server/src/web3/tests/vm.rs @@ -488,12 +488,16 @@ impl HttpTest for TraceCallTest { self.fee_input.expect_default(Self::FEE_SCALE); let call_request = CallTest::call_request(b"pending"); - let call_result = client.trace_call(call_request.clone(), None, None).await?; + let call_result = client + .trace_call(call_request.clone(), None, None) + .await? + .unwrap_default(); Self::assert_debug_call(&call_request, &call_result); let pending_block_number = api::BlockId::Number(api::BlockNumber::Pending); let call_result = client .trace_call(call_request.clone(), Some(pending_block_number), None) - .await?; + .await? + .unwrap_default(); Self::assert_debug_call(&call_request, &call_result); let latest_block_numbers = [api::BlockNumber::Latest, 1.into()]; @@ -506,7 +510,8 @@ impl HttpTest for TraceCallTest { Some(api::BlockId::Number(number)), None, ) - .await?; + .await? + .unwrap_default(); Self::assert_debug_call(&call_request, &call_result); } @@ -557,12 +562,16 @@ impl HttpTest for TraceCallTestAfterSnapshotRecovery { ) -> anyhow::Result<()> { self.fee_input.expect_default(TraceCallTest::FEE_SCALE); let call_request = CallTest::call_request(b"pending"); - let call_result = client.trace_call(call_request.clone(), None, None).await?; + let call_result = client + .trace_call(call_request.clone(), None, None) + .await? + .unwrap_default(); TraceCallTest::assert_debug_call(&call_request, &call_result); let pending_block_number = api::BlockId::Number(api::BlockNumber::Pending); let call_result = client .trace_call(call_request.clone(), Some(pending_block_number), None) - .await?; + .await? + .unwrap_default(); TraceCallTest::assert_debug_call(&call_request, &call_result); let first_local_l2_block = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; @@ -584,7 +593,8 @@ impl HttpTest for TraceCallTestAfterSnapshotRecovery { let number = api::BlockId::Number(number); let call_result = client .trace_call(call_request.clone(), Some(number), None) - .await?; + .await? 
+ .unwrap_default(); TraceCallTest::assert_debug_call(&call_request, &call_result); } Ok(()) diff --git a/core/tests/ts-integration/tests/api/debug.test.ts b/core/tests/ts-integration/tests/api/debug.test.ts index 054aa57cf64..2af18c8438b 100644 --- a/core/tests/ts-integration/tests/api/debug.test.ts +++ b/core/tests/ts-integration/tests/api/debug.test.ts @@ -50,7 +50,7 @@ describe('Debug methods', () => { output: '0x', revertReason: 'Error function_selector = 0x, data = 0x', to: BOOTLOADER_FORMAL_ADDRESS, - type: 'Call', + type: 'call', value: expect.any(String), calls: expect.any(Array) }; @@ -75,7 +75,7 @@ describe('Debug methods', () => { input: expect.any(String), output: '0x', to: BOOTLOADER_FORMAL_ADDRESS, - type: 'Call', + type: 'call', value: expect.any(String), calls: expect.any(Array) // We intentionally skip `error` and `revertReason` fields: the block may contain failing txs @@ -99,7 +99,7 @@ describe('Debug methods', () => { output: '0x', revertReason: null, to: BOOTLOADER_FORMAL_ADDRESS, - type: 'Call', + type: 'call', value: '0x0', calls: expect.any(Array) }; diff --git a/prover/crates/bin/prover_fri/src/utils.rs b/prover/crates/bin/prover_fri/src/utils.rs index 181dc857c36..86e6568f8e4 100644 --- a/prover/crates/bin/prover_fri/src/utils.rs +++ b/prover/crates/bin/prover_fri/src/utils.rs @@ -200,7 +200,7 @@ mod tests { round: AggregationRound::BasicCircuits, }; - let result = get_setup_data_key(key.clone()); + let result = get_setup_data_key(key); // Check if the key has remained same assert_eq!(key, result); diff --git a/prover/crates/lib/prover_dal/.sqlx/query-1297f0977132185d6bd4501f490f1cdac8b194f09926c133985479c533a651f2.json b/prover/crates/lib/prover_dal/.sqlx/query-1297f0977132185d6bd4501f490f1cdac8b194f09926c133985479c533a651f2.json new file mode 100644 index 00000000000..c99572bcc8e --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-1297f0977132185d6bd4501f490f1cdac8b194f09926c133985479c533a651f2.json @@ -0,0 +1,18 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE prover_jobs_fri\n SET\n status = $1\n WHERE\n l1_batch_number = $2\n AND sequence_number = $3\n AND aggregation_round = $4\n AND circuit_id = $5\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Int8", + "Int4", + "Int2", + "Int2" + ] + }, + "nullable": [] + }, + "hash": "1297f0977132185d6bd4501f490f1cdac8b194f09926c133985479c533a651f2" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-1926cf432237684de2383179a6d0d001cdf5bc7ba988b742571ec90a938434e3.json b/prover/crates/lib/prover_dal/.sqlx/query-1926cf432237684de2383179a6d0d001cdf5bc7ba988b742571ec90a938434e3.json deleted file mode 100644 index 4015a22ff3f..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-1926cf432237684de2383179a6d0d001cdf5bc7ba988b742571ec90a938434e3.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "UPDATE leaf_aggregation_witness_jobs_fri \n SET status = $1, attempts = $2\n WHERE l1_batch_number = $3\n AND circuit_id = $4", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Text", - "Int2", - "Int8", - "Int2" - ] - }, - "nullable": [] - }, - "hash": "1926cf432237684de2383179a6d0d001cdf5bc7ba988b742571ec90a938434e3" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-97adb49780c9edde6a3cfda09dadbd694e1781e013247d090a280a1f894de464.json b/prover/crates/lib/prover_dal/.sqlx/query-29f7a564a8373f7e44840e8e9e7d0cd5c6b1122c35d7ffdbbba30327ca3fb5a8.json similarity index 53% rename from 
prover/crates/lib/prover_dal/.sqlx/query-97adb49780c9edde6a3cfda09dadbd694e1781e013247d090a280a1f894de464.json rename to prover/crates/lib/prover_dal/.sqlx/query-29f7a564a8373f7e44840e8e9e7d0cd5c6b1122c35d7ffdbbba30327ca3fb5a8.json index ce9e492a7d4..05163dcfa2e 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-97adb49780c9edde6a3cfda09dadbd694e1781e013247d090a280a1f894de464.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-29f7a564a8373f7e44840e8e9e7d0cd5c6b1122c35d7ffdbbba30327ca3fb5a8.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_version AS \"protocol_version!\",\n protocol_version_patch AS \"protocol_version_patch!\",\n COUNT(*) FILTER (WHERE status = 'queued') as queued,\n COUNT(*) FILTER (WHERE status = 'in_progress') as in_progress\n FROM\n prover_jobs_fri\n WHERE\n status IN ('queued', 'in_progress')\n AND protocol_version IS NOT NULL\n GROUP BY\n protocol_version,\n protocol_version_patch\n ", + "query": "\n SELECT\n protocol_version AS \"protocol_version!\",\n protocol_version_patch AS \"protocol_version_patch!\",\n COUNT(*) FILTER (\n WHERE\n status = 'queued'\n ) AS queued,\n COUNT(*) FILTER (\n WHERE\n status = 'in_progress'\n ) AS in_progress\n FROM\n prover_jobs_fri\n WHERE\n status IN ('queued', 'in_progress')\n AND protocol_version IS NOT NULL\n GROUP BY\n protocol_version,\n protocol_version_patch\n ", "describe": { "columns": [ { @@ -34,5 +34,5 @@ null ] }, - "hash": "97adb49780c9edde6a3cfda09dadbd694e1781e013247d090a280a1f894de464" + "hash": "29f7a564a8373f7e44840e8e9e7d0cd5c6b1122c35d7ffdbbba30327ca3fb5a8" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-2d1461e068e43fd16714610b383cf8c93630d529ec96e67aac078f18196f61a5.json b/prover/crates/lib/prover_dal/.sqlx/query-2d1461e068e43fd16714610b383cf8c93630d529ec96e67aac078f18196f61a5.json new file mode 100644 index 00000000000..50d121213fb --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-2d1461e068e43fd16714610b383cf8c93630d529ec96e67aac078f18196f61a5.json @@ -0,0 +1,19 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE prover_jobs_fri\n SET\n status = $1,\n attempts = $2\n WHERE\n l1_batch_number = $3\n AND sequence_number = $4\n AND aggregation_round = $5\n AND circuit_id = $6\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Int2", + "Int8", + "Int4", + "Int2", + "Int2" + ] + }, + "nullable": [] + }, + "hash": "2d1461e068e43fd16714610b383cf8c93630d529ec96e67aac078f18196f61a5" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-75c1affbca0901edd5d0e2f12ef4d935674a5aff2f34421d753b4d1a9dea5b12.json b/prover/crates/lib/prover_dal/.sqlx/query-35a76415cb746d03da31481edc65adefab0bf3abf6853a6d36123c8adcaf813b.json similarity index 70% rename from prover/crates/lib/prover_dal/.sqlx/query-75c1affbca0901edd5d0e2f12ef4d935674a5aff2f34421d753b4d1a9dea5b12.json rename to prover/crates/lib/prover_dal/.sqlx/query-35a76415cb746d03da31481edc65adefab0bf3abf6853a6d36123c8adcaf813b.json index 14463ecbe42..bf8db798e7d 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-75c1affbca0901edd5d0e2f12ef4d935674a5aff2f34421d753b4d1a9dea5b12.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-35a76415cb746d03da31481edc65adefab0bf3abf6853a6d36123c8adcaf813b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE scheduler_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (status = 'in_progress' OR status = 'failed')\n RETURNING\n 
l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", + "query": "\n UPDATE scheduler_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (\n status = 'in_progress'\n OR status = 'failed'\n )\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", "describe": { "columns": [ { @@ -43,5 +43,5 @@ true ] }, - "hash": "75c1affbca0901edd5d0e2f12ef4d935674a5aff2f34421d753b4d1a9dea5b12" + "hash": "35a76415cb746d03da31481edc65adefab0bf3abf6853a6d36123c8adcaf813b" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-548414f8148740c991c345e5fd46ea738d209eb07e7a6bcbdb33e25b3347a08c.json b/prover/crates/lib/prover_dal/.sqlx/query-3727d5614d2fe2a4d96f880eb72cd48c95ca5b4520dde415a2b5ff32ece47c86.json similarity index 70% rename from prover/crates/lib/prover_dal/.sqlx/query-548414f8148740c991c345e5fd46ea738d209eb07e7a6bcbdb33e25b3347a08c.json rename to prover/crates/lib/prover_dal/.sqlx/query-3727d5614d2fe2a4d96f880eb72cd48c95ca5b4520dde415a2b5ff32ece47c86.json index 8f5b046b974..d7eb6a32b42 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-548414f8148740c991c345e5fd46ea738d209eb07e7a6bcbdb33e25b3347a08c.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-3727d5614d2fe2a4d96f880eb72cd48c95ca5b4520dde415a2b5ff32ece47c86.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE witness_inputs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (status = 'in_progress' OR status = 'failed')\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", + "query": "\n UPDATE witness_inputs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (\n status = 'in_progress'\n OR status = 'failed'\n )\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", "describe": { "columns": [ { @@ -43,5 +43,5 @@ true ] }, - "hash": "548414f8148740c991c345e5fd46ea738d209eb07e7a6bcbdb33e25b3347a08c" + "hash": "3727d5614d2fe2a4d96f880eb72cd48c95ca5b4520dde415a2b5ff32ece47c86" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-c19fc4c8e4b3a3ef4f9c0f4c22ed68c598eada8e60938a8e4b5cd32b53f5a574.json b/prover/crates/lib/prover_dal/.sqlx/query-37ad15f54f4a6f4f79c71a857f3a8d4cc59246dda91b19526e73f27a17c8e3da.json similarity index 69% rename from prover/crates/lib/prover_dal/.sqlx/query-c19fc4c8e4b3a3ef4f9c0f4c22ed68c598eada8e60938a8e4b5cd32b53f5a574.json rename to prover/crates/lib/prover_dal/.sqlx/query-37ad15f54f4a6f4f79c71a857f3a8d4cc59246dda91b19526e73f27a17c8e3da.json index 3c4c8d7a29f..c97fe7f4042 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-c19fc4c8e4b3a3ef4f9c0f4c22ed68c598eada8e60938a8e4b5cd32b53f5a574.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-37ad15f54f4a6f4f79c71a857f3a8d4cc59246dda91b19526e73f27a17c8e3da.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE recursion_tip_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (status = 'in_progress' OR status = 'failed')\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", + "query": "\n UPDATE recursion_tip_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND 
attempts >= $2\n AND (\n status = 'in_progress'\n OR status = 'failed'\n )\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", "describe": { "columns": [ { @@ -43,5 +43,5 @@ true ] }, - "hash": "c19fc4c8e4b3a3ef4f9c0f4c22ed68c598eada8e60938a8e4b5cd32b53f5a574" + "hash": "37ad15f54f4a6f4f79c71a857f3a8d4cc59246dda91b19526e73f27a17c8e3da" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-39f60c638d445c5dbf23e01fd89a468057599be1e6c6c96a947c33df53a68224.json b/prover/crates/lib/prover_dal/.sqlx/query-39f60c638d445c5dbf23e01fd89a468057599be1e6c6c96a947c33df53a68224.json deleted file mode 100644 index 5cec4d7d7d0..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-39f60c638d445c5dbf23e01fd89a468057599be1e6c6c96a947c33df53a68224.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n recursion_tip_witness_jobs_fri (\n l1_batch_number,\n status,\n number_of_final_node_jobs,\n created_at,\n updated_at\n )\n VALUES\n ($1, 'waiting_for_proofs',1, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET status = $2\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Text" - ] - }, - "nullable": [] - }, - "hash": "39f60c638d445c5dbf23e01fd89a468057599be1e6c6c96a947c33df53a68224" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-3a9ffd4d88f2cfac22835aac2512e61157bf58aec70903623afc9da24d46a336.json b/prover/crates/lib/prover_dal/.sqlx/query-3a9ffd4d88f2cfac22835aac2512e61157bf58aec70903623afc9da24d46a336.json deleted file mode 100644 index 063ae8fc90a..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-3a9ffd4d88f2cfac22835aac2512e61157bf58aec70903623afc9da24d46a336.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n node_aggregation_witness_jobs_fri (\n l1_batch_number,\n circuit_id,\n status,\n created_at,\n updated_at\n )\n VALUES\n ($1, $2, 'waiting_for_proofs', NOW(), NOW())\n ON CONFLICT (l1_batch_number, circuit_id, depth) DO\n UPDATE\n SET status = $3\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int2", - "Text" - ] - }, - "nullable": [] - }, - "hash": "3a9ffd4d88f2cfac22835aac2512e61157bf58aec70903623afc9da24d46a336" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-3bb8fbd9e83703887e0a3c196031b931c0d8dbc6835dfac20107ea7412ce9fbb.json b/prover/crates/lib/prover_dal/.sqlx/query-3bb8fbd9e83703887e0a3c196031b931c0d8dbc6835dfac20107ea7412ce9fbb.json deleted file mode 100644 index 69390508415..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-3bb8fbd9e83703887e0a3c196031b931c0d8dbc6835dfac20107ea7412ce9fbb.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n proof_compression_jobs_fri (\n l1_batch_number,\n status,\n created_at,\n updated_at\n )\n VALUES\n ($1, $2, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET status = $2\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Text" - ] - }, - "nullable": [] - }, - "hash": "3bb8fbd9e83703887e0a3c196031b931c0d8dbc6835dfac20107ea7412ce9fbb" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-434f7cb51a7d22948cd26e962679a67936d572f8046d3a1c7a4f100ff209d81d.json b/prover/crates/lib/prover_dal/.sqlx/query-434f7cb51a7d22948cd26e962679a67936d572f8046d3a1c7a4f100ff209d81d.json deleted file mode 100644 index 7615523f92f..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-434f7cb51a7d22948cd26e962679a67936d572f8046d3a1c7a4f100ff209d81d.json +++ 
/dev/null @@ -1,18 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "UPDATE prover_jobs_fri SET status = $1\n WHERE l1_batch_number = $2\n AND sequence_number = $3\n AND aggregation_round = $4\n AND circuit_id = $5", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Text", - "Int8", - "Int4", - "Int2", - "Int2" - ] - }, - "nullable": [] - }, - "hash": "434f7cb51a7d22948cd26e962679a67936d572f8046d3a1c7a4f100ff209d81d" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-73266a8526c6adc315900e2e95441976a264759c4060c1a38e466ee2052fc17d.json b/prover/crates/lib/prover_dal/.sqlx/query-73266a8526c6adc315900e2e95441976a264759c4060c1a38e466ee2052fc17d.json new file mode 100644 index 00000000000..f8b141a8dac --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-73266a8526c6adc315900e2e95441976a264759c4060c1a38e466ee2052fc17d.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n recursion_tip_witness_jobs_fri (l1_batch_number, status, number_of_final_node_jobs, created_at, updated_at)\n VALUES\n ($1, 'waiting_for_proofs', 1, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n status = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Text" + ] + }, + "nullable": [] + }, + "hash": "73266a8526c6adc315900e2e95441976a264759c4060c1a38e466ee2052fc17d" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-9730c8225ff2cf3111185e81f602a4a98ec63eb942c73ce4448d0957346047cd.json b/prover/crates/lib/prover_dal/.sqlx/query-9730c8225ff2cf3111185e81f602a4a98ec63eb942c73ce4448d0957346047cd.json new file mode 100644 index 00000000000..d23ed8d9fc8 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-9730c8225ff2cf3111185e81f602a4a98ec63eb942c73ce4448d0957346047cd.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET\n status = $1,\n attempts = $2\n WHERE\n l1_batch_number = $3\n AND circuit_id = $4\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Int2", + "Int8", + "Int2" + ] + }, + "nullable": [] + }, + "hash": "9730c8225ff2cf3111185e81f602a4a98ec63eb942c73ce4448d0957346047cd" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-a817f0fec85388b3e2510ce259208a01b63ae4aa03c983c3a52c802d585e5a80.json b/prover/crates/lib/prover_dal/.sqlx/query-a817f0fec85388b3e2510ce259208a01b63ae4aa03c983c3a52c802d585e5a80.json new file mode 100644 index 00000000000..93532150f7f --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-a817f0fec85388b3e2510ce259208a01b63ae4aa03c983c3a52c802d585e5a80.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n proof_compression_jobs_fri (l1_batch_number, status, created_at, updated_at)\n VALUES\n ($1, $2, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n status = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Text" + ] + }, + "nullable": [] + }, + "hash": "a817f0fec85388b3e2510ce259208a01b63ae4aa03c983c3a52c802d585e5a80" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-aabcfa9005b8e1d84cfa083a47a700302981be0afef31a8864613484f8521f9e.json b/prover/crates/lib/prover_dal/.sqlx/query-aabcfa9005b8e1d84cfa083a47a700302981be0afef31a8864613484f8521f9e.json deleted file mode 100644 index 3d60050c92e..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-aabcfa9005b8e1d84cfa083a47a700302981be0afef31a8864613484f8521f9e.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "UPDATE 
prover_jobs_fri \n SET status = $1, attempts = $2\n WHERE l1_batch_number = $3\n AND sequence_number =$4\n AND aggregation_round = $5\n AND circuit_id = $6", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Text", - "Int2", - "Int8", - "Int4", - "Int2", - "Int2" - ] - }, - "nullable": [] - }, - "hash": "aabcfa9005b8e1d84cfa083a47a700302981be0afef31a8864613484f8521f9e" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-c8daa62b3835c15fafb3f83deabb5a4672ad50a9de92c84d939ac4c69842e355.json b/prover/crates/lib/prover_dal/.sqlx/query-c8daa62b3835c15fafb3f83deabb5a4672ad50a9de92c84d939ac4c69842e355.json new file mode 100644 index 00000000000..cadc931fa1c --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-c8daa62b3835c15fafb3f83deabb5a4672ad50a9de92c84d939ac4c69842e355.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n node_aggregation_witness_jobs_fri (l1_batch_number, circuit_id, status, created_at, updated_at)\n VALUES\n ($1, $2, 'waiting_for_proofs', NOW(), NOW())\n ON CONFLICT (l1_batch_number, circuit_id, depth) DO\n UPDATE\n SET\n status = $3\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int2", + "Text" + ] + }, + "nullable": [] + }, + "hash": "c8daa62b3835c15fafb3f83deabb5a4672ad50a9de92c84d939ac4c69842e355" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-63cf7038e6c48af8ed9afc7d6ea07edd87cb16a79c13e7d4291d99736e51d3b9.json b/prover/crates/lib/prover_dal/.sqlx/query-e875dcbbdaed6998dbea45d4eab5d005d8760c4809b7aef902155196873da66e.json similarity index 82% rename from prover/crates/lib/prover_dal/.sqlx/query-63cf7038e6c48af8ed9afc7d6ea07edd87cb16a79c13e7d4291d99736e51d3b9.json rename to prover/crates/lib/prover_dal/.sqlx/query-e875dcbbdaed6998dbea45d4eab5d005d8760c4809b7aef902155196873da66e.json index 208b23d939f..4ee9278fe42 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-63cf7038e6c48af8ed9afc7d6ea07edd87cb16a79c13e7d4291d99736e51d3b9.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-e875dcbbdaed6998dbea45d4eab5d005d8760c4809b7aef902155196873da66e.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n scheduler_witness_jobs_fri (\n l1_batch_number,\n scheduler_partial_input_blob_url,\n status,\n created_at,\n updated_at\n )\n VALUES\n ($1, '', 'waiting_for_proofs', NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET status = $2\n ", + "query": "\n INSERT INTO\n scheduler_witness_jobs_fri (\n l1_batch_number,\n scheduler_partial_input_blob_url,\n status,\n created_at,\n updated_at\n )\n VALUES\n ($1, '', 'waiting_for_proofs', NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n status = $2\n ", "describe": { "columns": [], "parameters": { @@ -11,5 +11,5 @@ }, "nullable": [] }, - "hash": "63cf7038e6c48af8ed9afc7d6ea07edd87cb16a79c13e7d4291d99736e51d3b9" + "hash": "e875dcbbdaed6998dbea45d4eab5d005d8760c4809b7aef902155196873da66e" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-0eac6f7b2d799059328584029b437891598dc79b5ed11258b2c90c3f282929ad.json b/prover/crates/lib/prover_dal/.sqlx/query-eec29cbff034818f4fb5ec1e6ad38e1010d7389457b3c97e9b238a3a0291a54e.json similarity index 78% rename from prover/crates/lib/prover_dal/.sqlx/query-0eac6f7b2d799059328584029b437891598dc79b5ed11258b2c90c3f282929ad.json rename to prover/crates/lib/prover_dal/.sqlx/query-eec29cbff034818f4fb5ec1e6ad38e1010d7389457b3c97e9b238a3a0291a54e.json index 61518273b4d..f8e92b1ad66 100644 --- 
a/prover/crates/lib/prover_dal/.sqlx/query-0eac6f7b2d799059328584029b437891598dc79b5ed11258b2c90c3f282929ad.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-eec29cbff034818f4fb5ec1e6ad38e1010d7389457b3c97e9b238a3a0291a54e.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n leaf_aggregation_witness_jobs_fri (\n l1_batch_number,\n circuit_id,\n status,\n number_of_basic_circuits,\n created_at,\n updated_at\n )\n VALUES\n ($1, $2, 'waiting_for_proofs', 2, NOW(), NOW())\n ON CONFLICT (l1_batch_number, circuit_id) DO\n UPDATE\n SET status = $3\n ", + "query": "\n INSERT INTO\n leaf_aggregation_witness_jobs_fri (\n l1_batch_number,\n circuit_id,\n status,\n number_of_basic_circuits,\n created_at,\n updated_at\n )\n VALUES\n ($1, $2, 'waiting_for_proofs', 2, NOW(), NOW())\n ON CONFLICT (l1_batch_number, circuit_id) DO\n UPDATE\n SET\n status = $3\n ", "describe": { "columns": [], "parameters": { @@ -12,5 +12,5 @@ }, "nullable": [] }, - "hash": "0eac6f7b2d799059328584029b437891598dc79b5ed11258b2c90c3f282929ad" + "hash": "eec29cbff034818f4fb5ec1e6ad38e1010d7389457b3c97e9b238a3a0291a54e" } diff --git a/prover/crates/lib/prover_dal/src/cli_test_dal.rs b/prover/crates/lib/prover_dal/src/cli_test_dal.rs index 19fe0e4f57b..d0841820337 100644 --- a/prover/crates/lib/prover_dal/src/cli_test_dal.rs +++ b/prover/crates/lib/prover_dal/src/cli_test_dal.rs @@ -21,11 +21,16 @@ impl CliTestDal<'_, '_> { sequence_number: usize, ) { sqlx::query!( - "UPDATE prover_jobs_fri SET status = $1 - WHERE l1_batch_number = $2 + r#" + UPDATE prover_jobs_fri + SET + status = $1 + WHERE + l1_batch_number = $2 AND sequence_number = $3 AND aggregation_round = $4 - AND circuit_id = $5", + AND circuit_id = $5 + "#, status.to_string(), batch_number.0 as i64, sequence_number as i64, @@ -44,7 +49,7 @@ impl CliTestDal<'_, '_> { circuit_id: u8, ) { sqlx::query!( - " + r#" INSERT INTO leaf_aggregation_witness_jobs_fri ( l1_batch_number, @@ -58,8 +63,9 @@ impl CliTestDal<'_, '_> { ($1, $2, 'waiting_for_proofs', 2, NOW(), NOW()) ON CONFLICT (l1_batch_number, circuit_id) DO UPDATE - SET status = $3 - ", + SET + status = $3 + "#, batch_number.0 as i64, circuit_id as i16, status.to_string() @@ -76,21 +82,16 @@ impl CliTestDal<'_, '_> { circuit_id: u8, ) { sqlx::query!( - " + r#" INSERT INTO - node_aggregation_witness_jobs_fri ( - l1_batch_number, - circuit_id, - status, - created_at, - updated_at - ) + node_aggregation_witness_jobs_fri (l1_batch_number, circuit_id, status, created_at, updated_at) VALUES ($1, $2, 'waiting_for_proofs', NOW(), NOW()) ON CONFLICT (l1_batch_number, circuit_id, depth) DO UPDATE - SET status = $3 - ", + SET + status = $3 + "#, batch_number.0 as i64, circuit_id as i16, status.to_string(), @@ -102,21 +103,16 @@ impl CliTestDal<'_, '_> { pub async fn insert_rt_job(&mut self, status: WitnessJobStatus, batch_number: L1BatchNumber) { sqlx::query!( - " + r#" INSERT INTO - recursion_tip_witness_jobs_fri ( - l1_batch_number, - status, - number_of_final_node_jobs, - created_at, - updated_at - ) + recursion_tip_witness_jobs_fri (l1_batch_number, status, number_of_final_node_jobs, created_at, updated_at) VALUES - ($1, 'waiting_for_proofs',1, NOW(), NOW()) + ($1, 'waiting_for_proofs', 1, NOW(), NOW()) ON CONFLICT (l1_batch_number) DO UPDATE - SET status = $2 - ", + SET + status = $2 + "#, batch_number.0 as i64, status.to_string(), ) @@ -131,7 +127,7 @@ impl CliTestDal<'_, '_> { batch_number: L1BatchNumber, ) { sqlx::query!( - " + r#" INSERT INTO scheduler_witness_jobs_fri ( l1_batch_number, @@ -144,8 
+140,9 @@ impl CliTestDal<'_, '_> { ($1, '', 'waiting_for_proofs', NOW(), NOW()) ON CONFLICT (l1_batch_number) DO UPDATE - SET status = $2 - ", + SET + status = $2 + "#, batch_number.0 as i64, status.to_string(), ) @@ -160,20 +157,16 @@ impl CliTestDal<'_, '_> { batch_number: L1BatchNumber, ) { sqlx::query!( - " + r#" INSERT INTO - proof_compression_jobs_fri ( - l1_batch_number, - status, - created_at, - updated_at - ) + proof_compression_jobs_fri (l1_batch_number, status, created_at, updated_at) VALUES ($1, $2, NOW(), NOW()) ON CONFLICT (l1_batch_number) DO UPDATE - SET status = $2 - ", + SET + status = $2 + "#, batch_number.0 as i64, status.to_string(), ) @@ -192,12 +185,17 @@ impl CliTestDal<'_, '_> { sequence_number: usize, ) { sqlx::query!( - "UPDATE prover_jobs_fri - SET status = $1, attempts = $2 - WHERE l1_batch_number = $3 - AND sequence_number =$4 + r#" + UPDATE prover_jobs_fri + SET + status = $1, + attempts = $2 + WHERE + l1_batch_number = $3 + AND sequence_number = $4 AND aggregation_round = $5 - AND circuit_id = $6", + AND circuit_id = $6 + "#, status.to_string(), attempts as i64, batch_number.0 as i64, @@ -218,10 +216,15 @@ impl CliTestDal<'_, '_> { batch_number: L1BatchNumber, ) { sqlx::query!( - "UPDATE leaf_aggregation_witness_jobs_fri - SET status = $1, attempts = $2 - WHERE l1_batch_number = $3 - AND circuit_id = $4", + r#" + UPDATE leaf_aggregation_witness_jobs_fri + SET + status = $1, + attempts = $2 + WHERE + l1_batch_number = $3 + AND circuit_id = $4 + "#, status.to_string(), attempts as i64, batch_number.0 as i64, diff --git a/prover/crates/lib/prover_dal/src/fri_prover_dal.rs b/prover/crates/lib/prover_dal/src/fri_prover_dal.rs index 1a3b8de0ce4..71d0c11728b 100644 --- a/prover/crates/lib/prover_dal/src/fri_prover_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_prover_dal.rs @@ -528,8 +528,14 @@ impl FriProverDal<'_, '_> { SELECT protocol_version AS "protocol_version!", protocol_version_patch AS "protocol_version_patch!", - COUNT(*) FILTER (WHERE status = 'queued') as queued, - COUNT(*) FILTER (WHERE status = 'in_progress') as in_progress + COUNT(*) FILTER ( + WHERE + status = 'queued' + ) AS queued, + COUNT(*) FILTER ( + WHERE + status = 'in_progress' + ) AS in_progress FROM prover_jobs_fri WHERE diff --git a/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs b/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs index 66e34f7f8e7..c7ba0f60ef3 100644 --- a/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs @@ -1719,7 +1719,10 @@ impl FriWitnessGeneratorDal<'_, '_> { WHERE l1_batch_number = $1 AND attempts >= $2 - AND (status = 'in_progress' OR status = 'failed') + AND ( + status = 'in_progress' + OR status = 'failed' + ) RETURNING l1_batch_number, status, @@ -1786,7 +1789,10 @@ impl FriWitnessGeneratorDal<'_, '_> { WHERE l1_batch_number = $1 AND attempts >= $2 - AND (status = 'in_progress' OR status = 'failed') + AND ( + status = 'in_progress' + OR status = 'failed' + ) RETURNING l1_batch_number, status, @@ -1827,7 +1833,10 @@ impl FriWitnessGeneratorDal<'_, '_> { WHERE l1_batch_number = $1 AND attempts >= $2 - AND (status = 'in_progress' OR status = 'failed') + AND ( + status = 'in_progress' + OR status = 'failed' + ) RETURNING l1_batch_number, status, diff --git a/yarn.lock b/yarn.lock index b70e64f148a..3c764c7c7b7 100644 --- a/yarn.lock +++ b/yarn.lock @@ -6903,7 +6903,7 @@ jest-each@^29.7.0: jest-util "^29.7.0" pretty-format "^29.7.0" 
-jest-environment-node@^29.7.0: +jest-environment-node@^29.0.3, jest-environment-node@^29.7.0: version "29.7.0" resolved "https://registry.yarnpkg.com/jest-environment-node/-/jest-environment-node-29.7.0.tgz#0b93e111dda8ec120bc8300e6d1fb9576e164376" integrity sha512-DOSwCRqXirTOyheM+4d5YZOrWcdu0LNZ87ewUoywbcb2XR4wKgqiG8vNeYwhjFMbEkfju7wx2GYH0P2gevGvFw== From e214dd094457f196712722e084010a7ef94ee475 Mon Sep 17 00:00:00 2001 From: zksync-era-bot <147085853+zksync-era-bot@users.noreply.github.com> Date: Wed, 25 Sep 2024 17:56:17 +0400 Subject: [PATCH 10/16] chore(main): release core 24.27.0 (#2940) :robot: I have created a release *beep* *boop* --- ## [24.27.0](https://github.com/matter-labs/zksync-era/compare/core-v24.26.0...core-v24.27.0) (2024-09-25) ### Features * **vm:** Split old and new VM implementations ([#2915](https://github.com/matter-labs/zksync-era/issues/2915)) ([93bc66f](https://github.com/matter-labs/zksync-era/commit/93bc66f21f9f67a440f06f1c4402e0d687698741)) ### Bug Fixes * **api:** Return correct flat call tracer ([#2917](https://github.com/matter-labs/zksync-era/issues/2917)) ([218646a](https://github.com/matter-labs/zksync-era/commit/218646aa1c56200f4ffee99b7f83366e2689354f)) --- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). --------- Co-authored-by: zksync-era-bot --- .github/release-please/manifest.json | 2 +- Cargo.lock | 2 +- core/CHANGELOG.md | 12 ++++++++++++ core/bin/external_node/Cargo.toml | 2 +- 4 files changed, 15 insertions(+), 3 deletions(-) diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index 5b7501b6573..44e10fb13fd 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { - "core": "24.26.0", + "core": "24.27.0", "prover": "16.5.0", "zk_toolbox": "0.1.2" } diff --git a/Cargo.lock b/Cargo.lock index 50f0784d9fa..0a26aab604c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9855,7 +9855,7 @@ dependencies = [ [[package]] name = "zksync_external_node" -version = "24.26.0" +version = "24.27.0" dependencies = [ "anyhow", "assert_matches", diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 9f4d65132ec..6cf2ff4419a 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,17 @@ # Changelog +## [24.27.0](https://github.com/matter-labs/zksync-era/compare/core-v24.26.0...core-v24.27.0) (2024-09-25) + + +### Features + +* **vm:** Split old and new VM implementations ([#2915](https://github.com/matter-labs/zksync-era/issues/2915)) ([93bc66f](https://github.com/matter-labs/zksync-era/commit/93bc66f21f9f67a440f06f1c4402e0d687698741)) + + +### Bug Fixes + +* **api:** Return correct flat call tracer ([#2917](https://github.com/matter-labs/zksync-era/issues/2917)) ([218646a](https://github.com/matter-labs/zksync-era/commit/218646aa1c56200f4ffee99b7f83366e2689354f)) + ## [24.26.0](https://github.com/matter-labs/zksync-era/compare/core-v24.25.0...core-v24.26.0) (2024-09-23) diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml index a0f12b24244..d841ee5b42e 100644 --- a/core/bin/external_node/Cargo.toml +++ b/core/bin/external_node/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "zksync_external_node" description = "Non-validator ZKsync node" -version = "24.26.0" # x-release-please-version +version = "24.27.0" # x-release-please-version edition.workspace = true authors.workspace = true homepage.workspace = true From 
aa72d849c24a664acd083eba73795ddc5d31d55f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Grze=C5=9Bkiewicz?= Date: Thu, 26 Sep 2024 09:57:22 +0200 Subject: [PATCH 11/16] feat(eth-watch): redesign to support multiple chains (#2867) This PR contains code from sync-layer-stable + I removed old upgrades processor and updated unit-tests to use the new one --------- Signed-off-by: tomg10 --- Cargo.lock | 1 + core/lib/contracts/src/lib.rs | 5 +- ...240b34ce29aad3ac6571116e084d45574c448.json | 33 ++ ...b9863082ccd1ce45b2d20e1119f1e78171d66.json | 26 ++ ...93639a63047f46dc8e605ff37b78f43f5cef5.json | 26 ++ ...50_add_eth_watcher_progress_table.down.sql | 4 + ...5550_add_eth_watcher_progress_table.up.sql | 9 + core/lib/dal/src/eth_watcher_dal.rs | 154 +++++++++ core/lib/dal/src/lib.rs | 12 +- core/node/eth_watch/Cargo.toml | 1 + core/node/eth_watch/src/client.rs | 38 ++- .../decentralized_upgrades.rs | 26 +- .../event_processors/governance_upgrades.rs | 142 -------- .../eth_watch/src/event_processors/mod.rs | 22 +- .../src/event_processors/priority_ops.rs | 43 ++- core/node/eth_watch/src/lib.rs | 143 ++++---- core/node/eth_watch/src/metrics.rs | 1 - core/node/eth_watch/src/tests.rs | 322 +++++++++++++----- .../src/implementations/layers/eth_watch.rs | 5 +- 19 files changed, 665 insertions(+), 348 deletions(-) create mode 100644 core/lib/dal/.sqlx/query-26c80e9bafcf7989e7d40c6e424240b34ce29aad3ac6571116e084d45574c448.json create mode 100644 core/lib/dal/.sqlx/query-afdeecb78e3af802c2b8ffb0f5ab9863082ccd1ce45b2d20e1119f1e78171d66.json create mode 100644 core/lib/dal/.sqlx/query-c61682ed92c1a43855a991598d593639a63047f46dc8e605ff37b78f43f5cef5.json create mode 100644 core/lib/dal/migrations/20240912085550_add_eth_watcher_progress_table.down.sql create mode 100644 core/lib/dal/migrations/20240912085550_add_eth_watcher_progress_table.up.sql create mode 100644 core/lib/dal/src/eth_watcher_dal.rs delete mode 100644 core/node/eth_watch/src/event_processors/governance_upgrades.rs diff --git a/Cargo.lock b/Cargo.lock index 0a26aab604c..37e4569cbef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9840,6 +9840,7 @@ dependencies = [ "anyhow", "async-recursion", "async-trait", + "test-log", "thiserror", "tokio", "tracing", diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs index a60d9fbf181..a72b5c95d1b 100644 --- a/core/lib/contracts/src/lib.rs +++ b/core/lib/contracts/src/lib.rs @@ -49,12 +49,13 @@ const DIAMOND_INIT_CONTRACT_FILE: (&str, &str) = ( const GOVERNANCE_CONTRACT_FILE: (&str, &str) = ("governance", "IGovernance.sol/IGovernance.json"); const CHAIN_ADMIN_CONTRACT_FILE: (&str, &str) = ("governance", "IChainAdmin.sol/IChainAdmin.json"); const GETTERS_FACET_CONTRACT_FILE: (&str, &str) = ( - "state-transition/chain-deps/facets", - "Getters.sol/GettersFacet.json", + "state-transition/chain-interfaces", + "IGetters.sol/IGetters.json", ); const MULTICALL3_CONTRACT_FILE: (&str, &str) = ("dev-contracts", "Multicall3.sol/Multicall3.json"); const VERIFIER_CONTRACT_FILE: (&str, &str) = ("state-transition", "Verifier.sol/Verifier.json"); + const _IERC20_CONTRACT_FILE: &str = "contracts/l1-contracts/artifacts/contracts/common/interfaces/IERC20.sol/IERC20.json"; const _FAIL_ON_RECEIVE_CONTRACT_FILE: &str = diff --git a/core/lib/dal/.sqlx/query-26c80e9bafcf7989e7d40c6e424240b34ce29aad3ac6571116e084d45574c448.json b/core/lib/dal/.sqlx/query-26c80e9bafcf7989e7d40c6e424240b34ce29aad3ac6571116e084d45574c448.json new file mode 100644 index 00000000000..ee5de53d6e6 --- /dev/null +++ 
b/core/lib/dal/.sqlx/query-26c80e9bafcf7989e7d40c6e424240b34ce29aad3ac6571116e084d45574c448.json @@ -0,0 +1,33 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n next_block_to_process\n FROM\n processed_events\n WHERE\n TYPE = $1\n AND chain_id = $2\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "next_block_to_process", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + { + "Custom": { + "name": "event_type", + "kind": { + "Enum": [ + "ProtocolUpgrades", + "PriorityTransactions" + ] + } + } + }, + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "26c80e9bafcf7989e7d40c6e424240b34ce29aad3ac6571116e084d45574c448" +} diff --git a/core/lib/dal/.sqlx/query-afdeecb78e3af802c2b8ffb0f5ab9863082ccd1ce45b2d20e1119f1e78171d66.json b/core/lib/dal/.sqlx/query-afdeecb78e3af802c2b8ffb0f5ab9863082ccd1ce45b2d20e1119f1e78171d66.json new file mode 100644 index 00000000000..bb0d73ee6c8 --- /dev/null +++ b/core/lib/dal/.sqlx/query-afdeecb78e3af802c2b8ffb0f5ab9863082ccd1ce45b2d20e1119f1e78171d66.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n processed_events (\n TYPE,\n chain_id,\n next_block_to_process\n )\n VALUES\n ($1, $2, $3)\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + { + "Custom": { + "name": "event_type", + "kind": { + "Enum": [ + "ProtocolUpgrades", + "PriorityTransactions" + ] + } + } + }, + "Int8", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "afdeecb78e3af802c2b8ffb0f5ab9863082ccd1ce45b2d20e1119f1e78171d66" +} diff --git a/core/lib/dal/.sqlx/query-c61682ed92c1a43855a991598d593639a63047f46dc8e605ff37b78f43f5cef5.json b/core/lib/dal/.sqlx/query-c61682ed92c1a43855a991598d593639a63047f46dc8e605ff37b78f43f5cef5.json new file mode 100644 index 00000000000..b797ccb4604 --- /dev/null +++ b/core/lib/dal/.sqlx/query-c61682ed92c1a43855a991598d593639a63047f46dc8e605ff37b78f43f5cef5.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE processed_events\n SET\n next_block_to_process = $3\n WHERE\n TYPE = $1\n AND chain_id = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + { + "Custom": { + "name": "event_type", + "kind": { + "Enum": [ + "ProtocolUpgrades", + "PriorityTransactions" + ] + } + } + }, + "Int8", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "c61682ed92c1a43855a991598d593639a63047f46dc8e605ff37b78f43f5cef5" +} diff --git a/core/lib/dal/migrations/20240912085550_add_eth_watcher_progress_table.down.sql b/core/lib/dal/migrations/20240912085550_add_eth_watcher_progress_table.down.sql new file mode 100644 index 00000000000..79331481f58 --- /dev/null +++ b/core/lib/dal/migrations/20240912085550_add_eth_watcher_progress_table.down.sql @@ -0,0 +1,4 @@ +DROP TABLE IF EXISTS processed_events; + +DROP TYPE IF EXISTS event_type; + diff --git a/core/lib/dal/migrations/20240912085550_add_eth_watcher_progress_table.up.sql b/core/lib/dal/migrations/20240912085550_add_eth_watcher_progress_table.up.sql new file mode 100644 index 00000000000..be8a2b89a02 --- /dev/null +++ b/core/lib/dal/migrations/20240912085550_add_eth_watcher_progress_table.up.sql @@ -0,0 +1,9 @@ +CREATE TYPE event_type AS ENUM ('ProtocolUpgrades', 'PriorityTransactions'); + +CREATE TABLE processed_events +( + type event_type NOT NULL, + chain_id BIGINT NOT NULL, + next_block_to_process BIGINT NOT NULL, + PRIMARY KEY (chain_id, type) +) diff --git a/core/lib/dal/src/eth_watcher_dal.rs b/core/lib/dal/src/eth_watcher_dal.rs new file mode 100644 index 00000000000..3220868decf --- /dev/null +++ 
b/core/lib/dal/src/eth_watcher_dal.rs @@ -0,0 +1,154 @@ +use zksync_db_connection::{connection::Connection, error::DalResult, instrument::InstrumentExt}; +use zksync_types::SLChainId; + +use crate::Core; + +pub struct EthWatcherDal<'a, 'c> { + pub(crate) storage: &'a mut Connection<'c, Core>, +} + +#[derive(Debug, Copy, Clone, sqlx::Type)] +#[sqlx(type_name = "event_type")] +pub enum EventType { + ProtocolUpgrades, + PriorityTransactions, +} + +impl EthWatcherDal<'_, '_> { + // Returns last set value of next_block_to_process for given event_type and chain_id. + // If the value was missing, initializes it with provided next_block_to_process value + pub async fn get_or_set_next_block_to_process( + &mut self, + event_type: EventType, + chain_id: SLChainId, + next_block_to_process: u64, + ) -> DalResult { + let result = sqlx::query!( + r#" + SELECT + next_block_to_process + FROM + processed_events + WHERE + TYPE = $1 + AND chain_id = $2 + "#, + event_type as EventType, + chain_id.0 as i64 + ) + .instrument("get_or_set_next_block_to_process") + .with_arg("event_type", &event_type) + .with_arg("chain_id", &chain_id) + .fetch_optional(self.storage) + .await?; + + if let Some(row) = result { + Ok(row.next_block_to_process as u64) + } else { + sqlx::query!( + r#" + INSERT INTO + processed_events ( + TYPE, + chain_id, + next_block_to_process + ) + VALUES + ($1, $2, $3) + "#, + event_type as EventType, + chain_id.0 as i64, + next_block_to_process as i64 + ) + .instrument("get_or_set_next_block_to_process - insert") + .with_arg("event_type", &event_type) + .with_arg("chain_id", &chain_id) + .execute(self.storage) + .await?; + + Ok(next_block_to_process) + } + } + + pub async fn update_next_block_to_process( + &mut self, + event_type: EventType, + chain_id: SLChainId, + next_block_to_process: u64, + ) -> DalResult<()> { + sqlx::query!( + r#" + UPDATE processed_events + SET + next_block_to_process = $3 + WHERE + TYPE = $1 + AND chain_id = $2 + "#, + event_type as EventType, + chain_id.0 as i64, + next_block_to_process as i64 + ) + .instrument("update_next_block_to_process") + .with_arg("event_type", &event_type) + .with_arg("chain_id", &chain_id) + .execute(self.storage) + .await?; + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ConnectionPool, Core, CoreDal}; + + #[tokio::test] + async fn test_get_or_set_next_block_to_process_with_different_event_types() { + let pool = ConnectionPool::::test_pool().await; + let mut conn = pool.connection().await.unwrap(); + let mut dal = conn.processed_events_dal(); + + // Test with ProtocolUpgrades + let next_block = dal + .get_or_set_next_block_to_process(EventType::ProtocolUpgrades, SLChainId(1), 100) + .await + .expect("Failed to get or set next block to process"); + assert_eq!(next_block, 100); + + // Test with PriorityTransactions + let next_block = dal + .get_or_set_next_block_to_process(EventType::PriorityTransactions, SLChainId(1), 200) + .await + .expect("Failed to get or set next block to process"); + assert_eq!(next_block, 200); + + // Test with PriorityTransactions + let next_block = dal + .get_or_set_next_block_to_process(EventType::PriorityTransactions, SLChainId(2), 300) + .await + .expect("Failed to get or set next block to process"); + assert_eq!(next_block, 300); + + // Verify that the initial block is not updated for ProtocolUpgrades + let next_block = dal + .get_or_set_next_block_to_process(EventType::ProtocolUpgrades, SLChainId(1), 150) + .await + .expect("Failed to get or set next block to process"); + 
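// The (ProtocolUpgrades, chain 1) row already exists with value 100, so the
+ // suggested value of 150 is ignored and the stored checkpoint is returned.
+ 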
assert_eq!(next_block, 100); + + // Verify that the initial block is not updated for PriorityTransactions + let next_block = dal + .get_or_set_next_block_to_process(EventType::PriorityTransactions, SLChainId(1), 250) + .await + .expect("Failed to get or set next block to process"); + assert_eq!(next_block, 200); + + // Verify that the initial block is not updated for PriorityTransactions + let next_block = dal + .get_or_set_next_block_to_process(EventType::PriorityTransactions, SLChainId(2), 350) + .await + .expect("Failed to get or set next block to process"); + assert_eq!(next_block, 300); + } +} diff --git a/core/lib/dal/src/lib.rs b/core/lib/dal/src/lib.rs index 0e1badb9af7..f0d2f0c1671 100644 --- a/core/lib/dal/src/lib.rs +++ b/core/lib/dal/src/lib.rs @@ -15,8 +15,9 @@ use crate::{ base_token_dal::BaseTokenDal, blocks_dal::BlocksDal, blocks_web3_dal::BlocksWeb3Dal, consensus_dal::ConsensusDal, contract_verification_dal::ContractVerificationDal, data_availability_dal::DataAvailabilityDal, eth_sender_dal::EthSenderDal, - events_dal::EventsDal, events_web3_dal::EventsWeb3Dal, factory_deps_dal::FactoryDepsDal, - proof_generation_dal::ProofGenerationDal, protocol_versions_dal::ProtocolVersionsDal, + eth_watcher_dal::EthWatcherDal, events_dal::EventsDal, events_web3_dal::EventsWeb3Dal, + factory_deps_dal::FactoryDepsDal, proof_generation_dal::ProofGenerationDal, + protocol_versions_dal::ProtocolVersionsDal, protocol_versions_web3_dal::ProtocolVersionsWeb3Dal, pruning_dal::PruningDal, snapshot_recovery_dal::SnapshotRecoveryDal, snapshots_creator_dal::SnapshotsCreatorDal, snapshots_dal::SnapshotsDal, storage_logs_dal::StorageLogsDal, @@ -35,6 +36,7 @@ pub mod consensus_dal; pub mod contract_verification_dal; mod data_availability_dal; pub mod eth_sender_dal; +pub mod eth_watcher_dal; pub mod events_dal; pub mod events_web3_dal; pub mod factory_deps_dal; @@ -132,6 +134,8 @@ where fn vm_runner_dal(&mut self) -> VmRunnerDal<'_, 'a>; fn base_token_dal(&mut self) -> BaseTokenDal<'_, 'a>; + + fn processed_events_dal(&mut self) -> EthWatcherDal<'_, 'a>; } #[derive(Clone, Debug)] @@ -258,4 +262,8 @@ impl<'a> CoreDal<'a> for Connection<'a, Core> { fn base_token_dal(&mut self) -> BaseTokenDal<'_, 'a> { BaseTokenDal { storage: self } } + + fn processed_events_dal(&mut self) -> EthWatcherDal<'_, 'a> { + EthWatcherDal { storage: self } + } } diff --git a/core/node/eth_watch/Cargo.toml b/core/node/eth_watch/Cargo.toml index a3d6325f4a2..985649c35da 100644 --- a/core/node/eth_watch/Cargo.toml +++ b/core/node/eth_watch/Cargo.toml @@ -28,3 +28,4 @@ async-recursion.workspace = true [dev-dependencies] zksync_concurrency.workspace = true +test-log.workspace = true diff --git a/core/node/eth_watch/src/client.rs b/core/node/eth_watch/src/client.rs index 237c8e5bc2e..2dbaf869390 100644 --- a/core/node/eth_watch/src/client.rs +++ b/core/node/eth_watch/src/client.rs @@ -1,7 +1,9 @@ use std::fmt; use anyhow::Context; -use zksync_contracts::{state_transition_manager_contract, verifier_contract}; +use zksync_contracts::{ + getters_facet_contract, state_transition_manager_contract, verifier_contract, +}; use zksync_eth_client::{ clients::{DynClient, L1}, CallFunctionArgs, ClientError, ContractCallError, EnrichedClientError, EnrichedClientResult, @@ -10,7 +12,7 @@ use zksync_eth_client::{ use zksync_types::{ ethabi::Contract, web3::{BlockId, BlockNumber, FilterBuilder, Log}, - Address, H256, + Address, SLChainId, H256, U256, }; /// L1 client functionality used by [`EthWatch`](crate::EthWatch) and constituent event 
processors. @@ -21,10 +23,14 @@ pub trait EthClient: 'static + fmt::Debug + Send + Sync { &self, from: BlockNumber, to: BlockNumber, + topic1: H256, + topic2: Option, retries_left: usize, ) -> EnrichedClientResult>; /// Returns finalized L1 block number. async fn finalized_block_number(&self) -> EnrichedClientResult; + + async fn get_total_priority_txs(&self) -> Result; /// Returns scheduler verification key hash by verifier address. async fn scheduler_vk_hash(&self, verifier_address: Address) -> Result; @@ -33,8 +39,8 @@ pub trait EthClient: 'static + fmt::Debug + Send + Sync { &self, packed_version: H256, ) -> EnrichedClientResult>>; - /// Sets list of topics to return events for. - fn set_topics(&mut self, topics: Vec); + + async fn chain_id(&self) -> EnrichedClientResult; } pub const RETRY_LIMIT: usize = 5; @@ -43,10 +49,9 @@ const TOO_MANY_RESULTS_ALCHEMY: &str = "response size exceeded"; const TOO_MANY_RESULTS_RETH: &str = "query exceeds max block range"; /// Implementation of [`EthClient`] based on HTTP JSON-RPC (encapsulated via [`EthInterface`]). -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct EthHttpQueryClient { client: Box>, - topics: Vec, diamond_proxy_addr: Address, governance_address: Address, new_upgrade_cut_data_signature: H256, @@ -54,6 +59,7 @@ pub struct EthHttpQueryClient { state_transition_manager_address: Option
<Address>, + chain_admin_address: Option<Address>
, verifier_contract_abi: Contract, + getters_facet_contract_abi: Contract, confirmations_for_eth_event: Option, } @@ -73,7 +79,6 @@ impl EthHttpQueryClient { ); Self { client: client.for_component("watch"), - topics: Vec::new(), diamond_proxy_addr, state_transition_manager_address, chain_admin_address, @@ -84,6 +89,7 @@ impl EthHttpQueryClient { .unwrap() .signature(), verifier_contract_abi: verifier_contract(), + getters_facet_contract_abi: getters_facet_contract(), confirmations_for_eth_event, } } @@ -249,13 +255,15 @@ impl EthClient for EthHttpQueryClient { &self, from: BlockNumber, to: BlockNumber, + topic1: H256, + topic2: Option, retries_left: usize, ) -> EnrichedClientResult> { self.get_events_inner( from, to, - Some(self.topics.clone()), - None, + Some(vec![topic1]), + topic2.map(|topic2| vec![topic2]), Some(self.get_default_address_list()), retries_left, ) @@ -283,7 +291,15 @@ impl EthClient for EthHttpQueryClient { } } - fn set_topics(&mut self, topics: Vec) { - self.topics = topics; + async fn get_total_priority_txs(&self) -> Result { + CallFunctionArgs::new("getTotalPriorityTxs", ()) + .for_contract(self.diamond_proxy_addr, &self.getters_facet_contract_abi) + .call(&self.client) + .await + .map(|x: U256| x.try_into().unwrap()) + } + + async fn chain_id(&self) -> EnrichedClientResult { + Ok(self.client.fetch_chain_id().await?) } } diff --git a/core/node/eth_watch/src/event_processors/decentralized_upgrades.rs b/core/node/eth_watch/src/event_processors/decentralized_upgrades.rs index dff10662e98..aa43e7239f8 100644 --- a/core/node/eth_watch/src/event_processors/decentralized_upgrades.rs +++ b/core/node/eth_watch/src/event_processors/decentralized_upgrades.rs @@ -1,5 +1,5 @@ use anyhow::Context as _; -use zksync_dal::{Connection, Core, CoreDal, DalError}; +use zksync_dal::{eth_watcher_dal::EventType, Connection, Core, CoreDal, DalError}; use zksync_types::{ ethabi::Contract, protocol_version::ProtocolSemanticVersion, web3::Log, ProtocolUpgrade, H256, U256, @@ -7,7 +7,7 @@ use zksync_types::{ use crate::{ client::EthClient, - event_processors::{EventProcessor, EventProcessorError}, + event_processors::{EventProcessor, EventProcessorError, EventsSource}, metrics::{PollStage, METRICS}, }; @@ -40,18 +40,18 @@ impl EventProcessor for DecentralizedUpgradesEventProcessor { async fn process_events( &mut self, storage: &mut Connection<'_, Core>, - client: &dyn EthClient, + sl_client: &dyn EthClient, events: Vec, - ) -> Result<(), EventProcessorError> { + ) -> Result { let mut upgrades = Vec::new(); - for event in events { + for event in &events { let version = event.topics.get(1).copied().context("missing topic 1")?; let timestamp: u64 = U256::from_big_endian(&event.data.0) .try_into() .ok() .context("upgrade timestamp is too big")?; - let diamond_cut = client + let diamond_cut = sl_client .diamond_cut_by_version(version) .await? .context("missing upgrade data on STM")?; @@ -62,7 +62,7 @@ impl EventProcessor for DecentralizedUpgradesEventProcessor { }; // Scheduler VK is not present in proposal event. It is hard coded in verifier contract. let scheduler_vk_hash = if let Some(address) = upgrade.verifier_address { - Some(client.scheduler_vk_hash(address).await?) + Some(sl_client.scheduler_vk_hash(address).await?) 
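+ // The verifier lookup goes through the settlement-layer client; this processor sources its events from the SL (see `event_source()` below).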
} else { None }; @@ -75,7 +75,7 @@ impl EventProcessor for DecentralizedUpgradesEventProcessor { .collect(); let Some((last_upgrade, _)) = new_upgrades.last() else { - return Ok(()); + return Ok(events.len()); }; let versions: Vec<_> = new_upgrades .iter() @@ -125,10 +125,18 @@ impl EventProcessor for DecentralizedUpgradesEventProcessor { stage_latency.observe(); self.last_seen_protocol_version = last_version; - Ok(()) + Ok(events.len()) } fn relevant_topic(&self) -> H256 { self.update_upgrade_timestamp_signature } + + fn event_source(&self) -> EventsSource { + EventsSource::SL + } + + fn event_type(&self) -> EventType { + EventType::ProtocolUpgrades + } } diff --git a/core/node/eth_watch/src/event_processors/governance_upgrades.rs b/core/node/eth_watch/src/event_processors/governance_upgrades.rs deleted file mode 100644 index 72f5c411892..00000000000 --- a/core/node/eth_watch/src/event_processors/governance_upgrades.rs +++ /dev/null @@ -1,142 +0,0 @@ -use anyhow::Context as _; -use zksync_dal::{Connection, Core, CoreDal, DalError}; -use zksync_types::{ - ethabi::Contract, protocol_upgrade::GovernanceOperation, - protocol_version::ProtocolSemanticVersion, web3::Log, Address, ProtocolUpgrade, H256, -}; - -use crate::{ - client::EthClient, - event_processors::{EventProcessor, EventProcessorError}, - metrics::{PollStage, METRICS}, -}; - -/// Listens to operation events coming from the governance contract and saves new protocol upgrade proposals to the database. -#[derive(Debug)] -pub struct GovernanceUpgradesEventProcessor { - // ZKsync diamond proxy - target_contract_address: Address, - /// Last protocol version seen. Used to skip events for already known upgrade proposals. - last_seen_protocol_version: ProtocolSemanticVersion, - upgrade_proposal_signature: H256, -} - -impl GovernanceUpgradesEventProcessor { - pub fn new( - target_contract_address: Address, - last_seen_protocol_version: ProtocolSemanticVersion, - governance_contract: &Contract, - ) -> Self { - Self { - target_contract_address, - last_seen_protocol_version, - upgrade_proposal_signature: governance_contract - .event("TransparentOperationScheduled") - .context("TransparentOperationScheduled event is missing in ABI") - .unwrap() - .signature(), - } - } -} - -#[async_trait::async_trait] -impl EventProcessor for GovernanceUpgradesEventProcessor { - async fn process_events( - &mut self, - storage: &mut Connection<'_, Core>, - client: &dyn EthClient, - events: Vec, - ) -> Result<(), EventProcessorError> { - let mut upgrades = Vec::new(); - for event in events { - assert_eq!(event.topics[0], self.upgrade_proposal_signature); // guaranteed by the watcher - - let governance_operation = GovernanceOperation::try_from(event) - .map_err(|err| EventProcessorError::log_parse(err, "governance operation"))?; - // Some calls can target other contracts than Diamond proxy, skip them. - for call in governance_operation - .calls - .into_iter() - .filter(|call| call.target == self.target_contract_address) - { - // We might not get an upgrade operation here, but something else instead - // (e.g. `acceptGovernor` call), so if parsing doesn't work, just skip the call. - let Ok(upgrade) = ProtocolUpgrade::try_from(call) else { - tracing::warn!( - "Failed to parse governance operation call as protocol upgrade, skipping" - ); - continue; - }; - // Scheduler VK is not present in proposal event. It is hard coded in verifier contract. - let scheduler_vk_hash = if let Some(address) = upgrade.verifier_address { - Some(client.scheduler_vk_hash(address).await?) 
- } else { - None - }; - upgrades.push((upgrade, scheduler_vk_hash)); - } - } - - let new_upgrades: Vec<_> = upgrades - .into_iter() - .skip_while(|(v, _)| v.version <= self.last_seen_protocol_version) - .collect(); - - let Some((last_upgrade, _)) = new_upgrades.last() else { - return Ok(()); - }; - let versions: Vec<_> = new_upgrades - .iter() - .map(|(u, _)| u.version.to_string()) - .collect(); - tracing::debug!("Received upgrades with versions: {versions:?}"); - - let last_version = last_upgrade.version; - let stage_latency = METRICS.poll_eth_node[&PollStage::PersistUpgrades].start(); - for (upgrade, scheduler_vk_hash) in new_upgrades { - let latest_semantic_version = storage - .protocol_versions_dal() - .latest_semantic_version() - .await - .map_err(DalError::generalize)? - .context("expected some version to be present in DB")?; - - if upgrade.version > latest_semantic_version { - let latest_version = storage - .protocol_versions_dal() - .get_protocol_version_with_latest_patch(latest_semantic_version.minor) - .await - .map_err(DalError::generalize)? - .with_context(|| { - format!( - "expected minor version {} to be present in DB", - latest_semantic_version.minor as u16 - ) - })?; - - let new_version = latest_version.apply_upgrade(upgrade, scheduler_vk_hash); - if new_version.version.minor == latest_semantic_version.minor { - // Only verification parameters may change if only patch is bumped. - assert_eq!( - new_version.base_system_contracts_hashes, - latest_version.base_system_contracts_hashes - ); - assert!(new_version.tx.is_none()); - } - storage - .protocol_versions_dal() - .save_protocol_version_with_tx(&new_version) - .await - .map_err(DalError::generalize)?; - } - } - stage_latency.observe(); - - self.last_seen_protocol_version = last_version; - Ok(()) - } - - fn relevant_topic(&self) -> H256 { - self.upgrade_proposal_signature - } -} diff --git a/core/node/eth_watch/src/event_processors/mod.rs b/core/node/eth_watch/src/event_processors/mod.rs index 43ae259305a..f145181b0cf 100644 --- a/core/node/eth_watch/src/event_processors/mod.rs +++ b/core/node/eth_watch/src/event_processors/mod.rs @@ -1,18 +1,17 @@ use std::fmt; -use zksync_dal::{Connection, Core}; +use zksync_dal::{eth_watcher_dal::EventType, Connection, Core}; use zksync_eth_client::{ContractCallError, EnrichedClientError}; use zksync_types::{web3::Log, H256}; pub(crate) use self::{ decentralized_upgrades::DecentralizedUpgradesEventProcessor, - governance_upgrades::GovernanceUpgradesEventProcessor, priority_ops::PriorityOpsEventProcessor, + priority_ops::PriorityOpsEventProcessor, }; use crate::client::EthClient; mod decentralized_upgrades; -mod governance_upgrades; -mod priority_ops; +pub mod priority_ops; /// Errors issued by an [`EventProcessor`]. #[derive(Debug, thiserror::Error)] @@ -32,6 +31,12 @@ pub(super) enum EventProcessorError { Internal(#[from] anyhow::Error), } +#[derive(Debug)] +pub(super) enum EventsSource { + L1, + SL, +} + impl EventProcessorError { pub fn log_parse(source: impl Into, log_kind: &'static str) -> Self { Self::LogParse { @@ -46,13 +51,18 @@ impl EventProcessorError { #[async_trait::async_trait] pub(super) trait EventProcessor: 'static + fmt::Debug + Send + Sync { /// Processes given events. All events are guaranteed to match [`Self::relevant_topic()`]. + /// Returns number of processed events, this result is used to update last processed block. 
async fn process_events( &mut self, storage: &mut Connection<'_, Core>, - client: &dyn EthClient, + sl_client: &dyn EthClient, events: Vec, - ) -> Result<(), EventProcessorError>; + ) -> Result; /// Relevant topic which defines what events to be processed fn relevant_topic(&self) -> H256; + + fn event_source(&self) -> EventsSource; + + fn event_type(&self) -> EventType; } diff --git a/core/node/eth_watch/src/event_processors/priority_ops.rs b/core/node/eth_watch/src/event_processors/priority_ops.rs index 2783637fdb9..051c076850e 100644 --- a/core/node/eth_watch/src/event_processors/priority_ops.rs +++ b/core/node/eth_watch/src/event_processors/priority_ops.rs @@ -2,13 +2,13 @@ use std::convert::TryFrom; use anyhow::Context; use zksync_contracts::hyperchain_contract; -use zksync_dal::{Connection, Core, CoreDal, DalError}; +use zksync_dal::{eth_watcher_dal::EventType, Connection, Core, CoreDal, DalError}; use zksync_shared_metrics::{TxStage, APP_METRICS}; use zksync_types::{l1::L1Tx, web3::Log, PriorityOpId, H256}; use crate::{ client::EthClient, - event_processors::{EventProcessor, EventProcessorError}, + event_processors::{EventProcessor, EventProcessorError, EventsSource}, metrics::{PollStage, METRICS}, }; @@ -36,10 +36,11 @@ impl EventProcessor for PriorityOpsEventProcessor { async fn process_events( &mut self, storage: &mut Connection<'_, Core>, - _client: &dyn EthClient, + sl_client: &dyn EthClient, events: Vec, - ) -> Result<(), EventProcessorError> { + ) -> Result { let mut priority_ops = Vec::new(); + let events_count = events.len(); for event in events { assert_eq!(event.topics[0], self.new_priority_request_signature); // guaranteed by the watcher let tx = L1Tx::try_from(event) @@ -48,7 +49,7 @@ impl EventProcessor for PriorityOpsEventProcessor { } if priority_ops.is_empty() { - return Ok(()); + return Ok(events_count); } let first = &priority_ops[0]; @@ -70,33 +71,49 @@ impl EventProcessor for PriorityOpsEventProcessor { .into_iter() .skip_while(|tx| tx.serial_id() < self.next_expected_priority_id) .collect(); - let (Some(first_new), Some(last_new)) = (new_ops.first(), new_ops.last()) else { - return Ok(()); + let skipped_ops = events_count - new_ops.len(); + let Some(first_new) = new_ops.first() else { + return Ok(events_count); }; assert_eq!( first_new.serial_id(), self.next_expected_priority_id, "priority transaction serial id mismatch" ); - let next_expected_priority_id = last_new.serial_id().next(); let stage_latency = METRICS.poll_eth_node[&PollStage::PersistL1Txs].start(); APP_METRICS.processed_txs[&TxStage::added_to_mempool()].inc(); APP_METRICS.processed_l1_txs[&TxStage::added_to_mempool()].inc(); - for new_op in new_ops { - let eth_block = new_op.eth_block(); + let processed_priority_transactions = sl_client.get_total_priority_txs().await?; + let ops_to_insert: Vec<&L1Tx> = new_ops + .iter() + .take_while(|op| processed_priority_transactions > op.serial_id().0) + .collect(); + + for new_op in &ops_to_insert { storage .transactions_dal() - .insert_transaction_l1(&new_op, eth_block) + .insert_transaction_l1(new_op, new_op.eth_block()) .await .map_err(DalError::generalize)?; } stage_latency.observe(); - self.next_expected_priority_id = next_expected_priority_id; - Ok(()) + if let Some(last_op) = ops_to_insert.last() { + self.next_expected_priority_id = last_op.serial_id().next(); + } + + Ok(skipped_ops + ops_to_insert.len()) } fn relevant_topic(&self) -> H256 { self.new_priority_request_signature } + + fn event_source(&self) -> EventsSource { + EventsSource::L1 + } + + fn 
event_type(&self) -> EventType { + EventType::PriorityTransactions + } } diff --git a/core/node/eth_watch/src/lib.rs b/core/node/eth_watch/src/lib.rs index e964d63bb19..537468bb6e4 100644 --- a/core/node/eth_watch/src/lib.rs +++ b/core/node/eth_watch/src/lib.rs @@ -6,23 +6,20 @@ use std::time::Duration; use anyhow::Context as _; use tokio::sync::watch; -use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal, DalError}; use zksync_system_constants::PRIORITY_EXPIRATION; use zksync_types::{ ethabi::Contract, protocol_version::ProtocolSemanticVersion, - web3::BlockNumber as Web3BlockNumber, Address, PriorityOpId, + web3::BlockNumber as Web3BlockNumber, PriorityOpId, }; pub use self::client::EthHttpQueryClient; use self::{ client::{EthClient, RETRY_LIMIT}, - event_processors::{ - EventProcessor, EventProcessorError, GovernanceUpgradesEventProcessor, - PriorityOpsEventProcessor, - }, - metrics::{PollStage, METRICS}, + event_processors::{EventProcessor, EventProcessorError, PriorityOpsEventProcessor}, + metrics::METRICS, }; -use crate::event_processors::DecentralizedUpgradesEventProcessor; +use crate::event_processors::{DecentralizedUpgradesEventProcessor, EventsSource}; mod client; mod event_processors; @@ -34,70 +31,53 @@ mod tests; struct EthWatchState { last_seen_protocol_version: ProtocolSemanticVersion, next_expected_priority_id: PriorityOpId, - last_processed_ethereum_block: u64, } /// Ethereum watcher component. #[derive(Debug)] pub struct EthWatch { - client: Box, + l1_client: Box, + sl_client: Box, poll_interval: Duration, event_processors: Vec>, - last_processed_ethereum_block: u64, pool: ConnectionPool, } impl EthWatch { pub async fn new( - diamond_proxy_addr: Address, - governance_contract: &Contract, chain_admin_contract: &Contract, - mut client: Box, + l1_client: Box, + sl_client: Box, pool: ConnectionPool, poll_interval: Duration, ) -> anyhow::Result { let mut storage = pool.connection_tagged("eth_watch").await?; - let state = Self::initialize_state(&*client, &mut storage).await?; + let state = Self::initialize_state(&mut storage).await?; tracing::info!("initialized state: {state:?}"); drop(storage); let priority_ops_processor = PriorityOpsEventProcessor::new(state.next_expected_priority_id)?; - let governance_upgrades_processor = GovernanceUpgradesEventProcessor::new( - diamond_proxy_addr, - state.last_seen_protocol_version, - governance_contract, - ); let decentralized_upgrades_processor = DecentralizedUpgradesEventProcessor::new( state.last_seen_protocol_version, chain_admin_contract, ); let event_processors: Vec> = vec![ Box::new(priority_ops_processor), - Box::new(governance_upgrades_processor), Box::new(decentralized_upgrades_processor), ]; - let topics = event_processors - .iter() - .map(|processor| processor.relevant_topic()) - .collect(); - client.set_topics(topics); - Ok(Self { - client, + l1_client, + sl_client, poll_interval, event_processors, - last_processed_ethereum_block: state.last_processed_ethereum_block, pool, }) } #[tracing::instrument(name = "EthWatch::initialize_state", skip_all)] - async fn initialize_state( - client: &dyn EthClient, - storage: &mut Connection<'_, Core>, - ) -> anyhow::Result { + async fn initialize_state(storage: &mut Connection<'_, Core>) -> anyhow::Result { let next_expected_priority_id: PriorityOpId = storage .transactions_dal() .last_priority_id() @@ -110,26 +90,9 @@ impl EthWatch { .await? 
.context("expected at least one (genesis) version to be present in DB")?; - let last_processed_ethereum_block = match storage - .transactions_dal() - .get_last_processed_l1_block() - .await? - { - // There are some priority ops processed - start from the last processed eth block - // but subtract 1 in case the server stopped mid-block. - Some(block) => block.0.saturating_sub(1).into(), - // There are no priority ops processed - to be safe, scan the last 50k blocks. - None => client - .finalized_block_number() - .await - .context("cannot get current Ethereum block")? - .saturating_sub(PRIORITY_EXPIRATION), - }; - Ok(EthWatchState { next_expected_priority_id, last_seen_protocol_version, - last_processed_ethereum_block, }) } @@ -155,10 +118,6 @@ impl EthWatch { // This is an error because otherwise we could potentially miss a priority operation // thus entering priority mode, which is not desired. tracing::error!("Failed to process new blocks: {err}"); - self.last_processed_ethereum_block = - Self::initialize_state(&*self.client, &mut storage) - .await? - .last_processed_ethereum_block; } } } @@ -172,34 +131,60 @@ impl EthWatch { &mut self, storage: &mut Connection<'_, Core>, ) -> Result<(), EventProcessorError> { - let stage_latency = METRICS.poll_eth_node[&PollStage::Request].start(); - let to_block = self.client.finalized_block_number().await?; - if to_block <= self.last_processed_ethereum_block { - return Ok(()); - } - - let events = self - .client - .get_events( - Web3BlockNumber::Number(self.last_processed_ethereum_block.into()), - Web3BlockNumber::Number(to_block.into()), - RETRY_LIMIT, - ) - .await?; - stage_latency.observe(); - for processor in &mut self.event_processors { - let relevant_topic = processor.relevant_topic(); - let processor_events = events - .iter() - .filter(|event| event.topics.first() == Some(&relevant_topic)) - .cloned() - .collect(); - processor - .process_events(storage, &*self.client, processor_events) + let client = match processor.event_source() { + EventsSource::L1 => self.l1_client.as_ref(), + EventsSource::SL => self.sl_client.as_ref(), + }; + let chain_id = client.chain_id().await?; + let finalized_block = client.finalized_block_number().await?; + + let from_block = storage + .processed_events_dal() + .get_or_set_next_block_to_process( + processor.event_type(), + chain_id, + finalized_block.saturating_sub(PRIORITY_EXPIRATION), + ) + .await + .map_err(DalError::generalize)?; + + let processor_events = client + .get_events( + Web3BlockNumber::Number(from_block.into()), + Web3BlockNumber::Number(finalized_block.into()), + processor.relevant_topic(), + None, + RETRY_LIMIT, + ) .await?; + let processed_events_count = processor + .process_events(storage, &*self.sl_client, processor_events.clone()) + .await?; + + let next_block_to_process = if processed_events_count == processor_events.len() { + finalized_block + 1 + } else if processed_events_count == 0 { + //nothing was processed + from_block + } else { + processor_events[processed_events_count - 1] + .block_number + .expect("Event block number is missing") + .try_into() + .unwrap() + }; + + storage + .processed_events_dal() + .update_next_block_to_process( + processor.event_type(), + chain_id, + next_block_to_process, + ) + .await + .map_err(DalError::generalize)?; } - self.last_processed_ethereum_block = to_block; Ok(()) } } diff --git a/core/node/eth_watch/src/metrics.rs b/core/node/eth_watch/src/metrics.rs index a3684cc6e72..a942d4a6e61 100644 --- a/core/node/eth_watch/src/metrics.rs +++ 
b/core/node/eth_watch/src/metrics.rs @@ -7,7 +7,6 @@ use vise::{Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Histogram #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] #[metrics(label = "stage", rename_all = "snake_case")] pub(super) enum PollStage { - Request, PersistL1Txs, PersistUpgrades, } diff --git a/core/node/eth_watch/src/tests.rs b/core/node/eth_watch/src/tests.rs index e6e343f50bc..8c37b5c9920 100644 --- a/core/node/eth_watch/src/tests.rs +++ b/core/node/eth_watch/src/tests.rs @@ -1,37 +1,48 @@ use std::{collections::HashMap, convert::TryInto, sync::Arc}; use tokio::sync::RwLock; -use zksync_contracts::{chain_admin_contract, governance_contract, hyperchain_contract}; +use zksync_contracts::{ + chain_admin_contract, hyperchain_contract, state_transition_manager_contract, +}; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_eth_client::{ContractCallError, EnrichedClientResult}; use zksync_types::{ - abi, ethabi, - ethabi::{Hash, Token}, + abi, + abi::ProposedUpgrade, + ethabi, + ethabi::Token, l1::{L1Tx, OpProcessingType, PriorityQueueType}, protocol_upgrade::{ProtocolUpgradeTx, ProtocolUpgradeTxCommonData}, protocol_version::ProtocolSemanticVersion, - web3::{BlockNumber, Log}, + web3::{contract::Tokenizable, BlockNumber, Log}, Address, Execute, L1TxCommonData, PriorityOpId, ProtocolUpgrade, ProtocolVersion, - ProtocolVersionId, Transaction, H256, U256, + ProtocolVersionId, SLChainId, Transaction, H160, H256, U256, U64, }; -use crate::{client::EthClient, EthWatch}; +use crate::{ + client::{EthClient, RETRY_LIMIT}, + EthWatch, +}; #[derive(Debug)] struct FakeEthClientData { transactions: HashMap>, diamond_upgrades: HashMap>, - governance_upgrades: HashMap>, + upgrade_timestamp: HashMap>, last_finalized_block_number: u64, + chain_id: SLChainId, + processed_priority_transactions_count: u64, } impl FakeEthClientData { - fn new() -> Self { + fn new(chain_id: SLChainId) -> Self { Self { transactions: Default::default(), diamond_upgrades: Default::default(), - governance_upgrades: Default::default(), + upgrade_timestamp: Default::default(), last_finalized_block_number: 0, + chain_id, + processed_priority_transactions_count: 0, } } @@ -42,21 +53,30 @@ impl FakeEthClientData { .entry(eth_block.0 as u64) .or_default() .push(tx_into_log(transaction.clone())); + self.processed_priority_transactions_count += 1; } } - fn add_governance_upgrades(&mut self, upgrades: &[(ProtocolUpgrade, u64)]) { + fn add_upgrade_timestamp(&mut self, upgrades: &[(ProtocolUpgrade, u64)]) { for (upgrade, eth_block) in upgrades { - self.governance_upgrades + self.upgrade_timestamp + .entry(*eth_block) + .or_default() + .push(upgrade_timestamp_log(*eth_block)); + self.diamond_upgrades .entry(*eth_block) .or_default() - .push(upgrade_into_governor_log(upgrade.clone(), *eth_block)); + .push(diamond_upgrade_log(upgrade.clone(), *eth_block)); } } fn set_last_finalized_block_number(&mut self, number: u64) { self.last_finalized_block_number = number; } + + fn set_processed_priority_transactions_count(&mut self, number: u64) { + self.processed_priority_transactions_count = number; + } } #[derive(Debug, Clone)] @@ -65,9 +85,9 @@ struct MockEthClient { } impl MockEthClient { - fn new() -> Self { + fn new(chain_id: SLChainId) -> Self { Self { - inner: Arc::new(RwLock::new(FakeEthClientData::new())), + inner: Arc::new(RwLock::new(FakeEthClientData::new(chain_id))), } } @@ -75,8 +95,8 @@ impl MockEthClient { 
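// Delegates to the shared `FakeEthClientData`, which also bumps its
// processed-priority-transactions counter for every transaction added.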
self.inner.write().await.add_transactions(transactions); } - async fn add_governance_upgrades(&mut self, upgrades: &[(ProtocolUpgrade, u64)]) { - self.inner.write().await.add_governance_upgrades(upgrades); + async fn add_upgrade_timestamp(&mut self, upgrades: &[(ProtocolUpgrade, u64)]) { + self.inner.write().await.add_upgrade_timestamp(upgrades); } async fn set_last_finalized_block_number(&mut self, number: u64) { @@ -86,6 +106,13 @@ impl MockEthClient { .set_last_finalized_block_number(number); } + async fn set_processed_priority_transactions_count(&mut self, number: u64) { + self.inner + .write() + .await + .set_processed_priority_transactions_count(number) + } + async fn block_to_number(&self, block: BlockNumber) -> u64 { match block { BlockNumber::Earliest => 0, @@ -104,6 +131,8 @@ impl EthClient for MockEthClient { &self, from: BlockNumber, to: BlockNumber, + topic1: H256, + topic2: Option, _retries_left: usize, ) -> EnrichedClientResult> { let from = self.block_to_number(from).await; @@ -116,15 +145,19 @@ impl EthClient for MockEthClient { if let Some(ops) = self.inner.read().await.diamond_upgrades.get(&number) { logs.extend_from_slice(ops); } - if let Some(ops) = self.inner.read().await.governance_upgrades.get(&number) { + if let Some(ops) = self.inner.read().await.upgrade_timestamp.get(&number) { logs.extend_from_slice(ops); } } - Ok(logs) + Ok(logs + .into_iter() + .filter(|log| { + log.topics.get(0) == Some(&topic1) + && (topic2.is_none() || log.topics.get(1) == topic2.as_ref()) + }) + .collect()) } - fn set_topics(&mut self, _topics: Vec) {} - async fn scheduler_vk_hash( &self, _verifier_address: Address, @@ -138,9 +171,51 @@ impl EthClient for MockEthClient { async fn diamond_cut_by_version( &self, - _packed_version: H256, + packed_version: H256, ) -> EnrichedClientResult>> { - unimplemented!() + let from_block = *self + .inner + .read() + .await + .diamond_upgrades + .keys() + .min() + .unwrap_or(&0); + let to_block = *self + .inner + .read() + .await + .diamond_upgrades + .keys() + .max() + .unwrap_or(&0); + + let logs = self + .get_events( + U64::from(from_block).into(), + U64::from(to_block).into(), + state_transition_manager_contract() + .event("NewUpgradeCutData") + .unwrap() + .signature(), + Some(packed_version), + RETRY_LIMIT, + ) + .await?; + + Ok(logs.into_iter().next().map(|log| log.data.0)) + } + + async fn get_total_priority_txs(&self) -> Result { + Ok(self + .inner + .read() + .await + .processed_priority_transactions_count) + } + + async fn chain_id(&self) -> EnrichedClientResult { + Ok(self.inner.read().await.chain_id) } } @@ -203,27 +278,47 @@ fn build_upgrade_tx(id: ProtocolVersionId, eth_block: u64) -> ProtocolUpgradeTx .unwrap() } -async fn create_test_watcher(connection_pool: ConnectionPool) -> (EthWatch, MockEthClient) { - let client = MockEthClient::new(); +async fn create_test_watcher( + connection_pool: ConnectionPool, + is_gateway: bool, +) -> (EthWatch, MockEthClient, MockEthClient) { + let l1_client = MockEthClient::new(SLChainId(42)); + let sl_client = if is_gateway { + MockEthClient::new(SLChainId(123)) + } else { + l1_client.clone() + }; let watcher = EthWatch::new( - Address::default(), - &governance_contract(), &chain_admin_contract(), - Box::new(client.clone()), + Box::new(l1_client.clone()), + Box::new(sl_client.clone()), connection_pool, std::time::Duration::from_nanos(1), ) .await .unwrap(); - (watcher, client) + (watcher, l1_client, sl_client) } -#[tokio::test] +async fn create_l1_test_watcher( + connection_pool: ConnectionPool, +) -> 
(EthWatch, MockEthClient) { + let (watcher, l1_client, _) = create_test_watcher(connection_pool, false).await; + (watcher, l1_client) +} + +async fn create_gateway_test_watcher( + connection_pool: ConnectionPool, +) -> (EthWatch, MockEthClient, MockEthClient) { + create_test_watcher(connection_pool, true).await +} + +#[test_log::test(tokio::test)] async fn test_normal_operation_l1_txs() { let connection_pool = ConnectionPool::::test_pool().await; setup_db(&connection_pool).await; - let (mut watcher, mut client) = create_test_watcher(connection_pool.clone()).await; + let (mut watcher, mut client) = create_l1_test_watcher(connection_pool.clone()).await; let mut storage = connection_pool.connection().await.unwrap(); client @@ -258,15 +353,15 @@ async fn test_normal_operation_l1_txs() { assert_eq!(db_tx.common_data.serial_id.0, 2); } -#[tokio::test] -async fn test_gap_in_governance_upgrades() { +#[test_log::test(tokio::test)] +async fn test_gap_in_upgrade_timestamp() { let connection_pool = ConnectionPool::::test_pool().await; setup_db(&connection_pool).await; - let (mut watcher, mut client) = create_test_watcher(connection_pool.clone()).await; + let (mut watcher, mut client) = create_l1_test_watcher(connection_pool.clone()).await; let mut storage = connection_pool.connection().await.unwrap(); client - .add_governance_upgrades(&[( + .add_upgrade_timestamp(&[( ProtocolUpgrade { version: ProtocolSemanticVersion { minor: ProtocolVersionId::next(), @@ -291,18 +386,17 @@ async fn test_gap_in_governance_upgrades() { assert_eq!(db_versions[1].minor, next_version); } -#[tokio::test] -async fn test_normal_operation_governance_upgrades() { +#[test_log::test(tokio::test)] +async fn test_normal_operation_upgrade_timestamp() { zksync_concurrency::testonly::abort_on_panic(); let connection_pool = ConnectionPool::::test_pool().await; setup_db(&connection_pool).await; - let mut client = MockEthClient::new(); + let mut client = MockEthClient::new(SLChainId(42)); let mut watcher = EthWatch::new( - Address::default(), - &governance_contract(), &chain_admin_contract(), Box::new(client.clone()), + Box::new(client.clone()), connection_pool.clone(), std::time::Duration::from_nanos(1), ) @@ -311,7 +405,7 @@ async fn test_normal_operation_governance_upgrades() { let mut storage = connection_pool.connection().await.unwrap(); client - .add_governance_upgrades(&[ + .add_upgrade_timestamp(&[ ( ProtocolUpgrade { tx: None, @@ -375,12 +469,12 @@ async fn test_normal_operation_governance_upgrades() { assert_eq!(tx.common_data.upgrade_id, ProtocolVersionId::next()); } -#[tokio::test] +#[test_log::test(tokio::test)] #[should_panic] async fn test_gap_in_single_batch() { let connection_pool = ConnectionPool::::test_pool().await; setup_db(&connection_pool).await; - let (mut watcher, mut client) = create_test_watcher(connection_pool.clone()).await; + let (mut watcher, mut client) = create_l1_test_watcher(connection_pool.clone()).await; let mut storage = connection_pool.connection().await.unwrap(); client @@ -396,12 +490,12 @@ async fn test_gap_in_single_batch() { watcher.loop_iteration(&mut storage).await.unwrap(); } -#[tokio::test] +#[test_log::test(tokio::test)] #[should_panic] async fn test_gap_between_batches() { let connection_pool = ConnectionPool::::test_pool().await; setup_db(&connection_pool).await; - let (mut watcher, mut client) = create_test_watcher(connection_pool.clone()).await; + let (mut watcher, mut client) = create_l1_test_watcher(connection_pool.clone()).await; let mut storage = 
connection_pool.connection().await.unwrap(); client @@ -424,12 +518,12 @@ async fn test_gap_between_batches() { watcher.loop_iteration(&mut storage).await.unwrap(); } -#[tokio::test] +#[test_log::test(tokio::test)] async fn test_overlapping_batches() { zksync_concurrency::testonly::abort_on_panic(); let connection_pool = ConnectionPool::::test_pool().await; setup_db(&connection_pool).await; - let (mut watcher, mut client) = create_test_watcher(connection_pool.clone()).await; + let (mut watcher, mut client) = create_l1_test_watcher(connection_pool.clone()).await; let mut storage = connection_pool.connection().await.unwrap(); client @@ -467,6 +561,52 @@ async fn test_overlapping_batches() { assert_eq!(tx.common_data.serial_id.0, 4); } +#[test_log::test(tokio::test)] +async fn test_transactions_get_gradually_processed_by_gateway() { + zksync_concurrency::testonly::abort_on_panic(); + let connection_pool = ConnectionPool::::test_pool().await; + setup_db(&connection_pool).await; + let (mut watcher, mut l1_client, mut gateway_client) = + create_gateway_test_watcher(connection_pool.clone()).await; + + let mut storage = connection_pool.connection().await.unwrap(); + l1_client + .add_transactions(&[ + build_l1_tx(0, 10), + build_l1_tx(1, 14), + build_l1_tx(2, 14), + build_l1_tx(3, 20), + build_l1_tx(4, 22), + ]) + .await; + l1_client.set_last_finalized_block_number(15).await; + gateway_client + .set_processed_priority_transactions_count(2) + .await; + watcher.loop_iteration(&mut storage).await.unwrap(); + + let db_txs = get_all_db_txs(&mut storage).await; + assert_eq!(db_txs.len(), 2); + + l1_client.set_last_finalized_block_number(25).await; + gateway_client + .set_processed_priority_transactions_count(4) + .await; + watcher.loop_iteration(&mut storage).await.unwrap(); + + let db_txs = get_all_db_txs(&mut storage).await; + assert_eq!(db_txs.len(), 4); + let mut db_txs: Vec = db_txs + .into_iter() + .map(|tx| tx.try_into().unwrap()) + .collect(); + db_txs.sort_by_key(|tx| tx.common_data.serial_id); + let tx = db_txs[2].clone(); + assert_eq!(tx.common_data.serial_id.0, 2); + let tx = db_txs[3].clone(); + assert_eq!(tx.common_data.serial_id.0, 3); +} + async fn get_all_db_txs(storage: &mut Connection<'_, Core>) -> Vec { storage.transactions_dal().reset_mempool().await.unwrap(); storage @@ -518,37 +658,69 @@ fn tx_into_log(tx: L1Tx) -> Log { } } -fn upgrade_into_governor_log(upgrade: ProtocolUpgrade, eth_block: u64) -> Log { - let diamond_cut = upgrade_into_diamond_cut(upgrade); +fn init_calldata(protocol_upgrade: ProtocolUpgrade) -> Vec { + let upgrade_token = upgrade_into_diamond_cut(protocol_upgrade); + + let encoded_params = ethabi::encode(&[upgrade_token]); + let execute_upgrade_selector = hyperchain_contract() .function("executeUpgrade") .unwrap() .short_signature(); - let diamond_upgrade_calldata = execute_upgrade_selector - .iter() - .copied() - .chain(ethabi::encode(&[diamond_cut])) - .collect(); - let governance_call = Token::Tuple(vec![ - Token::Address(Default::default()), - Token::Uint(U256::default()), - Token::Bytes(diamond_upgrade_calldata), - ]); - let governance_operation = Token::Tuple(vec![ - Token::Array(vec![governance_call]), - Token::FixedBytes(vec![0u8; 32]), - Token::FixedBytes(vec![0u8; 32]), - ]); - let final_data = ethabi::encode(&[Token::FixedBytes(vec![0u8; 32]), governance_operation]); + + // Concatenate the function selector with the encoded parameters + let mut calldata = Vec::with_capacity(4 + encoded_params.len()); + 
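// Same layout as Solidity's `abi.encodeWithSelector`: the 4-byte selector followed by the ABI-encoded arguments.
+ 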
calldata.extend_from_slice(&execute_upgrade_selector); + calldata.extend_from_slice(&encoded_params); + + calldata +} + +fn diamond_upgrade_log(upgrade: ProtocolUpgrade, eth_block: u64) -> Log { + // struct DiamondCutData { + // FacetCut[] facetCuts; + // address initAddress; + // bytes initCalldata; + // } + let final_data = ethabi::encode(&[Token::Tuple(vec![ + Token::Array(vec![]), + Token::Address(H160::zero()), + Token::Bytes(init_calldata(upgrade.clone())), + ])]); + tracing::info!("{:?}", Token::Bytes(init_calldata(upgrade))); + + Log { + address: Address::repeat_byte(0x1), + topics: vec![ + state_transition_manager_contract() + .event("NewUpgradeCutData") + .unwrap() + .signature(), + H256::from_low_u64_be(eth_block), + ], + data: final_data.into(), + block_hash: Some(H256::repeat_byte(0x11)), + block_number: Some(eth_block.into()), + transaction_hash: Some(H256::random()), + transaction_index: Some(0u64.into()), + log_index: Some(0u64.into()), + transaction_log_index: Some(0u64.into()), + log_type: None, + removed: None, + block_timestamp: None, + } +} +fn upgrade_timestamp_log(eth_block: u64) -> Log { + let final_data = ethabi::encode(&[U256::from(12345).into_token()]); Log { address: Address::repeat_byte(0x1), topics: vec![ - governance_contract() - .event("TransparentOperationScheduled") - .expect("TransparentOperationScheduled event is missing in abi") + chain_admin_contract() + .event("UpdateUpgradeTimestamp") + .expect("UpdateUpgradeTimestamp event is missing in ABI") .signature(), - Default::default(), + H256::from_low_u64_be(eth_block), ], data: final_data.into(), block_hash: Some(H256::repeat_byte(0x11)), @@ -577,7 +749,7 @@ fn upgrade_into_diamond_cut(upgrade: ProtocolUpgrade) -> Token { else { unreachable!() }; - let upgrade_token = abi::ProposedUpgrade { + ProposedUpgrade { l2_protocol_upgrade_tx: tx, factory_deps, bootloader_hash: upgrade.bootloader_code_hash.unwrap_or_default().into(), @@ -589,17 +761,7 @@ fn upgrade_into_diamond_cut(upgrade: ProtocolUpgrade) -> Token { upgrade_timestamp: upgrade.timestamp.into(), new_protocol_version: upgrade.version.pack(), } - .encode(); - Token::Tuple(vec![ - Token::Array(vec![]), - Token::Address(Default::default()), - Token::Bytes( - vec![0u8; 4] - .into_iter() - .chain(ethabi::encode(&[upgrade_token])) - .collect(), - ), - ]) + .encode() } async fn setup_db(connection_pool: &ConnectionPool) { diff --git a/core/node/node_framework/src/implementations/layers/eth_watch.rs b/core/node/node_framework/src/implementations/layers/eth_watch.rs index 53eeb1c5280..e19828d85cc 100644 --- a/core/node/node_framework/src/implementations/layers/eth_watch.rs +++ b/core/node/node_framework/src/implementations/layers/eth_watch.rs @@ -1,5 +1,5 @@ use zksync_config::{ContractsConfig, EthWatchConfig}; -use zksync_contracts::{chain_admin_contract, governance_contract}; +use zksync_contracts::chain_admin_contract; use zksync_eth_watch::{EthHttpQueryClient, EthWatch}; use crate::{ @@ -71,9 +71,8 @@ impl WiringLayer for EthWatchLayer { ); let eth_watch = EthWatch::new( - self.contracts_config.diamond_proxy_addr, - &governance_contract(), &chain_admin_contract(), + Box::new(eth_client.clone()), Box::new(eth_client), main_pool, self.eth_watch_config.poll_interval(), From df6876221936a44fa2fb8c80c01d043d229621fc Mon Sep 17 00:00:00 2001 From: Artem Fomiuk <88630083+Artemka374@users.noreply.github.com> Date: Thu, 26 Sep 2024 11:01:50 +0300 Subject: [PATCH 12/16] feat(prover): WG refactoring #3 (#2942) MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Create WG struct with generic round param. Implement generic JobProcessor for all types of WG. ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- .../bin/witness_generator/src/artifacts.rs | 10 +- .../src/basic_circuits/job_processor.rs | 144 --------- .../src/leaf_aggregation/job_processor.rs | 135 --------- .../src/leaf_aggregation/mod.rs | 277 ------------------ .../crates/bin/witness_generator/src/lib.rs | 7 +- .../crates/bin/witness_generator/src/main.rs | 70 +++-- .../src/node_aggregation/job_processor.rs | 126 -------- .../src/recursion_tip/job_processor.rs | 136 --------- .../{ => rounds}/basic_circuits/artifacts.rs | 19 +- .../src/rounds/basic_circuits/mod.rs | 136 +++++++++ .../mod.rs => rounds/basic_circuits/utils.rs} | 149 +--------- .../leaf_aggregation/artifacts.rs | 6 +- .../src/rounds/leaf_aggregation/mod.rs | 250 ++++++++++++++++ .../bin/witness_generator/src/rounds/mod.rs | 195 ++++++++++++ .../node_aggregation/artifacts.rs | 6 +- .../src/{ => rounds}/node_aggregation/mod.rs | 67 ++--- .../{ => rounds}/recursion_tip/artifacts.rs | 6 +- .../src/{ => rounds}/recursion_tip/mod.rs | 90 +++--- .../src/{ => rounds}/scheduler/artifacts.rs | 6 +- .../src/{ => rounds}/scheduler/mod.rs | 83 +++--- .../src/scheduler/job_processor.rs | 132 --------- .../src/witness_generator.rs | 25 -- .../bin/witness_generator/tests/basic_test.rs | 51 +--- ...2254a457665179d9cf0a3c0b18c3fe09e4838.json | 15 - ...d94f28b7b2b60d551d552a9b0bab1f1791e39.json | 22 -- ...7a1a04821495487a80595cc9b523dac6ac8e9.json | 15 - ...d0fc9ac9a7f3dce2ba48c2d0e6f38b6ba455a.json | 15 - ...7ac83cd32a628d3e01e5cd1949c519683a352.json | 22 -- ...7227120a8279db1875d26ccae5ee0785f46a9.json | 22 -- ...7b56187686173327498ac75424593547c19c5.json | 22 -- ...49b6370c211a7fc24ad03a5f0e327f9d18040.json | 22 -- ...5263556f258565f79cbb40f5ecc1a4f6402f5.json | 15 - ...8b02c44b099e27e3c45c5c810cd5fcd8884ed.json | 15 - ...1bb675a86ae14a863fa31eb513af65d606ed.json} | 5 +- .../src/fri_witness_generator_dal.rs | 256 ++++------------ 35 files changed, 863 insertions(+), 1709 deletions(-) delete mode 100644 prover/crates/bin/witness_generator/src/basic_circuits/job_processor.rs delete mode 100644 prover/crates/bin/witness_generator/src/leaf_aggregation/job_processor.rs delete mode 100644 prover/crates/bin/witness_generator/src/leaf_aggregation/mod.rs delete mode 100644 prover/crates/bin/witness_generator/src/node_aggregation/job_processor.rs delete mode 100644 prover/crates/bin/witness_generator/src/recursion_tip/job_processor.rs rename prover/crates/bin/witness_generator/src/{ => rounds}/basic_circuits/artifacts.rs (81%) create mode 100644 prover/crates/bin/witness_generator/src/rounds/basic_circuits/mod.rs rename prover/crates/bin/witness_generator/src/{basic_circuits/mod.rs => rounds/basic_circuits/utils.rs} (74%) rename prover/crates/bin/witness_generator/src/{ => rounds}/leaf_aggregation/artifacts.rs (95%) create mode 100644 prover/crates/bin/witness_generator/src/rounds/leaf_aggregation/mod.rs create mode 100644 prover/crates/bin/witness_generator/src/rounds/mod.rs rename prover/crates/bin/witness_generator/src/{ => rounds}/node_aggregation/artifacts.rs (95%) rename prover/crates/bin/witness_generator/src/{ => rounds}/node_aggregation/mod.rs (87%) rename 
prover/crates/bin/witness_generator/src/{ => rounds}/recursion_tip/artifacts.rs (95%) rename prover/crates/bin/witness_generator/src/{ => rounds}/recursion_tip/mod.rs (80%) rename prover/crates/bin/witness_generator/src/{ => rounds}/scheduler/artifacts.rs (92%) rename prover/crates/bin/witness_generator/src/{ => rounds}/scheduler/mod.rs (79%) delete mode 100644 prover/crates/bin/witness_generator/src/scheduler/job_processor.rs delete mode 100644 prover/crates/bin/witness_generator/src/witness_generator.rs delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-16548daf69e9ff0528904be2e142254a457665179d9cf0a3c0b18c3fe09e4838.json delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-53f78fdee39b113d2f55f6f951bd94f28b7b2b60d551d552a9b0bab1f1791e39.json delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-5db868e03dc6901a0afa06f82a37a1a04821495487a80595cc9b523dac6ac8e9.json delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-67f5f3a015dc478f02f4f701c90d0fc9ac9a7f3dce2ba48c2d0e6f38b6ba455a.json delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-6f20d468efe916f8e92cbf259b37ac83cd32a628d3e01e5cd1949c519683a352.json delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-806b82a9effd885ba537a2a1c7d7227120a8279db1875d26ccae5ee0785f46a9.json delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-b321c5ba22358cbb1fd9c627f1e7b56187686173327498ac75424593547c19c5.json delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-b3d71dbe14bcd94131b29b64dcb49b6370c211a7fc24ad03a5f0e327f9d18040.json delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-bf06bd08d8ccf67fc00bbc364715263556f258565f79cbb40f5ecc1a4f6402f5.json delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-d4949debfe0dc5112204cd196c68b02c44b099e27e3c45c5c810cd5fcd8884ed.json rename prover/crates/lib/prover_dal/.sqlx/{query-d91c931e2a14cf1183a608d041fc6fadb8e12a9218399d189b4d95e2ca4fcc48.json => query-e438a4f0c705fcb39e017912ce8e1bb675a86ae14a863fa31eb513af65d606ed.json} (54%) diff --git a/prover/crates/bin/witness_generator/src/artifacts.rs b/prover/crates/bin/witness_generator/src/artifacts.rs index 7c444da047b..0c6044692dd 100644 --- a/prover/crates/bin/witness_generator/src/artifacts.rs +++ b/prover/crates/bin/witness_generator/src/artifacts.rs @@ -1,20 +1,20 @@ -use std::time::Instant; +use std::{sync::Arc, time::Instant}; use async_trait::async_trait; use zksync_object_store::ObjectStore; use zksync_prover_dal::{ConnectionPool, Prover}; #[derive(Debug)] -pub(crate) struct AggregationBlobUrls { +pub struct AggregationBlobUrls { pub aggregation_urls: String, pub circuit_ids_and_urls: Vec<(u8, String)>, } #[async_trait] -pub(crate) trait ArtifactsManager { +pub trait ArtifactsManager { type InputMetadata; type InputArtifacts; - type OutputArtifacts; + type OutputArtifacts: Send + Clone + 'static; type BlobUrls; async fn get_artifacts( @@ -26,6 +26,8 @@ pub(crate) trait ArtifactsManager { job_id: u32, artifacts: Self::OutputArtifacts, object_store: &dyn ObjectStore, + shall_save_to_public_bucket: bool, + public_blob_store: Option>, ) -> Self::BlobUrls; async fn save_to_database( diff --git a/prover/crates/bin/witness_generator/src/basic_circuits/job_processor.rs b/prover/crates/bin/witness_generator/src/basic_circuits/job_processor.rs deleted file mode 100644 index 50e747b1ce1..00000000000 --- a/prover/crates/bin/witness_generator/src/basic_circuits/job_processor.rs +++ /dev/null @@ -1,144 +0,0 @@ -use std::{sync::Arc, time::Instant}; - -use anyhow::Context as _; -use 
tracing::Instrument; -use zksync_prover_dal::ProverDal; -use zksync_prover_fri_types::{get_current_pod_name, AuxOutputWitnessWrapper}; -use zksync_prover_keystore::keystore::Keystore; -use zksync_queued_job_processor::{async_trait, JobProcessor}; -use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; - -use crate::{ - artifacts::ArtifactsManager, - basic_circuits::{BasicCircuitArtifacts, BasicWitnessGenerator, BasicWitnessGeneratorJob}, - metrics::WITNESS_GENERATOR_METRICS, - witness_generator::WitnessGenerator, -}; - -#[async_trait] -impl JobProcessor for BasicWitnessGenerator { - type Job = BasicWitnessGeneratorJob; - type JobId = L1BatchNumber; - // The artifact is optional to support skipping blocks when sampling is enabled. - type JobArtifacts = Option; - - const SERVICE_NAME: &'static str = "fri_basic_circuit_witness_generator"; - - async fn get_next_job(&self) -> anyhow::Result> { - let mut prover_connection = self.prover_connection_pool.connection().await?; - let last_l1_batch_to_process = self.config.last_l1_batch_to_process(); - let pod_name = get_current_pod_name(); - match prover_connection - .fri_witness_generator_dal() - .get_next_basic_circuit_witness_job( - last_l1_batch_to_process, - self.protocol_version, - &pod_name, - ) - .await - { - Some(block_number) => Ok(Some(( - block_number, - ::prepare_job( - block_number, - &*self.object_store, - Keystore::locate(), // todo: this should be removed - ) - .await?, - ))), - None => Ok(None), - } - } - - async fn save_failure(&self, job_id: L1BatchNumber, _started_at: Instant, error: String) -> () { - self.prover_connection_pool - .connection() - .await - .unwrap() - .fri_witness_generator_dal() - .mark_witness_job_failed(&error, job_id) - .await; - } - - #[allow(clippy::async_yields_async)] - async fn process_job( - &self, - _job_id: &Self::JobId, - job: BasicWitnessGeneratorJob, - started_at: Instant, - ) -> tokio::task::JoinHandle>> { - let object_store = Arc::clone(&self.object_store); - let max_circuits_in_flight = self.config.max_circuits_in_flight; - tokio::spawn(async move { - let block_number = job.block_number; - ::process_job( - job, - object_store, - Some(max_circuits_in_flight), - started_at, - ) - .instrument(tracing::info_span!("basic_circuit", %block_number)) - .await - .map(Some) - }) - } - - #[tracing::instrument(skip_all, fields(l1_batch = %job_id))] - async fn save_result( - &self, - job_id: L1BatchNumber, - started_at: Instant, - optional_artifacts: Option, - ) -> anyhow::Result<()> { - match optional_artifacts { - None => Ok(()), - Some(artifacts) => { - let blob_started_at = Instant::now(); - - let aux_output_witness_wrapper = - AuxOutputWitnessWrapper(artifacts.aux_output_witness.clone()); - if self.config.shall_save_to_public_bucket { - self.public_blob_store.as_deref() - .expect("public_object_store shall not be empty while running with shall_save_to_public_bucket config") - .put(job_id, &aux_output_witness_wrapper) - .await - .unwrap(); - } - - let blob_urls = - Self::save_to_bucket(job_id.0, artifacts.clone(), &*self.object_store).await; - - WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::BasicCircuits.into()] - .observe(blob_started_at.elapsed()); - - Self::save_to_database( - &self.prover_connection_pool, - job_id.0, - started_at, - blob_urls, - artifacts, - ) - .await?; - Ok(()) - } - } - } - - fn max_attempts(&self) -> u32 { - self.config.max_attempts - } - - async fn get_job_attempts(&self, job_id: &L1BatchNumber) -> anyhow::Result { - let mut prover_storage = self - 
.prover_connection_pool - .connection() - .await - .context("failed to acquire DB connection for BasicWitnessGenerator")?; - prover_storage - .fri_witness_generator_dal() - .get_basic_circuit_witness_job_attempts(*job_id) - .await - .map(|attempts| attempts.unwrap_or(0)) - .context("failed to get job attempts for BasicWitnessGenerator") - } -} diff --git a/prover/crates/bin/witness_generator/src/leaf_aggregation/job_processor.rs b/prover/crates/bin/witness_generator/src/leaf_aggregation/job_processor.rs deleted file mode 100644 index 440636b85fa..00000000000 --- a/prover/crates/bin/witness_generator/src/leaf_aggregation/job_processor.rs +++ /dev/null @@ -1,135 +0,0 @@ -use std::time::Instant; - -use anyhow::Context as _; -use async_trait::async_trait; -use zksync_prover_dal::ProverDal; -use zksync_prover_fri_types::get_current_pod_name; -use zksync_queued_job_processor::JobProcessor; -use zksync_types::basic_fri_types::AggregationRound; - -use crate::{ - artifacts::ArtifactsManager, - leaf_aggregation::{ - LeafAggregationArtifacts, LeafAggregationWitnessGenerator, - LeafAggregationWitnessGeneratorJob, - }, - metrics::WITNESS_GENERATOR_METRICS, - witness_generator::WitnessGenerator, -}; - -#[async_trait] -impl JobProcessor for LeafAggregationWitnessGenerator { - type Job = LeafAggregationWitnessGeneratorJob; - type JobId = u32; - type JobArtifacts = LeafAggregationArtifacts; - - const SERVICE_NAME: &'static str = "fri_leaf_aggregation_witness_generator"; - - async fn get_next_job(&self) -> anyhow::Result> { - let mut prover_connection = self.prover_connection_pool.connection().await?; - let pod_name = get_current_pod_name(); - let Some(metadata) = prover_connection - .fri_witness_generator_dal() - .get_next_leaf_aggregation_job(self.protocol_version, &pod_name) - .await - else { - return Ok(None); - }; - tracing::info!("Processing leaf aggregation job {:?}", metadata.id); - Ok(Some(( - metadata.id, - ::prepare_job( - metadata, - &*self.object_store, - self.keystore.clone(), - ) - .await - .context("prepare_leaf_aggregation_job()")?, - ))) - } - - async fn save_failure(&self, job_id: u32, _started_at: Instant, error: String) -> () { - self.prover_connection_pool - .connection() - .await - .unwrap() - .fri_witness_generator_dal() - .mark_leaf_aggregation_job_failed(&error, job_id) - .await; - } - - #[allow(clippy::async_yields_async)] - async fn process_job( - &self, - _job_id: &Self::JobId, - job: LeafAggregationWitnessGeneratorJob, - started_at: Instant, - ) -> tokio::task::JoinHandle> { - let object_store = self.object_store.clone(); - let max_circuits_in_flight = self.config.max_circuits_in_flight; - tokio::spawn(async move { - ::process_job( - job, - object_store, - Some(max_circuits_in_flight), - started_at, - ) - .await - }) - } - - async fn save_result( - &self, - job_id: u32, - started_at: Instant, - artifacts: LeafAggregationArtifacts, - ) -> anyhow::Result<()> { - let block_number = artifacts.block_number; - let circuit_id = artifacts.circuit_id; - tracing::info!( - "Saving leaf aggregation artifacts for block {} with circuit {}", - block_number.0, - circuit_id, - ); - - let blob_save_started_at = Instant::now(); - - let blob_urls = Self::save_to_bucket(job_id, artifacts.clone(), &*self.object_store).await; - - WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::LeafAggregation.into()] - .observe(blob_save_started_at.elapsed()); - - tracing::info!( - "Saved leaf aggregation artifacts for block {} with circuit {}", - block_number.0, - circuit_id, - ); - 
Self::save_to_database( - &self.prover_connection_pool, - job_id, - started_at, - blob_urls, - artifacts, - ) - .await?; - Ok(()) - } - - fn max_attempts(&self) -> u32 { - self.config.max_attempts - } - - async fn get_job_attempts(&self, job_id: &u32) -> anyhow::Result { - let mut prover_storage = self - .prover_connection_pool - .connection() - .await - .context("failed to acquire DB connection for LeafAggregationWitnessGenerator")?; - prover_storage - .fri_witness_generator_dal() - .get_leaf_aggregation_job_attempts(*job_id) - .await - .map(|attempts| attempts.unwrap_or(0)) - .context("failed to get job attempts for LeafAggregationWitnessGenerator") - } -} diff --git a/prover/crates/bin/witness_generator/src/leaf_aggregation/mod.rs b/prover/crates/bin/witness_generator/src/leaf_aggregation/mod.rs deleted file mode 100644 index 960843259c3..00000000000 --- a/prover/crates/bin/witness_generator/src/leaf_aggregation/mod.rs +++ /dev/null @@ -1,277 +0,0 @@ -use std::{sync::Arc, time::Instant}; - -use anyhow::Context as _; -use async_trait::async_trait; -use circuit_definitions::circuit_definitions::recursion_layer::base_circuit_type_into_recursive_leaf_circuit_type; -use tokio::sync::Semaphore; -use zkevm_test_harness::{ - witness::recursive_aggregation::{ - compute_leaf_params, create_leaf_witness, split_recursion_queue, - }, - zkevm_circuits::scheduler::aux::BaseLayerCircuitType, -}; -use zksync_config::configs::FriWitnessGeneratorConfig; -use zksync_object_store::ObjectStore; -use zksync_prover_dal::{ConnectionPool, Prover}; -use zksync_prover_fri_types::{ - circuit_definitions::{ - boojum::field::goldilocks::GoldilocksField, - circuit_definitions::base_layer::{ - ZkSyncBaseLayerClosedFormInput, ZkSyncBaseLayerVerificationKey, - }, - encodings::recursion_request::RecursionQueueSimulator, - zkevm_circuits::recursion::leaf_layer::input::RecursionLeafParametersWitness, - }, - FriProofWrapper, -}; -use zksync_prover_keystore::keystore::Keystore; -use zksync_types::{ - basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, - prover_dal::LeafAggregationJobMetadata, L1BatchNumber, -}; - -use crate::{ - artifacts::ArtifactsManager, - metrics::WITNESS_GENERATOR_METRICS, - utils::{ - load_proofs_for_job_ids, save_recursive_layer_prover_input_artifacts, - ClosedFormInputWrapper, - }, - witness_generator::WitnessGenerator, -}; - -mod artifacts; -mod job_processor; - -pub struct LeafAggregationWitnessGeneratorJob { - pub(crate) circuit_id: u8, - pub(crate) block_number: L1BatchNumber, - pub(crate) closed_form_inputs: ClosedFormInputWrapper, - pub(crate) proofs_ids: Vec, - pub(crate) base_vk: ZkSyncBaseLayerVerificationKey, - pub(crate) leaf_params: RecursionLeafParametersWitness, -} - -#[derive(Debug)] -pub struct LeafAggregationWitnessGenerator { - config: FriWitnessGeneratorConfig, - object_store: Arc, - prover_connection_pool: ConnectionPool, - protocol_version: ProtocolSemanticVersion, - keystore: Keystore, -} - -#[derive(Clone)] -pub struct LeafAggregationArtifacts { - circuit_id: u8, - block_number: L1BatchNumber, - pub aggregations: Vec<(u64, RecursionQueueSimulator)>, - pub circuit_ids_and_urls: Vec<(u8, String)>, - #[allow(dead_code)] - closed_form_inputs: Vec>, -} - -impl LeafAggregationWitnessGenerator { - pub fn new( - config: FriWitnessGeneratorConfig, - object_store: Arc, - prover_connection_pool: ConnectionPool, - protocol_version: ProtocolSemanticVersion, - keystore: Keystore, - ) -> Self { - Self { - config, - object_store, - prover_connection_pool, - 
protocol_version, - keystore, - } - } -} - -#[tracing::instrument( - skip_all, - fields(l1_batch = %job.block_number, circuit_id = %job.circuit_id) -)] -pub async fn process_leaf_aggregation_job( - started_at: Instant, - job: LeafAggregationWitnessGeneratorJob, - object_store: Arc, - max_circuits_in_flight: usize, -) -> LeafAggregationArtifacts { - let circuit_id = job.circuit_id; - let queues = split_recursion_queue(job.closed_form_inputs.1); - - assert_eq!(circuit_id, job.base_vk.numeric_circuit_type()); - - let aggregations = queues - .iter() - .cloned() - .map(|queue| (circuit_id as u64, queue)) - .collect(); - - let mut proof_ids_iter = job.proofs_ids.into_iter(); - let mut proofs_ids = vec![]; - for queue in queues.iter() { - let proofs_ids_for_queue: Vec<_> = (&mut proof_ids_iter) - .take(queue.num_items as usize) - .collect(); - assert_eq!(queue.num_items as usize, proofs_ids_for_queue.len()); - proofs_ids.push(proofs_ids_for_queue); - } - - let semaphore = Arc::new(Semaphore::new(max_circuits_in_flight)); - - let mut handles = vec![]; - for (circuit_idx, (queue, proofs_ids_for_queue)) in - queues.into_iter().zip(proofs_ids).enumerate() - { - let semaphore = semaphore.clone(); - - let object_store = object_store.clone(); - let queue = queue.clone(); - let base_vk = job.base_vk.clone(); - let leaf_params = (circuit_id, job.leaf_params.clone()); - - let handle = tokio::task::spawn(async move { - let _permit = semaphore - .acquire() - .await - .expect("failed to get permit to process queues chunk"); - - let proofs = load_proofs_for_job_ids(&proofs_ids_for_queue, &*object_store).await; - let base_proofs = proofs - .into_iter() - .map(|wrapper| match wrapper { - FriProofWrapper::Base(base_proof) => base_proof, - FriProofWrapper::Recursive(_) => { - panic!( - "Expected only base proofs for leaf agg {} {}", - job.circuit_id, job.block_number - ); - } - }) - .collect(); - - let (_, circuit) = create_leaf_witness( - circuit_id.into(), - queue, - base_proofs, - &base_vk, - &leaf_params, - ); - - save_recursive_layer_prover_input_artifacts( - job.block_number, - circuit_idx, - vec![circuit], - AggregationRound::LeafAggregation, - 0, - &*object_store, - None, - ) - .await - }); - - handles.push(handle); - } - - let circuit_ids_and_urls_results = futures::future::join_all(handles).await; - let circuit_ids_and_urls = circuit_ids_and_urls_results - .into_iter() - .flat_map(|x| x.unwrap()) - .collect(); - - WITNESS_GENERATOR_METRICS.witness_generation_time[&AggregationRound::LeafAggregation.into()] - .observe(started_at.elapsed()); - - tracing::info!( - "Leaf witness generation for block {} with circuit id {}: is complete in {:?}.", - job.block_number.0, - circuit_id, - started_at.elapsed(), - ); - - LeafAggregationArtifacts { - circuit_id, - block_number: job.block_number, - aggregations, - circuit_ids_and_urls, - closed_form_inputs: job.closed_form_inputs.0, - } -} - -#[async_trait] -impl WitnessGenerator for LeafAggregationWitnessGenerator { - type Job = LeafAggregationWitnessGeneratorJob; - type Metadata = LeafAggregationJobMetadata; - type Artifacts = LeafAggregationArtifacts; - - #[tracing::instrument( - skip_all, - fields(l1_batch = %job.block_number, circuit_id = %job.circuit_id) - )] - async fn process_job( - job: LeafAggregationWitnessGeneratorJob, - object_store: Arc, - max_circuits_in_flight: Option, - started_at: Instant, - ) -> anyhow::Result { - tracing::info!( - "Starting witness generation of type {:?} for block {} with circuit {}", - AggregationRound::LeafAggregation, - 
job.block_number.0, - job.circuit_id, - ); - Ok(process_leaf_aggregation_job( - started_at, - job, - object_store, - max_circuits_in_flight.unwrap(), - ) - .await) - } - - #[tracing::instrument( - skip_all, - fields(l1_batch = %metadata.block_number, circuit_id = %metadata.circuit_id) - )] - async fn prepare_job( - metadata: LeafAggregationJobMetadata, - object_store: &dyn ObjectStore, - keystore: Keystore, - ) -> anyhow::Result { - let started_at = Instant::now(); - let closed_form_input = - LeafAggregationWitnessGenerator::get_artifacts(&metadata, object_store).await?; - - WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::LeafAggregation.into()] - .observe(started_at.elapsed()); - - let started_at = Instant::now(); - let base_vk = keystore - .load_base_layer_verification_key(metadata.circuit_id) - .context("get_base_layer_vk_for_circuit_type()")?; - - let leaf_circuit_id = base_circuit_type_into_recursive_leaf_circuit_type( - BaseLayerCircuitType::from_numeric_value(metadata.circuit_id), - ) as u8; - - let leaf_vk = keystore - .load_recursive_layer_verification_key(leaf_circuit_id) - .context("get_recursive_layer_vk_for_circuit_type()")?; - let leaf_params = compute_leaf_params(metadata.circuit_id, base_vk.clone(), leaf_vk); - - WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::LeafAggregation.into()] - .observe(started_at.elapsed()); - - Ok(LeafAggregationWitnessGeneratorJob { - circuit_id: metadata.circuit_id, - block_number: metadata.block_number, - closed_form_inputs: closed_form_input, - proofs_ids: metadata.prover_job_ids_for_proofs, - base_vk, - leaf_params, - }) - } -} diff --git a/prover/crates/bin/witness_generator/src/lib.rs b/prover/crates/bin/witness_generator/src/lib.rs index b24b548a49b..651535e2e80 100644 --- a/prover/crates/bin/witness_generator/src/lib.rs +++ b/prover/crates/bin/witness_generator/src/lib.rs @@ -2,16 +2,11 @@ #![feature(generic_const_exprs)] pub mod artifacts; -pub mod basic_circuits; -pub mod leaf_aggregation; pub mod metrics; -pub mod node_aggregation; pub mod precalculated_merkle_paths_provider; -pub mod recursion_tip; -pub mod scheduler; +pub mod rounds; mod storage_oracle; #[cfg(test)] mod tests; pub mod utils; mod witness; -pub mod witness_generator; diff --git a/prover/crates/bin/witness_generator/src/main.rs b/prover/crates/bin/witness_generator/src/main.rs index 9d75d8ddc6f..38aacf5d7a5 100644 --- a/prover/crates/bin/witness_generator/src/main.rs +++ b/prover/crates/bin/witness_generator/src/main.rs @@ -20,9 +20,10 @@ use zksync_types::{basic_fri_types::AggregationRound, protocol_version::Protocol use zksync_utils::wait_for_tasks::ManagedTasks; use zksync_vlog::prometheus::PrometheusExporterConfig; use zksync_witness_generator::{ - basic_circuits::BasicWitnessGenerator, leaf_aggregation::LeafAggregationWitnessGenerator, - metrics::SERVER_METRICS, node_aggregation::NodeAggregationWitnessGenerator, - recursion_tip::RecursionTipWitnessGenerator, scheduler::SchedulerWitnessGenerator, + metrics::SERVER_METRICS, + rounds::{ + BasicCircuits, LeafAggregation, NodeAggregation, RecursionTip, Scheduler, WitnessGenerator, + }, }; #[cfg(not(target_env = "msvc"))] @@ -132,15 +133,14 @@ async fn main() -> anyhow::Result<()> { .listener_port }; - let prover_connection_pool = - ConnectionPool::::singleton(database_secrets.prover_url()?) - .build() - .await - .context("failed to build a prover_connection_pool")?; + let connection_pool = ConnectionPool::::singleton(database_secrets.prover_url()?) 
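+ // Still the prover-typed pool (ConnectionPool::<Prover>); only the binding
+ // name changed. The match on `round` below clones it into whichever round's
+ // generator this process was started for.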
+ .build() + .await + .context("failed to build a prover_connection_pool")?; let (stop_sender, stop_receiver) = watch::channel(false); let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION; - ensure_protocol_alignment(&prover_connection_pool, protocol_version, &keystore) + ensure_protocol_alignment(&connection_pool, protocol_version, &keystore) .await .unwrap_or_else(|err| panic!("Protocol alignment check failed: {:?}", err)); @@ -191,65 +191,71 @@ async fn main() -> anyhow::Result<()> { &protocol_version ); + let public_blob_store = match config.shall_save_to_public_bucket { + false => None, + true => Some( + ObjectStoreFactory::new( + prover_config + .public_object_store + .clone() + .expect("public_object_store"), + ) + .create_store() + .await?, + ), + }; + let witness_generator_task = match round { AggregationRound::BasicCircuits => { - let public_blob_store = match config.shall_save_to_public_bucket { - false => None, - true => Some( - ObjectStoreFactory::new( - prover_config - .public_object_store - .clone() - .expect("public_object_store"), - ) - .create_store() - .await?, - ), - }; - let generator = BasicWitnessGenerator::new( + let generator = WitnessGenerator::::new( config.clone(), store_factory.create_store().await?, public_blob_store, - prover_connection_pool.clone(), + connection_pool.clone(), protocol_version, + keystore.clone(), ); generator.run(stop_receiver.clone(), opt.batch_size) } AggregationRound::LeafAggregation => { - let generator = LeafAggregationWitnessGenerator::new( + let generator = WitnessGenerator::::new( config.clone(), store_factory.create_store().await?, - prover_connection_pool.clone(), + public_blob_store, + connection_pool.clone(), protocol_version, keystore.clone(), ); generator.run(stop_receiver.clone(), opt.batch_size) } AggregationRound::NodeAggregation => { - let generator = NodeAggregationWitnessGenerator::new( + let generator = WitnessGenerator::::new( config.clone(), store_factory.create_store().await?, - prover_connection_pool.clone(), + public_blob_store, + connection_pool.clone(), protocol_version, keystore.clone(), ); generator.run(stop_receiver.clone(), opt.batch_size) } AggregationRound::RecursionTip => { - let generator = RecursionTipWitnessGenerator::new( + let generator = WitnessGenerator::::new( config.clone(), store_factory.create_store().await?, - prover_connection_pool.clone(), + public_blob_store, + connection_pool.clone(), protocol_version, keystore.clone(), ); generator.run(stop_receiver.clone(), opt.batch_size) } AggregationRound::Scheduler => { - let generator = SchedulerWitnessGenerator::new( + let generator = WitnessGenerator::::new( config.clone(), store_factory.create_store().await?, - prover_connection_pool.clone(), + public_blob_store, + connection_pool.clone(), protocol_version, keystore.clone(), ); diff --git a/prover/crates/bin/witness_generator/src/node_aggregation/job_processor.rs b/prover/crates/bin/witness_generator/src/node_aggregation/job_processor.rs deleted file mode 100644 index 0f66c988c10..00000000000 --- a/prover/crates/bin/witness_generator/src/node_aggregation/job_processor.rs +++ /dev/null @@ -1,126 +0,0 @@ -use std::time::Instant; - -use anyhow::Context as _; -use async_trait::async_trait; -use zksync_prover_dal::ProverDal; -use zksync_prover_fri_types::get_current_pod_name; -use zksync_queued_job_processor::JobProcessor; -use zksync_types::basic_fri_types::AggregationRound; - -use crate::{ - artifacts::ArtifactsManager, - metrics::WITNESS_GENERATOR_METRICS, - node_aggregation::{ - 
NodeAggregationArtifacts, NodeAggregationWitnessGenerator, - NodeAggregationWitnessGeneratorJob, - }, - witness_generator::WitnessGenerator, -}; - -#[async_trait] -impl JobProcessor for NodeAggregationWitnessGenerator { - type Job = NodeAggregationWitnessGeneratorJob; - type JobId = u32; - type JobArtifacts = NodeAggregationArtifacts; - - const SERVICE_NAME: &'static str = "fri_node_aggregation_witness_generator"; - - async fn get_next_job(&self) -> anyhow::Result> { - let mut prover_connection = self.prover_connection_pool.connection().await?; - let pod_name = get_current_pod_name(); - let Some(metadata) = prover_connection - .fri_witness_generator_dal() - .get_next_node_aggregation_job(self.protocol_version, &pod_name) - .await - else { - return Ok(None); - }; - tracing::info!("Processing node aggregation job {:?}", metadata.id); - Ok(Some(( - metadata.id, - ::prepare_job( - metadata, - &*self.object_store, - self.keystore.clone(), - ) - .await - .context("prepare_job()")?, - ))) - } - - async fn save_failure(&self, job_id: u32, _started_at: Instant, error: String) -> () { - self.prover_connection_pool - .connection() - .await - .unwrap() - .fri_witness_generator_dal() - .mark_node_aggregation_job_failed(&error, job_id) - .await; - } - - #[allow(clippy::async_yields_async)] - async fn process_job( - &self, - _job_id: &Self::JobId, - job: NodeAggregationWitnessGeneratorJob, - started_at: Instant, - ) -> tokio::task::JoinHandle> { - let object_store = self.object_store.clone(); - let max_circuits_in_flight = self.config.max_circuits_in_flight; - tokio::spawn(async move { - ::process_job( - job, - object_store, - Some(max_circuits_in_flight), - started_at, - ) - .await - }) - } - - #[tracing::instrument( - skip_all, - fields(l1_batch = % artifacts.block_number, circuit_id = % artifacts.circuit_id) - )] - async fn save_result( - &self, - job_id: u32, - started_at: Instant, - artifacts: NodeAggregationArtifacts, - ) -> anyhow::Result<()> { - let blob_save_started_at = Instant::now(); - - let blob_urls = Self::save_to_bucket(job_id, artifacts.clone(), &*self.object_store).await; - - WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::NodeAggregation.into()] - .observe(blob_save_started_at.elapsed()); - - Self::save_to_database( - &self.prover_connection_pool, - job_id, - started_at, - blob_urls, - artifacts, - ) - .await?; - Ok(()) - } - - fn max_attempts(&self) -> u32 { - self.config.max_attempts - } - - async fn get_job_attempts(&self, job_id: &u32) -> anyhow::Result { - let mut prover_storage = self - .prover_connection_pool - .connection() - .await - .context("failed to acquire DB connection for NodeAggregationWitnessGenerator")?; - prover_storage - .fri_witness_generator_dal() - .get_node_aggregation_job_attempts(*job_id) - .await - .map(|attempts| attempts.unwrap_or(0)) - .context("failed to get job attempts for NodeAggregationWitnessGenerator") - } -} diff --git a/prover/crates/bin/witness_generator/src/recursion_tip/job_processor.rs b/prover/crates/bin/witness_generator/src/recursion_tip/job_processor.rs deleted file mode 100644 index 9ab7d934a3e..00000000000 --- a/prover/crates/bin/witness_generator/src/recursion_tip/job_processor.rs +++ /dev/null @@ -1,136 +0,0 @@ -use std::time::Instant; - -use anyhow::Context as _; -use async_trait::async_trait; -use zksync_prover_dal::ProverDal; -use zksync_prover_fri_types::get_current_pod_name; -use zksync_queued_job_processor::JobProcessor; -use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; - -use crate::{ - 
artifacts::ArtifactsManager, - metrics::WITNESS_GENERATOR_METRICS, - recursion_tip::{ - RecursionTipArtifacts, RecursionTipJobMetadata, RecursionTipWitnessGenerator, - RecursionTipWitnessGeneratorJob, - }, - witness_generator::WitnessGenerator, -}; - -#[async_trait] -impl JobProcessor for RecursionTipWitnessGenerator { - type Job = RecursionTipWitnessGeneratorJob; - type JobId = L1BatchNumber; - type JobArtifacts = RecursionTipArtifacts; - - const SERVICE_NAME: &'static str = "recursion_tip_witness_generator"; - - async fn get_next_job(&self) -> anyhow::Result> { - let mut prover_connection = self.prover_connection_pool.connection().await?; - let pod_name = get_current_pod_name(); - let Some((l1_batch_number, number_of_final_node_jobs)) = prover_connection - .fri_witness_generator_dal() - .get_next_recursion_tip_witness_job(self.protocol_version, &pod_name) - .await - else { - return Ok(None); - }; - - let final_node_proof_job_ids = prover_connection - .fri_prover_jobs_dal() - .get_final_node_proof_job_ids_for(l1_batch_number) - .await; - - assert_eq!( - final_node_proof_job_ids.len(), - number_of_final_node_jobs as usize, - "recursion tip witness job was scheduled without all final node jobs being completed; expected {}, got {}", - number_of_final_node_jobs, final_node_proof_job_ids.len() - ); - - Ok(Some(( - l1_batch_number, - ::prepare_job( - RecursionTipJobMetadata { - l1_batch_number, - final_node_proof_job_ids, - }, - &*self.object_store, - self.keystore.clone(), - ) - .await - .context("prepare_job()")?, - ))) - } - - async fn save_failure(&self, job_id: L1BatchNumber, _started_at: Instant, error: String) -> () { - self.prover_connection_pool - .connection() - .await - .unwrap() - .fri_witness_generator_dal() - .mark_recursion_tip_job_failed(&error, job_id) - .await; - } - - #[allow(clippy::async_yields_async)] - async fn process_job( - &self, - _job_id: &Self::JobId, - job: RecursionTipWitnessGeneratorJob, - started_at: Instant, - ) -> tokio::task::JoinHandle> { - let object_store = self.object_store.clone(); - tokio::spawn(async move { - ::process_job(job, object_store, None, started_at).await - }) - } - - #[tracing::instrument( - skip_all, - fields(l1_batch = %job_id) - )] - async fn save_result( - &self, - job_id: L1BatchNumber, - started_at: Instant, - artifacts: RecursionTipArtifacts, - ) -> anyhow::Result<()> { - let blob_save_started_at = Instant::now(); - - let blob_urls = - Self::save_to_bucket(job_id.0, artifacts.clone(), &*self.object_store).await; - - WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::RecursionTip.into()] - .observe(blob_save_started_at.elapsed()); - - Self::save_to_database( - &self.prover_connection_pool, - job_id.0, - started_at, - blob_urls, - artifacts, - ) - .await?; - - Ok(()) - } - - fn max_attempts(&self) -> u32 { - self.config.max_attempts - } - - async fn get_job_attempts(&self, job_id: &L1BatchNumber) -> anyhow::Result { - let mut prover_storage = self - .prover_connection_pool - .connection() - .await - .context("failed to acquire DB connection for RecursionTipWitnessGenerator")?; - prover_storage - .fri_witness_generator_dal() - .get_recursion_tip_witness_job_attempts(*job_id) - .await - .map(|attempts| attempts.unwrap_or(0)) - .context("failed to get job attempts for RecursionTipWitnessGenerator") - } -} diff --git a/prover/crates/bin/witness_generator/src/basic_circuits/artifacts.rs b/prover/crates/bin/witness_generator/src/rounds/basic_circuits/artifacts.rs similarity index 81% rename from 
prover/crates/bin/witness_generator/src/basic_circuits/artifacts.rs rename to prover/crates/bin/witness_generator/src/rounds/basic_circuits/artifacts.rs index aa85d185e66..2936634fc97 100644 --- a/prover/crates/bin/witness_generator/src/basic_circuits/artifacts.rs +++ b/prover/crates/bin/witness_generator/src/rounds/basic_circuits/artifacts.rs @@ -1,4 +1,4 @@ -use std::time::Instant; +use std::{sync::Arc, time::Instant}; use async_trait::async_trait; use zksync_object_store::ObjectStore; @@ -9,12 +9,12 @@ use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; use crate::{ artifacts::ArtifactsManager, - basic_circuits::{BasicCircuitArtifacts, BasicWitnessGenerator, BasicWitnessGeneratorJob}, + rounds::basic_circuits::{BasicCircuitArtifacts, BasicCircuits, BasicWitnessGeneratorJob}, utils::SchedulerPartialInputWrapper, }; #[async_trait] -impl ArtifactsManager for BasicWitnessGenerator { +impl ArtifactsManager for BasicCircuits { type InputMetadata = L1BatchNumber; type InputArtifacts = BasicWitnessGeneratorJob; type OutputArtifacts = BasicCircuitArtifacts; @@ -36,8 +36,19 @@ impl ArtifactsManager for BasicWitnessGenerator { job_id: u32, artifacts: Self::OutputArtifacts, object_store: &dyn ObjectStore, + shall_save_to_public_bucket: bool, + public_blob_store: Option>, ) -> String { - let aux_output_witness_wrapper = AuxOutputWitnessWrapper(artifacts.aux_output_witness); + let aux_output_witness_wrapper = + AuxOutputWitnessWrapper(artifacts.aux_output_witness.clone()); + if shall_save_to_public_bucket { + public_blob_store.as_deref() + .expect("public_object_store shall not be empty while running with shall_save_to_public_bucket config") + .put(L1BatchNumber(job_id), &aux_output_witness_wrapper) + .await + .unwrap(); + } + object_store .put(L1BatchNumber(job_id), &aux_output_witness_wrapper) .await diff --git a/prover/crates/bin/witness_generator/src/rounds/basic_circuits/mod.rs b/prover/crates/bin/witness_generator/src/rounds/basic_circuits/mod.rs new file mode 100644 index 00000000000..adb2bf72d04 --- /dev/null +++ b/prover/crates/bin/witness_generator/src/rounds/basic_circuits/mod.rs @@ -0,0 +1,136 @@ +use std::{sync::Arc, time::Instant}; + +use async_trait::async_trait; +use circuit_definitions::zkevm_circuits::scheduler::{ + block_header::BlockAuxilaryOutputWitness, input::SchedulerCircuitInstanceWitness, +}; +use zksync_multivm::circuit_sequencer_api_latest::boojum::{ + field::goldilocks::{GoldilocksExt2, GoldilocksField}, + gadgets::recursion::recursive_tree_hasher::CircuitGoldilocksPoseidon2Sponge, +}; +use zksync_object_store::ObjectStore; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; +use zksync_prover_fri_types::get_current_pod_name; +use zksync_prover_interface::inputs::WitnessInputData; +use zksync_prover_keystore::keystore::Keystore; +use zksync_types::{ + basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, L1BatchNumber, +}; + +use crate::{ + artifacts::ArtifactsManager, + metrics::WITNESS_GENERATOR_METRICS, + rounds::{basic_circuits::utils::generate_witness, JobManager}, +}; + +mod artifacts; +mod utils; + +#[derive(Clone)] +pub struct BasicCircuitArtifacts { + pub(super) circuit_urls: Vec<(u8, String)>, + pub(super) queue_urls: Vec<(u8, String, usize)>, + pub(super) scheduler_witness: SchedulerCircuitInstanceWitness< + GoldilocksField, + CircuitGoldilocksPoseidon2Sponge, + GoldilocksExt2, + >, + pub(super) aux_output_witness: BlockAuxilaryOutputWitness, +} + +#[derive(Clone)] +pub struct BasicWitnessGeneratorJob { + 
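+ // One queued L1 batch plus its raw inputs, fetched from the object store by
+ // get_artifacts; process_job below expands them into per-circuit witnesses.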
pub(super) block_number: L1BatchNumber, + pub(super) data: WitnessInputData, +} + +type Witness = ( + Vec<(u8, String)>, + Vec<(u8, String, usize)>, + SchedulerCircuitInstanceWitness< + GoldilocksField, + CircuitGoldilocksPoseidon2Sponge, + GoldilocksExt2, + >, + BlockAuxilaryOutputWitness, +); + +pub struct BasicCircuits; + +#[async_trait] +impl JobManager for BasicCircuits { + type Job = BasicWitnessGeneratorJob; + type Metadata = L1BatchNumber; + + const ROUND: AggregationRound = AggregationRound::BasicCircuits; + const SERVICE_NAME: &'static str = "fri_basic_circuit_witness_generator"; + + async fn process_job( + job: BasicWitnessGeneratorJob, + object_store: Arc, + max_circuits_in_flight: usize, + started_at: Instant, + ) -> anyhow::Result { + let BasicWitnessGeneratorJob { + block_number, + data: job, + } = job; + + tracing::info!( + "Starting witness generation of type {:?} for block {}", + AggregationRound::BasicCircuits, + block_number.0 + ); + + let (circuit_urls, queue_urls, scheduler_witness, aux_output_witness) = + generate_witness(block_number, object_store, job, max_circuits_in_flight).await; + WITNESS_GENERATOR_METRICS.witness_generation_time[&AggregationRound::BasicCircuits.into()] + .observe(started_at.elapsed()); + tracing::info!( + "Witness generation for block {} is complete in {:?}", + block_number.0, + started_at.elapsed() + ); + + Ok(BasicCircuitArtifacts { + circuit_urls, + queue_urls, + scheduler_witness, + aux_output_witness, + }) + } + + async fn prepare_job( + metadata: L1BatchNumber, + object_store: &dyn ObjectStore, + _keystore: Keystore, + ) -> anyhow::Result { + tracing::info!("Processing FRI basic witness-gen for block {}", metadata.0); + let started_at = Instant::now(); + let job = Self::get_artifacts(&metadata, object_store).await?; + + WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::BasicCircuits.into()] + .observe(started_at.elapsed()); + + Ok(job) + } + + async fn get_metadata( + connection_pool: ConnectionPool, + protocol_version: ProtocolSemanticVersion, + ) -> anyhow::Result> { + let pod_name = get_current_pod_name(); + if let Some(l1_batch_number) = connection_pool + .connection() + .await + .unwrap() + .fri_witness_generator_dal() + .get_next_basic_circuit_witness_job(protocol_version, &pod_name) + .await + { + Ok(Some((l1_batch_number.0, l1_batch_number))) + } else { + Ok(None) + } + } +} diff --git a/prover/crates/bin/witness_generator/src/basic_circuits/mod.rs b/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs similarity index 74% rename from prover/crates/bin/witness_generator/src/basic_circuits/mod.rs rename to prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs index e76ef180c52..23ae1b0f2af 100644 --- a/prover/crates/bin/witness_generator/src/basic_circuits/mod.rs +++ b/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs @@ -2,182 +2,43 @@ use std::{ collections::HashSet, hash::{DefaultHasher, Hash, Hasher}, sync::Arc, - time::Instant, }; -use async_trait::async_trait; use circuit_definitions::{ circuit_definitions::base_layer::{ZkSyncBaseLayerCircuit, ZkSyncBaseLayerStorage}, encodings::recursion_request::RecursionQueueSimulator, - zkevm_circuits::{ - fsm_input_output::ClosedFormInputCompactFormWitness, - scheduler::{ - block_header::BlockAuxilaryOutputWitness, input::SchedulerCircuitInstanceWitness, - }, - }, + zkevm_circuits::fsm_input_output::ClosedFormInputCompactFormWitness, }; use tokio::sync::Semaphore; use tracing::Instrument; use 
zkevm_test_harness::witness::oracle::WitnessGenerationArtifact; -use zksync_config::configs::FriWitnessGeneratorConfig; use zksync_multivm::{ circuit_sequencer_api_latest::{ - boojum::{ - field::goldilocks::{GoldilocksExt2, GoldilocksField}, - gadgets::recursion::recursive_tree_hasher::CircuitGoldilocksPoseidon2Sponge, - }, - geometry_config::get_geometry_config, + boojum::field::goldilocks::GoldilocksField, geometry_config::get_geometry_config, }, interface::storage::StorageView, vm_latest::{constants::MAX_CYCLES_FOR_TX, HistoryDisabled, StorageOracle as VmStorageOracle}, zk_evm_latest::ethereum_types::Address, }; use zksync_object_store::ObjectStore; -use zksync_prover_dal::{ConnectionPool, Prover}; use zksync_prover_fri_types::{keys::ClosedFormInputKey, CircuitAuxData}; use zksync_prover_interface::inputs::WitnessInputData; -use zksync_prover_keystore::keystore::Keystore; use zksync_system_constants::BOOTLOADER_ADDRESS; -use zksync_types::{ - basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, L1BatchNumber, -}; +use zksync_types::L1BatchNumber; use crate::{ - artifacts::ArtifactsManager, - metrics::WITNESS_GENERATOR_METRICS, precalculated_merkle_paths_provider::PrecalculatedMerklePathsProvider, + rounds::basic_circuits::Witness, storage_oracle::StorageOracle, utils::{ expand_bootloader_contents, save_circuit, save_ram_premutation_queue_witness, ClosedFormInputWrapper, KZG_TRUSTED_SETUP_FILE, }, witness::WitnessStorage, - witness_generator::WitnessGenerator, }; -mod artifacts; -pub mod job_processor; - -#[derive(Clone)] -pub struct BasicCircuitArtifacts { - pub(super) circuit_urls: Vec<(u8, String)>, - pub(super) queue_urls: Vec<(u8, String, usize)>, - pub(super) scheduler_witness: SchedulerCircuitInstanceWitness< - GoldilocksField, - CircuitGoldilocksPoseidon2Sponge, - GoldilocksExt2, - >, - pub(super) aux_output_witness: BlockAuxilaryOutputWitness, -} - -#[derive(Clone)] -pub struct BasicWitnessGeneratorJob { - pub(super) block_number: L1BatchNumber, - pub(super) data: WitnessInputData, -} - -#[derive(Debug)] -pub struct BasicWitnessGenerator { - config: Arc, - object_store: Arc, - public_blob_store: Option>, - prover_connection_pool: ConnectionPool, - protocol_version: ProtocolSemanticVersion, -} - -type Witness = ( - Vec<(u8, String)>, - Vec<(u8, String, usize)>, - SchedulerCircuitInstanceWitness< - GoldilocksField, - CircuitGoldilocksPoseidon2Sponge, - GoldilocksExt2, - >, - BlockAuxilaryOutputWitness, -); - -impl BasicWitnessGenerator { - pub fn new( - config: FriWitnessGeneratorConfig, - object_store: Arc, - public_blob_store: Option>, - prover_connection_pool: ConnectionPool, - protocol_version: ProtocolSemanticVersion, - ) -> Self { - Self { - config: Arc::new(config), - object_store, - public_blob_store, - prover_connection_pool, - protocol_version, - } - } -} - -#[async_trait] -impl WitnessGenerator for BasicWitnessGenerator { - type Job = BasicWitnessGeneratorJob; - type Metadata = L1BatchNumber; - type Artifacts = BasicCircuitArtifacts; - - async fn process_job( - job: BasicWitnessGeneratorJob, - object_store: Arc, - max_circuits_in_flight: Option, - started_at: Instant, - ) -> anyhow::Result { - let BasicWitnessGeneratorJob { - block_number, - data: job, - } = job; - - tracing::info!( - "Starting witness generation of type {:?} for block {}", - AggregationRound::BasicCircuits, - block_number.0 - ); - - let (circuit_urls, queue_urls, scheduler_witness, aux_output_witness) = generate_witness( - block_number, - object_store, - job, - 
max_circuits_in_flight.unwrap(), - ) - .await; - WITNESS_GENERATOR_METRICS.witness_generation_time[&AggregationRound::BasicCircuits.into()] - .observe(started_at.elapsed()); - tracing::info!( - "Witness generation for block {} is complete in {:?}", - block_number.0, - started_at.elapsed() - ); - - Ok(BasicCircuitArtifacts { - circuit_urls, - queue_urls, - scheduler_witness, - aux_output_witness, - }) - } - - async fn prepare_job( - metadata: L1BatchNumber, - object_store: &dyn ObjectStore, - _keystore: Keystore, - ) -> anyhow::Result { - tracing::info!("Processing FRI basic witness-gen for block {}", metadata.0); - let started_at = Instant::now(); - let job = Self::get_artifacts(&metadata, object_store).await?; - - WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::BasicCircuits.into()] - .observe(started_at.elapsed()); - - Ok(job) - } -} - #[tracing::instrument(skip_all, fields(l1_batch = %block_number))] -async fn generate_witness( +pub(super) async fn generate_witness( block_number: L1BatchNumber, object_store: Arc, input: WitnessInputData, diff --git a/prover/crates/bin/witness_generator/src/leaf_aggregation/artifacts.rs b/prover/crates/bin/witness_generator/src/rounds/leaf_aggregation/artifacts.rs similarity index 95% rename from prover/crates/bin/witness_generator/src/leaf_aggregation/artifacts.rs rename to prover/crates/bin/witness_generator/src/rounds/leaf_aggregation/artifacts.rs index c83997e36b8..e3c97dd257c 100644 --- a/prover/crates/bin/witness_generator/src/leaf_aggregation/artifacts.rs +++ b/prover/crates/bin/witness_generator/src/rounds/leaf_aggregation/artifacts.rs @@ -9,13 +9,13 @@ use zksync_types::{basic_fri_types::AggregationRound, prover_dal::LeafAggregatio use crate::{ artifacts::{AggregationBlobUrls, ArtifactsManager}, - leaf_aggregation::{LeafAggregationArtifacts, LeafAggregationWitnessGenerator}, metrics::WITNESS_GENERATOR_METRICS, + rounds::leaf_aggregation::{LeafAggregation, LeafAggregationArtifacts}, utils::{AggregationWrapper, ClosedFormInputWrapper}, }; #[async_trait] -impl ArtifactsManager for LeafAggregationWitnessGenerator { +impl ArtifactsManager for LeafAggregation { type InputMetadata = LeafAggregationJobMetadata; type InputArtifacts = ClosedFormInputWrapper; type OutputArtifacts = LeafAggregationArtifacts; @@ -46,6 +46,8 @@ impl ArtifactsManager for LeafAggregationWitnessGenerator { _job_id: u32, artifacts: Self::OutputArtifacts, object_store: &dyn ObjectStore, + _shall_save_to_public_bucket: bool, + _public_blob_store: Option>, ) -> AggregationBlobUrls { let started_at = Instant::now(); let key = AggregationsKey { diff --git a/prover/crates/bin/witness_generator/src/rounds/leaf_aggregation/mod.rs b/prover/crates/bin/witness_generator/src/rounds/leaf_aggregation/mod.rs new file mode 100644 index 00000000000..451ceee390d --- /dev/null +++ b/prover/crates/bin/witness_generator/src/rounds/leaf_aggregation/mod.rs @@ -0,0 +1,250 @@ +use std::{sync::Arc, time::Instant}; + +use anyhow::Context as _; +use async_trait::async_trait; +use circuit_definitions::circuit_definitions::recursion_layer::base_circuit_type_into_recursive_leaf_circuit_type; +use tokio::sync::Semaphore; +use zkevm_test_harness::{ + witness::recursive_aggregation::{ + compute_leaf_params, create_leaf_witness, split_recursion_queue, + }, + zkevm_circuits::scheduler::aux::BaseLayerCircuitType, +}; +use zksync_object_store::ObjectStore; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; +use zksync_prover_fri_types::{ + circuit_definitions::{ + 
boojum::field::goldilocks::GoldilocksField, + circuit_definitions::base_layer::{ + ZkSyncBaseLayerClosedFormInput, ZkSyncBaseLayerVerificationKey, + }, + encodings::recursion_request::RecursionQueueSimulator, + zkevm_circuits::recursion::leaf_layer::input::RecursionLeafParametersWitness, + }, + get_current_pod_name, FriProofWrapper, +}; +use zksync_prover_keystore::keystore::Keystore; +use zksync_types::{ + basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, + prover_dal::LeafAggregationJobMetadata, L1BatchNumber, +}; + +use crate::{ + artifacts::ArtifactsManager, + metrics::WITNESS_GENERATOR_METRICS, + rounds::JobManager, + utils::{ + load_proofs_for_job_ids, save_recursive_layer_prover_input_artifacts, + ClosedFormInputWrapper, + }, +}; + +mod artifacts; + +pub struct LeafAggregationWitnessGeneratorJob { + pub(crate) circuit_id: u8, + pub(crate) block_number: L1BatchNumber, + pub(crate) closed_form_inputs: ClosedFormInputWrapper, + pub(crate) proofs_ids: Vec, + pub(crate) base_vk: ZkSyncBaseLayerVerificationKey, + pub(crate) leaf_params: RecursionLeafParametersWitness, +} + +#[derive(Clone)] +pub struct LeafAggregationArtifacts { + circuit_id: u8, + block_number: L1BatchNumber, + pub aggregations: Vec<(u64, RecursionQueueSimulator)>, + pub circuit_ids_and_urls: Vec<(u8, String)>, + #[allow(dead_code)] + closed_form_inputs: Vec>, +} + +pub struct LeafAggregation; + +#[async_trait] +impl JobManager for LeafAggregation { + type Job = LeafAggregationWitnessGeneratorJob; + type Metadata = LeafAggregationJobMetadata; + + const ROUND: AggregationRound = AggregationRound::LeafAggregation; + const SERVICE_NAME: &'static str = "fri_leaf_aggregation_witness_generator"; + + #[tracing::instrument( + skip_all, + fields(l1_batch = %job.block_number, circuit_id = %job.circuit_id) + )] + async fn process_job( + job: LeafAggregationWitnessGeneratorJob, + object_store: Arc, + max_circuits_in_flight: usize, + started_at: Instant, + ) -> anyhow::Result { + tracing::info!( + "Starting witness generation of type {:?} for block {} with circuit {}", + AggregationRound::LeafAggregation, + job.block_number.0, + job.circuit_id, + ); + let circuit_id = job.circuit_id; + let queues = split_recursion_queue(job.closed_form_inputs.1); + + assert_eq!(circuit_id, job.base_vk.numeric_circuit_type()); + + let aggregations = queues + .iter() + .cloned() + .map(|queue| (circuit_id as u64, queue)) + .collect(); + + let mut proof_ids_iter = job.proofs_ids.into_iter(); + let mut proofs_ids = vec![]; + for queue in queues.iter() { + let proofs_ids_for_queue: Vec<_> = (&mut proof_ids_iter) + .take(queue.num_items as usize) + .collect(); + assert_eq!(queue.num_items as usize, proofs_ids_for_queue.len()); + proofs_ids.push(proofs_ids_for_queue); + } + + let semaphore = Arc::new(Semaphore::new(max_circuits_in_flight)); + + let mut handles = vec![]; + for (circuit_idx, (queue, proofs_ids_for_queue)) in + queues.into_iter().zip(proofs_ids).enumerate() + { + let semaphore = semaphore.clone(); + + let object_store = object_store.clone(); + let queue = queue.clone(); + let base_vk = job.base_vk.clone(); + let leaf_params = (circuit_id, job.leaf_params.clone()); + + let handle = tokio::task::spawn(async move { + let _permit = semaphore + .acquire() + .await + .expect("failed to get permit to process queues chunk"); + + let proofs = load_proofs_for_job_ids(&proofs_ids_for_queue, &*object_store).await; + let base_proofs = proofs + .into_iter() + .map(|wrapper| match wrapper { + FriProofWrapper::Base(base_proof) => 
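+ // Only base-layer proofs are valid inputs at the leaf round, so the
+ // Recursive arm below panics instead of aggregating.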
base_proof, + FriProofWrapper::Recursive(_) => { + panic!( + "Expected only base proofs for leaf agg {} {}", + job.circuit_id, job.block_number + ); + } + }) + .collect(); + + let (_, circuit) = create_leaf_witness( + circuit_id.into(), + queue, + base_proofs, + &base_vk, + &leaf_params, + ); + + save_recursive_layer_prover_input_artifacts( + job.block_number, + circuit_idx, + vec![circuit], + AggregationRound::LeafAggregation, + 0, + &*object_store, + None, + ) + .await + }); + + handles.push(handle); + } + + let circuit_ids_and_urls_results = futures::future::join_all(handles).await; + let circuit_ids_and_urls = circuit_ids_and_urls_results + .into_iter() + .flat_map(|x| x.unwrap()) + .collect(); + + WITNESS_GENERATOR_METRICS.witness_generation_time + [&AggregationRound::LeafAggregation.into()] + .observe(started_at.elapsed()); + + tracing::info!( + "Leaf witness generation for block {} with circuit id {}: is complete in {:?}.", + job.block_number.0, + circuit_id, + started_at.elapsed(), + ); + + Ok(LeafAggregationArtifacts { + circuit_id, + block_number: job.block_number, + aggregations, + circuit_ids_and_urls, + closed_form_inputs: job.closed_form_inputs.0, + }) + } + + #[tracing::instrument( + skip_all, + fields(l1_batch = %metadata.block_number, circuit_id = %metadata.circuit_id) + )] + async fn prepare_job( + metadata: LeafAggregationJobMetadata, + object_store: &dyn ObjectStore, + keystore: Keystore, + ) -> anyhow::Result { + let started_at = Instant::now(); + let closed_form_input = Self::get_artifacts(&metadata, object_store).await?; + + WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::LeafAggregation.into()] + .observe(started_at.elapsed()); + + let started_at = Instant::now(); + let base_vk = keystore + .load_base_layer_verification_key(metadata.circuit_id) + .context("get_base_layer_vk_for_circuit_type()")?; + + let leaf_circuit_id = base_circuit_type_into_recursive_leaf_circuit_type( + BaseLayerCircuitType::from_numeric_value(metadata.circuit_id), + ) as u8; + + let leaf_vk = keystore + .load_recursive_layer_verification_key(leaf_circuit_id) + .context("get_recursive_layer_vk_for_circuit_type()")?; + let leaf_params = compute_leaf_params(metadata.circuit_id, base_vk.clone(), leaf_vk); + + WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::LeafAggregation.into()] + .observe(started_at.elapsed()); + + Ok(LeafAggregationWitnessGeneratorJob { + circuit_id: metadata.circuit_id, + block_number: metadata.block_number, + closed_form_inputs: closed_form_input, + proofs_ids: metadata.prover_job_ids_for_proofs, + base_vk, + leaf_params, + }) + } + + async fn get_metadata( + connection_pool: ConnectionPool, + protocol_version: ProtocolSemanticVersion, + ) -> anyhow::Result> { + let pod_name = get_current_pod_name(); + let Some(metadata) = connection_pool + .connection() + .await? 
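+ // The DAL call below presumably claims the next queued leaf-aggregation job
+ // atomically, tagging it with this pod's name so that parallel generator
+ // instances cannot pick up the same job.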
+ .fri_witness_generator_dal() + .get_next_leaf_aggregation_job(protocol_version, &pod_name) + .await + else { + return Ok(None); + }; + Ok(Some((metadata.id, metadata))) + } +} diff --git a/prover/crates/bin/witness_generator/src/rounds/mod.rs b/prover/crates/bin/witness_generator/src/rounds/mod.rs new file mode 100644 index 00000000000..6fd72c96869 --- /dev/null +++ b/prover/crates/bin/witness_generator/src/rounds/mod.rs @@ -0,0 +1,195 @@ +use std::{marker::PhantomData, sync::Arc, time::Instant}; + +use anyhow::Context; +use async_trait::async_trait; +use tokio::task::JoinHandle; +use zksync_config::configs::FriWitnessGeneratorConfig; +use zksync_object_store::ObjectStore; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; +use zksync_prover_keystore::keystore::Keystore; +use zksync_queued_job_processor::JobProcessor; +use zksync_types::protocol_version::ProtocolSemanticVersion; + +use crate::artifacts::ArtifactsManager; + +mod basic_circuits; +mod leaf_aggregation; +mod node_aggregation; +mod recursion_tip; +mod scheduler; + +pub use basic_circuits::BasicCircuits; +pub use leaf_aggregation::LeafAggregation; +pub use node_aggregation::NodeAggregation; +pub use recursion_tip::RecursionTip; +pub use scheduler::Scheduler; +use zksync_types::basic_fri_types::AggregationRound; + +use crate::metrics::WITNESS_GENERATOR_METRICS; + +#[async_trait] +pub trait JobManager: ArtifactsManager { + type Job: Send + 'static; + type Metadata: Send + 'static; + + const ROUND: AggregationRound; + const SERVICE_NAME: &'static str; + + async fn process_job( + job: Self::Job, + object_store: Arc, + max_circuits_in_flight: usize, + started_at: Instant, + ) -> anyhow::Result; + + async fn prepare_job( + metadata: Self::Metadata, + object_store: &dyn ObjectStore, + keystore: Keystore, + ) -> anyhow::Result; + + async fn get_metadata( + connection_pool: ConnectionPool, + protocol_version: ProtocolSemanticVersion, + ) -> anyhow::Result>; +} + +#[derive(Debug)] +pub struct WitnessGenerator { + pub config: FriWitnessGeneratorConfig, + pub object_store: Arc, + pub public_blob_store: Option>, + pub connection_pool: ConnectionPool, + pub protocol_version: ProtocolSemanticVersion, + pub keystore: Keystore, + _round: PhantomData, +} + +impl WitnessGenerator +where + R: JobManager + ArtifactsManager, +{ + pub fn new( + config: FriWitnessGeneratorConfig, + object_store: Arc, + public_blob_store: Option>, + connection_pool: ConnectionPool, + protocol_version: ProtocolSemanticVersion, + keystore: Keystore, + ) -> Self { + Self { + config, + object_store, + public_blob_store, + connection_pool, + protocol_version, + keystore, + _round: Default::default(), + } + } +} + +#[async_trait] +impl JobProcessor for WitnessGenerator +where + R: JobManager + ArtifactsManager + Send + Sync, +{ + type Job = R::Job; + type JobId = u32; + type JobArtifacts = R::OutputArtifacts; + + const SERVICE_NAME: &'static str = R::SERVICE_NAME; + + async fn get_next_job(&self) -> anyhow::Result> { + if let Some((id, metadata)) = + R::get_metadata(self.connection_pool.clone(), self.protocol_version) + .await + .context("get_metadata()")? 
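+ // A round only supplies metadata lookup (get_metadata) and job construction
+ // (prepare_job); this generic JobProcessor owns the shared poll/process/save
+ // flow. A minimal usage sketch, mirroring main.rs above (config, pool and
+ // keystore assumed to be built already):
+ //   let generator = WitnessGenerator::<LeafAggregation>::new(
+ //       config, object_store, public_blob_store, pool, protocol_version, keystore);
+ //   generator.run(stop_receiver, batch_size);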
+ { + tracing::info!("Processing {:?} job {:?}", R::ROUND, id); + Ok(Some(( + id, + R::prepare_job(metadata, &*self.object_store, self.keystore.clone()) + .await + .context("prepare_job()")?, + ))) + } else { + Ok(None) + } + } + + async fn save_failure(&self, job_id: Self::JobId, _started_at: Instant, error: String) { + self.connection_pool + .connection() + .await + .unwrap() + .fri_witness_generator_dal() + .mark_witness_job_failed(&error, job_id, R::ROUND) + .await; + } + + async fn process_job( + &self, + _job_id: &Self::JobId, + job: Self::Job, + started_at: Instant, + ) -> JoinHandle> { + let object_store = self.object_store.clone(); + let max_circuits_in_flight = self.config.max_circuits_in_flight; + tokio::spawn(async move { + R::process_job(job, object_store, max_circuits_in_flight, started_at).await + }) + } + + #[tracing::instrument(skip_all, fields(job_id = %job_id))] + async fn save_result( + &self, + job_id: Self::JobId, + started_at: Instant, + artifacts: Self::JobArtifacts, + ) -> anyhow::Result<()> { + tracing::info!("Saving {:?} artifacts for job {:?}", R::ROUND, job_id); + + let blob_save_started_at = Instant::now(); + + let blob_urls = R::save_to_bucket( + job_id, + artifacts.clone(), + &*self.object_store, + self.config.shall_save_to_public_bucket, + self.public_blob_store.clone(), + ) + .await; + + WITNESS_GENERATOR_METRICS.blob_save_time[&R::ROUND.into()] + .observe(blob_save_started_at.elapsed()); + + tracing::info!("Saved {:?} artifacts for job {:?}", R::ROUND, job_id); + R::save_to_database( + &self.connection_pool, + job_id, + started_at, + blob_urls, + artifacts, + ) + .await?; + Ok(()) + } + + fn max_attempts(&self) -> u32 { + self.config.max_attempts + } + + async fn get_job_attempts(&self, job_id: &Self::JobId) -> anyhow::Result { + let mut prover_storage = self.connection_pool.connection().await.context(format!( + "failed to acquire DB connection for {:?}", + R::ROUND + ))?; + prover_storage + .fri_witness_generator_dal() + .get_witness_job_attempts(*job_id, R::ROUND) + .await + .map(|attempts| attempts.unwrap_or(0)) + .context(format!("failed to get job attempts for {:?}", R::ROUND)) + } +} diff --git a/prover/crates/bin/witness_generator/src/node_aggregation/artifacts.rs b/prover/crates/bin/witness_generator/src/rounds/node_aggregation/artifacts.rs similarity index 95% rename from prover/crates/bin/witness_generator/src/node_aggregation/artifacts.rs rename to prover/crates/bin/witness_generator/src/rounds/node_aggregation/artifacts.rs index 09f01899bf3..e4f5c90080d 100644 --- a/prover/crates/bin/witness_generator/src/node_aggregation/artifacts.rs +++ b/prover/crates/bin/witness_generator/src/rounds/node_aggregation/artifacts.rs @@ -9,12 +9,12 @@ use zksync_types::{basic_fri_types::AggregationRound, prover_dal::NodeAggregatio use crate::{ artifacts::{AggregationBlobUrls, ArtifactsManager}, metrics::WITNESS_GENERATOR_METRICS, - node_aggregation::{NodeAggregationArtifacts, NodeAggregationWitnessGenerator}, + rounds::node_aggregation::{NodeAggregation, NodeAggregationArtifacts}, utils::AggregationWrapper, }; #[async_trait] -impl ArtifactsManager for NodeAggregationWitnessGenerator { +impl ArtifactsManager for NodeAggregation { type InputMetadata = NodeAggregationJobMetadata; type InputArtifacts = AggregationWrapper; type OutputArtifacts = NodeAggregationArtifacts; @@ -51,6 +51,8 @@ impl ArtifactsManager for NodeAggregationWitnessGenerator { _job_id: u32, artifacts: Self::OutputArtifacts, object_store: &dyn ObjectStore, + _shall_save_to_public_bucket: bool, + 
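+ // Deliberately unused here (note the leading underscores): only the
+ // basic-circuits round mirrors artifacts to the public bucket; the other
+ // rounds accept these parameters just to satisfy the shared
+ // ArtifactsManager::save_to_bucket signature.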
_public_blob_store: Option>, ) -> AggregationBlobUrls { let started_at = Instant::now(); let key = AggregationsKey { diff --git a/prover/crates/bin/witness_generator/src/node_aggregation/mod.rs b/prover/crates/bin/witness_generator/src/rounds/node_aggregation/mod.rs similarity index 87% rename from prover/crates/bin/witness_generator/src/node_aggregation/mod.rs rename to prover/crates/bin/witness_generator/src/rounds/node_aggregation/mod.rs index f2c9a6fb891..e891d313ffc 100644 --- a/prover/crates/bin/witness_generator/src/node_aggregation/mod.rs +++ b/prover/crates/bin/witness_generator/src/rounds/node_aggregation/mod.rs @@ -7,9 +7,8 @@ use tokio::sync::Semaphore; use zkevm_test_harness::witness::recursive_aggregation::{ compute_node_vk_commitment, create_node_witness, }; -use zksync_config::configs::FriWitnessGeneratorConfig; use zksync_object_store::ObjectStore; -use zksync_prover_dal::{ConnectionPool, Prover}; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; use zksync_prover_fri_types::{ circuit_definitions::{ boojum::field::goldilocks::GoldilocksField, @@ -19,7 +18,7 @@ use zksync_prover_fri_types::{ encodings::recursion_request::RecursionQueueSimulator, zkevm_circuits::recursion::leaf_layer::input::RecursionLeafParametersWitness, }, - FriProofWrapper, + get_current_pod_name, FriProofWrapper, }; use zksync_prover_keystore::{keystore::Keystore, utils::get_leaf_vk_params}; use zksync_types::{ @@ -30,12 +29,10 @@ use zksync_types::{ use crate::{ artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, + rounds::JobManager, utils::{load_proofs_for_job_ids, save_recursive_layer_prover_input_artifacts}, - witness_generator::WitnessGenerator, }; - mod artifacts; -mod job_processor; #[derive(Clone)] pub struct NodeAggregationArtifacts { @@ -58,38 +55,15 @@ pub struct NodeAggregationWitnessGeneratorJob { all_leafs_layer_params: Vec<(u8, RecursionLeafParametersWitness)>, } -#[derive(Debug)] -pub struct NodeAggregationWitnessGenerator { - config: FriWitnessGeneratorConfig, - object_store: Arc, - prover_connection_pool: ConnectionPool, - protocol_version: ProtocolSemanticVersion, - keystore: Keystore, -} - -impl NodeAggregationWitnessGenerator { - pub fn new( - config: FriWitnessGeneratorConfig, - object_store: Arc, - prover_connection_pool: ConnectionPool, - protocol_version: ProtocolSemanticVersion, - keystore: Keystore, - ) -> Self { - Self { - config, - object_store, - prover_connection_pool, - protocol_version, - keystore, - } - } -} +pub struct NodeAggregation; #[async_trait] -impl WitnessGenerator for NodeAggregationWitnessGenerator { +impl JobManager for NodeAggregation { type Job = NodeAggregationWitnessGeneratorJob; type Metadata = NodeAggregationJobMetadata; - type Artifacts = NodeAggregationArtifacts; + + const ROUND: AggregationRound = AggregationRound::NodeAggregation; + const SERVICE_NAME: &'static str = "fri_node_aggregation_witness_generator"; #[tracing::instrument( skip_all, @@ -98,7 +72,7 @@ impl WitnessGenerator for NodeAggregationWitnessGenerator { async fn process_job( job: NodeAggregationWitnessGeneratorJob, object_store: Arc, - max_circuits_in_flight: Option, + max_circuits_in_flight: usize, started_at: Instant, ) -> anyhow::Result { let node_vk_commitment = compute_node_vk_commitment(job.node_vk.clone()); @@ -126,7 +100,7 @@ impl WitnessGenerator for NodeAggregationWitnessGenerator { proofs_ids.len() ); - let semaphore = Arc::new(Semaphore::new(max_circuits_in_flight.unwrap())); + let semaphore = Arc::new(Semaphore::new(max_circuits_in_flight)); let 
mut handles = vec![]; for (circuit_idx, (chunk, proofs_ids_for_chunk)) in job @@ -233,8 +207,7 @@ impl WitnessGenerator for NodeAggregationWitnessGenerator { keystore: Keystore, ) -> anyhow::Result { let started_at = Instant::now(); - let artifacts = - NodeAggregationWitnessGenerator::get_artifacts(&metadata, object_store).await?; + let artifacts = Self::get_artifacts(&metadata, object_store).await?; WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::NodeAggregation.into()] .observe(started_at.elapsed()); @@ -264,4 +237,22 @@ impl WitnessGenerator for NodeAggregationWitnessGenerator { .context("get_leaf_vk_params()")?, }) } + + async fn get_metadata( + connection_pool: ConnectionPool, + protocol_version: ProtocolSemanticVersion, + ) -> anyhow::Result> { + let pod_name = get_current_pod_name(); + let Some(metadata) = connection_pool + .connection() + .await? + .fri_witness_generator_dal() + .get_next_node_aggregation_job(protocol_version, &pod_name) + .await + else { + return Ok(None); + }; + + Ok(Some((metadata.id, metadata))) + } } diff --git a/prover/crates/bin/witness_generator/src/recursion_tip/artifacts.rs b/prover/crates/bin/witness_generator/src/rounds/recursion_tip/artifacts.rs similarity index 95% rename from prover/crates/bin/witness_generator/src/recursion_tip/artifacts.rs rename to prover/crates/bin/witness_generator/src/rounds/recursion_tip/artifacts.rs index b61aa948100..6d18795c2b3 100644 --- a/prover/crates/bin/witness_generator/src/recursion_tip/artifacts.rs +++ b/prover/crates/bin/witness_generator/src/rounds/recursion_tip/artifacts.rs @@ -13,11 +13,11 @@ use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; use crate::{ artifacts::ArtifactsManager, - recursion_tip::{RecursionTipArtifacts, RecursionTipWitnessGenerator}, + rounds::recursion_tip::{RecursionTip, RecursionTipArtifacts}, }; #[async_trait] -impl ArtifactsManager for RecursionTipWitnessGenerator { +impl ArtifactsManager for RecursionTip { type InputMetadata = Vec<(u8, u32)>; type InputArtifacts = Vec; type OutputArtifacts = RecursionTipArtifacts; @@ -78,6 +78,8 @@ impl ArtifactsManager for RecursionTipWitnessGenerator { job_id: u32, artifacts: Self::OutputArtifacts, object_store: &dyn ObjectStore, + _shall_save_to_public_bucket: bool, + _public_blob_store: Option>, ) -> String { let key = FriCircuitKey { block_number: L1BatchNumber(job_id), diff --git a/prover/crates/bin/witness_generator/src/recursion_tip/mod.rs b/prover/crates/bin/witness_generator/src/rounds/recursion_tip/mod.rs similarity index 80% rename from prover/crates/bin/witness_generator/src/recursion_tip/mod.rs rename to prover/crates/bin/witness_generator/src/rounds/recursion_tip/mod.rs index 40abb756c8a..873f6798481 100644 --- a/prover/crates/bin/witness_generator/src/recursion_tip/mod.rs +++ b/prover/crates/bin/witness_generator/src/rounds/recursion_tip/mod.rs @@ -35,22 +35,20 @@ use zkevm_test_harness::{ scheduler::aux::BaseLayerCircuitType, }, }; -use zksync_config::configs::FriWitnessGeneratorConfig; use zksync_object_store::ObjectStore; -use zksync_prover_dal::{ConnectionPool, Prover}; -use zksync_prover_fri_types::keys::ClosedFormInputKey; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; +use zksync_prover_fri_types::{get_current_pod_name, keys::ClosedFormInputKey}; use zksync_prover_keystore::{keystore::Keystore, utils::get_leaf_vk_params}; use zksync_types::{ basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, L1BatchNumber, }; use crate::{ - artifacts::ArtifactsManager, 
metrics::WITNESS_GENERATOR_METRICS, utils::ClosedFormInputWrapper, - witness_generator::WitnessGenerator, + artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, rounds::JobManager, + utils::ClosedFormInputWrapper, }; mod artifacts; -mod job_processor; #[derive(Clone)] pub struct RecursionTipWitnessGeneratorJob { @@ -73,38 +71,15 @@ pub struct RecursionTipJobMetadata { pub final_node_proof_job_ids: Vec<(u8, u32)>, } -#[derive(Debug)] -pub struct RecursionTipWitnessGenerator { - config: FriWitnessGeneratorConfig, - object_store: Arc, - prover_connection_pool: ConnectionPool, - protocol_version: ProtocolSemanticVersion, - keystore: Keystore, -} - -impl RecursionTipWitnessGenerator { - pub fn new( - config: FriWitnessGeneratorConfig, - object_store: Arc, - prover_connection_pool: ConnectionPool, - protocol_version: ProtocolSemanticVersion, - keystore: Keystore, - ) -> Self { - Self { - config, - object_store, - prover_connection_pool, - protocol_version, - keystore, - } - } -} +pub struct RecursionTip; #[async_trait] -impl WitnessGenerator for RecursionTipWitnessGenerator { +impl JobManager for RecursionTip { type Job = RecursionTipWitnessGeneratorJob; type Metadata = RecursionTipJobMetadata; - type Artifacts = RecursionTipArtifacts; + + const ROUND: AggregationRound = AggregationRound::RecursionTip; + const SERVICE_NAME: &'static str = "recursion_tip_witness_generator"; #[tracing::instrument( skip_all, @@ -113,7 +88,7 @@ impl WitnessGenerator for RecursionTipWitnessGenerator { async fn process_job( job: Self::Job, _object_store: Arc, - _max_circuits_in_flight: Option, + _max_circuits_in_flight: usize, started_at: Instant, ) -> anyhow::Result { tracing::info!( @@ -160,11 +135,8 @@ impl WitnessGenerator for RecursionTipWitnessGenerator { keystore: Keystore, ) -> anyhow::Result { let started_at = Instant::now(); - let recursion_tip_proofs = RecursionTipWitnessGenerator::get_artifacts( - &metadata.final_node_proof_job_ids, - object_store, - ) - .await?; + let recursion_tip_proofs = + Self::get_artifacts(&metadata.final_node_proof_job_ids, object_store).await?; WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::RecursionTip.into()] .observe(started_at.elapsed()); @@ -241,4 +213,42 @@ impl WitnessGenerator for RecursionTipWitnessGenerator { node_vk, }) } + + async fn get_metadata( + connection_pool: ConnectionPool, + protocol_version: ProtocolSemanticVersion, + ) -> anyhow::Result> { + let pod_name = get_current_pod_name(); + let Some((l1_batch_number, number_of_final_node_jobs)) = connection_pool + .connection() + .await? + .fri_witness_generator_dal() + .get_next_recursion_tip_witness_job(protocol_version, &pod_name) + .await + else { + return Ok(None); + }; + + let final_node_proof_job_ids = connection_pool + .connection() + .await? 
+ .fri_prover_jobs_dal() + .get_final_node_proof_job_ids_for(l1_batch_number) + .await; + + assert_eq!( + final_node_proof_job_ids.len(), + number_of_final_node_jobs as usize, + "recursion tip witness job was scheduled without all final node jobs being completed; expected {}, got {}", + number_of_final_node_jobs, final_node_proof_job_ids.len() + ); + + Ok(Some(( + l1_batch_number.0, + RecursionTipJobMetadata { + l1_batch_number, + final_node_proof_job_ids, + }, + ))) + } } diff --git a/prover/crates/bin/witness_generator/src/scheduler/artifacts.rs b/prover/crates/bin/witness_generator/src/rounds/scheduler/artifacts.rs similarity index 92% rename from prover/crates/bin/witness_generator/src/scheduler/artifacts.rs rename to prover/crates/bin/witness_generator/src/rounds/scheduler/artifacts.rs index 77d1da685d0..ce9b32559b2 100644 --- a/prover/crates/bin/witness_generator/src/scheduler/artifacts.rs +++ b/prover/crates/bin/witness_generator/src/rounds/scheduler/artifacts.rs @@ -9,11 +9,11 @@ use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; use crate::{ artifacts::ArtifactsManager, - scheduler::{SchedulerArtifacts, SchedulerWitnessGenerator}, + rounds::scheduler::{Scheduler, SchedulerArtifacts}, }; #[async_trait] -impl ArtifactsManager for SchedulerWitnessGenerator { +impl ArtifactsManager for Scheduler { type InputMetadata = u32; type InputArtifacts = FriProofWrapper; type OutputArtifacts = SchedulerArtifacts; @@ -32,6 +32,8 @@ impl ArtifactsManager for SchedulerWitnessGenerator { job_id: u32, artifacts: Self::OutputArtifacts, object_store: &dyn ObjectStore, + _shall_save_to_public_bucket: bool, + _public_blob_store: Option>, ) -> String { let key = FriCircuitKey { block_number: L1BatchNumber(job_id), diff --git a/prover/crates/bin/witness_generator/src/scheduler/mod.rs b/prover/crates/bin/witness_generator/src/rounds/scheduler/mod.rs similarity index 79% rename from prover/crates/bin/witness_generator/src/scheduler/mod.rs rename to prover/crates/bin/witness_generator/src/rounds/scheduler/mod.rs index 7af3d68d5a7..fc7dfa2accb 100644 --- a/prover/crates/bin/witness_generator/src/scheduler/mod.rs +++ b/prover/crates/bin/witness_generator/src/rounds/scheduler/mod.rs @@ -5,9 +5,8 @@ use async_trait::async_trait; use zkevm_test_harness::zkevm_circuits::recursion::{ leaf_layer::input::RecursionLeafParametersWitness, NUM_BASE_LAYER_CIRCUITS, }; -use zksync_config::configs::FriWitnessGeneratorConfig; use zksync_object_store::ObjectStore; -use zksync_prover_dal::{ConnectionPool, Prover}; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; use zksync_prover_fri_types::{ circuit_definitions::{ boojum::{ @@ -21,7 +20,7 @@ use zksync_prover_fri_types::{ recursion_layer_proof_config, zkevm_circuits::scheduler::{input::SchedulerCircuitInstanceWitness, SchedulerConfig}, }, - FriProofWrapper, + get_current_pod_name, FriProofWrapper, }; use zksync_prover_keystore::{keystore::Keystore, utils::get_leaf_vk_params}; use zksync_types::{ @@ -29,12 +28,11 @@ use zksync_types::{ }; use crate::{ - artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, - utils::SchedulerPartialInputWrapper, witness_generator::WitnessGenerator, + artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, rounds::JobManager, + utils::SchedulerPartialInputWrapper, }; mod artifacts; -mod job_processor; #[derive(Clone)] pub struct SchedulerArtifacts { @@ -60,38 +58,15 @@ pub struct SchedulerWitnessJobMetadata { pub recursion_tip_job_id: u32, } -#[derive(Debug)] -pub struct 
SchedulerWitnessGenerator { - config: FriWitnessGeneratorConfig, - object_store: Arc, - prover_connection_pool: ConnectionPool, - protocol_version: ProtocolSemanticVersion, - keystore: Keystore, -} - -impl SchedulerWitnessGenerator { - pub fn new( - config: FriWitnessGeneratorConfig, - object_store: Arc, - prover_connection_pool: ConnectionPool, - protocol_version: ProtocolSemanticVersion, - keystore: Keystore, - ) -> Self { - Self { - config, - object_store, - prover_connection_pool, - protocol_version, - keystore, - } - } -} +pub struct Scheduler; #[async_trait] -impl WitnessGenerator for SchedulerWitnessGenerator { +impl JobManager for Scheduler { type Job = SchedulerWitnessGeneratorJob; type Metadata = SchedulerWitnessJobMetadata; - type Artifacts = SchedulerArtifacts; + + const ROUND: AggregationRound = AggregationRound::Scheduler; + const SERVICE_NAME: &'static str = "fri_scheduler_witness_generator"; #[tracing::instrument( skip_all, @@ -100,7 +75,7 @@ impl WitnessGenerator for SchedulerWitnessGenerator { async fn process_job( job: SchedulerWitnessGeneratorJob, _object_store: Arc, - _max_circuits_in_flight: Option, + _max_circuits_in_flight: usize, started_at: Instant, ) -> anyhow::Result { tracing::info!( @@ -148,9 +123,7 @@ impl WitnessGenerator for SchedulerWitnessGenerator { keystore: Keystore, ) -> anyhow::Result { let started_at = Instant::now(); - let wrapper = - SchedulerWitnessGenerator::get_artifacts(&metadata.recursion_tip_job_id, object_store) - .await?; + let wrapper = Self::get_artifacts(&metadata.recursion_tip_job_id, object_store).await?; let recursion_tip_proof = match wrapper { FriProofWrapper::Base(_) => Err(anyhow::anyhow!( "Expected only recursive proofs for scheduler l1 batch {}, got Base", @@ -196,4 +169,38 @@ impl WitnessGenerator for SchedulerWitnessGenerator { recursion_tip_vk, }) } + + async fn get_metadata( + connection_pool: ConnectionPool, + protocol_version: ProtocolSemanticVersion, + ) -> anyhow::Result> { + let pod_name = get_current_pod_name(); + let Some(l1_batch_number) = connection_pool + .connection() + .await? + .fri_witness_generator_dal() + .get_next_scheduler_witness_job(protocol_version, &pod_name) + .await + else { + return Ok(None); + }; + let recursion_tip_job_id = connection_pool + .connection() + .await? 
+ .fri_prover_jobs_dal() + .get_recursion_tip_proof_job_id(l1_batch_number) + .await + .context(format!( + "could not find recursion tip proof for l1 batch {}", + l1_batch_number + ))?; + + Ok(Some(( + l1_batch_number.0, + SchedulerWitnessJobMetadata { + l1_batch_number, + recursion_tip_job_id, + }, + ))) + } } diff --git a/prover/crates/bin/witness_generator/src/scheduler/job_processor.rs b/prover/crates/bin/witness_generator/src/scheduler/job_processor.rs deleted file mode 100644 index b5745f98091..00000000000 --- a/prover/crates/bin/witness_generator/src/scheduler/job_processor.rs +++ /dev/null @@ -1,132 +0,0 @@ -use std::time::Instant; - -use anyhow::Context as _; -use async_trait::async_trait; -use zksync_prover_dal::ProverDal; -use zksync_prover_fri_types::get_current_pod_name; -use zksync_queued_job_processor::JobProcessor; -use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; - -use crate::{ - artifacts::ArtifactsManager, - metrics::WITNESS_GENERATOR_METRICS, - scheduler::{ - SchedulerArtifacts, SchedulerWitnessGenerator, SchedulerWitnessGeneratorJob, - SchedulerWitnessJobMetadata, - }, - witness_generator::WitnessGenerator, -}; - -#[async_trait] -impl JobProcessor for SchedulerWitnessGenerator { - type Job = SchedulerWitnessGeneratorJob; - type JobId = L1BatchNumber; - type JobArtifacts = SchedulerArtifacts; - - const SERVICE_NAME: &'static str = "fri_scheduler_witness_generator"; - - async fn get_next_job(&self) -> anyhow::Result> { - let mut prover_connection = self.prover_connection_pool.connection().await?; - let pod_name = get_current_pod_name(); - let Some(l1_batch_number) = prover_connection - .fri_witness_generator_dal() - .get_next_scheduler_witness_job(self.protocol_version, &pod_name) - .await - else { - return Ok(None); - }; - let recursion_tip_job_id = prover_connection - .fri_prover_jobs_dal() - .get_recursion_tip_proof_job_id(l1_batch_number) - .await - .context(format!( - "could not find recursion tip proof for l1 batch {}", - l1_batch_number - ))?; - - Ok(Some(( - l1_batch_number, - ::prepare_job( - SchedulerWitnessJobMetadata { - l1_batch_number, - recursion_tip_job_id, - }, - &*self.object_store, - self.keystore.clone(), - ) - .await - .context("prepare_job()")?, - ))) - } - - async fn save_failure(&self, job_id: L1BatchNumber, _started_at: Instant, error: String) -> () { - self.prover_connection_pool - .connection() - .await - .unwrap() - .fri_witness_generator_dal() - .mark_scheduler_job_failed(&error, job_id) - .await; - } - - #[allow(clippy::async_yields_async)] - async fn process_job( - &self, - _job_id: &Self::JobId, - job: SchedulerWitnessGeneratorJob, - started_at: Instant, - ) -> tokio::task::JoinHandle> { - let object_store = self.object_store.clone(); - tokio::spawn(async move { - ::process_job(job, object_store, None, started_at).await - }) - } - - #[tracing::instrument( - skip_all, - fields(l1_batch = %job_id) - )] - async fn save_result( - &self, - job_id: L1BatchNumber, - started_at: Instant, - artifacts: SchedulerArtifacts, - ) -> anyhow::Result<()> { - let blob_save_started_at = Instant::now(); - - let blob_urls = - Self::save_to_bucket(job_id.0, artifacts.clone(), &*self.object_store).await; - - WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::Scheduler.into()] - .observe(blob_save_started_at.elapsed()); - - Self::save_to_database( - &self.prover_connection_pool, - job_id.0, - started_at, - blob_urls, - artifacts, - ) - .await?; - - Ok(()) - } - - fn max_attempts(&self) -> u32 { - self.config.max_attempts - } - - 
async fn get_job_attempts(&self, job_id: &L1BatchNumber) -> anyhow::Result { - let mut prover_storage = self - .prover_connection_pool - .connection() - .await - .context("failed to acquire DB connection for SchedulerWitnessGenerator")?; - prover_storage - .fri_witness_generator_dal() - .get_scheduler_witness_job_attempts(*job_id) - .await - .map(|attempts| attempts.unwrap_or(0)) - .context("failed to get job attempts for SchedulerWitnessGenerator") - } -} diff --git a/prover/crates/bin/witness_generator/src/witness_generator.rs b/prover/crates/bin/witness_generator/src/witness_generator.rs deleted file mode 100644 index eb9200d7950..00000000000 --- a/prover/crates/bin/witness_generator/src/witness_generator.rs +++ /dev/null @@ -1,25 +0,0 @@ -use std::{sync::Arc, time::Instant}; - -use async_trait::async_trait; -use zksync_object_store::ObjectStore; -use zksync_prover_keystore::keystore::Keystore; - -#[async_trait] -pub trait WitnessGenerator { - type Job: Send + 'static; - type Metadata; - type Artifacts; - - async fn process_job( - job: Self::Job, - object_store: Arc, - max_circuits_in_flight: Option, - started_at: Instant, - ) -> anyhow::Result; - - async fn prepare_job( - metadata: Self::Metadata, - object_store: &dyn ObjectStore, - keystore: Keystore, - ) -> anyhow::Result; -} diff --git a/prover/crates/bin/witness_generator/tests/basic_test.rs b/prover/crates/bin/witness_generator/tests/basic_test.rs index 379ddc3a4eb..be6452dfc7d 100644 --- a/prover/crates/bin/witness_generator/tests/basic_test.rs +++ b/prover/crates/bin/witness_generator/tests/basic_test.rs @@ -15,9 +15,8 @@ use zksync_types::{ L1BatchNumber, }; use zksync_witness_generator::{ - leaf_aggregation::LeafAggregationWitnessGenerator, - node_aggregation::NodeAggregationWitnessGenerator, utils::AggregationWrapper, - witness_generator::WitnessGenerator, + rounds::{JobManager, LeafAggregation, NodeAggregation}, + utils::AggregationWrapper, }; fn compare_serialized(expected: &T, actual: &T) { @@ -52,22 +51,13 @@ async fn test_leaf_witness_gen() { .unwrap(); let keystore = Keystore::locate(); - let job = LeafAggregationWitnessGenerator::prepare_job( - leaf_aggregation_job_metadata, - &*object_store, - keystore, - ) - .await - .unwrap(); - - let artifacts = LeafAggregationWitnessGenerator::process_job( - job, - object_store.clone(), - Some(500), - Instant::now(), - ) - .await - .unwrap(); + let job = LeafAggregation::prepare_job(leaf_aggregation_job_metadata, &*object_store, keystore) + .await + .unwrap(); + + let artifacts = LeafAggregation::process_job(job, object_store.clone(), 500, Instant::now()) + .await + .unwrap(); let aggregations = AggregationWrapper(artifacts.aggregations); @@ -147,22 +137,13 @@ async fn test_node_witness_gen() { }; let keystore = Keystore::locate(); - let job = NodeAggregationWitnessGenerator::prepare_job( - node_aggregation_job_metadata, - &*object_store, - keystore, - ) - .await - .unwrap(); - - let artifacts = NodeAggregationWitnessGenerator::process_job( - job, - object_store.clone(), - Some(500), - Instant::now(), - ) - .await - .unwrap(); + let job = NodeAggregation::prepare_job(node_aggregation_job_metadata, &*object_store, keystore) + .await + .unwrap(); + + let artifacts = NodeAggregation::process_job(job, object_store.clone(), 500, Instant::now()) + .await + .unwrap(); let aggregations = AggregationWrapper(artifacts.next_aggregations); diff --git a/prover/crates/lib/prover_dal/.sqlx/query-16548daf69e9ff0528904be2e142254a457665179d9cf0a3c0b18c3fe09e4838.json 
b/prover/crates/lib/prover_dal/.sqlx/query-16548daf69e9ff0528904be2e142254a457665179d9cf0a3c0b18c3fe09e4838.json deleted file mode 100644 index 5fe5032746e..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-16548daf69e9ff0528904be2e142254a457665179d9cf0a3c0b18c3fe09e4838.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET\n status = 'failed',\n error = $1,\n updated_at = NOW()\n WHERE\n id = $2\n AND status != 'successful'\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Text", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "16548daf69e9ff0528904be2e142254a457665179d9cf0a3c0b18c3fe09e4838" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-53f78fdee39b113d2f55f6f951bd94f28b7b2b60d551d552a9b0bab1f1791e39.json b/prover/crates/lib/prover_dal/.sqlx/query-53f78fdee39b113d2f55f6f951bd94f28b7b2b60d551d552a9b0bab1f1791e39.json deleted file mode 100644 index 15a10f7ce3c..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-53f78fdee39b113d2f55f6f951bd94f28b7b2b60d551d552a9b0bab1f1791e39.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n attempts\n FROM\n leaf_aggregation_witness_jobs_fri\n WHERE\n id = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "attempts", - "type_info": "Int2" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "53f78fdee39b113d2f55f6f951bd94f28b7b2b60d551d552a9b0bab1f1791e39" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-5db868e03dc6901a0afa06f82a37a1a04821495487a80595cc9b523dac6ac8e9.json b/prover/crates/lib/prover_dal/.sqlx/query-5db868e03dc6901a0afa06f82a37a1a04821495487a80595cc9b523dac6ac8e9.json deleted file mode 100644 index 94dbaa80a10..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-5db868e03dc6901a0afa06f82a37a1a04821495487a80595cc9b523dac6ac8e9.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE scheduler_witness_jobs_fri\n SET\n status = 'failed',\n error = $1,\n updated_at = NOW()\n WHERE\n l1_batch_number = $2\n AND status != 'successful'\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Text", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "5db868e03dc6901a0afa06f82a37a1a04821495487a80595cc9b523dac6ac8e9" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-67f5f3a015dc478f02f4f701c90d0fc9ac9a7f3dce2ba48c2d0e6f38b6ba455a.json b/prover/crates/lib/prover_dal/.sqlx/query-67f5f3a015dc478f02f4f701c90d0fc9ac9a7f3dce2ba48c2d0e6f38b6ba455a.json deleted file mode 100644 index 29838881a52..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-67f5f3a015dc478f02f4f701c90d0fc9ac9a7f3dce2ba48c2d0e6f38b6ba455a.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET\n status = 'failed',\n error = $1,\n updated_at = NOW()\n WHERE\n id = $2\n AND status != 'successful'\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Text", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "67f5f3a015dc478f02f4f701c90d0fc9ac9a7f3dce2ba48c2d0e6f38b6ba455a" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-6f20d468efe916f8e92cbf259b37ac83cd32a628d3e01e5cd1949c519683a352.json b/prover/crates/lib/prover_dal/.sqlx/query-6f20d468efe916f8e92cbf259b37ac83cd32a628d3e01e5cd1949c519683a352.json deleted file mode 100644 index 9053a0f5abb..00000000000 --- 
a/prover/crates/lib/prover_dal/.sqlx/query-6f20d468efe916f8e92cbf259b37ac83cd32a628d3e01e5cd1949c519683a352.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n attempts\n FROM\n recursion_tip_witness_jobs_fri\n WHERE\n l1_batch_number = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "attempts", - "type_info": "Int2" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "6f20d468efe916f8e92cbf259b37ac83cd32a628d3e01e5cd1949c519683a352" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-806b82a9effd885ba537a2a1c7d7227120a8279db1875d26ccae5ee0785f46a9.json b/prover/crates/lib/prover_dal/.sqlx/query-806b82a9effd885ba537a2a1c7d7227120a8279db1875d26ccae5ee0785f46a9.json deleted file mode 100644 index c8e8a7aa603..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-806b82a9effd885ba537a2a1c7d7227120a8279db1875d26ccae5ee0785f46a9.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n attempts\n FROM\n node_aggregation_witness_jobs_fri\n WHERE\n id = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "attempts", - "type_info": "Int2" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "806b82a9effd885ba537a2a1c7d7227120a8279db1875d26ccae5ee0785f46a9" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-b321c5ba22358cbb1fd9c627f1e7b56187686173327498ac75424593547c19c5.json b/prover/crates/lib/prover_dal/.sqlx/query-b321c5ba22358cbb1fd9c627f1e7b56187686173327498ac75424593547c19c5.json deleted file mode 100644 index bdd22927d38..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-b321c5ba22358cbb1fd9c627f1e7b56187686173327498ac75424593547c19c5.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n attempts\n FROM\n scheduler_witness_jobs_fri\n WHERE\n l1_batch_number = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "attempts", - "type_info": "Int2" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "b321c5ba22358cbb1fd9c627f1e7b56187686173327498ac75424593547c19c5" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-b3d71dbe14bcd94131b29b64dcb49b6370c211a7fc24ad03a5f0e327f9d18040.json b/prover/crates/lib/prover_dal/.sqlx/query-b3d71dbe14bcd94131b29b64dcb49b6370c211a7fc24ad03a5f0e327f9d18040.json deleted file mode 100644 index 0ca284a3f57..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-b3d71dbe14bcd94131b29b64dcb49b6370c211a7fc24ad03a5f0e327f9d18040.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n attempts\n FROM\n witness_inputs_fri\n WHERE\n l1_batch_number = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "attempts", - "type_info": "Int2" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "b3d71dbe14bcd94131b29b64dcb49b6370c211a7fc24ad03a5f0e327f9d18040" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-bf06bd08d8ccf67fc00bbc364715263556f258565f79cbb40f5ecc1a4f6402f5.json b/prover/crates/lib/prover_dal/.sqlx/query-bf06bd08d8ccf67fc00bbc364715263556f258565f79cbb40f5ecc1a4f6402f5.json deleted file mode 100644 index c1f9806625d..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-bf06bd08d8ccf67fc00bbc364715263556f258565f79cbb40f5ecc1a4f6402f5.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE 
recursion_tip_witness_jobs_fri\n SET\n status = 'failed',\n error = $1,\n updated_at = NOW()\n WHERE\n l1_batch_number = $2\n AND status != 'successful'\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Text", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "bf06bd08d8ccf67fc00bbc364715263556f258565f79cbb40f5ecc1a4f6402f5" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-d4949debfe0dc5112204cd196c68b02c44b099e27e3c45c5c810cd5fcd8884ed.json b/prover/crates/lib/prover_dal/.sqlx/query-d4949debfe0dc5112204cd196c68b02c44b099e27e3c45c5c810cd5fcd8884ed.json deleted file mode 100644 index 9121539b317..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-d4949debfe0dc5112204cd196c68b02c44b099e27e3c45c5c810cd5fcd8884ed.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE witness_inputs_fri\n SET\n status = 'failed',\n error = $1,\n updated_at = NOW()\n WHERE\n l1_batch_number = $2\n AND status != 'successful'\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Text", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "d4949debfe0dc5112204cd196c68b02c44b099e27e3c45c5c810cd5fcd8884ed" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-d91c931e2a14cf1183a608d041fc6fadb8e12a9218399d189b4d95e2ca4fcc48.json b/prover/crates/lib/prover_dal/.sqlx/query-e438a4f0c705fcb39e017912ce8e1bb675a86ae14a863fa31eb513af65d606ed.json similarity index 54% rename from prover/crates/lib/prover_dal/.sqlx/query-d91c931e2a14cf1183a608d041fc6fadb8e12a9218399d189b4d95e2ca4fcc48.json rename to prover/crates/lib/prover_dal/.sqlx/query-e438a4f0c705fcb39e017912ce8e1bb675a86ae14a863fa31eb513af65d606ed.json index c353ecf1bad..cf9ff8396ef 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-d91c931e2a14cf1183a608d041fc6fadb8e12a9218399d189b4d95e2ca4fcc48.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-e438a4f0c705fcb39e017912ce8e1bb675a86ae14a863fa31eb513af65d606ed.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE witness_inputs_fri\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW(),\n picked_by = $3\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n witness_inputs_fri\n WHERE\n l1_batch_number <= $1\n AND status = 'queued'\n AND protocol_version = $2\n AND protocol_version_patch = $4\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n witness_inputs_fri.l1_batch_number\n ", + "query": "\n UPDATE witness_inputs_fri\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW(),\n picked_by = $2\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n witness_inputs_fri\n WHERE\n status = 'queued'\n AND protocol_version = $1\n AND protocol_version_patch = $3\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n witness_inputs_fri.l1_batch_number\n ", "describe": { "columns": [ { @@ -11,7 +11,6 @@ ], "parameters": { "Left": [ - "Int8", "Int4", "Text", "Int4" @@ -21,5 +20,5 @@ false ] }, - "hash": "d91c931e2a14cf1183a608d041fc6fadb8e12a9218399d189b4d95e2ca4fcc48" + "hash": "e438a4f0c705fcb39e017912ce8e1bb675a86ae14a863fa31eb513af65d606ed" } diff --git a/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs b/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs index c7ba0f60ef3..2040b444044 100644 --- a/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs +++ 
b/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs @@ -76,7 +76,6 @@ impl FriWitnessGeneratorDal<'_, '_> { /// The blobs arrive from core via prover gateway, as pubdata, this method loads the blobs. pub async fn get_next_basic_circuit_witness_job( &mut self, - last_l1_batch_to_process: u32, protocol_version: ProtocolSemanticVersion, picked_by: &str, ) -> Option { @@ -88,7 +87,7 @@ impl FriWitnessGeneratorDal<'_, '_> { attempts = attempts + 1, updated_at = NOW(), processing_started_at = NOW(), - picked_by = $3 + picked_by = $2 WHERE l1_batch_number = ( SELECT @@ -96,10 +95,9 @@ impl FriWitnessGeneratorDal<'_, '_> { FROM witness_inputs_fri WHERE - l1_batch_number <= $1 - AND status = 'queued' - AND protocol_version = $2 - AND protocol_version_patch = $4 + status = 'queued' + AND protocol_version = $1 + AND protocol_version_patch = $3 ORDER BY l1_batch_number ASC LIMIT @@ -110,7 +108,6 @@ impl FriWitnessGeneratorDal<'_, '_> { RETURNING witness_inputs_fri.l1_batch_number "#, - i64::from(last_l1_batch_to_process), protocol_version.minor as i32, picked_by, protocol_version.patch.0 as i32, @@ -121,28 +118,6 @@ impl FriWitnessGeneratorDal<'_, '_> { .map(|row| L1BatchNumber(row.l1_batch_number as u32)) } - pub async fn get_basic_circuit_witness_job_attempts( - &mut self, - l1_batch_number: L1BatchNumber, - ) -> sqlx::Result> { - let attempts = sqlx::query!( - r#" - SELECT - attempts - FROM - witness_inputs_fri - WHERE - l1_batch_number = $1 - "#, - i64::from(l1_batch_number.0) - ) - .fetch_optional(self.storage.conn()) - .await? - .map(|row| row.attempts as u32); - - Ok(attempts) - } - pub async fn mark_witness_job( &mut self, status: FriWitnessJobStatus, @@ -189,46 +164,6 @@ impl FriWitnessGeneratorDal<'_, '_> { .unwrap(); } - pub async fn mark_witness_job_failed(&mut self, error: &str, block_number: L1BatchNumber) { - sqlx::query!( - r#" - UPDATE witness_inputs_fri - SET - status = 'failed', - error = $1, - updated_at = NOW() - WHERE - l1_batch_number = $2 - AND status != 'successful' - "#, - error, - i64::from(block_number.0) - ) - .execute(self.storage.conn()) - .await - .unwrap(); - } - - pub async fn mark_leaf_aggregation_job_failed(&mut self, error: &str, id: u32) { - sqlx::query!( - r#" - UPDATE leaf_aggregation_witness_jobs_fri - SET - status = 'failed', - error = $1, - updated_at = NOW() - WHERE - id = $2 - AND status != 'successful' - "#, - error, - i64::from(id) - ) - .execute(self.storage.conn()) - .await - .unwrap(); - } - pub async fn mark_leaf_aggregation_as_successful(&mut self, id: u32, time_taken: Duration) { sqlx::query!( r#" @@ -481,29 +416,6 @@ impl FriWitnessGeneratorDal<'_, '_> { }) } - pub async fn get_leaf_aggregation_job_attempts( - &mut self, - id: u32, - ) -> sqlx::Result> { - let attempts = sqlx::query!( - r#" - SELECT - attempts - FROM - leaf_aggregation_witness_jobs_fri - WHERE - id = $1 - "#, - i64::from(id) - ) - .fetch_optional(self.storage.conn()) - .await - .unwrap() - .map(|row| row.attempts as u32); - - Ok(attempts) - } - async fn prover_job_ids_for( &mut self, block_number: L1BatchNumber, @@ -674,49 +586,6 @@ impl FriWitnessGeneratorDal<'_, '_> { }) } - pub async fn get_node_aggregation_job_attempts( - &mut self, - id: u32, - ) -> sqlx::Result> { - let attempts = sqlx::query!( - r#" - SELECT - attempts - FROM - node_aggregation_witness_jobs_fri - WHERE - id = $1 - "#, - i64::from(id) - ) - .fetch_optional(self.storage.conn()) - .await - .unwrap() - .map(|row| row.attempts as u32); - - Ok(attempts) - } - - pub async fn 
mark_node_aggregation_job_failed(&mut self, error: &str, id: u32) { - sqlx::query!( - r#" - UPDATE node_aggregation_witness_jobs_fri - SET - status = 'failed', - error = $1, - updated_at = NOW() - WHERE - id = $2 - AND status != 'successful' - "#, - error, - i64::from(id) - ) - .execute(self.storage.conn()) - .await - .unwrap(); - } - pub async fn mark_node_aggregation_as_successful(&mut self, id: u32, time_taken: Duration) { sqlx::query!( r#" @@ -1241,46 +1110,42 @@ impl FriWitnessGeneratorDal<'_, '_> { .map(|row| L1BatchNumber(row.l1_batch_number as u32)) } - pub async fn get_recursion_tip_witness_job_attempts( + pub async fn get_witness_job_attempts( &mut self, - l1_batch_number: L1BatchNumber, + job_id: u32, + aggregation_round: AggregationRound, ) -> sqlx::Result> { - let attempts = sqlx::query!( - r#" - SELECT - attempts - FROM - recursion_tip_witness_jobs_fri - WHERE - l1_batch_number = $1 - "#, - l1_batch_number.0 as i64 - ) - .fetch_optional(self.storage.conn()) - .await? - .map(|row| row.attempts as u32); + let table = match aggregation_round { + AggregationRound::BasicCircuits => "witness_inputs_fri", + AggregationRound::LeafAggregation => "leaf_aggregation_witness_jobs_fri", + AggregationRound::NodeAggregation => "node_aggregation_witness_jobs_fri", + AggregationRound::RecursionTip => "recursion_tip_witness_jobs_fri", + AggregationRound::Scheduler => "scheduler_witness_jobs_fri", + }; - Ok(attempts) - } + let job_id_column = match aggregation_round { + AggregationRound::BasicCircuits => "l1_batch_number", + AggregationRound::LeafAggregation => "id", + AggregationRound::NodeAggregation => "id", + AggregationRound::RecursionTip => "l1_batch_number", + AggregationRound::Scheduler => "l1_batch_number ", + }; - pub async fn get_scheduler_witness_job_attempts( - &mut self, - l1_batch_number: L1BatchNumber, - ) -> sqlx::Result> { - let attempts = sqlx::query!( + let query = format!( r#" SELECT attempts FROM - scheduler_witness_jobs_fri + {table} WHERE - l1_batch_number = $1 + {job_id_column} = {job_id} "#, - i64::from(l1_batch_number.0) - ) - .fetch_optional(self.storage.conn()) - .await? - .map(|row| row.attempts as u32); + ); + + let attempts = sqlx::query(&query) + .fetch_optional(self.storage.conn()) + .await? 
+ .map(|row| row.get::("attempts") as u32); Ok(attempts) } @@ -1331,48 +1196,45 @@ impl FriWitnessGeneratorDal<'_, '_> { .unwrap(); } - pub async fn mark_recursion_tip_job_failed( + pub async fn mark_witness_job_failed( &mut self, error: &str, - l1_batch_number: L1BatchNumber, + job_id: u32, + aggregation_round: AggregationRound, ) { - sqlx::query!( - r#" - UPDATE recursion_tip_witness_jobs_fri - SET - status = 'failed', - error = $1, - updated_at = NOW() - WHERE - l1_batch_number = $2 - AND status != 'successful' - "#, - error, - l1_batch_number.0 as i64 - ) - .execute(self.storage.conn()) - .await - .unwrap(); - } + let table = match aggregation_round { + AggregationRound::BasicCircuits => "witness_inputs_fri", + AggregationRound::LeafAggregation => "leaf_aggregation_witness_jobs_fri", + AggregationRound::NodeAggregation => "node_aggregation_witness_jobs_fri", + AggregationRound::RecursionTip => "recursion_tip_witness_jobs_fri", + AggregationRound::Scheduler => "scheduler_witness_jobs_fri", + }; - pub async fn mark_scheduler_job_failed(&mut self, error: &str, block_number: L1BatchNumber) { - sqlx::query!( + let job_id_column = match aggregation_round { + AggregationRound::BasicCircuits => "l1_batch_number", + AggregationRound::LeafAggregation => "id", + AggregationRound::NodeAggregation => "id", + AggregationRound::RecursionTip => "l1_batch_number", + AggregationRound::Scheduler => "l1_batch_number ", + }; + + let query = format!( r#" - UPDATE scheduler_witness_jobs_fri + UPDATE {table} SET status = 'failed', - error = $1, + error = {error}, updated_at = NOW() WHERE - l1_batch_number = $2 - AND status != 'successful' + {job_id_column} = {job_id} + AND status != 'successful "#, - error, - i64::from(block_number.0) - ) - .execute(self.storage.conn()) - .await - .unwrap(); + ); + + sqlx::query(&query) + .execute(self.storage.conn()) + .await + .unwrap(); } pub async fn get_witness_jobs_stats( From 191e81ace560193a19b7480808fc936d2036dd81 Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Thu, 26 Sep 2024 13:12:13 +0300 Subject: [PATCH 13/16] chore(eth-sender): remove deprecated proof loading (#2958) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Removes deprecated proof loading for eth-sender ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- core/node/eth_sender/src/aggregator.rs | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/core/node/eth_sender/src/aggregator.rs b/core/node/eth_sender/src/aggregator.rs index 1e0bd315b9d..4045e9ca3d8 100644 --- a/core/node/eth_sender/src/aggregator.rs +++ b/core/node/eth_sender/src/aggregator.rs @@ -526,22 +526,5 @@ pub async fn load_wrapped_fri_proofs_for_range( } } - // We also check file with deprecated name if patch 0 is allowed. - // TODO: remove in the next release. 
- let is_patch_0_present = allowed_versions.iter().any(|v| v.patch.0 == 0); - if is_patch_0_present { - match blob_store - .get_by_encoded_key(format!("l1_batch_proof_{l1_batch_number}.bin")) - .await - { - Ok(proof) => return Some(proof), - Err(ObjectStoreError::KeyNotFound(_)) => (), // do nothing, proof is not ready yet - Err(err) => panic!( - "Failed to load proof for batch {}: {}", - l1_batch_number.0, err - ), - } - } - None } From 3f406c7d0c0e76d798c2d838abde57ca692822c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Grze=C5=9Bkiewicz?= Date: Thu, 26 Sep 2024 13:23:41 +0200 Subject: [PATCH 14/16] fix(eth-watch): add missing check that from_block is not larger than finalized_block (#2969) Signed-off-by: tomg10 --- core/node/eth_watch/src/lib.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/core/node/eth_watch/src/lib.rs b/core/node/eth_watch/src/lib.rs index 537468bb6e4..a832733b355 100644 --- a/core/node/eth_watch/src/lib.rs +++ b/core/node/eth_watch/src/lib.rs @@ -149,6 +149,10 @@ impl EthWatch { .await .map_err(DalError::generalize)?; + // There are no new blocks so there is nothing to be done + if from_block > finalized_block { + continue; + } let processor_events = client .get_events( Web3BlockNumber::Number(from_block.into()), From 3010a1974d24dbbd7b85c900f0733e33fffaf59f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bruno=20Fran=C3=A7a?= Date: Thu, 26 Sep 2024 14:33:32 +0100 Subject: [PATCH 15/16] refactor: Deleted unnecessary files (#2959) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - ~`.cargo/config.toml` is a remnant from when we used private repos. It should no longer be necessary.~ - ~`configs/` doesn't seem to be used at all.~ - `building-from-scratch` just contains an old docker file that was used for Coinbase. No longer used. --- .../building-from-scratch/Dockerfile | 27 ------------------- 1 file changed, 27 deletions(-) delete mode 100644 docs/guides/external-node/building-from-scratch/Dockerfile diff --git a/docs/guides/external-node/building-from-scratch/Dockerfile b/docs/guides/external-node/building-from-scratch/Dockerfile deleted file mode 100644 index 5b015a4545b..00000000000 --- a/docs/guides/external-node/building-from-scratch/Dockerfile +++ /dev/null @@ -1,27 +0,0 @@ -FROM matterlabs/zk-environment:latest2.0-lightweight - -RUN git clone https://github.com/matter-labs/zksync-era - -WORKDIR /usr/src/zksync/zksync-era - -# core 24.16.0 (#2608), see: https://github.com/matter-labs/zksync-era/releases -RUN git reset --hard 1ac52c5 - -ENV ZKSYNC_HOME=/usr/src/zksync/zksync-era -ENV PATH="${ZKSYNC_HOME}/bin:${PATH}" - -# build zk tool -RUN zkt - -# build rust -RUN cargo build --release -RUN cp target/release/zksync_external_node /usr/bin - -# build contracts -RUN git submodule update --init --recursive -RUN zk_supervisor contracts - -# copy migrations (node expects them to be in specific directory) -RUN cp -r core/lib/dal/migrations/ migrations - -ENTRYPOINT [ "sh", "docker/external-node/entrypoint.sh"] From c3d4bc1b16707af9357abd7adb09c95d4b93e4bf Mon Sep 17 00:00:00 2001 From: Nisheeth Barthwal Date: Thu, 26 Sep 2024 17:52:36 +0200 Subject: [PATCH 16/16] fix: cargo-deny advisory due to structopt (#2972) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Adds https://rustsec.org/advisories/RUSTSEC-2024-0375 to `deny.toml`, which is due to `StructOpt`. 
## Why ❔ * Currently it's impossible to migrate to clap as [zkevm_test_harness](https://github.com/matter-labs/era-zkevm_test_harness/blob/v1.5.0/Cargo.toml#L46) still depends on structopt. * This [unblocks](https://github.com/matter-labs/zksync-era/actions/runs/11054218994/job/30710479095) the CI ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --- deny.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/deny.toml b/deny.toml index c2775fc057c..f3f100b9514 100644 --- a/deny.toml +++ b/deny.toml @@ -14,6 +14,7 @@ ignore = [ # all below caused by StructOpt which we still use and we should move to clap v3 instead "RUSTSEC-2021-0145", "RUSTSEC-2021-0139", + "RUSTSEC-2024-0375", ] [licenses]