From b49d2fde498f0700201afc0a27084d19e7568424 Mon Sep 17 00:00:00 2001 From: Mauro Toscano <12560266+MauroToscano@users.noreply.github.com> Date: Mon, 4 Sep 2023 17:28:01 -0300 Subject: [PATCH 1/3] Add plonk (#537) * Add plonk * Clippy * Clippy * Fmt * Fmt --- Cargo.toml | 2 +- README.md | 78 +- docs/src/SUMMARY.md | 6 + docs/src/plonk/SUMMARY.md | 7 + docs/src/plonk/constraint_system.md | 103 ++ docs/src/plonk/implementation.md | 282 ++++++ docs/src/plonk/plonk.md | 12 + docs/src/plonk/protocol.md | 246 +++++ docs/src/plonk/recap.md | 448 +++++++++ provers/plonk/Cargo.toml | 12 + .../src/constraint_system/conditional.rs | 131 +++ provers/plonk/src/constraint_system/errors.rs | 5 + .../src/constraint_system/examples/mimc.rs | 176 ++++ .../src/constraint_system/examples/mod.rs | 2 + .../src/constraint_system/examples/pow.rs | 46 + provers/plonk/src/constraint_system/mod.rs | 351 +++++++ .../plonk/src/constraint_system/operations.rs | 342 +++++++ provers/plonk/src/constraint_system/solver.rs | 909 +++++++++++++++++ provers/plonk/src/constraint_system/types.rs | 168 ++++ provers/plonk/src/lib.rs | 5 + provers/plonk/src/prover.rs | 920 ++++++++++++++++++ provers/plonk/src/setup.rs | 218 +++++ provers/plonk/src/test_utils/circuit_1.rs | 116 +++ provers/plonk/src/test_utils/circuit_2.rs | 157 +++ provers/plonk/src/test_utils/circuit_json.rs | 185 ++++ provers/plonk/src/test_utils/mod.rs | 8 + provers/plonk/src/test_utils/utils.rs | 93 ++ provers/plonk/src/verifier.rs | 423 ++++++++ 28 files changed, 5446 insertions(+), 5 deletions(-) create mode 100644 docs/src/plonk/SUMMARY.md create mode 100644 docs/src/plonk/constraint_system.md create mode 100644 docs/src/plonk/implementation.md create mode 100644 docs/src/plonk/plonk.md create mode 100644 docs/src/plonk/protocol.md create mode 100644 docs/src/plonk/recap.md create mode 100644 provers/plonk/Cargo.toml create mode 100644 provers/plonk/src/constraint_system/conditional.rs create mode 100644 
provers/plonk/src/constraint_system/errors.rs create mode 100644 provers/plonk/src/constraint_system/examples/mimc.rs create mode 100644 provers/plonk/src/constraint_system/examples/mod.rs create mode 100644 provers/plonk/src/constraint_system/examples/pow.rs create mode 100644 provers/plonk/src/constraint_system/mod.rs create mode 100644 provers/plonk/src/constraint_system/operations.rs create mode 100644 provers/plonk/src/constraint_system/solver.rs create mode 100644 provers/plonk/src/constraint_system/types.rs create mode 100644 provers/plonk/src/lib.rs create mode 100644 provers/plonk/src/prover.rs create mode 100644 provers/plonk/src/setup.rs create mode 100644 provers/plonk/src/test_utils/circuit_1.rs create mode 100644 provers/plonk/src/test_utils/circuit_2.rs create mode 100644 provers/plonk/src/test_utils/circuit_json.rs create mode 100644 provers/plonk/src/test_utils/mod.rs create mode 100644 provers/plonk/src/test_utils/utils.rs create mode 100644 provers/plonk/src/verifier.rs diff --git a/Cargo.toml b/Cargo.toml index b090ec686..75e9eae2b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace] -members = ["math", "crypto", "gpu", "benches"] +members = ["math", "crypto", "gpu", "benches", "provers/plonk"] exclude = ["ensure-no_std"] [workspace.package] diff --git a/README.md b/README.md index d19821e4a..176c5dba3 100644 --- a/README.md +++ b/README.md @@ -16,16 +16,22 @@ Zero-Knowledge and Validity Proofs have gained a lot of attention over the last So, we decided to build our library, focusing on performance, with clear documentation and developer-focused. Our core team is a group of passionate people from different backgrounds and different strengths; we think that the whole is greater than just the addition of the parts. We don't want to be a compilation of every research result in the ZK space. We want this to be a library that can be used in production, not just in academic research. 
We want to offer developers the main building blocks and proof systems so that they can build their applications on top of this library. ## Provers and Polynomial Commitment Schemes using LambdaWorks + +All provers are being migrated to Lambdaworks library + +Right now Plonk prover is in this repo, you can find the others here: + - [Cairo STARK LambdaWorks prover](https://github.com/lambdaclass/lambdaworks_cairo_prover/tree/main) -- [Plonk LambdaWorks prover](https://github.com/lambdaclass/lambdaworks_plonk_prover) - [CairoVM Trace Generation using LambdaWorks](https://github.com/lambdaclass/cairo-rs/pull/1184) - [ABI compatible KZG commitment scheme - EIP-4844](https://github.com/lambdaclass/lambdaworks_kzg) ## Main crates -- [Finite Field Algebra](https://github.com/lambdaclass/lambdaworks/tree/main/math/src/field) -- [Polynomial operations](https://github.com/lambdaclass/lambdaworks/blob/main/math/src/polynomial.rs) -- [Fast Fourier Transform](https://github.com/lambdaclass/lambdaworks/tree/main/fft) +- [Math](https://github.com/lambdaclass/lambdaworks/tree/main/math) +- [Crypto primitives](https://github.com/lambdaclass/lambdaworks/crypto) +- [Plonk Prover](https://github.com/lambdaclass/lambdaworks/provers/plonk) + +### Crypto - [Elliptic curves](https://github.com/lambdaclass/lambdaworks/tree/main/math/src/elliptic_curve) - [Multiscalar multiplication](https://github.com/lambdaclass/lambdaworks/tree/main/math/src/msm) @@ -114,6 +120,70 @@ make benchmark BENCH=field You can check the generated HTML report in `target/criterion/reports/index.html` +# Lambdaworks Plonk Prover +A fast implementation of the [Plonk](https://eprint.iacr.org/2019/953) zk-protocol written in Rust. This is part of the [Lambdaworks](https://github.com/lambdaclass/lambdaworks) zero-knowledge framework. It includes a high-level API to seamlessly build your own circuits. + +
+ +[![Telegram Chat][tg-badge]][tg-url] + +[tg-badge]: https://img.shields.io/static/v1?color=green&logo=telegram&label=chat&style=flat&message=join +[tg-url]: https://t.me/+98Whlzql7Hs0MDZh + +
+ +This prover is still in development and may contain bugs. It is not intended to be used in production yet. + +## Building a circuit +The following code creates a circuit with two public inputs `x`, `y` and asserts `x * e = y`: + +```rust +let system = &mut ConstraintSystem::::new(); +let x = system.new_public_input(); +let y = system.new_public_input(); +let e = system.new_variable(); + +let z = system.mul(&x, &e); +system.assert_eq(&y, &z);; +``` + +## Generating a proof +### Setup +A setup is needed in order to generate a proof for a new circuit. The following code generates a verifying key that will be used by both the prover and the verifier: + +```rust +let common = CommonPreprocessedInput::from_constraint_system(&system, &ORDER_R_MINUS_1_ROOT_UNITY); +let srs = test_srs(common.n); +let kzg = KZG::new(srs); // The commitment scheme for plonk. +let verifying_key = setup(&common, &kzg); +``` + +### Prover +First, we fix values for `x` and `e` and solve the constraint system: +```rust +let inputs = HashMap::from([(x, FieldElement::from(4)), (e, FieldElement::from(3))]); +let assignments = system.solve(inputs).unwrap(); +``` + +Finally, we call the prover: +```rust +let witness = Witness::new(assignments, &system); +let public_inputs = system.public_input_values(&assignments); +let prover = Prover::new(kzg.clone(), TestRandomFieldGenerator {}); +let proof = prover.prove(&witness, &public_inputs, &common, &verifying_key); +``` + +## Verifying a proof +Just call the verifier: + +```rust +let verifier = Verifier::new(kzg); +assert!(verifier.verify(&proof, &public_inputs, &common, &verifying_key)); +``` + +# More info +You can find more info in the [documentation](https://lambdaclass.github.io/lambdaworks_plonk_prover/). + ## 📚 References The following links, repos and projects have been important in the development of this library and we want to thank and acknowledge them. 
diff --git a/docs/src/SUMMARY.md b/docs/src/SUMMARY.md index daee6cb47..5ba25fbfb 100644 --- a/docs/src/SUMMARY.md +++ b/docs/src/SUMMARY.md @@ -4,3 +4,9 @@ - [FFT Library]() - [Benchmarks](./fft/benchmarks.md) + +- [Plonk]() + - [Recap](./plonk/recap.md) + - [Protocol](./plonk/protocol.md) + - [Implementation](./plonk/implementation.md) + - [Circuit API](./plonk/constraint_system.md) diff --git a/docs/src/plonk/SUMMARY.md b/docs/src/plonk/SUMMARY.md new file mode 100644 index 000000000..f1a17e1da --- /dev/null +++ b/docs/src/plonk/SUMMARY.md @@ -0,0 +1,7 @@ +# Lambdaworks Plonk Prover + +- [Recap](./recap.md) +- [Protocol](./protocol.md) +- [Implementation](./implementation.md) +- [Circuit API](./constraint_system.md) + diff --git a/docs/src/plonk/constraint_system.md b/docs/src/plonk/constraint_system.md new file mode 100644 index 000000000..e82ecec4e --- /dev/null +++ b/docs/src/plonk/constraint_system.md @@ -0,0 +1,103 @@ +# Circuit API +In this section, we'll discuss how to build your own constraint system to prove the execution of a particular program. + +## Simple Example + +Let's take the following simple program as an example. We have two public inputs: `x` and `y`. We want to prove to a verifier that we know a private input `e` such that `x * e = y`. You can achieve this by building the following constraint system: + +```rust +use lambdaworks_plonk::constraint_system::ConstraintSystem; +use lambdaworks_math::elliptic_curve::short_weierstrass::curves::bls12_381::default_types::FrField; + +fn main() { + let system = &mut ConstraintSystem::::new(); + let x = system.new_public_input(); + let y = system.new_public_input(); + let e = system.new_variable(); + + let z = system.mul(&x, &e); + + // This constraint system asserts that x * e == y + system.assert_eq(&y, &z); +} +``` + +This code creates a constraint system over the field of the BLS12381 curve. Then, it creates three variables: two public inputs `x` and `y`, and a private variable `e`. 
Note that every variable is private except for the public inputs. Finally, it adds the constraints that represent a multiplication and an assertion. + +Before generating proofs for this system, we need to run a setup and obtain a verifying key: + +```rust +let common = CommonPreprocessedInput::from_constraint_system(&system, &ORDER_R_MINUS_1_ROOT_UNITY); +let srs = test_srs(common.n); +let kzg = KZG::new(srs); // The commitment scheme for plonk. +let vk = setup(&common, &kzg); +``` + +Now we can generate proofs for our system. We just need to specify the public inputs and obtain a witness that is a solution for our constraint system: + +```rust +let inputs = HashMap::from([(x, FieldElement::from(4)), (e, FieldElement::from(3))]); +let assignments = system.solve(inputs).unwrap(); +let witness = Witness::new(assignments, &system); +``` + +Once you have all these ingredients, you can call the prover: + +```rust +let public_inputs = system.public_input_values(&assignments); +let prover = Prover::new(kzg.clone(), TestRandomFieldGenerator {}); +let proof = prover.prove(&witness, &public_inputs, &common, &vk); +``` + +and verify: + +```rust +let verifier = Verifier::new(kzg); +assert!(verifier.verify(&proof, &public_inputs, &common, &vk)); +``` + +## Building Complex Systems + +Some operations are common, and it makes sense to wrap the set of constraints that do these operations in a function and use it several times. Lambdaworks comes with a collection of functions to help you build your own constraint systems, such as conditionals, inverses, and hash functions. + +However, if you have an operation that does not come with Lambdaworks, you can easily extend Lambdaworks functionality. Suppose that the exponentiation operation is something common in your program. 
You can write the [square and multiply](https://en.wikipedia.org/wiki/Exponentiation_by_squaring) algorithm and put it inside a function: + +```rust +pub fn pow( + system: &mut ConstraintSystem, + base: Variable, + exponent: Variable, +) -> Variable { + let exponent_bits = system.new_u32(&exponent); + let mut result = system.new_constant(FieldElement::one()); + + for i in 0..32 { + if i != 0 { + result = system.mul(&result, &result); + } + let result_times_base = system.mul(&result, &base); + result = system.if_else(&exponent_bits[i], &result_times_base, &result); + } + result +} +``` + +This function can then be used to modify our simple program from the previous section. The following circuit checks that the prover knows `e` such that `pow(x, e) = y`: + +```rust +use lambdaworks_plonk::constraint_system::ConstraintSystem; +use lambdaworks_math::elliptic_curve::short_weierstrass::curves::bls12_381::default_types::FrField; + +fn main() { + let system = &mut ConstraintSystem::::new(); + let x = system.new_public_input(); + let y = system.new_public_input(); + let e = system.new_variable(); + + let z = pow(system, &x, &e); + system.assert_eq(&y, &z); +} +``` + +You can keep composing these functions in order to create more complex systems. + diff --git a/docs/src/plonk/implementation.md b/docs/src/plonk/implementation.md new file mode 100644 index 000000000..c8ee21c8a --- /dev/null +++ b/docs/src/plonk/implementation.md @@ -0,0 +1,282 @@ +# Implementation +In this section we discuss the implementation details of the PLONK algorithm. We use the notation and terminology of the [protocol](./protocol.md) and [recap](./recap.md) sections. + +At the moment our API supports the backend of PLONK. That is, all the setup, prove and verify algorithms. We temporarily rely on external sources for the definition of a circuit and the creation of the $Q$ and $V$ matrices, as well as the execution of it to obtain the trace matrix $T$. We mainly use gnark temporarily for that purpose. 
+ +So to generate proofs and validate them, we need to feed the algorithms with precomputed values of the $Q$, $V$ and $T$ matrices, and the primitive root of unity $\omega$. + +Let us see our API on a test circuit that provides all these values. The program in this case is the one that takes an input $x$, a private input $e$ and computes $y = xe +5$. As in the toy example of the recap, the output of the program is added to the public inputs and the circuit actually asserts that the output is the claimed value. So more precisely, the prover will generate a proof for the statement `ASSERT(x*e+5==y)`, where both $x,y$ are public inputs. +# Usage +Here is the happy path. + +```rust +// This is the common preprocessed input for +// the test circuit ( ASSERT(x * e + 5 == y) ) +let common_preprocessed_input = test_common_preprocessed_input_2(); + +// Input +let x = FieldElement::from(2_u64); + +// Private input +let e = FieldElement::from(3_u64); + +let y, witness = test_witness_2(x, e); + +let srs = test_srs(common_preprocessed_input.n); +let kzg = KZG::new(srs); + +let verifying_key = setup(&common_preprocessed_input, &kzg); + +let random_generator = TestRandomFieldGenerator {}; +let prover = Prover::new(kzg.clone(), random_generator); + +let public_input = vec![x.clone(), y]; + +let proof = prover.prove( + &witness, + &public_input, + &common_preprocessed_input, + &verifying_key, +); + +let verifier = Verifier::new(kzg); +assert!(verifier.verify( + &proof, + &public_input, + &common_preprocessed_input, + &verifying_key +)); +``` + +Let's brake it down. 
The helper function `test_common_preprocessed_input_2()` returns an instance of the following struct for the particular test circuit: +```rust +pub struct CommonPreprocessedInput { + pub n: usize, + pub domain: Vec>, + pub omega: FieldElement, + pub k1: FieldElement, + + pub ql: Polynomial>, + pub qr: Polynomial>, + pub qo: Polynomial>, + pub qm: Polynomial>, + pub qc: Polynomial>, + + pub s1: Polynomial>, + pub s2: Polynomial>, + pub s3: Polynomial>, + + pub s1_lagrange: Vec>, + pub s2_lagrange: Vec>, + pub s3_lagrange: Vec>, +} +``` +Apart from the eight polynomials in the canonical basis, we store also here the number of constraints $n$, the domain $H$, the primitive $n$-th of unity $\omega$ and the element $k_1$. The element $k_2$ will be $k_1^2$. For convenience, we also store the polynomials $S_{\sigma i}$ in Lagrange form. + +The following lines define the particular values of the program input $x$ and the private input $e$. +```rust +// Input +let x = FieldElement::from(2_u64); + +// Private input +let e = FieldElement::from(3_u64); +let y, witness = test_witness_2(x, e); +``` + The function `test_witness_2(x, e)` returns an instance of the following struct, that holds the polynomials that interpolate the columns $A, B, C$ of the trace matrix $T$. +```rust +pub struct Witness { + pub a: Vec>, + pub b: Vec>, + pub c: Vec>, +} +``` +Next the commitment scheme KZG (Kate-Zaverucha-Goldberg) is instantiated. +```rust +let srs = test_srs(common_preprocessed_input.n); +let kzg = KZG::new(srs); +``` +The `setup` function performs the setup phase. It only needs the common preprocessed input and the commitment scheme. +```rust +let verifying_key = setup(&common_preprocessed_input, &kzg); +``` +It outputs an instance of the struct `VerificationKey`. 
+```rust +pub struct VerificationKey { + pub qm_1: G1Point, + pub ql_1: G1Point, + pub qr_1: G1Point, + pub qo_1: G1Point, + pub qc_1: G1Point, + + pub s1_1: G1Point, + pub s2_1: G1Point, + pub s3_1: G1Point, +} +``` +It stores the commitments of the eight polynomials of the common preprocessed input. The suffix `_1` means it is a commitment. It comes from the notation $[f]_1$, where $f$ is a polynomial. + +Then a prover is instantiated +```rust +let random_generator = TestRandomFieldGenerator {}; +let prover = Prover::new(kzg.clone(), random_generator); +``` +The prover is an instance of the struct `Prover`: +```rust +pub struct Prover +where + F: IsField, + CS: IsCommitmentScheme, + R: IsRandomFieldElementGenerator + { + commitment_scheme: CS, + random_generator: R, + phantom: PhantomData, +} +``` +It stores an instance of a commitment scheme and a random field element generator needed for blinding polynomials. + +Then the public input is defined. As we mentioned in the recap, the public input contains the output of the program. +```rust +let public_input = vec![x.clone(), y]; +``` + +We then generate a proof using the prover's method `prove` +```rust +let proof = prover.prove( + &witness, + &public_input, + &common_preprocessed_input, + &verifying_key, +); +``` +The output is an instance of the struct `Proof`. +```rust +pub struct Proof> { + // Round 1. + /// Commitment to the wire polynomial `a(x)` + pub a_1: CS::Commitment, + /// Commitment to the wire polynomial `b(x)` + pub b_1: CS::Commitment, + /// Commitment to the wire polynomial `c(x)` + pub c_1: CS::Commitment, + + // Round 2. + /// Commitment to the copy constraints polynomial `z(x)` + pub z_1: CS::Commitment, + + // Round 3. 
+ /// Commitment to the low part of the quotient polynomial t(X) + pub t_lo_1: CS::Commitment, + /// Commitment to the middle part of the quotient polynomial t(X) + pub t_mid_1: CS::Commitment, + /// Commitment to the high part of the quotient polynomial t(X) + pub t_hi_1: CS::Commitment, + + // Round 4. + /// Value of `a(ζ)`. + pub a_zeta: FieldElement, + /// Value of `b(ζ)`. + pub b_zeta: FieldElement, + /// Value of `c(ζ)`. + pub c_zeta: FieldElement, + /// Value of `S_σ1(ζ)`. + pub s1_zeta: FieldElement, + /// Value of `S_σ2(ζ)`. + pub s2_zeta: FieldElement, + /// Value of `z(ζω)`. + pub z_zeta_omega: FieldElement, + + // Round 5 + /// Value of `p_non_constant(ζ)`. + pub p_non_constant_zeta: FieldElement, + /// Value of `t(ζ)`. + pub t_zeta: FieldElement, + /// Batch opening proof for all the evaluations at ζ + pub w_zeta_1: CS::Commitment, + /// Single opening proof for `z(ζω)`. + pub w_zeta_omega_1: CS::Commitment, +} +``` + +Finally, we instantiate a verifier. +```rust +let verifier = Verifier::new(kzg); +``` + +It's an instance of `Verifier`: +```rust +struct Verifier> { + commitment_scheme: CS, + phantom: PhantomData, +} +``` + +Finally, we call the verifier's method `verify` that outputs a `bool`. +```rust +assert!(verifier.verify( + &proof, + &public_input, + &common_preprocessed_input, + &verifying_key +)); +``` + +## Padding +All the matrices $Q, V, T, PI$ are padded with dummy rows so that their length is a power of two. To be able to interpolate their columns, we need a primitive root of unity $\omega$ of that order. Given the particular field used in our implementation, that means that the maximum possible size for a circuit is $2^{32}$. + +The entries of the dummy rows are filled in with zeroes in the $Q$, $V$ and $PI$ matrices. The $T$ matrix needs to be consistent with the $V$ matrix. Therefore it is filled with the value of the variable with index $0$. + +Some other rows in the $V$ matrix have also dummy values. 
These are the rows corresponding to the $B$ and $C$ columns of the public input rows. In the recap we denoted them with the empty `-` symbol. They are filled in with the same logic as the padding rows, as well as the corresponding values in the $T$ matrix. + +# Implementation details + +The implementation pretty much follows the rounds as are described in the [protocol](./protocol.md) section. There are a few details that are worth mentioning. + +## Commitment Scheme +The commitment scheme we use is the [Kate-Zaverucha-Goldberg](https://www.iacr.org/archive/asiacrypt2010/6477178/6477178.pdf) scheme with the `BLS 12 381` curve and the ate pairing. It can be found in the `commitments` module of the `lambdaworks_crypto` package. + +The order $r$ of the cyclic subgroup is + +``` +0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001 +``` + +The maximum power of two that divides $r-1$ is $2^{32}$. Therefore, that is the maximum possible order for a primitive root of unity in $\mathbb{F}_r$ with order a power of two. + +## Fiat-Shamir + +### Transcript strategy + +Here we describe our implementation of the transcript used for the Fiat-Shamir heuristic. + +A `Transcript` exposes two methods: `append` and `challenge`. + +The method `append` adds a message to the transcript by updating the internal state of the hasher with the raw bytes of the message. + +The method `challenge` returns the result of the hasher using the current internal state of the hasher. It subsequently resets the hasher and updates the internal state with the last result. + +Here is an example of this process: + +1. Start a fresh transcript. +2. Call `append` and pass `message_1`. +3. Call `append` and pass `message_2`. +4. The internal state of the hasher at this point is `message_2 || message_1`. +5. Call `challenge`. The output is `Hash(message_2 || message_1)`. +6. Call `append` and pass `message_3`. +7. Call `challenge`. The output is `Hash(message_3 || Hash(message_2 || message_1))`. 
+8. Call `append` and pass `message_4`. + +The internal state of the hasher at the end of this exercise is `message_4 || Hash(message_3 || Hash(message_2 || message_1))` + +The underlying hasher function we use is `h=sha3`. + +### Field elements +The result of every challenge is a $256$-bit string, which is interpreted as an integer in big-endian order. A field element is constructed out of it by taking modulo the field order. The prime field used in this implementation has a $255$-bit order. Therefore some field elements are more probable to occur than others because they have more representatives as 256-bit integers. + +### Strong Fiat-Shamir +The first messages added to the transcript are all commitments of the polynomials of the common preprocessed input and the values of the public inputs. This prevents a known vulnerability called "weak Fiat-Shamir". +Check out the following resources to learn more about it. + +- [What can go wrong (zkdocs)](https://www.zkdocs.com/docs/zkdocs/protocol-primitives/fiat-shamir/#what-can-go-wrong) +- [How not to Prove Yourself: Pitfalls of the Fiat-Shamir Heuristic and Applications to Helios](https://eprint.iacr.org/2016/771.pdf) +- [Weak Fiat-Shamir Attacks on Modern Proof Systems](https://eprint.iacr.org/2023/691) diff --git a/docs/src/plonk/plonk.md b/docs/src/plonk/plonk.md new file mode 100644 index 000000000..f80b28b76 --- /dev/null +++ b/docs/src/plonk/plonk.md @@ -0,0 +1,12 @@ +# PLONK + +In this document, we present an in-depth analysis of PLONK and the specific version that has been implemented in Lambdaworks. Additionally, we provide a step-by-step guide on how to utilize our high-level API, which is designed to simplify the user experience. + +Our version of PLONK is heavily inspired by [gnark](https://github.com/ConsenSys/gnark). We would like to take this opportunity to express our gratitude to ConsenSys for generously sharing their source code with the community. 
+ +We have written this document with the aim of making it accessible to both novice and advanced users. So, whether you're a newcomer to PLONK or an experienced user, you will find this document to be a valuable resource. + +- [Recap](./recap.md) +- [Protocol](./protocol.md) +- [Implementation](./implementation.md) + diff --git a/docs/src/plonk/protocol.md b/docs/src/plonk/protocol.md new file mode 100644 index 000000000..add67ed58 --- /dev/null +++ b/docs/src/plonk/protocol.md @@ -0,0 +1,246 @@ +# Protocol + +## Details and tricks + +### Polynomial commitment scheme + +A polynomial commitment scheme (PCS) is a cryptographic tool that allows one party to commit to a polynomial, and later prove properties of that polynomial. +This commitment hides the original polynomial's coefficients and can be publicly shared without revealing any information about the original polynomial. +Later, the party can use the commitment to prove certain properties of the polynomial, such as that it satisfies certain constraints or that it evaluates to a certain value at a specific point. + +In the implementation section we'll explain the inner workings of the Kate-Zaverucha-Goldberg scheme, a popular PCS chosen in Lambdaworks for PLONK. + +For the moment we only need the following about it: + +It consists of a finite group $\mathbb{G}$ and the following algorithms: +- **Commit($f$)**: This algorithm takes a polynomial $f$ and produces an element of the group $\mathbb{G}$. It is called the commitment of $f$ and is denoted by $[f]_1$. It is homomorphic in the sense that $[f + g]_1 = [f]_1 + [g]_1$. The former sum being addition of polynomials. The latter is addition in the group $\mathbb{G}$. +- **Open($f$,$\zeta$)**: It takes a polynomial $f$ and a field element $\zeta$ and produces an element $\pi$ of the group $\mathbb{G}$. This element is called an opening proof for $f(\zeta)$. It is the proof that $f$ evaluated at $\zeta$ gives $f(\zeta)$. 
+- **Verify($[f]_1$, $\pi$, $\zeta$, $y$)**: It takes group elements $[f]_1$ and $\pi$, and also field elements $\zeta$ and $y$. With overwhelming probability it outputs _Accept_ if $f(\zeta)=y$ and _Reject_ otherwise. + + +### Blindings + +As you will see in the protocol, the prover reveals the value taken by a bunch of the polynomials at a random $\zeta$. In order for the protocol to be _Honest Verifier Zero Knowledge_, these polynomials need to be _blinded_. This is a process that makes the values of these polynomials at $\zeta$ seemingly random by forcing them to be of certain degree. Here's how it works. + +Let's take for example the polynomial $a$ the prover constructs. This is the interpolation of the first column of the trace matrix $T$ at the domain $H$. +This matrix has all of the left operands of all the gates. The prover wishes to keep them secret. +Say the trace matrix $T$ has $N$ rows. And so $H$ is $\{1, \omega,\omega^2, \dots, \omega^{N-1}\}$. The invariant that the prover cannot violate is that $a_{\text{blinded}}(\omega^i)$ must take the value $T_{0, i}$, for all $i$. This is what the interpolation polynomial $a$ satisfies. And is the unique such polynomial of degree at most $N-1$ with such property. But for higher degrees, there are many such polynomials. + +The _blinding_ process takes $a$ and a desired degree $M\geq N$, and produces a new polynomial $a_{\text{blinded}}$ of degree exactly $M$. This new polynomial satisfies that $a_{\text{blinded}}(\omega^i) = a(\omega^i)$ for all $i$. But outside $H$ differs from $a$. + +This may seem hard but it's actually very simple. Let $z_H$ be the polynomial $z_H = X^N - 1$. If $M=N+k$, with $k\geq 0$, then sample random values $b_0, \dots, b_k$ and define +$$ a_{\text{blinded}} := (b_0 + b_1 X + \cdots + b_k X^k)z_H + a $$ + +The reason why this does the job is that $z_H(\omega^i)=0$ for all $i$. Therefore the added term vanishes at $H$ and leaves the values of $a$ at $H$ unchanged. 
+ +### Linearization trick + +This is an optimization in PLONK to reduce the number of checks of the verifier. + +One of the main checks in PLONK boils down to check that $p(\zeta) = z_H(\zeta) t(\zeta)$, with $p$ some polynomial that looks like $p = a q_L + b q_R + ab q_M + \cdots$, and so on. In particular the verifier needs to get the value $p(\zeta)$ from somewhere. + +For the sake of simplicity, in this section assume $p$ is exactly $a q_L + bq_R$. Secret to the prover here are only $a, b$. The polynomials $q_L$ and $q_R$ are known also to the verifier. The verifier will already have the commitments $[a]_1, [b]_1, [q_L]_1$ and $[q_R]_1$. So the prover could send just $a(\zeta)$, $b(\zeta)$ along with their opening proofs and let the verifier compute by himself $q_L(\zeta)$ and $q_R(\zeta)$. Then with all these values the verifier could compute $p(\zeta) = a(\zeta)q_L(\zeta) + b(\zeta)q_R(\zeta)$. And also use his commitments to validate the opening proofs of $a(\zeta)$ and $b(\zeta)$. + +This has the problem that computing $q_L(\zeta)$ and $q_R(\zeta)$ is expensive. The prover can instead save the verifier this by sending also $q_L(\zeta), q_R(\zeta)$ along with opening proofs. Since the verifier will have the commitments $[q_L]_1$ and $[q_R]_1$ beforehand, he can check that the prover is not cheating and cheaply be convinced that the claimed values are actually $q_L(\zeta)$ and $q_R(\zeta)$. This is much better. It involves the check of four opening proofs and the computation of $p(\zeta)$ off the values received from the prover. But it can be further improved as follows. + +As before, the prover sends $a(\zeta), b(\zeta)$ along with their opening proofs. She constructs the polynomial $f = a(\zeta)q_L + b(\zeta)q_R$. She sends the value $f(\zeta)$ along with an opening proof of it. Notice that the value of $f(\zeta)$ is exactly $p(\zeta)$. The verifier can compute by himself $[f]_1$ as $a(\zeta)[q_L]_1 + b(\zeta)[q_R]_1$. 
The verifier has everything to check all three openings and get convinced that the claimed value $f(\zeta)$ is true. And this value is actually $p(\zeta)$. So this means no more work for the verifier. And the whole thing got reduced to three openings. + +This is called the linearization trick. The polynomial $f$ is called the _linearization_ of $p$. + + +## Setup + +There's a one time setup phase to compute some values common to any execution and proof of the particular circuit. Precisely, the following commitments are computed and published. +$$ [q_L]_1, [q_R]_1, [q_M]_1, [q_O]_1, [q_C]_1, [S_{\sigma 1}]_1, [S_{\sigma 2}]_1, [S_{\sigma 3}]_1$$ + +## Proving algorithm + +Next we describe the proving algorithm for a program of size $N$. That includes public inputs. Let $\omega$ be a primitive $N$-th root of unity. Let $H=\{1, \omega, \omega^2, \dots, \omega^{N-1}\}$. Define $Z_H := X^N-1$. + +Assume the eight polynomials of common preprocessed input are already given. + +The prover computes the trace matrix $T$ as described in the first sections. That means, with the first rows corresponding to the public inputs. It should be an $N \times 3$ matrix. + +### Round 1 + +Add to the transcript the following: +$$[S_{\sigma1}]_1, [S_{\sigma2}]_1, [S_{\sigma3}]_1, [q_L]_1, [q_R]_1, [q_M]_1, [q_O]_1, [q_C]_1$$ + +Compute polynomials $a',b',c'$ as the interpolation polynomials of the columns of $T$ at the domain $H$. +Sample random $b_1, b_2, b_3, b_4, b_5, b_6$. +Let + +$a := (b_1X + b_2)Z_H + a'$ + +$b := (b_3X + b_4)Z_H + b'$ + +$c := (b_5X + b_6)Z_H + c'$ + +Compute $[a]_1, [b]_1, [c]_1$ and add them to the transcript. + +### Round 2 + +Sample $\beta, \gamma$ from the transcript. + +Let $z_0 = 1$ and define recursively for $0\leq k < N$. 
+ +$$ +z_{k+1} = z_k \frac{(a_k + \beta\omega^k + \gamma)(b_k + \beta\omega^kk_1 + \gamma)(c_k + \beta\omega^kk_2 + \gamma)}{(a_k + \beta S_{\sigma1}(\omega^k) + \gamma)(b_k + \beta S_{\sigma2}(\omega^k) + \gamma)(c_k + \beta S_{\sigma3}(\omega^k) + \gamma)} +$$ + +Compute the polynomial $z'$ as the interpolation polynomial at the domain $H$ of the values $(z_0, \dots, z_{N-1})$. + +Sample random values $b_7, b_8, b_9$ and let $z = (b_7X^2 + b_8X + b_9)Z_H + z'$. + +Compute $[z]_1$ and add it to the transcript. + +### Round 3 + +Sample $\alpha$ from the transcript. + +Let $pi$ be the interpolation of the public input matrix $PI$ at the domain $H$. + +Let + +$$ +\begin{aligned} +p_1 &= aq_L + bq_R + abq_M + cq_O + q_C + pi \\ +p_2 &= (a + \beta X + \gamma)(b + \beta k_1 X + \gamma)(c + \beta k_2 X + \gamma)z - (a + \beta S_{\sigma1} + \gamma)(b + \beta S_{\sigma2} + \gamma)(c + \beta S_{\sigma3} + \gamma)z(\omega X)\\ +p_3 &= (z - 1)L_1 +\end{aligned} +$$ + +and define $p = p_1 + \alpha p_2 + \alpha^2 p_3$. Compute $t$ such that $p = t Z_H$. Write $t = t_{lo}' + X^{N+2} t_{mid}' + X^{2(N+2)}t_{hi}'$ with $t_{lo}', t_{mid}'$ and $t_{hi}'$ polynomials of degree at most $N+1$. + +Sample random $b_{10}, b_{11}$ and define + +$$ +\begin{aligned} +t_{lo} &= t_{lo}' + b_{10}X^{N+2} \\ +t_{mid} &= t_{mid}' - b_{10} + b_{11}X^{N+2} \\ +t_{hi} &= t_{hi}' - b_{11} +\end{aligned} +$$ + +Compute $[t_{lo}]_1, [t_{mid}]_1,[t_{hi}]_1$ and add them to the transcript. + +### Round 4 + +Sample $\zeta$ from the transcript. + +Compute $\bar a = a(\zeta), \bar b = b(\zeta), \bar c = c(\zeta), \bar s_{\sigma1} = S_{\sigma1}(\zeta), \bar s_{\sigma2} = S_{\sigma2}(\zeta), \bar z_\omega = z(\zeta\omega)$ and add them to the transcript. + +### Round 5 + +Sample $\upsilon$ from the transcript. 
+
+Let
+
+$$
+\begin{aligned}
+\hat p_{nc1} &= \bar aq_L + \bar bq_R + \bar a\bar bq_M + \bar cq_O + q_C \\
+\hat p_{nc2} &=(\bar a + \beta\zeta + \gamma)(\bar b + \beta k_1\zeta + \gamma)(\bar c + \beta k_2\zeta + \gamma)z - (\bar a + \beta \bar s_{\sigma1} + \gamma)(\bar b + \beta \bar s_{\sigma2} + \gamma)\beta \bar z_\omega S_{\sigma3} \\
+\hat p_{nc3} &= L_1(\zeta) z
+\end{aligned}
+$$
+
+Define
+
+$$
+\begin{aligned}
+p_{nc} &= \hat p_{nc1} + \alpha \hat p_{nc2} + \alpha^2 \hat p_{nc3} \\
+t_{\text{partial}} &= t_{lo} + \zeta^{N+2}t_{mid} + \zeta^{2(N+2)}t_{hi}
+\end{aligned}
+$$
+
+The subscript $nc$ stands for "non constant", as it is the part of the linearization of $p$ that has non constant factors. The subscript "partial" indicates that it is a partial evaluation of $t$ at $\zeta$. Partial meaning that only some powers of $X$ are replaced by the powers of $\zeta$. So in particular $t_{\text{partial}}(\zeta) = t(\zeta)$.
+
+Let $\pi_{\text{batch}}$ be the opening proof at $\zeta$ of the polynomial $f_{\text{batch}}$ defined as
+$$t_{\text{partial}} +\upsilon p_{nc} + \upsilon^2 a + \upsilon^3 b + \upsilon^4 c + \upsilon^5 S_{\sigma1} + \upsilon^6 S_{\sigma2}$$
+
+Let $\pi_{\text{single}}$ be the opening proof at $\zeta\omega$ of the polynomial $z$.
+
+Compute $\bar p_{nc} := p_{nc}(\zeta)$ and $\bar t = t(\zeta)$.
+
+### Proof
+
+The proof is:
+$$[a]_1, [b]_1, [c]_1, [z]_1, [t_{lo}]_1, [t_{mid}]_1, [t_{hi}]_1, \bar a, \bar b, \bar c, \bar s_{\sigma1}, \bar s_{\sigma2}, \bar z_\omega, \pi_{\text{batch}}, \pi_{\text{single}}, \bar p_{nc}, \bar t$$
+
+## Verification algorithm
+
+### Transcript initialization
+
+The first step is to initialize the transcript in the same way the prover did, adding to it the following elements.
+$$[S_{\sigma1}]_1, [S_{\sigma2}]_1, [S_{\sigma3}]_1, [q_L]_1, [q_R]_1, [q_M]_1, [q_O]_1, [q_C]_1$$
+
+### Extraction of values and commitments
+
+#### Challenges
+
+Firstly, the verifier needs to compute all the challenges. 
For that, he follows these steps:
+
+- Add $[a]_1, [b]_1, [c]_1$ to the transcript.
+- Sample two challenges $\beta, \gamma$.
+- Add $[z]_1$ to the transcript.
+- Sample a challenge $\alpha$.
+- Add $[t_{lo}]_1, [t_{mid}]_1, [t_{hi}]_1$ to the transcript.
+- Sample a challenge $\zeta$.
+- Add $\bar a, \bar b, \bar c, \bar s_{\sigma1}, \bar s_{\sigma2}, \bar z_\omega$ to the transcript.
+- Sample a challenge $\upsilon$.
+
+#### Compute $pi(\zeta)$
+
+Also he needs to compute a few values from all this data. First, he computes the $PI$ matrix with the public inputs and outputs. He needs to compute $pi(\zeta)$, where $pi$ is the interpolation of $PI$ at the domain $H$. But he doesn't need to compute $pi$. He can instead compute $pi(\zeta)$ as
+$$ \sum_{i=0}^n L_i(\zeta) (PI)_i,$$
+where $n$ is the number of public inputs and $L_i$ is the Lagrange basis at the domain $H$.
+
+#### Compute claimed values $p(\zeta)$ and $t(\zeta)$
+
+He computes $\bar p_{c} := pi(\zeta) + \alpha \bar z_\omega (\bar c + \gamma) (\bar a + \beta \bar s_{\sigma1} + \gamma) (\bar b + \beta \bar s_{\sigma2} + \gamma) - \alpha^2 L_1(\zeta)$
+
+This is the _constant_ part of the linearization of $p$. So adding it to what the prover claims to be $\bar p_{nc}$, he obtains
+$$p(\zeta) = \bar p_{c} + \bar p_{nc}$$
+
+With respect to $t(\zeta)$, this is actually already $\bar t$. 
+
+#### Compute $[t_{\text{partial}}]_1$ and $[p_{nc}]_1$
+
+He computes these off the commitments in the proof as follows
+$$ [t_{\text{partial}}]_1 = [t_{lo}]_1 + \zeta^{N+2}[t_{mid}]_1 + \zeta^{2(N+2)}[t_{hi}]_1 $$
+
+For $[p_{nc}]_1$, first compute
+
+$$
+\begin{aligned}
+[\hat p_{nc1}]_1 &= \bar a[q_L]_1 + \bar b[q_R]_1 + (\bar a\bar b)[q_M]_1 + \bar c[q_O]_1 + [q_C]_1 \\
+[\hat p_{nc2}]_1 &= (\bar a + \beta\zeta + \gamma)(\bar b + \beta k_1\zeta + \gamma)(\bar c + \beta k_2\zeta + \gamma)[z]_1 - (\bar a + \beta \bar s_{\sigma1} + \gamma)(\bar b + \beta \bar s_{\sigma2} + \gamma)\beta \bar z_\omega [S_{\sigma3}]_1 \\
+[\hat p_{nc3}]_1 &= L_1(\zeta)[z]_1
+\end{aligned}
+$$
+
+Then $[p_{nc}]_1 = [\hat p_{nc1}]_1 + \alpha [\hat p_{nc2}]_1 + \alpha^2 [\hat p_{nc3}]_1$, matching the definition of $p_{nc}$ in Round 5.
+
+#### Compute claimed value $f_{\text{batch}}(\zeta)$ and $[f_{\text{batch}}]_1$
+
+Compute $f_{\text{batch}}(\zeta)$ as
+
+$$
+f_{\text{batch}}(\zeta) =
+\bar t +\upsilon \bar p_{nc} + \upsilon^2 \bar a + \upsilon^3 \bar b + \upsilon^4 \bar c + \upsilon^5 \bar s_{\sigma1} + \upsilon^6 \bar s_{\sigma2}
+$$
+
+Also, the commitment of the polynomial $f_{\text{batch}}$ is
+$$[f_{\text{batch}}]_1 = [t_{\text{partial}}]_1 +\upsilon [p_{nc}]_1 + \upsilon^2 [a]_1 + \upsilon^3 [b]_1 + \upsilon^4 [c]_1 + \upsilon^5 [S_{\sigma1}]_1 + \upsilon^6 [S_{\sigma2}]_1$$
+
+### Proof check
+
+Now the verifier has all the necessary values to proceed with the checks.
+
+- Check that $p(\zeta)$ equals $(\zeta^N - 1)t(\zeta)$.
+- Verify the opening of $f_{\text{batch}}$ at $\zeta$. That is, check that $\text{Verify}([f_{\text{batch}}]_1, \pi_{\text{batch}}, \zeta, f_{\text{batch}}(\zeta))$ outputs _Accept_.
+- Verify the opening of $z$ at $\zeta\omega$. That is, check the validity of the proof $\pi_{\text{single}}$ using the commitment $[z]_1$ and the value $\bar z_\omega$.
+That is, check that $\text{Verify}([z]_1, \pi_{\text{single}}, \zeta\omega, \bar z_\omega)$ outputs _Accept_.
+
+If all checks pass, he outputs _Accept_. Otherwise outputs _Reject_. 
+ diff --git a/docs/src/plonk/recap.md b/docs/src/plonk/recap.md new file mode 100644 index 000000000..7a67618c5 --- /dev/null +++ b/docs/src/plonk/recap.md @@ -0,0 +1,448 @@ +# PLONK + +PLONK is a popular cryptographic proving system within the Zero Knowledge (ZK) community due to its efficiency and flexibility. It enables the verification of complex computations executed by untrusted parties through the transformation of programs into circuit representations. The system relies on a process called arithmetization, which converts logical circuits into polynomial representations. The main idea behind arithmetization is to express the computation as a set of polynomial equations. The solutions to these equations correspond to the outputs of the circuit. In this section, we will delve into the mechanics of how arithmetization works in PLONK, as well as the protocol used to generate and verify proofs. + +The paper can be found [here](https://eprint.iacr.org/2019/953.pdf) + +## Notation +We use the following notation. + +The symbol $\mathbb{F}$ denotes a finite field. It is fixed all along. The symbol $\omega$ denotes a primitive root of unity in $\mathbb{F}$. + +All polynomials have coefficients in $\mathbb{F}$ and the variable is usually denoted by $X$. We denote polynomials by single letters like $p, a, b, z$. We only denote them as $z(X)$ when we want to emphasize the fact that it is a polynomial in $X$, or we need that to explicitly define a polynomial from another one. For example when composing a polynomial $z$ with the polynomial $\omega X$, the result being denoted by $z' := z(\omega X)$. The symbol $'$ is **not** used to denote derivatives. + +When interpolating at a domain $H=\{h_0, \dots, h_n\} \subset \mathbb{F}$, the symbols $L_i$ denote the Lagrange basis. That is $L_i$ is the polynomial such that $L_i(h_j) = 0$ for all $j\neq i$, and that $L_i(h_i) = 1$. + +If $M$ is a matrix, then $M_{i,j}$ denotes the value at the row $i$ and column $j$. 
+ +# The ideas and components + +## Programs. Our toy example + +For better clarity, we'll be using the following toy program throughout this recap. + +``` +INPUT: + x + +PRIVATE INPUT: + e + +OUTPUT: + e * x + x - 1 +``` + +The observer would have noticed that this program could also be written as $(e + 1) * x - 1$, which is more sensible. But the way it is written now serves us to better explain the arithmetization of PLONK. So we'll stick to it. + +The idea here is that the verifier holds some value $x$, say $x=3$. He gives it to the prover. She executes the program using her own chosen value $e$, and sends the output value, say $8$, along with a proof $\pi$ demonstrating correct execution of the program and obtaining the correct output. + +In the context of PLONK, both the inputs and outputs of the program are considered _public inputs_. This may sound odd, but it is because these are the inputs to the verification algorithm. This is the algorithm that takes, in this case, the tuple $(3, 8, \pi)$ and outputs _Accept_ if the toy program was executed with input $x=3$, some private value $e$ not revealed to the verifier, and out came $8$. Otherwise it outputs _Reject_. + +PLONK can be used to delegate program executions to untrusted parties, but it can also be used as a proof of knowledge. Our program could be used by a prover to demostrate that she knows the multiplicative inverse of some value $x$ in the finite field without revealing it. She would do it by sending the verifier the tuple $(x, 0, \pi)$, where $\pi$ is the proof of the execution of our toy program. + +In our toy example this is pointless because inverting field elements is easily performed by any verifier. But change our program to the following and you get proofs of knowledge of the preimage of SHA256 digests. + +``` +PRIVATE INPUT: + e + +OUTPUT: + SHA256(e) +``` + +Here there's no input aside from the prover's private input. 
As we mentioned, the output $h$ of the program is then part of the inputs to the verification algorithm. Which in this case just takes $(h, \pi)$.
+
+## PLONK Arithmetization
+
+This is the process that takes the circuit of a particular program and produces a set of mathematical tools that can be used to generate and verify proofs of execution. The end result will be a set of eight polynomials. To compute them we need first to define two matrices. We call them the $Q$ matrix and the $V$ matrix. The polynomials and the matrices depend only on the program and not on any particular execution of it. So they can be computed once and used for every execution instance. To understand what they are useful for, we need to start from _execution traces_.
+
+### Circuits and execution traces
+
+See the program as a sequence of gates that have a left operand, a right operand and an output. The two most basic gates are multiplication and addition gates. In our example, one way of seeing our toy program is as a composition of three gates.
+
+Gate 1: left: e, right: x, output: u = e \* x
+Gate 2: left: u, right: x, output: v = u + x
+Gate 3: left: v, right: 1, output: w = v - 1
+
+On executing the circuit, all these variables will take a concrete value. All that information can be put in table form. It will be a matrix with all left, right and output values of all the gates. One row per gate. We call the columns of this matrix $A, B, C$. Let's build them for $x=3$ and $e=2$. We get $u=6$, $v=9$ and $w=8$. So the first matrix is:
+
+| A | B | C |
+| --- | --- | --- |
+| 2 | 3 | 6 |
+| 6 | 3 | 9 |
+| 9 | - | 8 |
+
+The last gate subtracts a constant value that is part of the program and is not a variable. So it actually has only one input instead of two. And the output is the result of subtracting $1$ from it. That's why it is handled a bit differently from the second gate. The symbol "-" in the $B$ column is a consequence of that. 
With that we mean "any value" because it won't change the result. In the next section we'll see how we implement that. Here we'll use this notation when any value can be put there. In case we have to choose some, we'll default to $0$.
+
+What we got is a valid execution trace. Not all matrices of that shape will be the trace of an execution. The matrices $Q$ and $V$ will be the tool we need to distinguish between valid and invalid execution traces.
+
+### The $Q$ matrix
+
+As we said, it only depends on the program itself and not on any particular evaluation of it. It has one row for each gate and its columns are called $Q_L, Q_R, Q_M, Q_O, Q_C$. They encode the type of gate of the rows and are designed to satisfy the following.
+
+**Claim:** if columns $A, B, C$ correspond to a valid evaluation of the circuit then for all $i$ the following equality holds $$A_i (Q_L)_i + B_i (Q_R)_i + A_i B_i (Q_M)_i + C_i (Q_O)_i + (Q_C)_i = 0$$
+
+This is better seen with examples. A multiplication gate is represented by the row:
+
+| $Q_L$ | $Q_R$ | $Q_M$ | $Q_O$ | $Q_C$ |
+| ----- | ----- | ----- | ----- | ----- |
+| 0 | 0 | 1 | -1 | 0 |
+
+And the row in the trace matrix that corresponds to the execution of that gate is
+
+| A | B | C |
+| --- | --- | --- |
+| 2 | 3 | 6 |
+
+The equation in the claim for that row is that $2 \times 0 + 3 \times 0 + 2 \times 3 \times 1 + 6 \times (-1) + 0$, which equals $0$. The next is an addition gate. This is represented by the row
+
+| $Q_L$ | $Q_R$ | $Q_M$ | $Q_O$ | $Q_C$ |
+| ----- | ----- | ----- | ----- | ----- |
+| 1 | 1 | 0 | -1 | 0 |
+
+The corresponding row in the trace matrix is
+
+| A | B | C |
+| --- | --- | --- |
+| 6 | 3 | 9 |
+
+And the equation of the claim is $6 \times 1 + 3 \times 1 + 6 \times 3 \times 0 + 9 \times (-1) + 0$, which adds up to $0$. Our last row is the gate that adds a constant. 
Addition by constant C can be represented by the row + +| $Q_L$ | $Q_R$ | $Q_M$ | $Q_O$ | $Q_C$ | +| ----- | ----- | ----- | ----- | ----- | +| 1 | 0 | 0 | -1 | C | + +In our case $C=-1$. The corresponding row in the execution trace is + +| A | B | C | +| --- | --- | --- | +| 9 | - | 8 | + +And the equation of the claim is $9 \times 1 + 0 \times 0 + 9 \times 0 \times 0 + 8 \times (-1) + C$. This is also zero. + +Putting it altogether, the full $Q$ matrix is + +| $Q_L$ | $Q_R$ | $Q_M$ | $Q_O$ | $Q_C$ | +| ----- | ----- | ----- | ----- | ----- | +| 0 | 0 | 1 | -1 | 0 | +| 1 | 1 | 0 | -1 | 0 | +| 1 | 0 | 0 | -1 | -1 | + +And we saw that the claim is true for our particular execution: +$$ 2 \times 0 + 3 \times 0 + 2 \times 3 \times 1 + 6 \times (-1) + 0 = 0 $$ +$$ 6 \times 1 + 3 \times 1 + 6 \times 3 \times 0 + 9 \times (-1) + 0 = 0 $$ +$$ 9 \times 1 + 0 \times 0 + 9 \times 0 \times 0 + 8 \times (-1) + (-1) = 0 $$ + +Not important to our example, but multiplication by constant C can be represented by: + +| $Q_L$ | $Q_R$ | $Q_M$ | $Q_O$ | $Q_C$ | +| ----- | ----- | ----- | ----- | ----- | +| C | 0 | 0 | -1 | 0 | + +As you might have already noticed, there are several ways of representing the same gate in some cases. We'll exploit this in a moment. + +### The $V$ matrix + +The claim in the previous section is clearly not an "if and only if" statement because the following trace columns do satisfy the equations but do not correspond to a valid execution: + +| A | B | C | +| --- | --- | --- | +| 2 | 3 | 6 | +| 0 | 0 | 0 | +| 20 | - | 19 | + +The $V$ matrix encodes the carry of the results from one gate to the right or left operand of a subsequent one. These are called _wirings_. Like the $Q$ matrix, it's independent of the particular evaluation. It consists of indices for all input and intermediate variables. 
In this case that matrix is: + +| L | R | O | +| --- | --- | --- | +| 0 | 1 | 2 | +| 2 | 1 | 3 | +| 3 | - | 4 | + +Here $0$ is the index of $e$, $1$ is the index of $x$, $2$ is the index of $u$, $3$ is the index of $v$ and $4$ is the index of the output $w$. Now we can update the claim to have an "if and only if" statement. + +**Claim:** Let $T$ be a matrix with columns $A, B, C$. It correspond to a valid evaluation of the circuit if and only if a) for all $i$ the following equality holds $$A_i (Q_L)_i + B_i (Q_R)_i + A_i B_i Q_M + C_i (Q_O)_i + (Q_C)_i = 0,$$ b) for all $i,j,k,l$ such that $V_{i,j} = V_{k, l}$ we have $T_{i,j} = T_{k, l}$. + +So now our malformed example does not pass the second check. + +### Custom gates + +Our matrices are fine now. But they can be optimized. Let's do that to showcase this flexibility of PLONK and also reduce the size of our example. + +PLONK has the flexibility to construct more sophisticated gates as combinations of the five columns. And therefore the same program can be expressed in multiple ways. In our case all three gates can actually be merged into a single custom gate. The $Q$ matrix ends up being a single row. + +| $Q_L$ | $Q_R$ | $Q_M$ | $Q_O$ | $Q_C$ | +| ----- | ----- | ----- | ----- | ----- | +| 1 | 1 | 1 | -1 | 1 | + +and also the $V$ matrix + +| L | R | O | +| --- | --- | --- | +| 0 | 1 | 2 | + +The trace matrix for this representation is just + +| A | B | C | +| --- | --- | --- | +| 2 | 3 | 8 | + +And we check that it satisfies the equation + +$$ 2 \times 1 + 3 \times 1 + 2 \times 3 \times 1 + 8 \times (-1) + (-1) = 0$$ + +Of course, we can't always squash an entire program into a single gate. + +### Public inputs + +Aside from the gates that execute the program operations, additional rows must be incorporated into these matrices. This is due to the fact that the prover must demonstrate not only that she executed the program, but also that she used the appropriate inputs. 
Furthermore, the proof must include an assertion of the output value. As a result, a few extra rows are necessary. In our case these are the first two and the last one. The original one sits now in the third row. + +| $Q_L$ | $Q_R$ | $Q_M$ | $Q_O$ | $Q_C$ | +| ----- | ----- | ----- | ----- | ----- | +| -1 | 0 | 0 | 0 | 3 | +| -1 | 0 | 0 | 0 | 8 | +| 1 | 1 | 1 | -1 | 1 | +| 1 | -1 | 0 | 0 | 0 | + +And this is the updated $V$ matrix + +| L | R | O | +| --- | --- | --- | +| 0 | - | - | +| 1 | - | - | +| 2 | 0 | 3 | +| 1 | 3 | - | + +The first row is there to force the variable with index $0$ to take the value $3$. Similarly the second row forces variable with index $1$ to take the value $8$. These two will be the public inputs of the verifier. The last row checks that the output of the program is the claimed one. + +And the trace matrix is now + +| A | B | C | +| --- | --- | --- | +| 3 | - | - | +| 8 | - | - | +| 2 | 3 | 8 | +| 8 | 8 | - | + +With these extra rows, equations add up to zero only for valid executions of the program with input $3$ and output $8$. + +An astute observer would notice that by incorporating these new rows, the matrix $Q$ is no longer independent of the specific evaluation. This is because the first two rows of the $Q_C$ column contain concrete values that are specific to a particular execution instance. To maintain independence, we can remove these values and consider them as part of an extra one-column matrix called $PI$ (stands for Public Input). This column has zeros in all rows not related to public inputs. We put zeros in the $Q_C$ columns. The responsibility of filling in the $PI$ matrix is of the prover and verifier. 
In our example it is + +| $PI$ | +| ---- | +| 3 | +| 8 | +| 0 | +| 0 | + +And the final $Q$ matrix is + +| $Q_L$ | $Q_R$ | $Q_M$ | $Q_O$ | $Q_C$ | +| ----- | ----- | ----- | ----- | ----- | +| -1 | 0 | 0 | 0 | 0 | +| -1 | 0 | 0 | 0 | 0 | +| 1 | 1 | 1 | -1 | 1 | +| 1 | -1 | 0 | 0 | 0 | + +We ended up with two matrices that depend only on the program, $Q$ and $V$. And two matrices that depend on a particular evaluation, namely the $ABC$ and $PI$ matrices. The updated version of the claim is the following: + +**Claim:** Let $T$ be a matrix with columns $A, B, C$. It corresponds to a evaluation of the circuit if and only if a) for all $i$ the following equality holds $$A_i (Q_L)_i + B_i (Q_R)_i + A_i B_i Q_M + C_i (Q_O)_i + (Q_C)_i + (PI)_i = 0,$$ b) for all $i,j,k,l$ such that $V_{i,j} = V_{k,l}$ we have $T_{i,j} = T_{k,l}$. + +### From matrices to polynomials + +In the previous section we showed how the arithmetization process works in PLONK. For a program with $n$ public inputs and $m$ gates, we constructed two matrices $Q$ and $V$, of sizes $(n + m + 1) \times 5$ and $(n + m + 1) \times 3$ that satisfy the following. Let $N = n + m + 1.$ + +**Claim:** Let $T$ be a $N \times 3$ matrix with columns $A, B, C$ and $PI$ a $N \times 1$ matrix. They correspond to a valid execution instance with public input given by $PI$ if and only if a) for all $i$ the following equality holds $$A_i (Q_L)_i + B_i (Q_R)_i + A_i B_i Q_M + C_i (Q_O)_i + (Q_C)_i + (PI)_i = 0,$$ b) for all $i,j,k,l$ such that $V_{i,j} = V_{k,l}$ we have $T_{i,j} = T_{k,l}$, c) $(PI)_i = 0$ for all $i>n$. + +Polynomials enter now to squash most of these equations. We will traduce the set of all equations in conditions (a) and (b) to just a few equations on polynomials. + +Let $\omega$ be a primitive $N$-th root of unity and let $H = {\omega^i: 0\leq i < N}$. 
Let $a, b, c, q_L, q_R, q_M, q_O, q_C, pi$ be the polynomials of degree at most $N$ that interpolate the columns $A, B, C, Q_L, Q_R, Q_M, Q_O, Q_C, PI$ at the domain $H$. This means for example that $a(\omega^i) = A_i$ for all $i$. And similarly for all the other columns.
+
+With this, condition (a) of the claim is equivalent to $$a(x) q_L(x) + b(x) q_R(x) + a(x) b(x) q_M(x) + c(x) q_O(x) + q_C(x) + pi(x) = 0$$ for all $x$ in $H$. This is just by definition of the polynomials. But in polynomials land this is also equivalent to (a) there exists a polynomial $t$ such that $$a q_L + b q_R + a b q_M + c q_O + q_C + pi = z_H t$$, where $z_H$ is the polynomial $X^N -1$.
+
+To reduce condition (b) to polynomial equations we need to introduce the concept of permutation. A permutation is a rearrangement of a set. Usually denoted $\sigma$. For finite sets it is a map from a set to itself that takes all values. In our case the set will be the set of all pairs
+$$I=\{(i,j): \text{ such that }0\leq i < N, \text{ and } 0\leq j < 3\}$$
+The matrix $V$ induces a permutation of this set where $\sigma((i,j))$ is equal to the indices of the _next_ occurrence of the value at position $(i,j)$. If already at the last occurrence, go to the first one. By _next_ we mean the following occurrence as if the columns were stacked on each other. Let's see how this works in the example circuit. Recall $V$ is
+
+| L | R | O |
+| --- | --- | --- |
+| 0 | - | - |
+| 1 | - | - |
+| 2 | 0 | 3 |
+| 1 | 3 | - |
+
+The permutation in this case is the map $\sigma((0,0)) = (2,1)$, $\sigma((1,0)) = (3,0)$, $\sigma((2,0)) = (2,0)$, $\sigma((3,0)) = (1,0)$, $\sigma((2,1)) = (0,0)$, $\sigma((3,1)) = (2,2)$, $\sigma((2,2)) = (3,1)$. For the positions with `-` values it doesn't really matter right now.
+
+It's not hard to see that condition (b) is equivalent to: for all $(i,j)\in I$, $T_{i,j} = T_{\sigma((i,j))}$. 
+ +A little less obvious is that this condition is in turn equivalent to checking whether the following sets $A$ and $B$ are equal +$$A = \{((i,j), T_{i,j}): (i,j) \in I\}$$ +$$B = \{(\sigma((i,j)), T_{i,j}): (i,j) \in I\}.$$ +The proof this equivalence is straightforward. Give it a try! + +In our example the sets in question are respectively +$$\{((0,0), T_{0,0}), ((0,1), T_{0,1}), ((0,2), T_{0,2}), ((0,3), T_{0,3}), ((2,1), T_{2,1}), ((3,1), T_{3,1}), ((2,2), T_{2,2})\},$$ +and +$$\{((2,1), T_{0,0}), ((0,3), T_{0,1}), ((0,2), T_{0,2}), ((0,1), T_{0,3}), ((0,0), T_{2,1}), ((2,2), T_{3,1}), ((3,1), T_{2,2})\},$$ + +You can check these sets coincide by inspection. Recall our trace matrix $T$ is + +| A | B | C | +| --- | --- | --- | +| 3 | - | - | +| 8 | - | - | +| 2 | 3 | 8 | +| 8 | 8 | - | + +Checking equality of these sets is something that can be reduced to polynomial equations. It is a very nice method that PLONK uses. To understand it better let's start with a simpler case. + +#### Equality of sets + +Suppose we have two sets $A=\{a_0, a_1\}$ $B=\{b_0, b_1\}$ of two field elements in $\mathbb{F}$. And we are interested in checking whether they are equal. + +One thing we could do is compute $a_0a_1$ and $b_0b_1$ and compare them. If the sets are equal, then those elements are necessarily equal. + +But the converse is not true. For example the sets $A=\{4, 15\}$ and $B=\{6, 10\}$ both have $60$ as the result of the product of their elements. But they are not equal. So this is not good to check equality. + +Polynomials come to rescue here. What we can do instead is consider the following sets _of polynomials_ $A'=\{a_0 + X, a_1 + X\}$, $B'=\{b_0 + X, b_1 + X\}$. Sets $A$ and $B$ are equal if and only if sets $A'$ and $B'$ are equal. This is because equality of polynomials boils down to equality of their coefficients. But the difference with $A'$ and $B'$ is that now the approach of multiplying the elements works. 
That is, $A'$ and $B'$ are equal if and only if $(a_0 + X)(a_1 + X) = (b_0 + X)(b_1 + X)$. This is not entirely evident but follows from a property that polynomials have, called _unique factorization_. Here the important fact is that linear polynomials act as sort of prime factors. Anyway, you can take that for granted. The last part of this trick is to use the Schwartz-Zippel lemma and go back to the land of field elements. That means, if for some random element $\gamma$ we have $(a_0 + \gamma)(a_1 + \gamma) = (b_0 + \gamma)(b_1 + \gamma)$, then with overwhelming probability the equality $(a_0 + X)(a_1 + X) = (b_0 + X)(b_1 + X)$ holds. + +Putting this altogether, if for some random element $\gamma$ we have $(a_0 + \gamma)(a_1 + \gamma) = (b_0 + \gamma)(b_1 + \gamma)$, then the sets $A$ and $B$ are equal. Of course this also holds for sets with more than two elements. Let's write that down. + +_Fact:_ Let $A=\{a_0, \dots, a_{k-1}\}$ and $B=\{b_0, \dots, b_{k-1}\}$ be sets of field elements. If for some random $\gamma$ the following equality holds +$$\prod_{i=0}^{k-1}(a_i + \gamma) = \prod_{i=0}^{k-1}(b_i + \gamma),$$ +then with overwhelming probability $A$ is equal to $B$. + +And here comes the trick that reduces this check to polynomial equations. Let +$H$ be a domain of the form $\{1, \omega, \dots, \omega^{k-1}\}$ for some primitive $k$-th root of unity $\omega$. Let $f$ and $g$ be respectively the polynomials that interpolate the following values at $H$. +$$(a_0 + \gamma, \dots, a_{k-1} + \gamma),$$ +$$(b_0 + \gamma, \dots, b_{k-1} + \gamma),$$ + +Then $\prod_{i=0}^{k-1}(a_i + \gamma)$ equals $\prod_{i=0}^{k-1}(b_i + \gamma)$ if and only if there exists a polynomial $Z$ such that +$$Z(\omega^0) = 1$$ +$$Z(h)f(h) = g(h)Z(\omega h)$$ +for all $h\in H$. + +Let's see why. Suppose that $\prod_{i=0}^{k-1}(a_i + \gamma)$ equals $\prod_{i=0}^{k-1}(b_i + \gamma)$. 
Construct $Z$ as the polynomial that interpolates the following values $$(1, \frac{a_0 + \gamma}{b_0 + \gamma}, \frac{(a_0 + \gamma)(a_1 + \gamma)}{(b_0 + \gamma)(b_1 + \gamma)}, \dots, \prod_{i=0}^{k-2} \frac{a_i + \gamma}{b_i + \gamma}),$$
+in the same domain as $f$ and $g$. That works. Conversely, suppose such a polynomial $Z$ exists. By evaluating the equation $Z(X)f(X) = g(X)Z(\omega X)$ at $1, \omega, \dots, \omega^{k-2}$ and using recursion we get that $Z(\omega^{k-1}) = \prod_{i=0}^{k-2}(a_i + \gamma)/\prod_{i=0}^{k-2}(b_i + \gamma)$. Moreover, evaluating it at $\omega^{k-1}$ we obtain that $$Z(\omega^{k-1})\frac{f(\omega^{k-1})}{g(\omega^{k-1})} = Z(\omega^k) = Z(\omega^0) = 1.$$
+The second equality holds because $\omega^k = \omega^0$ since it is a $k$-th root of unity. Expanding with the values of $f, g$ and $Z$ one obtains that $\prod_{i=0}^{k-1}(a_i + \gamma)/\prod_{i=0}^{k-1}(b_i + \gamma)$ equals $1$. Which is what we wanted.
+
+In summary. We proved the following:
+
+_Fact:_ Let $A=\{a_0, \dots, a_{k-1}\}$ and $B=\{b_0, \dots, b_{k-1}\}$ be sets of field elements. Let $\gamma$ be a random field element. Let $\omega$ be a primitive $k$-th root of unity and $H=\{1, \omega, \omega^2, \dots, \omega^{k-1}\}$. Let $f$ and $g$ be respectively the polynomials that interpolate the values $\{a_0 + \gamma, \dots, a_{k-1} + \gamma\}$ and $\{b_0 + \gamma, \dots, b_{k-1} + \gamma\}$ at $H$. If there exists a polynomial $Z$ such that
+$$Z(\omega^0) = 1$$
+$$Z(h)f(h) = g(h)Z(\omega h)$$
+for all $h\in H$, then with overwhelming probability the sets $A$ and $B$ are equal.
+
+#### Sets of tuples
+
+In the previous section we saw how to check whether two sets of field elements are equal using polynomial equations. To be able to use it in our context we need to extend it to sets of tuples of field elements. This is pretty straightforward.
+
+Let's start with the easy case. 
Let $A=\{(a_0, a_1), (a_2, a_3)\}$ and $B=\{(b_0, b_1), (b_2, b_3)\}$ be two sets of pairs of field elements. That is $a_i, b_i \in \mathbb{F}$ for all $i$. The trick is very similar to the previous section.
+$$A'=\{a_0 + a_1 Y + X, a_2 + a_3 Y + X\}$$
+$$B'=\{b_0 + b_1 Y + X, b_2 + b_3 Y + X\}$$
+
+Just as before, by looking at coefficients we can see that the sets $A$ and $B$ are equal if and only if $A'$ and $B'$ are equal.
+And notice that these are sets of polynomials, we got rid of the tuples! And now the situation is very similar to the previous section. We have that $A'$ and $B'$ are equal if and only if the product of their elements coincide. This is true also because polynomials in two variables are a unique factorization domain. So as before, we can use the Schwartz-Zippel lemma. Precisely, if for random $\beta, \gamma$, the elements
+$$(a_0 + \beta a_1 + \gamma)(a_2 + \beta a_3 + \gamma),$$
+and
+$$(b_0 + \beta b_1 + \gamma)(b_2 + \beta b_3 + \gamma)$$
+coincide, then $A$ and $B$ are equal with overwhelming probability.
+
+Here is the statement for sets of more than two pairs of field elements.
+
+_Fact:_ Let $A=\{\bar a_0, \dots, \bar a_{k-1}\}$ and $B=\{\bar b_0, \dots, \bar b_{k-1}\}$ be sets of pairs of field elements. So that $\bar a_i = (a_{i,0}, a_{i,1})$ and the same for $\bar b_i$. Let $\beta, \gamma$ be random field elements. Let $\omega$ be a primitive $k$-th root of unity and $H=\{1, \omega, \omega^2, \dots, \omega^{k-1}\}$. Let $f$ and $g$ be respectively the polynomials that interpolate the values
+$$\{a_{0,0} + a_{0,1}\beta + \gamma, \dots, a_{k-1,0} + a_{k-1,1}\beta + \gamma\},$$
+and
+$$\{b_{0,0} + b_{0,1}\beta + \gamma, \dots, b_{k-1,0} + b_{k-1,1}\beta + \gamma\},$$
+at $H$. If there exists a polynomial $Z$ such that
+$$Z(\omega^0) = 1$$
+$$Z(h)f(h) = g(h)Z(\omega h)$$
+for all $h\in H$, then with overwhelming probability the sets $A$ and $B$ are equal. 
+ +#### Going back to our case + +Recall we want to rephrase condition (b) in terms of polynomials. We have already seen that condition (b) is equivalent to $A$ and $B$ being equal, where +$$A = \{((i,j), T_{i,j}): (i,j) \in I\}$$ +and +$$B = \{(\sigma((i,j)), T_{i,j}): (i,j) \in I\}.$$ + +We cannot directly use the facts of the previous sections because our sets are not sets of field elements. Nor are they sets of pairs of field elements. They are sets of pairs with some indexes $(i,j)$ in the first coordinate and a field element $v$ in the second one. So the solution is to convert them to sets of pairs of field elements and apply the result of the previous section. So how do we map an element of the form $((i,j), v)$ to something of the form $(a_0, a_1)$ with $a_0$ and $a_1$ field elements? The second coordinate is trivial, we can just leave $v$ as it is and take $a_1 = v$. For the indexes pair $(i,j)$ there are multiple ways. The important thing to achieve here is that different pairs get mapped to different field elements. Recall that $i$ ranges from $0$ to $N-1$ and $j$ ranges from $0$ to $2$. One way is to take a $3N$-th primitive root of unity $\eta$ and define $a_0 = \eta^{3i + j}$. Putting it altogether, we are mapping the pair $((i,j), v)$ to the pair $(\eta^{3i + j}, v)$, which is a pair of field elements. Now we can consider the sets +$$A = \{(\eta^{3i + j}, T_{i,j}): (i,j) \in I\}$$ +and +$$B = \{(\eta^{3k + l}, T_{i,j}): (i,j) \in I, \sigma((i,j)) = (k, l)\}.$$ +We have that condition (b) is equivalent to $A$ and $B$ being equal. + +Applying the method of the previous section to these sets, we obtain the following. + +_Fact:_ Let $\eta$ be a $3N$-th root of unity and $\beta$ and $\gamma$ random field elements. Let $D = \{1, \eta, \eta^2, \dots, \eta^{3N-1}\}$. 
Let $f$ and $g$ be the polynomials that interpolate, respectively, the following values at $D$: +$$\{T_{i,j} + \eta^{3i + j}\beta + \gamma: (i,j) \in I\},$$ +and +$$\{T_{i,j} + \eta^{3k + l}\beta + \gamma: (i,j) \in I, \sigma((i,j)) = (k,l)\},$$ +Suppose there exists a polynomial $Z$ such that +$$Z(\eta^0) = 1$$ +$$Z(d)f(d) = g(d)Z(\eta d),$$ +for all $h\in D$. +Then the sets $A = \{((i,j), T_{i,j}): (i,j) \in I\}$ and $B = \{(\sigma((i,j)), T_{i,j}): (i,j) \in I\}$ are equal with overwhelming probability. + +One last minute definitions. Notice that $\omega=\eta^3$ is a primitive $N$-th root of unity. Let $H = \{1, \omega, \omega^2, \dots, \omega^{N-1}\}$. + +Define $S_{\sigma 1}$ to be the interpolation at $H$ of +$$\{\eta^{3k + l}: (i,0) \in I, \sigma((i,0)) = (k,l)\},$$ +Similarly define $S_{\sigma 2}$ and $S_{\sigma 3}$ to be the interpolation at $H$ of the sets of values +$$\{\eta^{3k + l}: (i,1) \in I, \sigma((i,1)) = (k,l)\},$$ +$$\{\eta^{3k + l}: (i,2) \in I, \sigma((i,2)) = (k,l)\},$$ +These will be useful during the protocol to work with such polynomials $Z$ and the above equations. + +#### A more compact form + +The last fact is equivalent the following. There's no new idea here, just a more compact form of the same thing that allows the polynomial $Z$ to be of degree at most $N$. + +_Fact:_ Let $\omega$ be a $N$-th root of unity. Let $H = \{1, \omega, \omega^2, \dots, \omega^{N-1}\}$. Let $k_1$ and $k_2$ be two field elements such that $\omega^i \neq \omega^jk_1 \neq \omega^lk_2$ for all $i,j,l$. Let $\beta$ and $\gamma$ be random field elements. Let $f$ and $g$ be the polynomials that interpolate, respectively, the following values at $H$: +$$\{(T_{0,j} + \omega^{i}\beta + \gamma)(T_{1,j} + \omega^{i}k_1\beta + \gamma)(T_{2,j} + \omega^{i}k_2\beta + \gamma): 0\leq in$. + +Then we constructed polynomials $q_L, q_R, q_M, q_O, q_C, S_{\sigma1},S_{\sigma2}, S_{\sigma3}$, $f$, $g$ off the matrices $Q$ and $V$. 
They are the result of interpolating at a domain $H = \{1, \omega, \omega^2, \dots, \omega^{N-1}\}$ for some $N$-th primitive root of unity and a few random values. And also constructed polynomials $a,b,c, pi$ off the matrices $T$ and $PI$. Loosely speaking, the above fact can be reformulated in terms of polynomial equations as follows. + +**Fact:** Let $z_H = X^N - 1$. Let $T$ be a $N \times 3$ matrix with columns $A, B, C$ and $PI$ a $N \times 1$ matrix. They correspond to a valid execution instance with public input given by $PI$ if and only if + +a) There is a polynomial $t_1$ such that the following equality holds $$a q_L + b q_R + a b q_M + c q_O + q_C + pi = z_H t_1,$$ + +b) There are polynomials $t_2, t_3$, $z$ such that $zf - gz' = z_H t_2$ and $(z-1)L_1 = z_H t_3$, where $z'(X) = z(X\omega)$ + +You might be wondering where the polynomials $t_i$ came from. Recall that for a polynomial $F$, we have $F(h) = 0$ for all $h \in H$ if and only if $F = z_H t$ for some polynomial $t$. + +Finally both conditions (a) and (b) are equivalent to a single equation (c) if we let more randomness to come into play. This is: + +(c) Let $\alpha$ be a random field element. There is a polynomial $t$ such that +$$ +\begin{aligned} +z_H t = &a q_L + b q_R + a b q_M + c q_O + q_C + pi \\ + &+ \alpha(gz' - fz) \\ + &+ \alpha^2(z-1)L_1 \\ +\end{aligned} +$$ + +This last step is not obvious. You can check the paper to see the proof. Anyway, this is the equation you'll recognize below in the description of the protocol. + +Randomness is a delicate matter and an important part of the protocol is where it comes from, who chooses it and when they choose it. Check out the protocol to see how it works. 
+ diff --git a/provers/plonk/Cargo.toml b/provers/plonk/Cargo.toml new file mode 100644 index 000000000..fe24503d5 --- /dev/null +++ b/provers/plonk/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "lambdaworks-plonk" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +lambdaworks-math.workspace = true +lambdaworks-crypto.workspace = true +serde = "1.0" +serde_json = "1.0" diff --git a/provers/plonk/src/constraint_system/conditional.rs b/provers/plonk/src/constraint_system/conditional.rs new file mode 100644 index 000000000..fd4e37e38 --- /dev/null +++ b/provers/plonk/src/constraint_system/conditional.rs @@ -0,0 +1,131 @@ +use lambdaworks_math::field::{element::FieldElement, traits::IsField}; + +use super::{Constraint, ConstraintSystem, ConstraintType, Variable}; + +impl ConstraintSystem +where + F: IsField, +{ + /// Adds a constraint to enforce that `v1` is equal to `v2`. + pub fn assert_eq(&mut self, v1: &Variable, v2: &Variable) { + self.add_constraint(Constraint { + constraint_type: ConstraintType { + ql: FieldElement::one(), + qr: -FieldElement::one(), + qm: FieldElement::zero(), + qo: FieldElement::zero(), + qc: FieldElement::zero(), + }, + l: *v1, + r: *v2, + o: self.null_variable(), + hint: None, + }); + } + + /// Creates a new variable `w` constrained to be `v1` in case + /// `boolean_condition` is `1` and `v2` otherwise. + pub fn if_else( + &mut self, + boolean_condition: &Variable, + v1: &Variable, + v2: &Variable, + ) -> Variable { + let not_boolean_condition = self.not(boolean_condition); + let if_branch = self.mul(v1, boolean_condition); + let else_branch = self.mul(v2, ¬_boolean_condition); + self.add(&if_branch, &else_branch) + } + + /// Creates a new variable `w` constrained to be `v1` in case + /// `condition` is not zero and `v2` otherwise. 
+ pub fn if_nonzero_else( + &mut self, + condition: &Variable, + v1: &Variable, + v2: &Variable, + ) -> Variable { + let (is_zero, _) = self.inv(condition); + self.if_else(&is_zero, v2, v1) + } +} + +#[cfg(test)] +mod tests { + use std::collections::HashMap; + + use lambdaworks_math::field::{ + element::FieldElement as FE, fields::u64_prime_field::U64PrimeField, + }; + + use crate::constraint_system::ConstraintSystem; + + #[test] + fn test_assert_eq_1() { + let system = &mut ConstraintSystem::>::new(); + + let v = system.new_variable(); + let w = system.new_variable(); + let z = system.mul(&v, &w); + let output = system.new_variable(); + system.assert_eq(&z, &output); + + let inputs = HashMap::from([(v, FE::from(2)), (w, FE::from(2).inv())]); + + let assignments = system.solve(inputs).unwrap(); + assert_eq!(assignments.get(&output).unwrap(), &FE::one()); + } + + #[test] + fn test_assert_eq_2() { + let system = &mut ConstraintSystem::>::new(); + + let v = system.new_variable(); + let w = system.new_variable(); + let z = system.mul(&v, &w); + let output = system.new_variable(); + system.assert_eq(&z, &output); + + let inputs = HashMap::from([(v, FE::from(2)), (w, FE::from(2)), (output, FE::from(1))]); + + let _assignments = system.solve(inputs).unwrap_err(); + } + + #[test] + fn test_if_nonzero_else_1() { + let system = &mut ConstraintSystem::>::new(); + + let v = system.new_variable(); + let v2 = system.mul(&v, &v); + let v4 = system.mul(&v2, &v2); + let w = system.add_constant(&v4, -FE::one()); + let output = system.if_nonzero_else(&w, &v, &v2); + + let inputs = HashMap::from([(v, FE::from(256))]); + + let assignments = system.solve(inputs).unwrap(); + assert_eq!( + assignments.get(&output).unwrap(), + assignments.get(&v2).unwrap() + ); + } + + #[test] + fn test_if_nonzero_else_2() { + let system = &mut ConstraintSystem::>::new(); + + let v = system.new_variable(); + let v2 = system.mul(&v, &v); + let v4 = system.mul(&v2, &v2); + let w = system.add_constant(&v4, 
-FE::one()); + let output = system.if_nonzero_else(&w, &v, &v2); + + let inputs = HashMap::from([(v, FE::from(255))]); + + let assignments = system.solve(inputs).unwrap(); + assert_eq!( + assignments.get(&output).unwrap(), + assignments.get(&v).unwrap() + ); + } +} diff --git a/provers/plonk/src/constraint_system/errors.rs b/provers/plonk/src/constraint_system/errors.rs new file mode 100644 index 000000000..d091fc2cf --- /dev/null +++ b/provers/plonk/src/constraint_system/errors.rs @@ -0,0 +1,5 @@ +#[derive(Debug, PartialEq, Eq)] +pub enum SolverError { + InconsistentSystem, + UnableToSolve, +} diff --git a/provers/plonk/src/constraint_system/examples/mimc.rs b/provers/plonk/src/constraint_system/examples/mimc.rs new file mode 100644 index 000000000..96fde9ca3 --- /dev/null +++ b/provers/plonk/src/constraint_system/examples/mimc.rs @@ -0,0 +1,176 @@ +use lambdaworks_math::field::{element::FieldElement as FE, traits::IsField}; + +use crate::constraint_system::{ConstraintSystem, Variable}; + +/// The MIMC hash function. 
+pub fn mimc( + system: &mut ConstraintSystem, + coefficients: &[FE], + data: &[Variable], +) -> Variable { + let mut h = system.new_constant(FE::zero()); + + for item in data.iter() { + let mut x = *item; + for c in coefficients.iter() { + // x = (x + h + c) ** 5 + x = system.linear_combination(&x, FE::one(), &h, FE::one(), c.clone(), None); + let x_pow_2 = system.mul(&x, &x); + let x_pow_4 = system.mul(&x_pow_2, &x_pow_2); + x = system.mul(&x_pow_4, &x); + } + // h = x + 2h + item + h = system.linear_combination(&x, FE::one(), &h, FE::from(2), FE::zero(), None); + h = system.add(&h, item); + } + h +} + +#[cfg(test)] +pub mod tests { + use std::collections::HashMap; + + use lambdaworks_math::{ + elliptic_curve::short_weierstrass::curves::bls12_381::default_types::FrField, + field::element::FieldElement as FE, + }; + + use crate::constraint_system::{examples::mimc::mimc, ConstraintSystem}; + + #[test] + fn test_mimc() { + let coefficients = vec![ + "1dbfc7763d69ca7d15701422f37bc6692bd01ebc4da42360f81f9adb4a91b01a", + "4fd2cddd334dab1c4005161c290f25a0e18d4175ecfa898b17095d8ec2dd344a", + "9cc76e9b37ba649b0accb508950d834af091f3d687c208d9013e1685075f092", + "16472c2e925fbba0fad047c428a4e8e4801414975e9841d5518b57fbcf26dde1", + "1c2e148c40ea201b748bee72845b349bfa4a4497837af0d569ae47afc6e4243", + "705ea7625cbcb5daf4d3dc5d0322e7b3adbe32227dc52234035881407825dbaf", + "1272efa088fdd0c941712554929ee2bf4e298fce57337dda8f4d704a8bdec1ea", + "4f966f7b066b2d838afab7b99705b1fbecff809f083be8a03ea1a656be14d72a", + "283392b9145c98fc9680ee035816761cb79155557f0b302511a928c221b04c03", + "430a47a5110d6ee4da087ee3291a217f7afba21d696eb74de6ce41cf50aeeff4", + "1c9fbf2d5b15f5b4b9aaa8dfb452a310b6fa3de7b2b7c68260f8e4aff63840ef", + "49c756d15bbf811f532811dba19f5fda9df678bcdd4017024ef4daded412af7d", + "3d6d63a3302df941979292e4be9a85f9a960698ce9a2e5d430423f4adf7a9bb8", + "5f6c2da1c738096eaac7763afc219965955b33e619ce5679c3f5d3aef1792b0a", + 
"32d630538e47bf4f8968170577a08cb1b26864879c86dafb652cce5068bdb5aa", + "2eb8b2a5593fdef777738374339441e112704f378f7cca12d4146d30a005b96", + "123313cced613293c40586b110f8e4244cd67cc4380c8f5df4ec60f42216ce28", + "42d1c99dccb35f9afa170ee24eb146903819160985f2460d7785ac4381ca037c", + "35375cf9debbeba36a0ed9286c67a18bd2112dc028387b905b36c23dce8c4926", + "67e693adf50e0e16fa03d5f9481d71ed0f63ed4527e080941d1ba0473c18bcc3", + "1d5f6a82f699df8c7fff5b5f90047128ead7923635c92a4849ff28689b6c7258", + "372a3d44e73aae9443ca680956bcd23dbdd5f790e0c5cfa45a0fcfb9ee920144", + "630b2c9009da6417963e8d45ae92e59322746e545e04f026004a2f76c12422f5", + "33269ebd4d0f0a2874a217899b11a13361d262c1be48f2a46e6b132f897a5ac4", + "394d93f60615db568325c284dd916d735072cb57b6cd2a0072d976154d8a3eea", + "14e83ce42e31effc8be6e0119ecc4157c1c44206e159aff0761e92a945aa0591", + "3495919dabe2a35059ef2e1802ae59992fd7b3a14786378ac9f622b907c6da55", + "5e88df9396c526cd97c00d7e1865a2175cf44f5271c85bad098d14013238df41", + "925666e8a081d7b9f6b74ed57bd8e41533c20d7715b0bb47fabca5a465c4019", + "332e4e5ff2e5d1afaaf9ef551934b1006ba305f26b5b35940e71605a5ebb5f56", + "3462e730e81f90ddf9bb1046abaca984656932c13d1f00c387181c3c9aa43576", + "65bad101fa269d55e51bbf694f5541225e26986350b4165ede5a7e1232355a69", + "6e66ec021919cde6932d3b0d4c2c63076f0da7e33b3af529548304096d127502", + "4c609941ec5da50d43b8d6d7d45fdd4faa8bb69929fc3337ddfc1bee29f7b94", + "127f12060eb1a416ee0d304c538e094a13eb18310a2ecfd0fa81cce82a59e43d", + "e247806a33437f19022c6958e51a172f6fd58853ef95d2ea3f8123ce9c2a399", + "361564af11cea08fdc3afd9bd53471561356ad5b62e762c7d6023fbb12d5b7b", + "6e12938c2d2d52577956a23d5df8a8e56d8e7a5bdfcc9cd3330835c9a865608b", + "2c65d8fca4105323322504d653328c6692137481e686f256c3acf98b8888edd", + "217451f2b930057065d940024678dabf1525e8375522a23da9186255df7514fc", + "b2dddc8994767c7d3632cc7bc089becf8ef3b65540fb4709b8cc78ba12b044b", + "5331126da252555886cd62e62bf8fb25c1040470bc827734f516c2f0c90fff3f", + 
"317a581c6091951f08e8580adc43c1b02729900d2acb2de27e0f6b034b7d8c56", + "6c6741993eb1d5bc90edd8ac667037865ea3e9c0788d3c319739a8bcc0893ba9", + "54420b8489fa8145c279c03817315b31bc39445306be7f48fcba9a46c9f4f3b2", + "2059da76bbb25f44687caae9f61e4afb811bf899a3aae060751049f7d21dd606", + "6a30ab452ec8fe1e76c742ab33ec57448512ff27385a0a2bac70a1686d573570", + "5416c3b67ec815aa7481f04a9b8dbde374786caea7d3f0ba4a98f326efec02a1", + "4b32a6c01df4fdf6f2cc3bacec09b008ccc5c3644b69139c4346a3e75fab3f4b", + "2e91572a13a6baf97560b43b5b862aebd8b7d95c0fda9c097d823cc9ef0599e", + "18d4e26bbbdfa70d96ed89322834c8b1d36a3b3d373e9be7cfa588a8b5c0287b", + "3d3f818cd10fcf2d1fe9ef125dfbe112e8298eca96f58b0b86f42608b976d165", + "31e7074392297131067adb72832e19c1e271d312c551d7e4f40b441f942da24f", + "245ddf2de52031410d4171e67579a57f7866bb3ae20a4525f82505d050a86281", + "289673cb2bef13266ce3f7179e624b2e383d24a45ef6c375ba998e3fe9286a36", + "3c59fba0c6311941376d9b0280c32e726d0711a734999892707246ba7b2bd32", + "144c00621ab41c0c0f354ef520654c0150d61f502b7d923b9822a1a33294cab1", + "68c6a95568f6ed64ff72c30387d7606d072448ecc708997317c6db6cf515cbaf", + "662fe152dc7461f350a0a8c9286fa3d635ff00931d3a296e358345595f72ed3c", + "217b043aadd7058a7e9270dc0a2f571a8d1ccd116297b85823de86d173e54321", + "68d4303e7691e3a4795db36ea36432f8f1075438e9b0f8d1fb5999dfd4974d38", + "1e26115ea5e4c4f066b84107cd8f6fce5792d77dda305a01790a54f3e234d210", + "6e1bd58127f00ccc79c3609843b1ad75de0527f21df9eacfe29ab4c563a67753", + "5a4aad3966fd75111f70775b9fa62a6d1c18702b26fe5851c83efa2b10954c92", + "71c059faec1533beacad247017b29980008f0d937370301ee2401018fc2aa7da", + "2b625e82f540d4603233baec3d48d81d9d855962b50771c6d5df82012044e896", + "47e22b67d921cd1626e262ff3c739c646696e6336f1aedeb95881c62b511268b", + "286509b96b3aa4a9d101a53e83b1b25fdd76dc3c00052c9126b8200d1449834e", + "476bb19f615e3a38389ead4b38e8a61665c089682e7f7f46830d211db2616135", + "67a0f1036c1628ed81e80f2493dbc5100b736843bb3a0d36f67d2b2dce99a192", + 
"956eedcce3f1bb98cc45a3ad88ab894ddb3f7e775a11b961698f73c8381e07c", + "63b48515137ba347cfff4389958351d07be7f13ee7187d4d5902d085637ee7bf", + "3ff35869606dfa185b81adc1465fc268a4f481f75562aca9e4b46c00a77ec6c2", + "729558bf05bd766305ddebb83018c6a52916acf31fd71085bd2165515bccaa86", + "557fce386beeea241a2b8bb4fbb47cec057e235ca733fca67935761e140c61b3", + "7f5af6d793912f4649026b8e7c55bafb8c14c003b296afdd2924c4540df0f45", + "34ae79f5d988f866842080049ee7af47c48a7f2c1466638f8800259b4f2af2fb", + "27fffd50aeb4aeac31469860bb68f2673d176f334f084440b8d806534f1d4698", + "124f077ee1466fd7d3dc1e15b460663820dfb1cf542988480333d260f1ead81f", + "12ad6f35913b3a56083aee7ee7e4a489ad73c400c030b2befa1cbc20313e359f", + "3befad3a0b8f4debf05a376cd38129e0c87d7b446443611252269bd6f0206da0", + "60974721e0b87c5dc35408f791d7843feed7f63cd5975a661ed67be0bffd343d", + "4db00887ffb9981dba5da03142d103d18096731637be4bdf1831a261eb4abc1c", + "3449ca4e443a46c14719d5771d05701bbbc4db571a3d7770240c3bc91c020dcb", + "10e2e709f73f334e5f1373567b95d5e3edcf807f613826ea7120044c8444556f", + "5d767c67116c8a0b388f24ac74212190be52a295c46cde008fdb8539ee58a49", + "24616115f5f6421892eccc479da1b684c6b525bc1d3ff3cc95727863a2bc035d", + "1f3973d80f425cf3e02e44930a273219df15dc4cb04c32ee086bccdfa6dc312", + "338900eb90ef72de7560c97f7d8e64a68114e9ee696c0141fb6a922db16353e0", + "8a621008ece8b2cad60e9cf048cb4cb8eb95e7a7c9d517ebaf165fec3387fc1", + "3b3472a80d728abd9758e42fd22a478ebcd08b59ffa3c5e628e9e789a71a82a9", + "167848c58dddcac256afdf24a93e12829f611534cc437bba34c774241cfc1812", + "18c263472f9e8f2f262f6a572e33723761bb322cdc021b2cb4b136b0b74db77d", + "1a54a00df68e3d7ec52e62b61c624b0b6951031d982315a46c444eb55347b669", + "323e73ff092080d3d3c326c037bc64bf3c5553af4817447155cac913ee16232", + "4b16f8918636214e2483bd6c0cac7ce1755891c8044b6a5a5848f8044382c9b4", + "55da1b7e81416386c36dd95b752c15142b1225c39f88d269cca7cae381acabd8", + "dbfbbf19841c1792826f69ad92b862e3800af884a9cc2166c43a8b02b64eb16", + 
"37af211973056b0ee14d5776101e03e20360924d488bdee58b840a9cc65b530a", + "33a2cf480bbaabc0529bd29b5ead59325ff6eb4eb99b83f8e4e52b8bdd8d8ceb", + "10fe5117d64559e99e3bc90440f1d0c87ee1cbc7d14cbf524cc6e25c54291fcb", + "71408145cdaa0a727a889eff3586f0755d76abf8e157ad07fb199cf1444cee49", + "305f5892133b16e865cd1bcd3ca96f39a552e9e24ce724a6679d53fb4d421de1", + "3792c249ab22a410bd9765026d09c4975767a364ed4ce8cda5c739d413538f4d", + "54051fb18e4577eef62592a030adedcc11b22ade24a32e76f8bf68ef96039c22", + "3562918322d14865722d461ee61e323c3988de5496d311e5e3b752a173d0f524", + "4966ed088e26f77208302acb1977596cfed466aa7021ee9fa455a1568b9cc8ee", + "217e57ad60015c9a4c3525239f1226f1a12b00dc220c2a3476edb9d6e33718b", + "65f67c734e6dd080b1490d748c4ca54c3be080f68ff0983449d5de28dadad1c", + "3db4cc8fd2f2f8e1478ad41b7c1e5c5ef19301bb87f44b49b378fe3e7e3a2264", + "3261a8cb17034b0c32bc98cc77513ad895233f70e86d8ff6df57485ad194afc6", + ]; + let coefficients: Vec<_> = coefficients + .iter() + .map(|hex_str| FE::from_hex(hex_str).unwrap()) + .collect(); + + let system = &mut ConstraintSystem::::new(); + let data = vec![system.new_variable()]; + let output = mimc(system, &coefficients, &data); + + let input_value = + FE::from_hex("23a950068dd3d1e21cee48e7919be7ae32cdef70311fc486336ea9d4b5042535") + .unwrap(); + let expected_output_value = + FE::from_hex("136ff6a4e5fc9a2103cc54252d93c3be07f781dc4405acd9447bee65cfdc7c14") + .unwrap(); + + let inputs = HashMap::from([(data[0], input_value)]); + let assignments = system.solve(inputs).unwrap(); + + assert_eq!(assignments.get(&output).unwrap(), &expected_output_value); + } +} diff --git a/provers/plonk/src/constraint_system/examples/mod.rs b/provers/plonk/src/constraint_system/examples/mod.rs new file mode 100644 index 000000000..f5eea98c7 --- /dev/null +++ b/provers/plonk/src/constraint_system/examples/mod.rs @@ -0,0 +1,2 @@ +pub mod mimc; +pub mod pow; diff --git a/provers/plonk/src/constraint_system/examples/pow.rs 
b/provers/plonk/src/constraint_system/examples/pow.rs new file mode 100644 index 000000000..b1d298c59 --- /dev/null +++ b/provers/plonk/src/constraint_system/examples/pow.rs @@ -0,0 +1,46 @@ +use lambdaworks_math::field::{element::FieldElement as FE, traits::IsPrimeField}; + +use crate::constraint_system::{ConstraintSystem, Variable}; + +/// A square and multiply implementation. +pub fn pow( + system: &mut ConstraintSystem, + base: Variable, + exponent: Variable, +) -> Variable { + let exponent_bits = system.new_u32(&exponent); + let mut result = system.new_constant(FE::one()); + + assert_eq!(exponent_bits.len(), 32); + for (i, bit) in exponent_bits.iter().enumerate() { + if i != 0 { + result = system.mul(&result, &result); + } + let result_times_base = system.mul(&result, &base); + result = system.if_else(bit, &result_times_base, &result); + } + result +} + +#[cfg(test)] +mod tests { + use std::collections::HashMap; + + use lambdaworks_math::field::fields::u64_prime_field::U64PrimeField; + + use crate::constraint_system::{examples::pow::pow, ConstraintSystem}; + use lambdaworks_math::field::element::FieldElement as FE; + + #[test] + fn test_pow() { + let system = &mut ConstraintSystem::>::new(); + + let base = system.new_variable(); + let exponent = system.new_variable(); + let result = pow(system, base, exponent); + let inputs = HashMap::from([(base, FE::from(3)), (exponent, FE::from(10))]); + + let assignments = system.solve(inputs).unwrap(); + assert_eq!(assignments.get(&result).unwrap(), &FE::from(59049)); + } +} diff --git a/provers/plonk/src/constraint_system/mod.rs b/provers/plonk/src/constraint_system/mod.rs new file mode 100644 index 000000000..059aac770 --- /dev/null +++ b/provers/plonk/src/constraint_system/mod.rs @@ -0,0 +1,351 @@ +pub mod conditional; +pub mod errors; +pub mod examples; +pub mod operations; +pub mod solver; +pub mod types; + +use std::collections::HashMap; + +use lambdaworks_math::field::{element::FieldElement, traits::IsField}; + 
+/// A constraint that enforces relations between variables. If `ConstraintType` +/// represents (Q_L, Q_R, Q_M, Q_O, Q_C), then the constraint enforces that +/// `a Q_L + b Q_R + a b Q_M + c Q_O + Q_C = 0` where `a`, `b`, and `c` are the +/// values taken by the variables `l`, `r` and `o` respectively. +#[derive(Clone)] +pub struct Constraint { + constraint_type: ConstraintType, + hint: Option>, + l: Variable, + r: Variable, + o: Variable, +} + +/// A `ConstraintType` represents a type of gate and is determined by the values +/// of the coefficients Q_L, Q_R, Q_M, Q_O, Q_C +#[derive(Clone)] +struct ConstraintType { + ql: FieldElement, + qr: FieldElement, + qm: FieldElement, + qo: FieldElement, + qc: FieldElement, +} + +/// A `Column` is either `L`, `R` or `O`. It represents the role played by a +/// variable in a constraint. +#[derive(Clone, PartialEq, Eq, Hash)] +pub enum Column { + L, + R, + O, +} + +/// A `Hint` is used to insert values to the solver. This is helpful when a +/// constraint is hard to solve but easy to check. +#[derive(Clone)] +pub struct Hint { + function: fn(&FieldElement) -> FieldElement, + input: Column, + output: Column, +} + +/// Represents a variable as an ID. +pub type Variable = usize; + +/// A collection of variables and constraints that encodes correct executions +/// of a program. Variables can be of two types: Public or private. +pub struct ConstraintSystem { + num_variables: usize, + public_input_variables: Vec, + constraints: Vec>, +} + +impl ConstraintSystem +where + F: IsField, +{ + /// Returns a new empty constraint system. + pub fn new() -> Self { + Self { + num_variables: 0, + public_input_variables: Vec::new(), + constraints: Vec::new(), + } + } + + /// Adds a constraint to the system. + pub fn add_constraint(&mut self, constraint: Constraint) { + self.constraints.push(constraint); + } + + /// Returns a null variable to be used as a placeholder + /// in constraints. 
+ pub fn null_variable(&self) -> Variable { + 0 + } + + /// Creates a new variable. + pub fn new_variable(&mut self) -> Variable { + let variable_id = self.num_variables; + self.num_variables += 1; + variable_id + } + + /// Creates a new public variable. + pub fn new_public_input(&mut self) -> Variable { + let new_variable = self.new_variable(); + self.public_input_variables.push(new_variable); + new_variable + } + + /// A dummy constraint meant to be used as padding. + fn padding_constraint(&self) -> Constraint { + let zero = FieldElement::zero(); + Constraint { + constraint_type: ConstraintType { + ql: zero.clone(), + qr: zero.clone(), + qm: zero.clone(), + qo: zero.clone(), + qc: zero, + }, + hint: None, + l: self.null_variable(), + r: self.null_variable(), + o: self.null_variable(), + } + } + + /// Returns the public input header used in PLONK to prove the usage of the + /// public input values. + fn public_input_header(&self) -> Vec> { + let zero = FieldElement::zero(); + let minus_one = -FieldElement::one(); + let mut public_input_constraints = Vec::new(); + for public_input in self.public_input_variables.iter() { + let public_input_constraint = Constraint { + constraint_type: ConstraintType { + ql: minus_one.clone(), + qr: zero.clone(), + qm: zero.clone(), + qo: zero.clone(), + qc: zero.clone(), + }, + hint: None, + l: *public_input, + r: self.null_variable(), + o: self.null_variable(), + }; + public_input_constraints.push(public_input_constraint); + } + public_input_constraints + } + + /// Returns the `LRO` and `Q` matrices. Each matrix has one row per constraint. + /// The `LRO` matrix has 3 columns with the values of the variables IDs of every + /// constraint. The `Q` matrix has 5 columns with the coefficients of the + /// constraint types. 
+ /// Their layout is: + /// ####################### + /// # public input header # + /// ####################### + /// # circuit constraints # + /// ####################### + /// # padding # + /// ####################### + pub fn to_matrices(&self) -> (Vec, Vec>) { + let header = self.public_input_header(); + let body = &self.constraints; + let total_length = (header.len() + body.len()).next_power_of_two(); + let pad = vec![self.padding_constraint(); total_length - header.len() - body.len()]; + + let mut full_constraints = header; + full_constraints.extend_from_slice(body); + full_constraints.extend_from_slice(&pad); + + let n = full_constraints.len(); + + let mut lro = vec![self.null_variable(); n * 3]; + // Make a single vector with | l_1 .. l_m | r_1 .. r_m | o_1 .. o_m | concatenated. + for (index, constraint) in full_constraints.iter().enumerate() { + lro[index] = constraint.l; + lro[index + n] = constraint.r; + lro[index + n * 2] = constraint.o; + } + + let mut q = vec![FieldElement::zero(); 5 * n]; + for (index, constraint) in full_constraints.iter().enumerate() { + let ct = &constraint.constraint_type; + q[index] = ct.ql.clone(); + q[index + n] = ct.qr.clone(); + q[index + 2 * n] = ct.qm.clone(); + q[index + 3 * n] = ct.qo.clone(); + q[index + 4 * n] = ct.qc.clone(); + } + (lro, q) + } + + /// This method filters the `values` hashmap to return the list of values + /// corresponding to the public variables + pub fn public_input_values( + &self, + values: &HashMap>, + ) -> Vec> { + let mut public_inputs = Vec::new(); + for key in &self.public_input_variables { + if let Some(value) = values.get(key) { + public_inputs.push(value.clone()); + } + } + public_inputs + } +} + +impl Default for ConstraintSystem { + fn default() -> Self { + Self::new() + } +} + +/// This method takes the `LRO` matrix and computes the permutation used in PLONK to +/// build the copy constraint polynomial. 
+pub fn get_permutation(lro: &[Variable]) -> Vec { + // For each variable store the indexes where it appears. + let mut last_usage: HashMap = HashMap::new(); + let mut permutation = vec![0_usize; lro.len()]; + + for _ in 0..2 { + for (index, variable) in lro.iter().enumerate() { + if last_usage.contains_key(variable) { + permutation[index] = last_usage[variable]; + } + last_usage.insert(*variable, index); + } + } + + permutation +} + +#[cfg(test)] +mod tests { + use crate::{ + prover::Prover, + setup::{setup, CommonPreprocessedInput, Witness}, + test_utils::utils::{test_srs, TestRandomFieldGenerator, KZG, ORDER_R_MINUS_1_ROOT_UNITY}, + verifier::Verifier, + }; + + use super::*; + use lambdaworks_math::{ + elliptic_curve::short_weierstrass::curves::bls12_381::default_types::FrField, + field::{element::FieldElement as FE, fields::u64_prime_field::U64PrimeField}, + }; + + /* + Program: + v0 = 1 + v1 = 2 + v2 = v0 + v1 + v3 = v1 + v0 + v4 = v2 + v3 + + Variables: + L R O + 0 1 2 + 1 0 3 + 2 3 4 + 0 0 0 --> padding to next power of two + + LRO : 0 1 2 0 1 0 3 0 2 3 4 0 + Permutation: 11 4 8 0 1 3 9 5 2 6 10 7 + + */ + #[test] + fn test_permutation() { + let system = &mut ConstraintSystem::>::new(); + + let v0 = system.new_variable(); + let v1 = system.new_variable(); + + let v2 = system.add(&v0, &v1); + let v3 = system.add(&v1, &v0); + system.add(&v2, &v3); + + let (lro, _) = system.to_matrices(); + + let permutation = get_permutation(&lro); + let expected = vec![11, 4, 8, 0, 1, 3, 9, 5, 2, 6, 10, 7]; + assert_eq!(expected, permutation); + } + + #[test] + fn test_prove_simple_program_1() { + // Program + let system = &mut ConstraintSystem::::new(); + + let e = system.new_variable(); + let x = system.new_public_input(); + let y = system.new_public_input(); + + let z = system.mul(&x, &e); + system.assert_eq(&y, &z); + + // Common preprocessed input + let common_preprocessed_input = + CommonPreprocessedInput::from_constraint_system(system, &ORDER_R_MINUS_1_ROOT_UNITY); + + 
// Setup + let srs = test_srs(common_preprocessed_input.n); + let kzg = KZG::new(srs); + let verifying_key = setup(&common_preprocessed_input, &kzg); + + // Prover: + // 1. Generate public inputs and witness + let inputs = HashMap::from([(x, FE::from(4)), (e, FE::from(3))]); + let assignments = system.solve(inputs).unwrap(); + let public_inputs = system.public_input_values(&assignments); + let witness = Witness::new(assignments, system); + + // 2. Generate proof + let random_generator = TestRandomFieldGenerator {}; + let prover = Prover::new(kzg.clone(), random_generator); + let proof = prover.prove( + &witness, + &public_inputs, + &common_preprocessed_input, + &verifying_key, + ); + + // Verifier + let verifier = Verifier::new(kzg); + assert!(verifier.verify( + &proof, + &public_inputs, + &common_preprocessed_input, + &verifying_key + )); + } + + #[test] + fn test_fibonacci() { + let system = &mut ConstraintSystem::>::new(); + + let x0_initial = system.new_variable(); + let x1_initial = system.new_variable(); + let mut x0 = x0_initial; + let mut x1 = x1_initial; + + for _ in 2..10001 { + let x2 = system.add(&x1, &x0); + (x0, x1) = (x1, x2); + } + + let inputs = HashMap::from([(x0_initial, FE::from(0)), (x1_initial, FE::from(1))]); + + let expected_output = FE::from(19257); + let assignments = system.solve(inputs).unwrap(); + assert_eq!(assignments.get(&x1).unwrap(), &expected_output); + } +} diff --git a/provers/plonk/src/constraint_system/operations.rs b/provers/plonk/src/constraint_system/operations.rs new file mode 100644 index 000000000..756dd2f44 --- /dev/null +++ b/provers/plonk/src/constraint_system/operations.rs @@ -0,0 +1,342 @@ +use lambdaworks_math::field::{element::FieldElement as FE, traits::IsField}; + +use super::{Column, Constraint, ConstraintSystem, ConstraintType, Hint, Variable}; + +impl ConstraintSystem +where + F: IsField, +{ + /// Creates a new variable `w` constrained to be equal to `c1 * v1 + c2 * v2 + b`. 
+ /// Optionally a hint can be provided to insert values in `v1`, `v2` or `w`. To do + /// so use the `L`, `R`, and `O` input/output columns of the hint to refer to `v1`, + /// `v2` and `w` respectively. + pub fn linear_combination( + &mut self, + v1: &Variable, + c1: FE, + v2: &Variable, + c2: FE, + b: FE, + hint: Option>, + ) -> Variable { + let result = self.new_variable(); + + self.add_constraint(Constraint { + constraint_type: ConstraintType { + ql: c1, + qr: c2, + qm: FE::zero(), + qo: -FE::one(), + qc: b, + }, + l: *v1, + r: *v2, + o: result, + hint, + }); + result + } + + /// Creates a new variable `w` constrained to be equal to `c * v + b`. + /// Optionally a hint can be provided to insert values in `v1`, `v2` or `w`. To do + /// so use the `L`, `R`, and `O` input/output columns of the hint to refer to `v1`, + /// `v2` and `w` respectively. + pub fn linear_function( + &mut self, + v: &Variable, + c: FE, + b: FE, + hint: Option>, + ) -> Variable { + let result = self.new_variable(); + self.add_constraint(Constraint { + constraint_type: ConstraintType { + ql: c, + qr: FE::zero(), + qm: FE::zero(), + qo: -FE::one(), + qc: b, + }, + l: *v, + r: self.null_variable(), + o: result, + hint, + }); + result + } + + /// Creates a new variable `w` constrained to be equal to `v1 + v2`. + pub fn add(&mut self, v1: &Variable, v2: &Variable) -> Variable { + self.linear_combination(v1, FE::one(), v2, FE::one(), FE::zero(), None) + } + + /// Creates a new variable `w` constrained to be equal to `v1 + constant`. + pub fn add_constant(&mut self, v: &Variable, constant: FE) -> Variable { + self.linear_function(v, FE::one(), constant, None) + } + + /// Creates a new variable `w` constrained to be equal to `v1 * v2`. 
+ pub fn mul(&mut self, v1: &Variable, v2: &Variable) -> Variable { + let result = self.new_variable(); + self.add_constraint(Constraint { + constraint_type: ConstraintType { + ql: FE::zero(), + qr: FE::zero(), + qm: FE::one(), + qo: -FE::one(), + qc: FE::zero(), + }, + l: *v1, + r: *v2, + o: result, + hint: None, + }); + result + } + + /// Creates a new variable `w` constrained to be equal to `v1 / v2`. + pub fn div(&mut self, v1: &Variable, v2: &Variable) -> Variable { + // TODO: check 0.div(0) does not compile + let result = self.new_variable(); + self.add_constraint(Constraint { + constraint_type: ConstraintType { + ql: FE::zero(), + qr: FE::zero(), + qm: FE::one(), + qo: -FE::one(), + qc: FE::zero(), + }, + l: result, + r: *v2, + o: *v1, + hint: None, + }); + result + } + + /// Creates two new variables `is_zero` and `v_inverse`. The former is constrained + /// to be a boolean value holding `1` if `v` is zero and `0` otherwise. The latter + /// is constrained to be `v^{-1}` when `v` is not zero and equal to `0` otherwise. 
+ pub fn inv(&mut self, v: &Variable) -> (Variable, Variable) { + let is_zero = self.new_variable(); + let v_inverse = self.new_variable(); + let hint = Some(Hint { + function: |v: &FE| { + if *v == FE::zero() { + FE::one() + } else { + FE::zero() + } + }, + input: Column::L, + output: Column::R, + }); + // v * z == 0 + self.add_constraint(Constraint { + constraint_type: ConstraintType { + ql: FE::zero(), + qr: FE::zero(), + qm: FE::one(), + qo: FE::zero(), + qc: FE::zero(), + }, + l: *v, + r: is_zero, + o: self.null_variable(), + hint, + }); + // v * w + z == 1 + self.add_constraint(Constraint { + constraint_type: ConstraintType { + ql: FE::zero(), + qr: FE::zero(), + qm: FE::one(), + qo: FE::one(), + qc: -FE::one(), + }, + l: *v, + r: v_inverse, // w + o: is_zero, // z + hint: Some(Hint { + function: |v: &FE| { + if *v == FE::zero() { + FE::zero() + } else { + v.inv() + } + }, + input: Column::L, + output: Column::R, + }), + }); + (is_zero, v_inverse) + } + + /// Returns a new variable `w` constrained to satisfy `w = 1 - v`. When `v` is boolean + /// this is the `not` operator. 
+ pub fn not(&mut self, v: &Variable) -> Variable { + let result = self.new_variable(); + self.add_constraint(Constraint { + constraint_type: ConstraintType { + ql: FE::one(), + qr: FE::one(), + qm: FE::zero(), + qo: FE::zero(), + qc: -FE::one(), + }, + l: *v, + r: result, + o: self.null_variable(), + hint: None, + }); + result + } +} + +#[cfg(test)] +mod tests { + use std::collections::HashMap; + + use super::*; + use lambdaworks_math::field::{ + element::FieldElement as FE, fields::u64_prime_field::U64PrimeField, + }; + + #[test] + fn test_linear_combination() { + let system = &mut ConstraintSystem::>::new(); + + let v1 = system.new_variable(); + let c1 = FE::from(15); + let v2 = system.new_variable(); + let c2 = -FE::from(7); + let b = FE::from(99); + let result = system.linear_combination(&v1, c1, &v2, c2, b, None); + + let x = FE::from(17); + let y = FE::from(29); + + let inputs = HashMap::from([(v1, x), (v2, y)]); + + let assignments = system.solve(inputs).unwrap(); + assert_eq!(assignments.get(&result).unwrap(), &(x * c1 + y * c2 + b)); + } + + #[test] + fn test_linear_function() { + let system = &mut ConstraintSystem::>::new(); + + let v = system.new_variable(); + let c = FE::from(8); + let b = FE::from(109); + let result = system.linear_function(&v, c, b, None); + + let x = FE::from(17); + + let inputs = HashMap::from([(v, x)]); + + let assignments = system.solve(inputs).unwrap(); + assert_eq!(assignments.get(&result).unwrap(), &(x * c + b)); + } + + #[test] + fn test_add() { + let system = &mut ConstraintSystem::>::new(); + + let input1 = system.new_variable(); + let input2 = system.new_variable(); + let result = system.add(&input1, &input2); + + let a = FE::from(3); + let b = FE::from(10); + + let inputs = HashMap::from([(input1, a), (input2, b)]); + + let assignments = system.solve(inputs).unwrap(); + assert_eq!(assignments.get(&result).unwrap(), &(a + b)); + } + + #[test] + fn test_mul() { + let system = &mut ConstraintSystem::>::new(); + + let input1 
= system.new_variable(); + let input2 = system.new_variable(); + let result = system.mul(&input1, &input2); + + let a = FE::from(3); + let b = FE::from(11); + + let inputs = HashMap::from([(input1, a), (input2, b)]); + + let assignments = system.solve(inputs).unwrap(); + assert_eq!(assignments.get(&result).unwrap(), &(a * b)); + } + + #[test] + fn test_div() { + let system = &mut ConstraintSystem::>::new(); + + let input1 = system.new_variable(); + let input2 = system.new_variable(); + let result = system.div(&input1, &input2); + + let a = FE::from(3); + let b = FE::from(11); + + let inputs = HashMap::from([(input1, a), (input2, b)]); + + let assignments = system.solve(inputs).unwrap(); + assert_eq!(assignments.get(&result).unwrap(), &(a / b)); + } + + #[test] + fn test_add_constant() { + let system = &mut ConstraintSystem::>::new(); + + let input1 = system.new_variable(); + let b = FE::from(11); + let result = system.add_constant(&input1, b); + + let a = FE::from(3); + + let inputs = HashMap::from([(input1, a)]); + + let assignments = system.solve(inputs).unwrap(); + assert_eq!(assignments.get(&result).unwrap(), &(a + b)); + } + + #[test] + fn test_not() { + let system = &mut ConstraintSystem::>::new(); + + let boolean = system.new_boolean(); + let result1 = system.not(&boolean); + let result2 = system.not(&result1); + + let inputs = HashMap::from([(boolean, FE::one())]); + + let assignments = system.solve(inputs).unwrap(); + assert_eq!(assignments.get(&result1).unwrap(), &FE::zero()); + assert_eq!(assignments.get(&result2).unwrap(), &FE::one()); + } + + #[test] + fn test_inv() { + let system = &mut ConstraintSystem::>::new(); + + let v = system.new_variable(); + let w = system.new_variable(); + let (v_is_zero, v_inverse) = system.inv(&v); + let (w_is_zero, w_inverse) = system.inv(&w); + + let inputs = HashMap::from([(v, FE::from(2)), (w, FE::from(0))]); + + let assignments = system.solve(inputs).unwrap(); + assert_eq!(assignments.get(&v_inverse).unwrap(), 
&FE::from(2).inv()); + assert_eq!(assignments.get(&v_is_zero).unwrap(), &FE::zero()); + + assert_eq!(assignments.get(&w_inverse).unwrap(), &FE::from(0)); + assert_eq!(assignments.get(&w_is_zero).unwrap(), &FE::one()); + } +} diff --git a/provers/plonk/src/constraint_system/solver.rs b/provers/plonk/src/constraint_system/solver.rs new file mode 100644 index 000000000..1bcf4ca6d --- /dev/null +++ b/provers/plonk/src/constraint_system/solver.rs @@ -0,0 +1,909 @@ +use std::collections::HashMap; + +use lambdaworks_math::field::{element::FieldElement as FE, traits::IsField}; + +use super::{errors::SolverError, Column, Constraint, ConstraintSystem, Variable}; + +/// Finds a solution to the system extending the `assignments` map. It uses the +/// simple strategy of going through all the constraints trying to determine an +/// unkwown value of a variable in terms of known values. It stops when it goes +/// through every constraint and there's nothing else to be solved this way. +/// It returns an error in case there is no such solution or in case this strategy +/// is not enough. 
+impl ConstraintSystem +where + F: IsField, +{ + pub fn solve( + &self, + mut assignments: HashMap>, + ) -> Result>, SolverError> { + loop { + let old_solved = assignments.keys().len(); + for constraint in self.constraints.iter() { + assignments = solve_hint(assignments, constraint); + assignments = solve_constraint(assignments, constraint); + } + if old_solved == assignments.keys().len() { + break; + } + } + + // Check the system is solved + for constraint in self.constraints.iter() { + let a = assignments.get(&constraint.l); + let b = assignments.get(&constraint.r); + let c = assignments.get(&constraint.o); + + match (a, b, c) { + (Some(a), Some(b), Some(c)) => { + let ct = &constraint.constraint_type; + let result = a * &ct.ql + b * &ct.qr + a * b * &ct.qm + c * &ct.qo + &ct.qc; + if result != FE::zero() { + return Err(SolverError::InconsistentSystem); + } + } + _ => return Err(SolverError::UnableToSolve), + } + } + Ok(assignments) + } +} + +fn solve_hint( + mut assignments: HashMap>, + constraint: &Constraint, +) -> HashMap> { + let column_to_variable = |column: &Column| match column { + Column::L => constraint.l, + Column::R => constraint.r, + Column::O => constraint.o, + }; + if let Some(hint) = &constraint.hint { + if !assignments.contains_key(&column_to_variable(&hint.output)) { + if let Some(input) = assignments.get(&column_to_variable(&hint.input)) { + assignments.insert(column_to_variable(&hint.output), (hint.function)(input)); + } + } + } + + assignments +} + +fn solve_constraint( + mut assignments: HashMap>, + constraint: &Constraint, +) -> HashMap> { + let ct = &constraint.constraint_type; + let a = assignments.get(&constraint.l); + let b = assignments.get(&constraint.r); + let c = assignments.get(&constraint.o); + let zero = FE::zero(); + + match ( + (a, b, c), + (ct.ql == zero, ct.qr == zero, ct.qm == zero, ct.qo == zero), + ) { + ((Some(a), Some(b), None), _) => { + if ct.qo != FE::zero() { + let c = -(a * &ct.ql + b * &ct.qr + a * b * &ct.qm + 
&ct.qc) * ct.qo.inv(); + assignments.insert(constraint.o, c); + } + } + ((Some(a), None, Some(c)), _) => { + let denominator = &ct.qr + a * &ct.qm; + if denominator != FE::zero() { + let b = -(a * &ct.ql + c * &ct.qo + &ct.qc) * denominator.inv(); + assignments.insert(constraint.r, b); + } + } + ((None, Some(b), Some(c)), _) => { + let denominator = &ct.ql + b * &ct.qm; + if denominator != FE::zero() { + let a = -(b * &ct.qr + c * &ct.qo + &ct.qc) * denominator.inv(); + assignments.insert(constraint.l, a); + } + } + ((Some(a), None, None), _) => { + let b_coefficient = &ct.qr + a * &ct.qm; + if b_coefficient == FE::zero() && ct.qo != FE::zero() { + let c = -(a * &ct.ql + &ct.qc) * ct.qo.inv(); + assignments.insert(constraint.o, c); + } else if b_coefficient != FE::zero() && ct.qo == FE::zero() { + let b = -(a * &ct.ql + &ct.qc) * b_coefficient.inv(); + assignments.insert(constraint.r, b); + } + } + ((None, Some(b), None), _) => { + let a_coefficient = &ct.ql + b * &ct.qm; + if a_coefficient == FE::zero() && ct.qo != FE::zero() { + let c = -(b * &ct.qr + &ct.qc) * ct.qo.inv(); + assignments.insert(constraint.o, c); + } else if a_coefficient != FE::zero() && ct.qo == FE::zero() { + let a = -(b * &ct.qr + &ct.qc) * a_coefficient.inv(); + assignments.insert(constraint.l, a); + } + } + ((None, None, Some(c)), (false, true, true, _)) => { + let a = -(c * &ct.qo + &ct.qc) * ct.ql.inv(); + assignments.insert(constraint.l, a); + } + ((None, None, Some(c)), (true, false, true, _)) => { + let b = -(c * &ct.qo + &ct.qc) * ct.qr.inv(); + assignments.insert(constraint.r, b); + } + ((None, None, None), (true, true, true, false)) => { + let c = -&ct.qc * ct.qo.inv(); + assignments.insert(constraint.o, c); + } + ((None, None, None), (true, false, true, true)) => { + let b = -&ct.qc * ct.qr.inv(); + assignments.insert(constraint.r, b); + } + ((None, None, None), (false, true, true, true)) => { + let a = -&ct.qc * ct.ql.inv(); + assignments.insert(constraint.l, a); + } + _ => {} + } 
+ assignments +} + +#[cfg(test)] +mod tests { + use std::collections::HashMap; + + use crate::constraint_system::{ + errors::SolverError, Constraint, ConstraintSystem, ConstraintType, + }; + use lambdaworks_math::field::{ + element::FieldElement as FE, fields::u64_prime_field::U64PrimeField, + }; + + #[test] + fn test_case_all_values_are_known() { + let mut system = ConstraintSystem::>::new(); + let a = system.new_variable(); + let b = system.new_variable(); + let c = system.new_variable(); + let constraint = Constraint { + constraint_type: ConstraintType { + ql: FE::one(), + qr: FE::one(), + qm: FE::one(), + qo: -FE::one(), + qc: FE::one(), + }, + hint: None, + l: a, + r: b, + o: c, + }; + system.add_constraint(constraint); + let inputs = HashMap::from([(a, FE::from(2)), (b, FE::from(3)), (c, FE::from(12))]); + system.solve(inputs).unwrap(); + } + + #[test] + fn test_case_b_and_c_are_known() { + let mut system = ConstraintSystem::>::new(); + let a = system.new_variable(); + let b = system.new_variable(); + let c = system.new_variable(); + let constraint = Constraint { + constraint_type: ConstraintType { + ql: FE::one(), + qr: FE::one(), + qm: FE::one(), + qo: -FE::one(), + qc: FE::one(), + }, + hint: None, + l: a, + r: b, + o: c, + }; + system.add_constraint(constraint); + let inputs = HashMap::from([(b, FE::from(3)), (c, FE::from(12))]); + let assignments = system.solve(inputs).unwrap(); + assert_eq!(assignments.get(&a).unwrap(), &FE::from(2)); + } + + #[test] + fn test_case_b_and_c_are_known_but_as_coefficient_is_zero() { + let mut system = ConstraintSystem::>::new(); + let a = system.new_variable(); + let b = system.new_variable(); + let c = system.new_variable(); + let constraint = Constraint { + constraint_type: ConstraintType { + ql: FE::from(3), + qr: FE::one(), + qm: -FE::one(), + qo: -FE::one(), + qc: FE::one(), + }, + hint: None, + l: a, + r: b, + o: c, + }; + system.add_constraint(constraint); + let inputs = HashMap::from([(b, FE::from(3)), (c, 
FE::from(12))]); + assert_eq!( + system.solve(inputs).unwrap_err(), + SolverError::UnableToSolve + ); + } + + #[test] + fn test_case_a_and_c_are_known() { + let mut system = ConstraintSystem::>::new(); + let a = system.new_variable(); + let b = system.new_variable(); + let c = system.new_variable(); + let constraint = Constraint { + constraint_type: ConstraintType { + ql: FE::one(), + qr: FE::one(), + qm: FE::one(), + qo: -FE::one(), + qc: FE::one(), + }, + hint: None, + l: a, + r: b, + o: c, + }; + system.add_constraint(constraint); + let inputs = HashMap::from([(a, FE::from(2)), (c, FE::from(12))]); + let assignments = system.solve(inputs).unwrap(); + assert_eq!(assignments.get(&b).unwrap(), &FE::from(3)); + } + + #[test] + fn test_case_a_and_c_are_known_but_bs_coefficient_is_zero() { + let mut system = ConstraintSystem::>::new(); + let a = system.new_variable(); + let b = system.new_variable(); + let c = system.new_variable(); + let constraint = Constraint { + constraint_type: ConstraintType { + ql: FE::one(), + qr: FE::from(2), + qm: -FE::one(), + qo: FE::one(), + qc: FE::one(), + }, + hint: None, + l: a, + r: b, + o: c, + }; + system.add_constraint(constraint); + let inputs = HashMap::from([(a, FE::from(2)), (c, FE::from(12))]); + assert_eq!( + system.solve(inputs).unwrap_err(), + SolverError::UnableToSolve + ); + } + + #[test] + fn test_case_a_and_b_are_known() { + let mut system = ConstraintSystem::>::new(); + let a = system.new_variable(); + let b = system.new_variable(); + let c = system.new_variable(); + let constraint = Constraint { + constraint_type: ConstraintType { + ql: FE::one(), + qr: FE::one(), + qm: FE::one(), + qo: -FE::one(), + qc: FE::one(), + }, + hint: None, + l: a, + r: b, + o: c, + }; + system.add_constraint(constraint); + let inputs = HashMap::from([(a, FE::from(2)), (b, FE::from(3))]); + let assignments = system.solve(inputs).unwrap(); + assert_eq!(assignments.get(&c).unwrap(), &FE::from(12)); + } + + #[test] + fn 
test_case_a_and_b_are_known_but_cs_coefficient_is_zero() { + let mut system = ConstraintSystem::>::new(); + let a = system.new_variable(); + let b = system.new_variable(); + let c = system.new_variable(); + let constraint = Constraint { + constraint_type: ConstraintType { + ql: FE::one(), + qr: FE::one(), + qm: FE::one(), + qo: FE::zero(), + qc: FE::one(), + }, + hint: None, + l: a, + r: b, + o: c, + }; + system.add_constraint(constraint); + let inputs = HashMap::from([(a, FE::from(2)), (b, FE::from(3))]); + assert_eq!( + system.solve(inputs).unwrap_err(), + SolverError::UnableToSolve + ); + } + + #[test] + fn test_case_only_a_is_known_but_bs_coeffient_is_zero_and_cs_coefficient_is_nonzero() { + let mut system = ConstraintSystem::>::new(); + let a = system.new_variable(); + let b = system.new_variable(); + let c = system.new_variable(); + let constraint1 = Constraint { + constraint_type: ConstraintType { + ql: FE::one(), + qr: -FE::from(2), + qm: FE::one(), + qo: FE::one(), + qc: FE::one(), + }, + hint: None, + l: a, + r: b, + o: c, + }; + let constraint2 = Constraint { + constraint_type: ConstraintType { + ql: FE::one(), + qr: FE::one(), + qm: FE::zero(), + qo: FE::zero(), + qc: FE::zero(), + }, + hint: None, + l: b, + r: c, + o: system.null_variable(), + }; + system.add_constraint(constraint1); + system.add_constraint(constraint2); + let inputs = HashMap::from([(a, FE::from(2))]); + let assignments = system.solve(inputs).unwrap(); + assert_eq!(assignments.get(&b).unwrap(), &FE::from(3)); + assert_eq!(assignments.get(&c).unwrap(), &-FE::from(3)); + } + + #[test] + fn test_case_only_a_is_known_but_bs_coefficient_is_nonzero_and_cs_coeffient_is_zero() { + let mut system = ConstraintSystem::>::new(); + let a = system.new_variable(); + let b = system.new_variable(); + let c = system.new_variable(); + let constraint1 = Constraint { + constraint_type: ConstraintType { + ql: FE::one(), + qr: FE::one(), + qm: FE::one(), + qo: FE::zero(), + qc: -FE::from(5), + }, + hint: 
None, + l: a, + r: b, + o: c, + }; + let constraint2 = Constraint { + constraint_type: ConstraintType { + ql: FE::one(), + qr: FE::one(), + qm: FE::zero(), + qo: FE::zero(), + qc: FE::zero(), + }, + hint: None, + l: b, + r: c, + o: system.null_variable(), + }; + system.add_constraint(constraint1); + system.add_constraint(constraint2); + let inputs = HashMap::from([(a, FE::from(1))]); + let assignments = system.solve(inputs).unwrap(); + assert_eq!(assignments.get(&b).unwrap(), &FE::from(2)); + assert_eq!(assignments.get(&c).unwrap(), &-FE::from(2)); + } + + #[test] + fn test_case_only_a_is_known_but_bs_cofficient_is_zero_and_cs_coeffient_is_zero() { + let mut system = ConstraintSystem::>::new(); + let a = system.new_variable(); + let b = system.new_variable(); + let c = system.new_variable(); + let constraint1 = Constraint { + constraint_type: ConstraintType { + ql: FE::one(), + qr: FE::one(), + qm: -FE::one(), + qo: FE::zero(), + qc: -FE::from(5), + }, + hint: None, + l: a, + r: b, + o: c, + }; + let constraint2 = Constraint { + constraint_type: ConstraintType { + ql: FE::one(), + qr: FE::one(), + qm: FE::zero(), + qo: FE::zero(), + qc: FE::zero(), + }, + hint: None, + l: b, + r: c, + o: system.null_variable(), + }; + system.add_constraint(constraint1); + system.add_constraint(constraint2); + let inputs = HashMap::from([(a, FE::from(1))]); + assert_eq!( + system.solve(inputs).unwrap_err(), + SolverError::UnableToSolve + ); + } + + #[test] + // TODO: This system is actually solvable but not with our current solver logic + fn test_case_only_a_is_known_but_bs_cofficient_is_nonzero_and_cs_coeffient_is_nonzero() { + let mut system = ConstraintSystem::>::new(); + let a = system.new_variable(); + let b = system.new_variable(); + let c = system.new_variable(); + let constraint1 = Constraint { + constraint_type: ConstraintType { + ql: FE::one(), + qr: FE::one(), + qm: FE::one(), + qo: FE::one(), + qc: -FE::from(5), + }, + hint: None, + l: a, + r: b, + o: c, + }; + let 
constraint2 = Constraint { + constraint_type: ConstraintType { + ql: FE::one(), + qr: FE::one(), + qm: FE::zero(), + qo: FE::zero(), + qc: FE::zero(), + }, + hint: None, + l: b, + r: c, + o: system.null_variable(), + }; + system.add_constraint(constraint1); + system.add_constraint(constraint2); + let inputs = HashMap::from([(a, FE::from(1))]); + assert_eq!( + system.solve(inputs).unwrap_err(), + SolverError::UnableToSolve + ); + } + + #[test] + fn test_case_only_b_is_known_but_as_coeffient_is_zero_and_cs_coefficient_is_nonzero() { + let mut system = ConstraintSystem::>::new(); + let a = system.new_variable(); + let b = system.new_variable(); + let c = system.new_variable(); + let constraint1 = Constraint { + constraint_type: ConstraintType { + ql: -FE::from(3), + qr: FE::one(), + qm: FE::one(), + qo: FE::one(), + qc: FE::one(), + }, + hint: None, + l: a, + r: b, + o: c, + }; + let constraint2 = Constraint { + constraint_type: ConstraintType { + ql: FE::one(), + qr: FE::one(), + qm: FE::zero(), + qo: FE::zero(), + qc: FE::zero(), + }, + hint: None, + l: a, + r: c, + o: system.null_variable(), + }; + system.add_constraint(constraint1); + system.add_constraint(constraint2); + let inputs = HashMap::from([(b, FE::from(3))]); + let assignments = system.solve(inputs).unwrap(); + assert_eq!(assignments.get(&a).unwrap(), &FE::from(4)); + assert_eq!(assignments.get(&c).unwrap(), &-FE::from(4)); + } + + #[test] + fn test_case_only_b_is_known_but_as_coefficient_is_nonzero_and_cs_coeffient_is_zero() { + let mut system = ConstraintSystem::>::new(); + let a = system.new_variable(); + let b = system.new_variable(); + let c = system.new_variable(); + let constraint1 = Constraint { + constraint_type: ConstraintType { + ql: FE::one(), + qr: FE::one(), + qm: FE::one(), + qo: FE::zero(), + qc: -FE::from(5), + }, + hint: None, + l: a, + r: b, + o: system.null_variable(), + }; + let constraint2 = Constraint { + constraint_type: ConstraintType { + ql: FE::one(), + qr: FE::one(), + qm: 
FE::zero(), + qo: FE::zero(), + qc: FE::zero(), + }, + hint: None, + l: a, + r: c, + o: system.null_variable(), + }; + system.add_constraint(constraint1); + system.add_constraint(constraint2); + let inputs = HashMap::from([(b, FE::from(1))]); + let assignments = system.solve(inputs).unwrap(); + assert_eq!(assignments.get(&a).unwrap(), &FE::from(2)); + assert_eq!(assignments.get(&c).unwrap(), &-FE::from(2)); + } + + #[test] + fn test_case_only_b_is_known_but_as_coefficient_is_zero_and_cs_coeffient_is_zero() { + let mut system = ConstraintSystem::>::new(); + let a = system.new_variable(); + let b = system.new_variable(); + let c = system.new_variable(); + let constraint1 = Constraint { + constraint_type: ConstraintType { + ql: FE::one(), + qr: FE::one(), + qm: -FE::one(), + qo: FE::zero(), + qc: -FE::from(5), + }, + hint: None, + l: a, + r: b, + o: system.null_variable(), + }; + let constraint2 = Constraint { + constraint_type: ConstraintType { + ql: FE::one(), + qr: FE::one(), + qm: FE::zero(), + qo: FE::zero(), + qc: FE::zero(), + }, + hint: None, + l: a, + r: c, + o: system.null_variable(), + }; + system.add_constraint(constraint1); + system.add_constraint(constraint2); + let inputs = HashMap::from([(b, FE::from(1))]); + assert_eq!( + system.solve(inputs).unwrap_err(), + SolverError::UnableToSolve + ); + } + + #[test] + fn test_case_only_c_is_known_but_bs_coeffient_is_zero_and_qm_is_zero() { + let mut system = ConstraintSystem::>::new(); + let a = system.new_variable(); + let b = system.new_variable(); + let c = system.new_variable(); + let constraint1 = Constraint { + constraint_type: ConstraintType { + ql: FE::from(2), + qr: FE::zero(), + qm: FE::zero(), + qo: FE::one(), + qc: FE::zero(), + }, + hint: None, + l: a, + r: b, + o: c, + }; + let constraint2 = Constraint { + constraint_type: ConstraintType { + ql: FE::one(), + qr: FE::one(), + qm: FE::zero(), + qo: FE::zero(), + qc: FE::zero(), + }, + hint: None, + l: a, + r: b, + o: system.null_variable(), + }; + 
system.add_constraint(constraint1); + system.add_constraint(constraint2); + let inputs = HashMap::from([(c, FE::from(2))]); + let assignments = system.solve(inputs).unwrap(); + assert_eq!(assignments.get(&a).unwrap(), &-FE::from(1)); + assert_eq!(assignments.get(&b).unwrap(), &FE::from(1)); + } + + #[test] + fn test_case_only_c_is_known_and_bs_coeffient_is_zero_but_qm_is_nonzero() { + let mut system = ConstraintSystem::>::new(); + let a = system.new_variable(); + let b = system.new_variable(); + let c = system.new_variable(); + let constraint1 = Constraint { + constraint_type: ConstraintType { + ql: FE::from(2), + qr: FE::zero(), + qm: FE::one(), + qo: FE::one(), + qc: FE::zero(), + }, + hint: None, + l: a, + r: b, + o: c, + }; + let constraint2 = Constraint { + constraint_type: ConstraintType { + ql: FE::one(), + qr: FE::one(), + qm: FE::zero(), + qo: FE::zero(), + qc: FE::zero(), + }, + hint: None, + l: a, + r: b, + o: system.null_variable(), + }; + system.add_constraint(constraint1); + system.add_constraint(constraint2); + let inputs = HashMap::from([(c, FE::from(2))]); + assert_eq!( + system.solve(inputs).unwrap_err(), + SolverError::UnableToSolve + ); + } + + #[test] + fn test_case_only_c_is_known_but_as_coeffient_is_zero_and_qm_is_zero() { + let mut system = ConstraintSystem::>::new(); + let a = system.new_variable(); + let b = system.new_variable(); + let c = system.new_variable(); + let constraint1 = Constraint { + constraint_type: ConstraintType { + ql: FE::zero(), + qr: FE::from(2), + qm: FE::zero(), + qo: FE::one(), + qc: FE::zero(), + }, + hint: None, + l: a, + r: b, + o: c, + }; + let constraint2 = Constraint { + constraint_type: ConstraintType { + ql: FE::one(), + qr: FE::one(), + qm: FE::zero(), + qo: FE::zero(), + qc: FE::zero(), + }, + hint: None, + l: a, + r: b, + o: system.null_variable(), + }; + system.add_constraint(constraint1); + system.add_constraint(constraint2); + let inputs = HashMap::from([(c, FE::from(2))]); + let assignments = 
system.solve(inputs).unwrap(); + assert_eq!(assignments.get(&a).unwrap(), &FE::from(1)); + assert_eq!(assignments.get(&b).unwrap(), &-FE::from(1)); + } + + #[test] + fn test_case_only_c_is_known_but_as_coeffient_is_nonzero_and_qm_is_nonzero() { + let mut system = ConstraintSystem::>::new(); + let a = system.new_variable(); + let b = system.new_variable(); + let c = system.new_variable(); + let constraint1 = Constraint { + constraint_type: ConstraintType { + ql: FE::one(), + qr: FE::from(2), + qm: FE::one(), + qo: FE::one(), + qc: FE::zero(), + }, + hint: None, + l: a, + r: b, + o: c, + }; + let constraint2 = Constraint { + constraint_type: ConstraintType { + ql: FE::one(), + qr: FE::one(), + qm: FE::zero(), + qo: FE::zero(), + qc: FE::zero(), + }, + hint: None, + l: a, + r: b, + o: system.null_variable(), + }; + system.add_constraint(constraint1); + system.add_constraint(constraint2); + let inputs = HashMap::from([(c, FE::from(2))]); + assert_eq!( + system.solve(inputs).unwrap_err(), + SolverError::UnableToSolve + ); + } + + #[test] + fn test_case_all_values_are_unknown() { + let mut system = ConstraintSystem::>::new(); + let a = system.new_variable(); + let b = system.new_variable(); + let c = system.new_variable(); + let constraint1 = Constraint { + constraint_type: ConstraintType { + ql: FE::from(2), + qr: FE::zero(), + qm: FE::zero(), + qo: FE::zero(), + qc: -FE::from(2), + }, + hint: None, + l: a, + r: b, + o: c, + }; + let constraint2 = Constraint { + constraint_type: ConstraintType { + ql: FE::zero(), + qr: FE::from(2), + qm: FE::zero(), + qo: FE::zero(), + qc: -FE::from(4), + }, + hint: None, + l: a, + r: b, + o: c, + }; + let constraint3 = Constraint { + constraint_type: ConstraintType { + ql: FE::zero(), + qr: FE::zero(), + qm: FE::zero(), + qo: FE::from(2), + qc: -FE::from(6), + }, + hint: None, + l: a, + r: b, + o: c, + }; + system.add_constraint(constraint1); + system.add_constraint(constraint2); + system.add_constraint(constraint3); + let inputs = 
HashMap::from([]); + let assignments = system.solve(inputs).unwrap(); + assert_eq!(assignments.get(&a).unwrap(), &FE::from(1)); + assert_eq!(assignments.get(&b).unwrap(), &FE::from(2)); + assert_eq!(assignments.get(&c).unwrap(), &FE::from(3)); + } + + #[test] + fn test_inconsistent_system() { + let mut system = ConstraintSystem::>::new(); + let a = system.new_variable(); + let b = system.new_variable(); + let constraint1 = Constraint { + constraint_type: ConstraintType { + ql: FE::one(), + qr: FE::one(), + qm: FE::zero(), + qo: FE::zero(), + qc: FE::one(), + }, + hint: None, + l: a, + r: b, + o: system.null_variable(), + }; + system.add_constraint(constraint1); + let constraint2 = Constraint { + constraint_type: ConstraintType { + ql: -FE::one(), + qr: FE::one(), + qm: FE::zero(), + qo: FE::zero(), + qc: FE::one(), + }, + hint: None, + l: a, + r: b, + o: system.null_variable(), + }; + let inputs = HashMap::from([(a, FE::from(2))]); + system.add_constraint(constraint2); + assert_eq!( + system.solve(inputs).unwrap_err(), + SolverError::InconsistentSystem + ); + } + + #[test] + fn test_indeterminate_system() { + let mut system = ConstraintSystem::>::new(); + let a = system.new_variable(); + let b = system.new_variable(); + let constraint = Constraint { + constraint_type: ConstraintType { + ql: FE::one(), + qr: FE::one(), + qm: FE::zero(), + qo: FE::zero(), + qc: FE::one(), + }, + hint: None, + l: a, + r: b, + o: system.null_variable(), + }; + let inputs = HashMap::from([]); + system.add_constraint(constraint); + assert_eq!( + system.solve(inputs).unwrap_err(), + SolverError::UnableToSolve + ); + } +} diff --git a/provers/plonk/src/constraint_system/types.rs b/provers/plonk/src/constraint_system/types.rs new file mode 100644 index 000000000..c1637919a --- /dev/null +++ b/provers/plonk/src/constraint_system/types.rs @@ -0,0 +1,168 @@ +use lambdaworks_math::field::{ + element::FieldElement as FE, + traits::{IsField, IsPrimeField}, +}; + +use super::{Column, Constraint, 
ConstraintSystem, ConstraintType, Hint, Variable}; + +impl ConstraintSystem +where + F: IsField, +{ + /// Returns a new variable `w` constrained to take the value `value`. + pub fn new_constant(&mut self, value: FE) -> Variable { + let constant = self.new_variable(); + self.add_constraint(Constraint { + constraint_type: ConstraintType { + ql: -FE::one(), + qr: FE::zero(), + qm: FE::zero(), + qo: FE::zero(), + qc: value, + }, + l: constant, + r: self.null_variable(), + o: self.null_variable(), + hint: None, + }); + constant + } + + /// Returns a new variable `w` constrained to take either `0` or `1` + /// values. + pub fn new_boolean(&mut self) -> Variable { + let boolean = self.new_variable(); + self.add_constraint(Constraint { + constraint_type: ConstraintType { + ql: -FE::one(), + qr: FE::zero(), + qm: FE::one(), + qo: FE::zero(), + qc: FE::zero(), + }, + l: boolean, + r: boolean, + o: self.null_variable(), + hint: None, + }); + boolean + } + + /// Returns 32 new variables `[b31, b30,..., b1, b0]` constrained to take either + /// `0` or `1` values and to represent the binary decomposition + /// of the representative of the value of `v`: + /// `v = b0 + b1 * 2 + b2 * 2^2 + ... + b31 * 2^31`. 
+ pub fn new_u32(&mut self, v: &Variable) -> Vec + where + F: IsPrimeField, + { + let bits: Vec<_> = (0..32).map(|_| self.new_boolean()).collect(); + let mut aux_vars: Vec = Vec::new(); + let hint_function = |v: &FE| { + if v.representative() & 1.into() == 1.into() { + FE::one() + } else { + FE::zero() + } + }; + + let hint = Some(Hint { + function: hint_function, + input: Column::O, + output: Column::R, + }); + // t1 := 2 b_0 + b_1 + let t_0 = self.linear_combination( + &bits[0], + FE::from(2), + &bits[1], + FE::one(), + FE::zero(), + hint.clone(), + ); + aux_vars.push(t_0); + for bit in bits.iter().take(32).skip(2) { + // t_i := 2 t_{i-1} + b_i + let t_i = self.linear_combination( + aux_vars.last().unwrap(), + FE::from(2), + bit, + FE::one(), + FE::zero(), + hint.clone(), + ); + aux_vars.push(t_i); + } + self.assert_eq(v, aux_vars.last().unwrap()); + bits + } +} + +#[cfg(test)] +mod tests { + use std::collections::HashMap; + + use lambdaworks_math::field::{element::FieldElement, fields::u64_prime_field::U64PrimeField}; + + use crate::constraint_system::ConstraintSystem; + + #[test] + fn test_constant() { + let system = &mut ConstraintSystem::>::new(); + let constant = system.new_constant(FieldElement::from(17)); + let inputs = HashMap::new(); + let assignments = system.solve(inputs).unwrap(); + assert_eq!(assignments.get(&constant).unwrap(), &FieldElement::from(17)); + } + + #[test] + fn test_boolean_1() { + let system = &mut ConstraintSystem::>::new(); + + let boolean = system.new_boolean(); + let inputs = HashMap::from([(boolean, FieldElement::from(2))]); + // system is inconsistent + system.solve(inputs).unwrap_err(); + } + + #[test] + fn test_boolean_2() { + let system = &mut ConstraintSystem::>::new(); + + let boolean = system.new_boolean(); + let inputs = HashMap::from([(boolean, FieldElement::from(1))]); + // system is solvable + system.solve(inputs).unwrap(); + } + + #[test] + fn test_boolean_3() { + let system = &mut ConstraintSystem::>::new(); + + let 
boolean = system.new_boolean(); + let inputs = HashMap::from([(boolean, FieldElement::from(0))]); + // system is solvable + system.solve(inputs).unwrap(); + } + + #[test] + fn test_u32() { + let system = &mut ConstraintSystem::>::new(); + + let input = system.new_variable(); + let u32_var = system.new_u32(&input); + + let a = 59049; + let inputs = HashMap::from([(input, FieldElement::from(a))]); + + let assignments = system.solve(inputs).unwrap(); + + #[allow(clippy::needless_range_loop)] + for i in 0..32 { + assert_eq!( + assignments.get(&u32_var[i]).unwrap().representative(), + (a >> (31 - i)) & 1 + ); + } + } +} diff --git a/provers/plonk/src/lib.rs b/provers/plonk/src/lib.rs new file mode 100644 index 000000000..a1124b47e --- /dev/null +++ b/provers/plonk/src/lib.rs @@ -0,0 +1,5 @@ +pub mod constraint_system; +pub mod prover; +pub mod setup; +pub mod test_utils; +pub mod verifier; diff --git a/provers/plonk/src/prover.rs b/provers/plonk/src/prover.rs new file mode 100644 index 000000000..508afdfb3 --- /dev/null +++ b/provers/plonk/src/prover.rs @@ -0,0 +1,920 @@ +use lambdaworks_crypto::fiat_shamir::transcript::Transcript; +use lambdaworks_math::errors::DeserializationError; +use lambdaworks_math::fft::polynomial::FFTPoly; +use lambdaworks_math::field::traits::IsFFTField; +use lambdaworks_math::traits::{Deserializable, IsRandomFieldElementGenerator, Serializable}; +use std::marker::PhantomData; +use std::mem::size_of; + +use crate::setup::{ + new_strong_fiat_shamir_transcript, CommonPreprocessedInput, VerificationKey, Witness, +}; +use lambdaworks_crypto::commitments::traits::IsCommitmentScheme; +use lambdaworks_math::{field::element::FieldElement, polynomial::Polynomial}; +use lambdaworks_math::{field::traits::IsField, traits::ByteConversion}; + +/// Plonk proof. +/// The challenges are denoted +/// Round 2: β,γ, +/// Round 3: α, +/// Round 4: ζ, +/// Round 5: υ. 
+/// Here `Z_H` denotes the domain polynomial, `z` is the polynomial +/// that encodes the copy constraints, and `p` is the sum of `z` and +/// the polynomial that encodes the gates constraints. +/// The polynomial `t` is defined as `p / Z_H`. +/// `a`, `b`, and `c` are the wire assignment polynomials. +/// `S_σ1(ζ), S_σ2(ζ) and S_σ3(ζ)` are the copy permutation polynomials. +/// The polynomial `p` can be "linearized" and the result can be written as +/// `linearized_p = p_non_constant + p_constant`, where +/// `p_non_constant` is the sum of all the terms with a "non-constant" +/// polynomial factor, such as `b(ζ)Q_R(X)`, and `p_constant` is the +/// sum of all the rest (such as `PI(ζ)`). +pub struct Proof> { + // Round 1. + /// Commitment to the wire polynomial `a(x)` + pub a_1: CS::Commitment, + /// Commitment to the wire polynomial `b(x)` + pub b_1: CS::Commitment, + /// Commitment to the wire polynomial `c(x)` + pub c_1: CS::Commitment, + + // Round 2. + /// Commitment to the copy constraints polynomial `z(x)` + pub z_1: CS::Commitment, + + // Round 3. + /// Commitment to the low part of the quotient polynomial t(X) + pub t_lo_1: CS::Commitment, + /// Commitment to the middle part of the quotient polynomial t(X) + pub t_mid_1: CS::Commitment, + /// Commitment to the high part of the quotient polynomial t(X) + pub t_hi_1: CS::Commitment, + + // Round 4. + /// Value of `a(ζ)`. + pub a_zeta: FieldElement, + /// Value of `b(ζ)`. + pub b_zeta: FieldElement, + /// Value of `c(ζ)`. + pub c_zeta: FieldElement, + /// Value of `S_σ1(ζ)`. + pub s1_zeta: FieldElement, + /// Value of `S_σ2(ζ)`. + pub s2_zeta: FieldElement, + /// Value of `z(ζω)`. + pub z_zeta_omega: FieldElement, + + // Round 5 + /// Value of `p_non_constant(ζ)`. + pub p_non_constant_zeta: FieldElement, + /// Value of `t(ζ)`. + pub t_zeta: FieldElement, + /// Batch opening proof for all the evaluations at ζ + pub w_zeta_1: CS::Commitment, + /// Single opening proof for `z(ζω)`. 
+ pub w_zeta_omega_1: CS::Commitment, +} + +impl Serializable for Proof +where + F: IsField, + CS: IsCommitmentScheme, + FieldElement: ByteConversion, + CS::Commitment: Serializable, +{ + fn serialize(&self) -> Vec { + let field_elements = [ + &self.a_zeta, + &self.b_zeta, + &self.c_zeta, + &self.s1_zeta, + &self.s2_zeta, + &self.z_zeta_omega, + &self.p_non_constant_zeta, + &self.t_zeta, + ]; + let commitments = [ + &self.a_1, + &self.b_1, + &self.c_1, + &self.z_1, + &self.t_lo_1, + &self.t_mid_1, + &self.t_hi_1, + &self.w_zeta_1, + &self.w_zeta_omega_1, + ]; + + let mut serialized_proof: Vec = Vec::new(); + + field_elements.iter().for_each(|element| { + let serialized_element = element.to_bytes_be(); + serialized_proof.extend_from_slice(&(serialized_element.len() as u32).to_be_bytes()); + serialized_proof.extend_from_slice(&serialized_element); + }); + + commitments.iter().for_each(|commitment| { + let serialized_commitment = commitment.serialize(); + serialized_proof.extend_from_slice(&(serialized_commitment.len() as u32).to_be_bytes()); + serialized_proof.extend_from_slice(&serialized_commitment); + }); + + serialized_proof + } +} + +// TODO: Remove this once FieldElements implement Serializable +fn deserialize_field_element( + bytes: &[u8], + offset: usize, +) -> Result<(usize, FieldElement), DeserializationError> +where + F: IsField, + FieldElement: ByteConversion, +{ + let mut offset = offset; + let element_size_bytes: [u8; size_of::()] = bytes + .get(offset..offset + size_of::()) + .ok_or(DeserializationError::InvalidAmountOfBytes)? 
+ .try_into() + .map_err(|_| DeserializationError::InvalidAmountOfBytes)?; + let element_size = u32::from_be_bytes(element_size_bytes) as usize; + offset += size_of::(); + let field_element = FieldElement::from_bytes_be( + bytes + .get(offset..offset + element_size) + .ok_or(DeserializationError::InvalidAmountOfBytes)?, + )?; + offset += element_size; + Ok((offset, field_element)) +} + +fn deserialize_commitment( + bytes: &[u8], + offset: usize, +) -> Result<(usize, Commitment), DeserializationError> +where + Commitment: Deserializable, +{ + let mut offset = offset; + let element_size_bytes: [u8; size_of::()] = bytes + .get(offset..offset + size_of::()) + .ok_or(DeserializationError::InvalidAmountOfBytes)? + .try_into() + .map_err(|_| DeserializationError::InvalidAmountOfBytes)?; + let element_size = u32::from_be_bytes(element_size_bytes) as usize; + offset += size_of::(); + let commitment = Commitment::deserialize( + bytes + .get(offset..offset + element_size) + .ok_or(DeserializationError::InvalidAmountOfBytes)?, + )?; + offset += element_size; + Ok((offset, commitment)) +} + +impl Deserializable for Proof +where + F: IsField, + CS: IsCommitmentScheme, + FieldElement: ByteConversion, + CS::Commitment: Deserializable, +{ + fn deserialize(bytes: &[u8]) -> Result + where + Self: Sized, + { + let (offset, a_zeta) = deserialize_field_element(bytes, 0)?; + let (offset, b_zeta) = deserialize_field_element(bytes, offset)?; + let (offset, c_zeta) = deserialize_field_element(bytes, offset)?; + let (offset, s1_zeta) = deserialize_field_element(bytes, offset)?; + let (offset, s2_zeta) = deserialize_field_element(bytes, offset)?; + let (offset, z_zeta_omega) = deserialize_field_element(bytes, offset)?; + let (offset, p_non_constant_zeta) = deserialize_field_element(bytes, offset)?; + let (offset, t_zeta) = deserialize_field_element(bytes, offset)?; + + let (offset, a_1) = deserialize_commitment(bytes, offset)?; + let (offset, b_1) = deserialize_commitment(bytes, offset)?; + 
let (offset, c_1) = deserialize_commitment(bytes, offset)?; + let (offset, z_1) = deserialize_commitment(bytes, offset)?; + let (offset, t_lo_1) = deserialize_commitment(bytes, offset)?; + let (offset, t_mid_1) = deserialize_commitment(bytes, offset)?; + let (offset, t_hi_1) = deserialize_commitment(bytes, offset)?; + let (offset, w_zeta_1) = deserialize_commitment(bytes, offset)?; + let (_, w_zeta_omega_1) = deserialize_commitment(bytes, offset)?; + + Ok(Proof { + a_1, + b_1, + c_1, + z_1, + t_lo_1, + t_mid_1, + t_hi_1, + a_zeta, + b_zeta, + c_zeta, + s1_zeta, + s2_zeta, + z_zeta_omega, + p_non_constant_zeta, + t_zeta, + w_zeta_1, + w_zeta_omega_1, + }) + } +} + +pub struct Prover, R: IsRandomFieldElementGenerator> { + commitment_scheme: CS, + random_generator: R, + phantom: PhantomData, +} + +struct Round1Result { + a_1: Hiding, + b_1: Hiding, + c_1: Hiding, + p_a: Polynomial>, + p_b: Polynomial>, + p_c: Polynomial>, +} + +struct Round2Result { + z_1: Hiding, + p_z: Polynomial>, + beta: FieldElement, + gamma: FieldElement, +} + +struct Round3Result { + t_lo_1: Hiding, + t_mid_1: Hiding, + t_hi_1: Hiding, + p_t_lo: Polynomial>, + p_t_mid: Polynomial>, + p_t_hi: Polynomial>, + alpha: FieldElement, +} + +struct Round4Result { + a_zeta: FieldElement, + b_zeta: FieldElement, + c_zeta: FieldElement, + s1_zeta: FieldElement, + s2_zeta: FieldElement, + z_zeta_omega: FieldElement, + zeta: FieldElement, +} + +struct Round5Result { + w_zeta_1: Hiding, + w_zeta_omega_1: Hiding, + p_non_constant_zeta: FieldElement, + t_zeta: FieldElement, +} + +impl Prover +where + F: IsField + IsFFTField, + CS: IsCommitmentScheme, + FieldElement: ByteConversion, + CS::Commitment: Serializable, + R: IsRandomFieldElementGenerator, +{ + pub fn new(commitment_scheme: CS, random_generator: R) -> Self { + Self { + commitment_scheme, + random_generator, + phantom: PhantomData, + } + } + + fn blind_polynomial( + &self, + target: &Polynomial>, + blinder: &Polynomial>, + n: u64, + ) -> Polynomial> + 
where + F: IsField, + R: IsRandomFieldElementGenerator, + { + let bs: Vec> = (0..n).map(|_| self.random_generator.generate()).collect(); + let random_part = Polynomial::new(&bs); + target + blinder * random_part + } + + fn round_1( + &self, + witness: &Witness, + common_preprocessed_input: &CommonPreprocessedInput, + ) -> Round1Result { + let p_a = Polynomial::interpolate_fft(&witness.a) + .expect("xs and ys have equal length and xs are unique"); + let p_b = Polynomial::interpolate_fft(&witness.b) + .expect("xs and ys have equal length and xs are unique"); + let p_c = Polynomial::interpolate_fft(&witness.c) + .expect("xs and ys have equal length and xs are unique"); + + let z_h = Polynomial::new_monomial(FieldElement::one(), common_preprocessed_input.n) + - FieldElement::one(); + let p_a = self.blind_polynomial(&p_a, &z_h, 2); + let p_b = self.blind_polynomial(&p_b, &z_h, 2); + let p_c = self.blind_polynomial(&p_c, &z_h, 2); + + let a_1 = self.commitment_scheme.commit(&p_a); + let b_1 = self.commitment_scheme.commit(&p_b); + let c_1 = self.commitment_scheme.commit(&p_c); + + Round1Result { + a_1, + b_1, + c_1, + p_a, + p_b, + p_c, + } + } + + fn round_2( + &self, + witness: &Witness, + common_preprocessed_input: &CommonPreprocessedInput, + beta: FieldElement, + gamma: FieldElement, + ) -> Round2Result { + let cpi = common_preprocessed_input; + let mut coefficients: Vec> = vec![FieldElement::one()]; + let (s1, s2, s3) = (&cpi.s1_lagrange, &cpi.s2_lagrange, &cpi.s3_lagrange); + + let k2 = &cpi.k1 * &cpi.k1; + + let lp = |w: &FieldElement, eta: &FieldElement| w + &beta * eta + γ + + for i in 0..&cpi.n - 1 { + let (a_i, b_i, c_i) = (&witness.a[i], &witness.b[i], &witness.c[i]); + let num = lp(a_i, &cpi.domain[i]) + * lp(b_i, &(&cpi.domain[i] * &cpi.k1)) + * lp(c_i, &(&cpi.domain[i] * &k2)); + let den = lp(a_i, &s1[i]) * lp(b_i, &s2[i]) * lp(c_i, &s3[i]); + let new_factor = num / den; + let new_term = coefficients.last().unwrap() * &new_factor; + 
coefficients.push(new_term); + } + + let p_z = Polynomial::interpolate_fft(&coefficients) + .expect("xs and ys have equal length and xs are unique"); + let z_h = Polynomial::new_monomial(FieldElement::one(), common_preprocessed_input.n) + - FieldElement::one(); + let p_z = self.blind_polynomial(&p_z, &z_h, 3); + let z_1 = self.commitment_scheme.commit(&p_z); + Round2Result { + z_1, + p_z, + beta, + gamma, + } + } + + fn round_3( + &self, + common_preprocessed_input: &CommonPreprocessedInput, + public_input: &[FieldElement], + Round1Result { p_a, p_b, p_c, .. }: &Round1Result, + Round2Result { + p_z, beta, gamma, .. + }: &Round2Result, + alpha: FieldElement, + ) -> Round3Result { + let cpi = common_preprocessed_input; + let k2 = &cpi.k1 * &cpi.k1; + + let one = Polynomial::new_monomial(FieldElement::one(), 0); + let p_x = &Polynomial::new_monomial(FieldElement::one(), 1); + let zh = Polynomial::new_monomial(FieldElement::one(), cpi.n) - &one; + + let z_x_omega_coefficients: Vec> = p_z + .coefficients() + .iter() + .enumerate() + .map(|(i, x)| x * &cpi.domain[i % cpi.n]) + .collect(); + let z_x_omega = Polynomial::new(&z_x_omega_coefficients); + let mut e1 = vec![FieldElement::zero(); cpi.domain.len()]; + e1[0] = FieldElement::one(); + let l1 = Polynomial::interpolate_fft(&e1) + .expect("xs and ys have equal length and xs are unique"); + let mut p_pi_y = public_input.to_vec(); + p_pi_y.append(&mut vec![FieldElement::zero(); cpi.n - public_input.len()]); + let p_pi = Polynomial::interpolate_fft(&p_pi_y) + .expect("xs and ys have equal length and xs are unique"); + + // Compute p + // To leverage FFT we work with the evaluation form of every polynomial + // involved + // TODO: check a factor of 4 is a sensible upper bound + let degree = 4 * cpi.n; + let offset = &cpi.k1; + let p_a_eval = p_a.evaluate_offset_fft(1, Some(degree), offset).unwrap(); + let p_b_eval = p_b.evaluate_offset_fft(1, Some(degree), offset).unwrap(); + let p_c_eval = p_c.evaluate_offset_fft(1, 
Some(degree), offset).unwrap(); + let ql_eval = cpi.ql.evaluate_offset_fft(1, Some(degree), offset).unwrap(); + let qr_eval = cpi.qr.evaluate_offset_fft(1, Some(degree), offset).unwrap(); + let qm_eval = cpi.qm.evaluate_offset_fft(1, Some(degree), offset).unwrap(); + let qo_eval = cpi.qo.evaluate_offset_fft(1, Some(degree), offset).unwrap(); + let qc_eval = cpi.qc.evaluate_offset_fft(1, Some(degree), offset).unwrap(); + let p_pi_eval = p_pi.evaluate_offset_fft(1, Some(degree), offset).unwrap(); + let p_x_eval = p_x.evaluate_offset_fft(1, Some(degree), offset).unwrap(); + let p_z_eval = p_z.evaluate_offset_fft(1, Some(degree), offset).unwrap(); + let p_z_x_omega_eval = z_x_omega + .evaluate_offset_fft(1, Some(degree), offset) + .unwrap(); + let p_s1_eval = cpi.s1.evaluate_offset_fft(1, Some(degree), offset).unwrap(); + let p_s2_eval = cpi.s2.evaluate_offset_fft(1, Some(degree), offset).unwrap(); + let p_s3_eval = cpi.s3.evaluate_offset_fft(1, Some(degree), offset).unwrap(); + let l1_eval = l1.evaluate_offset_fft(1, Some(degree), offset).unwrap(); + + let p_constraints_eval: Vec<_> = p_a_eval + .iter() + .zip(p_b_eval.iter()) + .zip(p_c_eval.iter()) + .zip(ql_eval.iter()) + .zip(qr_eval.iter()) + .zip(qm_eval.iter()) + .zip(qo_eval.iter()) + .zip(qc_eval.iter()) + .zip(p_pi_eval.iter()) + .map(|((((((((a, b), c), ql), qr), qm), qo), qc), pi)| { + a * b * qm + a * ql + b * qr + c * qo + qc + pi + }) + .collect(); + + let f_eval: Vec<_> = p_a_eval + .iter() + .zip(p_b_eval.iter()) + .zip(p_c_eval.iter()) + .zip(p_x_eval.iter()) + .map(|(((a, b), c), x)| { + (a + x * beta + gamma) + * (b + x * beta * &cpi.k1 + gamma) + * (c + x * beta * &k2 + gamma) + }) + .collect(); + + let g_eval: Vec<_> = p_a_eval + .iter() + .zip(p_b_eval.iter()) + .zip(p_c_eval.iter()) + .zip(p_s1_eval.iter()) + .zip(p_s2_eval.iter()) + .zip(p_s3_eval.iter()) + .map(|(((((a, b), c), s1), s2), s3)| { + (a + s1 * beta + gamma) * (b + s2 * beta + gamma) * (c + s3 * beta + gamma) + }) + .collect(); + 
+ let p_permutation_1_eval: Vec<_> = g_eval + .iter() + .zip(f_eval.iter()) + .zip(p_z_eval.iter()) + .zip(p_z_x_omega_eval.iter()) + .map(|(((g, f), z), y)| g * y - f * z) + .collect(); + + let p_permutation_2_eval: Vec<_> = p_z_eval + .iter() + .zip(l1_eval.iter()) + .map(|(z, l)| (z - FieldElement::one()) * l) + .collect(); + + let p_eval: Vec<_> = p_permutation_2_eval + .iter() + .zip(p_permutation_1_eval.iter()) + .zip(p_constraints_eval.iter()) + .map(|((p2, p1), co)| (p2 * &alpha + p1) * &alpha + co) + .collect(); + + let mut zh_eval = zh.evaluate_offset_fft(1, Some(degree), offset).unwrap(); + FieldElement::inplace_batch_inverse(&mut zh_eval); + let c: Vec<_> = p_eval + .iter() + .zip(zh_eval.iter()) + .map(|(a, b)| a * b) + .collect(); + let mut t = Polynomial::interpolate_offset_fft(&c, offset).unwrap(); + + Polynomial::pad_with_zero_coefficients_to_length(&mut t, 3 * (&cpi.n + 2)); + let p_t_lo = Polynomial::new(&t.coefficients[..&cpi.n + 2]); + let p_t_mid = Polynomial::new(&t.coefficients[&cpi.n + 2..2 * (&cpi.n + 2)]); + let p_t_hi = Polynomial::new(&t.coefficients[2 * (&cpi.n + 2)..3 * (&cpi.n + 2)]); + + let b_0 = self.random_generator.generate(); + let b_1 = self.random_generator.generate(); + + let p_t_lo = &p_t_lo + &b_0 * Polynomial::new_monomial(FieldElement::one(), cpi.n + 2); + let p_t_mid = + &p_t_mid - b_0 + &b_1 * Polynomial::new_monomial(FieldElement::one(), cpi.n + 2); + let p_t_hi = &p_t_hi - b_1; + + let t_lo_1 = self.commitment_scheme.commit(&p_t_lo); + let t_mid_1 = self.commitment_scheme.commit(&p_t_mid); + let t_hi_1 = self.commitment_scheme.commit(&p_t_hi); + + Round3Result { + t_lo_1, + t_mid_1, + t_hi_1, + p_t_lo, + p_t_mid, + p_t_hi, + alpha, + } + } + + fn round_4( + &self, + CommonPreprocessedInput { s1, s2, omega, .. }: &CommonPreprocessedInput, + Round1Result { p_a, p_b, p_c, .. }: &Round1Result, + Round2Result { p_z, .. 
}: &Round2Result, + zeta: FieldElement, + ) -> Round4Result { + let a_zeta = p_a.evaluate(&zeta); + let b_zeta = p_b.evaluate(&zeta); + let c_zeta = p_c.evaluate(&zeta); + let s1_zeta = s1.evaluate(&zeta); + let s2_zeta = s2.evaluate(&zeta); + let z_zeta_omega = p_z.evaluate(&(&zeta * omega)); + Round4Result { + a_zeta, + b_zeta, + c_zeta, + s1_zeta, + s2_zeta, + z_zeta_omega, + zeta, + } + } + + fn round_5( + &self, + common_preprocessed_input: &CommonPreprocessedInput, + round_1: &Round1Result, + round_2: &Round2Result, + round_3: &Round3Result, + round_4: &Round4Result, + upsilon: FieldElement, + ) -> Round5Result { + let cpi = common_preprocessed_input; + let (r1, r2, r3, r4) = (round_1, round_2, round_3, round_4); + // Precompute variables + let k2 = &cpi.k1 * &cpi.k1; + let zeta_raised_n = Polynomial::new_monomial(r4.zeta.pow(cpi.n + 2), 0); // TODO: Paper says n and 2n, but Gnark uses n+2 and 2n+4 + let zeta_raised_2n = Polynomial::new_monomial(r4.zeta.pow(2 * cpi.n + 4), 0); + + let l1_zeta = (&r4.zeta.pow(cpi.n as u64) - FieldElement::one()) + / (&r4.zeta - FieldElement::one()) + / FieldElement::from(cpi.n as u64); + + let mut p_non_constant = &cpi.qm * &r4.a_zeta * &r4.b_zeta + + &r4.a_zeta * &cpi.ql + + &r4.b_zeta * &cpi.qr + + &r4.c_zeta * &cpi.qo + + &cpi.qc; + + let r_2_1 = (&r4.a_zeta + &r2.beta * &r4.zeta + &r2.gamma) + * (&r4.b_zeta + &r2.beta * &cpi.k1 * &r4.zeta + &r2.gamma) + * (&r4.c_zeta + &r2.beta * &k2 * &r4.zeta + &r2.gamma) + * &r2.p_z; + let r_2_2 = (&r4.a_zeta + &r2.beta * &r4.s1_zeta + &r2.gamma) + * (&r4.b_zeta + &r2.beta * &r4.s2_zeta + &r2.gamma) + * &r2.beta + * &r4.z_zeta_omega + * &cpi.s3; + p_non_constant = p_non_constant + (r_2_2 - r_2_1) * &r3.alpha; + + let r_3 = &r2.p_z * l1_zeta; + p_non_constant = p_non_constant + (r_3 * &r3.alpha * &r3.alpha); + + let partial_t = &r3.p_t_lo + zeta_raised_n * &r3.p_t_mid + zeta_raised_2n * &r3.p_t_hi; + + // TODO: Refactor to remove clones. 
+ let polynomials = vec![ + partial_t, + p_non_constant, + r1.p_a.clone(), + r1.p_b.clone(), + r1.p_c.clone(), + cpi.s1.clone(), + cpi.s2.clone(), + ]; + let ys: Vec> = polynomials.iter().map(|p| p.evaluate(&r4.zeta)).collect(); + let w_zeta_1 = self + .commitment_scheme + .open_batch(&r4.zeta, &ys, &polynomials, &upsilon); + + let w_zeta_omega_1 = + self.commitment_scheme + .open(&(&r4.zeta * &cpi.omega), &r4.z_zeta_omega, &r2.p_z); + + Round5Result { + w_zeta_1, + w_zeta_omega_1, + p_non_constant_zeta: ys[1].clone(), + t_zeta: ys[0].clone(), + } + } + + pub fn prove( + &self, + witness: &Witness, + public_input: &[FieldElement], + common_preprocessed_input: &CommonPreprocessedInput, + vk: &VerificationKey, + ) -> Proof { + let mut transcript = new_strong_fiat_shamir_transcript::(vk, public_input); + + // Round 1 + let round_1 = self.round_1(witness, common_preprocessed_input); + transcript.append(&round_1.a_1.serialize()); + transcript.append(&round_1.b_1.serialize()); + transcript.append(&round_1.c_1.serialize()); + + // Round 2 + // TODO: Handle error + let beta = FieldElement::from_bytes_be(&transcript.challenge()).unwrap(); + let gamma = FieldElement::from_bytes_be(&transcript.challenge()).unwrap(); + + let round_2 = self.round_2(witness, common_preprocessed_input, beta, gamma); + transcript.append(&round_2.z_1.serialize()); + + // Round 3 + let alpha = FieldElement::from_bytes_be(&transcript.challenge()).unwrap(); + let round_3 = self.round_3( + common_preprocessed_input, + public_input, + &round_1, + &round_2, + alpha, + ); + transcript.append(&round_3.t_lo_1.serialize()); + transcript.append(&round_3.t_mid_1.serialize()); + transcript.append(&round_3.t_hi_1.serialize()); + + // Round 4 + let zeta = FieldElement::from_bytes_be(&transcript.challenge()).unwrap(); + let round_4 = self.round_4(common_preprocessed_input, &round_1, &round_2, zeta); + + transcript.append(&round_4.a_zeta.to_bytes_be()); + transcript.append(&round_4.b_zeta.to_bytes_be()); + 
transcript.append(&round_4.c_zeta.to_bytes_be()); + transcript.append(&round_4.s1_zeta.to_bytes_be()); + transcript.append(&round_4.s2_zeta.to_bytes_be()); + transcript.append(&round_4.z_zeta_omega.to_bytes_be()); + + // Round 5 + let upsilon = FieldElement::from_bytes_be(&transcript.challenge()).unwrap(); + let round_5 = self.round_5( + common_preprocessed_input, + &round_1, + &round_2, + &round_3, + &round_4, + upsilon, + ); + + Proof { + a_1: round_1.a_1, + b_1: round_1.b_1, + c_1: round_1.c_1, + z_1: round_2.z_1, + t_lo_1: round_3.t_lo_1, + t_mid_1: round_3.t_mid_1, + t_hi_1: round_3.t_hi_1, + a_zeta: round_4.a_zeta, + b_zeta: round_4.b_zeta, + c_zeta: round_4.c_zeta, + s1_zeta: round_4.s1_zeta, + s2_zeta: round_4.s2_zeta, + z_zeta_omega: round_4.z_zeta_omega, + w_zeta_1: round_5.w_zeta_1, + w_zeta_omega_1: round_5.w_zeta_omega_1, + p_non_constant_zeta: round_5.p_non_constant_zeta, + t_zeta: round_5.t_zeta, + } + } +} + +#[cfg(test)] +mod tests { + use lambdaworks_math::{ + cyclic_group::IsGroup, + elliptic_curve::{ + short_weierstrass::{ + curves::bls12_381::{curve::BLS12381Curve, default_types::FrElement}, + point::ShortWeierstrassProjectivePoint, + }, + traits::IsEllipticCurve, + }, + }; + + use crate::{ + test_utils::circuit_1::{test_common_preprocessed_input_1, test_witness_1}, + test_utils::utils::{test_srs, FpElement, TestRandomFieldGenerator, KZG}, + }; + + use super::*; + + fn alpha() -> FrElement { + FrElement::from_hex_unchecked( + "583cfb0df2ef98f2131d717bc6aadd571c5302597c135cab7c00435817bf6e50", + ) + } + + fn beta() -> FrElement { + FrElement::from_hex_unchecked( + "bdda7414bdf5bf42b77cbb3af4a82f32ec7622dd6c71575bede021e6e4609d4", + ) + } + + fn gamma() -> FrElement { + FrElement::from_hex_unchecked( + "58f6690d9b36e62e4a0aef27612819288df2a3ff5bf01597cf06779503f51583", + ) + } + + fn zeta() -> FrElement { + FrElement::from_hex_unchecked( + "2a4040abb941ee5e2a42602a7a60d282a430a4cf099fa3bb0ba8f4da628ec59a", + ) + } + + fn upsilon() -> FrElement { 
+ FrElement::from_hex_unchecked( + "2d15959489a2a8e44693221ca7cbdcab15253d6bae9fd7fe0664cff02fe4f1cf", + ) + } + + #[test] + fn test_round_1() { + let witness = test_witness_1(FrElement::from(2), FrElement::from(2)); + let common_preprocessed_input = test_common_preprocessed_input_1(); + let srs = test_srs(common_preprocessed_input.n); + let kzg = KZG::new(srs); + let random_generator = TestRandomFieldGenerator {}; + + let prover = Prover::new(kzg, random_generator); + let round_1 = prover.round_1(&witness, &common_preprocessed_input); + let a_1_expected = BLS12381Curve::create_point_from_affine( + FpElement::from_hex_unchecked("17f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb"), + FpElement::from_hex_unchecked("114d1d6855d545a8aa7d76c8cf2e21f267816aef1db507c96655b9d5caac42364e6f38ba0ecb751bad54dcd6b939c2ca"), + ).unwrap(); + let b_1_expected = BLS12381Curve::create_point_from_affine( + FpElement::from_hex_unchecked("44ed7c3ed015c6a39c350cd06d03b48d3e1f5eaf7a256c5b6203886e6e78cd9b76623d163da4dfb0f2491e7cc06408"), + FpElement::from_hex_unchecked("14c4464d2556fdfdc8e31068ef8d953608e511569a236c825f2ddab4fe04af03aba29e38b9b2b6221243124d235f4c67"), + ).unwrap(); + let c_1_expected = BLS12381Curve::create_point_from_affine( + FpElement::from_hex_unchecked("7726dc031bd26122395153ca428d5e6dea0a64c1f9b3b1bb2f2508a5eb6ea0ea0363294fad3160858bc87e46d3422fd"), + FpElement::from_hex_unchecked("8db0c15bfd77df7fe66284c3b04e6043eaba99ef6a845d4f7255fd0da95f2fb8e474df2e7f8e1a38829f7a9612a9b87"), + ).unwrap(); + assert_eq!(round_1.a_1, a_1_expected); + assert_eq!(round_1.b_1, b_1_expected); + assert_eq!(round_1.c_1, c_1_expected); + } + + #[test] + fn test_round_2() { + let witness = test_witness_1(FrElement::from(2), FrElement::from(2)); + let common_preprocessed_input = test_common_preprocessed_input_1(); + let srs = test_srs(common_preprocessed_input.n); + let kzg = KZG::new(srs); + let random_generator = TestRandomFieldGenerator {}; 
+ let prover = Prover::new(kzg, random_generator); + + let result_2 = prover.round_2(&witness, &common_preprocessed_input, beta(), gamma()); + let z_1_expected = BLS12381Curve::create_point_from_affine( + FpElement::from_hex_unchecked("3e8322968c3496cf1b5786d4d71d158a646ec90c14edf04e758038e1f88dcdfe8443fcecbb75f3074a872a380391742"), + FpElement::from_hex_unchecked("11eac40d09796ff150004e7b858d83ddd9fe995dced0b3fbd7535d6e361729b25d488799da61fdf1d7b5022684053327"), + ).unwrap(); + assert_eq!(result_2.z_1, z_1_expected); + } + + #[test] + fn test_round_3() { + let witness = test_witness_1(FrElement::from(2), FrElement::from(2)); + let common_preprocessed_input = test_common_preprocessed_input_1(); + let srs = test_srs(common_preprocessed_input.n); + let kzg = KZG::new(srs); + let public_input = vec![FieldElement::from(2_u64), FieldElement::from(4)]; + let random_generator = TestRandomFieldGenerator {}; + let prover = Prover::new(kzg, random_generator); + let round_1 = prover.round_1(&witness, &common_preprocessed_input); + let round_2 = prover.round_2(&witness, &common_preprocessed_input, beta(), gamma()); + let round_3 = prover.round_3( + &common_preprocessed_input, + &public_input, + &round_1, + &round_2, + alpha(), + ); + + let t_lo_1_expected = BLS12381Curve::create_point_from_affine( + FpElement::from_hex_unchecked("9f511a769e77e87537b0749d65f467532fbf0f9dc1bcc912c333741be9d0a613f61e5fe595996964646ce30794701e5"), + FpElement::from_hex_unchecked("89fd6bb571323912210517237d6121144fc01ba2756f47c12c9cc94fc9197313867d68530f152dc8d447f10fcf75a6c"), + ).unwrap(); + let t_mid_1_expected = BLS12381Curve::create_point_from_affine( + FpElement::from_hex_unchecked("f96d8a93f3f5be2ab2819891f41c9f883cacea63da423e6ed1701765fcd659fc11e056a48c554f5df3a9c6603d48ca8"), + FpElement::from_hex_unchecked("14fa74fa049b7276007b739f3b8cfeac09e8cfabd4f858b6b99798c81124c34851960bebda90133cb03c981c08c8b6d3"), + ).unwrap(); + let t_hi_1_expected = 
ShortWeierstrassProjectivePoint::::neutral_element(); + + assert_eq!(round_3.t_lo_1, t_lo_1_expected); + assert_eq!(round_3.t_mid_1, t_mid_1_expected); + assert_eq!(round_3.t_hi_1, t_hi_1_expected); + } + + #[test] + fn test_round_4() { + let witness = test_witness_1(FrElement::from(2), FrElement::from(2)); + let common_preprocessed_input = test_common_preprocessed_input_1(); + let srs = test_srs(common_preprocessed_input.n); + let kzg = KZG::new(srs); + let random_generator = TestRandomFieldGenerator {}; + let prover = Prover::new(kzg, random_generator); + + let round_1 = prover.round_1(&witness, &common_preprocessed_input); + let round_2 = prover.round_2(&witness, &common_preprocessed_input, beta(), gamma()); + + let round_4 = prover.round_4(&common_preprocessed_input, &round_1, &round_2, zeta()); + let expected_a_value = FrElement::from_hex_unchecked( + "2c090a95b57f1f493b7b747bba34fef7772fd72f97d718ed69549641a823eb2e", + ); + let expected_b_value = FrElement::from_hex_unchecked( + "5975959d91369ba4e7a03c6ae94b7fe98e8b61b7bf9af63c8ae0759e17ac0c7e", + ); + let expected_c_value = FrElement::from_hex_unchecked( + "6bf31edeb4344b7d2df2cb1bd40b4d13e182d9cb09f89591fa043c1a34b4a93", + ); + let expected_z_value = FrElement::from_hex_unchecked( + "38e2ec8e7c3dab29e2b8e9c8ea152914b8fe4612e91f2902c80238efcf21f4ee", + ); + let expected_s1_value = FrElement::from_hex_unchecked( + "472f66db4fb6947d9ed9808241fe82324bc08aa2a54be93179db8e564e1137d4", + ); + let expected_s2_value = FrElement::from_hex_unchecked( + "5588f1239c24efe0538868d0f716984e69c6980e586864f615e4b0621fdc6f81", + ); + + assert_eq!(round_4.a_zeta, expected_a_value); + assert_eq!(round_4.b_zeta, expected_b_value); + assert_eq!(round_4.c_zeta, expected_c_value); + assert_eq!(round_4.z_zeta_omega, expected_z_value); + assert_eq!(round_4.s1_zeta, expected_s1_value); + assert_eq!(round_4.s2_zeta, expected_s2_value); + } + + #[test] + fn test_round_5() { + let witness = test_witness_1(FrElement::from(2), 
FrElement::from(2)); + let common_preprocessed_input = test_common_preprocessed_input_1(); + let srs = test_srs(common_preprocessed_input.n); + let kzg = KZG::new(srs); + let public_input = vec![FieldElement::from(2_u64), FieldElement::from(4)]; + let random_generator = TestRandomFieldGenerator {}; + let prover = Prover::new(kzg, random_generator); + + let round_1 = prover.round_1(&witness, &common_preprocessed_input); + let round_2 = prover.round_2(&witness, &common_preprocessed_input, beta(), gamma()); + + let round_3 = prover.round_3( + &common_preprocessed_input, + &public_input, + &round_1, + &round_2, + alpha(), + ); + + let round_4 = prover.round_4(&common_preprocessed_input, &round_1, &round_2, zeta()); + + let expected_w_zeta_1 = BLS12381Curve::create_point_from_affine( + FpElement::from_hex_unchecked("fa6250b80a418f0548b132ac264ff9915b2076c0c2548da9316ae19ffa35bbcf905d9f02f9274739608045ef83a4757"), + FpElement::from_hex_unchecked("17713ade2dbd66e923d4092a5d2da98202959dd65a15e9f7791fab3c0dd08788aa9b4a1cb21d04e0c43bd29225472145"), + ).unwrap(); + let expected_w_zeta_omega_1 = BLS12381Curve::create_point_from_affine( + FpElement::from_hex_unchecked("4484f08f8eaccf28bab8ee9539e6e7f4059cb1ce77b9b18e9e452f387163dc0b845f4874bf6445399e650d362799ff5"), + FpElement::from_hex_unchecked("1254347a0fa2ac856917825a5cff5f9583d39a52edbc2be5bb10fabd0c04d23019bcb963404345743120310fd734a61a"), + ).unwrap(); + + let round_5 = prover.round_5( + &common_preprocessed_input, + &round_1, + &round_2, + &round_3, + &round_4, + upsilon(), + ); + assert_eq!(round_5.w_zeta_1, expected_w_zeta_1); + assert_eq!(round_5.w_zeta_omega_1, expected_w_zeta_omega_1); + } +} diff --git a/provers/plonk/src/setup.rs b/provers/plonk/src/setup.rs new file mode 100644 index 000000000..974c50e3f --- /dev/null +++ b/provers/plonk/src/setup.rs @@ -0,0 +1,218 @@ +use std::collections::HashMap; + +use crate::constraint_system::{get_permutation, ConstraintSystem, Variable}; +use 
crate::test_utils::utils::{generate_domain, generate_permutation_coefficients}; +use lambdaworks_crypto::commitments::traits::IsCommitmentScheme; +use lambdaworks_crypto::fiat_shamir::default_transcript::DefaultTranscript; +use lambdaworks_crypto::fiat_shamir::transcript::Transcript; +use lambdaworks_math::fft::polynomial::FFTPoly; +use lambdaworks_math::field::traits::IsFFTField; +use lambdaworks_math::field::{element::FieldElement, traits::IsField}; +use lambdaworks_math::polynomial::Polynomial; +use lambdaworks_math::traits::{ByteConversion, Serializable}; + +// TODO: implement getters +pub struct Witness { + pub a: Vec>, + pub b: Vec>, + pub c: Vec>, +} + +impl Witness { + pub fn new(values: HashMap>, system: &ConstraintSystem) -> Self { + let (lro, _) = system.to_matrices(); + let abc: Vec<_> = lro.iter().map(|v| values[v].clone()).collect(); + let n = lro.len() / 3; + + Self { + a: abc[..n].to_vec(), + b: abc[n..2 * n].to_vec(), + c: abc[2 * n..].to_vec(), + } + } +} + +// TODO: implement getters +#[derive(Clone)] +pub struct CommonPreprocessedInput { + pub n: usize, + /// Number of constraints + pub domain: Vec>, + pub omega: FieldElement, + pub k1: FieldElement, + + pub ql: Polynomial>, + pub qr: Polynomial>, + pub qo: Polynomial>, + pub qm: Polynomial>, + pub qc: Polynomial>, + + pub s1: Polynomial>, + pub s2: Polynomial>, + pub s3: Polynomial>, + + pub s1_lagrange: Vec>, + pub s2_lagrange: Vec>, + pub s3_lagrange: Vec>, +} + +impl CommonPreprocessedInput { + pub fn from_constraint_system( + system: &ConstraintSystem, + order_r_minus_1_root_unity: &FieldElement, + ) -> Self { + let (lro, q) = system.to_matrices(); + let n = lro.len() / 3; + let omega = F::get_primitive_root_of_unity(n.trailing_zeros() as u64).unwrap(); + let domain = generate_domain(&omega, n); + + let m = q.len() / 5; + let ql: Vec<_> = q[..m].to_vec(); + let qr: Vec<_> = q[m..2 * m].to_vec(); + let qm: Vec<_> = q[2 * m..3 * m].to_vec(); + let qo: Vec<_> = q[3 * m..4 * m].to_vec(); + let 
qc: Vec<_> = q[4 * m..].to_vec(); + + let permutation = get_permutation(&lro); + let permuted = + generate_permutation_coefficients(&omega, n, &permutation, order_r_minus_1_root_unity); + + let s1_lagrange: Vec<_> = permuted[..n].to_vec(); + let s2_lagrange: Vec<_> = permuted[n..2 * n].to_vec(); + let s3_lagrange: Vec<_> = permuted[2 * n..].to_vec(); + + Self { + domain, + n, + omega, + k1: order_r_minus_1_root_unity.clone(), + ql: Polynomial::interpolate_fft(&ql).unwrap(), // TODO: Remove unwraps + qr: Polynomial::interpolate_fft(&qr).unwrap(), + qo: Polynomial::interpolate_fft(&qo).unwrap(), + qm: Polynomial::interpolate_fft(&qm).unwrap(), + qc: Polynomial::interpolate_fft(&qc).unwrap(), + s1: Polynomial::interpolate_fft(&s1_lagrange).unwrap(), + s2: Polynomial::interpolate_fft(&s2_lagrange).unwrap(), + s3: Polynomial::interpolate_fft(&s3_lagrange).unwrap(), + s1_lagrange, + s2_lagrange, + s3_lagrange, + } + } +} + +pub struct VerificationKey { + pub qm_1: G1Point, + pub ql_1: G1Point, + pub qr_1: G1Point, + pub qo_1: G1Point, + pub qc_1: G1Point, + + pub s1_1: G1Point, + pub s2_1: G1Point, + pub s3_1: G1Point, +} + +pub fn setup>( + common_input: &CommonPreprocessedInput, + commitment_scheme: &CS, +) -> VerificationKey { + VerificationKey { + qm_1: commitment_scheme.commit(&common_input.qm), + ql_1: commitment_scheme.commit(&common_input.ql), + qr_1: commitment_scheme.commit(&common_input.qr), + qo_1: commitment_scheme.commit(&common_input.qo), + qc_1: commitment_scheme.commit(&common_input.qc), + + s1_1: commitment_scheme.commit(&common_input.s1), + s2_1: commitment_scheme.commit(&common_input.s2), + s3_1: commitment_scheme.commit(&common_input.s3), + } +} + +pub fn new_strong_fiat_shamir_transcript( + vk: &VerificationKey, + public_input: &[FieldElement], +) -> DefaultTranscript +where + F: IsField, + FieldElement: ByteConversion, + CS: IsCommitmentScheme, + CS::Commitment: Serializable, +{ + let mut transcript = DefaultTranscript::new(); + + 
transcript.append(&vk.s1_1.serialize()); + transcript.append(&vk.s2_1.serialize()); + transcript.append(&vk.s3_1.serialize()); + transcript.append(&vk.ql_1.serialize()); + transcript.append(&vk.qr_1.serialize()); + transcript.append(&vk.qm_1.serialize()); + transcript.append(&vk.qo_1.serialize()); + transcript.append(&vk.qc_1.serialize()); + + for value in public_input.iter() { + transcript.append(&value.to_bytes_be()); + } + transcript +} + +#[cfg(test)] +mod tests { + use lambdaworks_math::elliptic_curve::short_weierstrass::curves::bls12_381::default_types::FrField; + use lambdaworks_math::elliptic_curve::{ + short_weierstrass::curves::bls12_381::curve::BLS12381Curve, traits::IsEllipticCurve, + }; + + use super::*; + use crate::test_utils::circuit_1::test_common_preprocessed_input_1; + use crate::test_utils::utils::{test_srs, FpElement, KZG}; + + #[test] + fn setup_works_for_simple_circuit() { + let common_input = test_common_preprocessed_input_1(); + let srs = test_srs(common_input.n); + let kzg = KZG::new(srs); + + let vk = setup::(&common_input, &kzg); + + let expected_ql = BLS12381Curve::create_point_from_affine( + FpElement::from_hex_unchecked("1492341357755e31a6306abf3237f84f707ded7cb526b8ffd40901746234ef27f12bc91ef638e4977563db208b765f12"), + FpElement::from_hex_unchecked("ec3ff8288ea339010658334f494a614f7470c19a08d53a9cf5718e0613bb65d2cdbc1df374057d9b45c35cf1f1b5b72"), + ).unwrap(); + let expected_qr = BLS12381Curve::create_point_from_affine( + FpElement::from_hex_unchecked("107ab09b6b8c6fc55087aeb8045e17a6d016bdacbc64476264328e71f3e85a4eacaee34ee963e9c9249b6b1bc9653674"), + FpElement::from_hex_unchecked("f98e3fe5a53545b67a51da7e7a6cedc51af467abdefd644113fb97edf339aeaa5e2f6a5713725ec76754510b76a10be"), + ).unwrap(); + let expected_qo = BLS12381Curve::create_point_from_affine( + FpElement::from_hex_unchecked("9fd00baa112a0064ce5c3c2d243e657b25df8a2f237b91eec27e83157f6ca896a2401d07ec7d7d097d2f2a344e2018f"), + 
FpElement::from_hex_unchecked("15922cfa65972d80823c6bb9aeb0637c864b636267bfee2818413e9cdc5f7948575c4ce097bb8b9db8087c4ed5056592"), + ).unwrap(); + let expected_qm = BLS12381Curve::create_point_from_affine( + FpElement::from_hex_unchecked("9fd00baa112a0064ce5c3c2d243e657b25df8a2f237b91eec27e83157f6ca896a2401d07ec7d7d097d2f2a344e2018f"), + FpElement::from_hex_unchecked("46ee4efd3e8b919c8df3bfc949b495ade2be8228bc524974eef94041a517cdbc74fb31e1998746201f683b12afa4519"), + ).unwrap(); + + let expected_s1 = BLS12381Curve::create_point_from_affine( + FpElement::from_hex_unchecked("187ee12de08728650d18912aa9fe54863922a9eeb37e34ff43041f1d039f00028ad2cdf23705e6f6ab7ea9406535c1b0"), + FpElement::from_hex_unchecked("4f29051990de0d12b38493992845d9abcb48ef18239eca8b8228618c78ec371d39917bc0d45cf6dc4f79bd64baa9ae2") + ).unwrap(); + let expected_s2 = BLS12381Curve::create_point_from_affine( + FpElement::from_hex_unchecked("167c0384025887c01ea704234e813842a4acef7d765c3a94a5442ca685b4fc1d1b425ba7786a7413bd4a7d6a1eb5a35a"), + FpElement::from_hex_unchecked("12b644100c5d00af27c121806c4779f88e840ff3fdac44124b8175a303d586c4d910486f909b37dda1505c485f053da1") + ).unwrap(); + let expected_s3 = BLS12381Curve::create_point_from_affine( + FpElement::from_hex_unchecked("188fb6dba3cf5af8a7f6a44d935bb3dd2083a5beb4c020f581739ebc40659c824a4ca8279cf7d852decfbca572e4fa0e"), + FpElement::from_hex_unchecked("d84d52582fd95bfa7672f7cef9dd4d0b1b4a54d33f244fdb97df71c7d45fd5c5329296b633c9ed23b8475ee47b9d99") + ).unwrap(); + + assert_eq!(vk.ql_1, expected_ql); + assert_eq!(vk.qr_1, expected_qr); + assert_eq!(vk.qo_1, expected_qo); + assert_eq!(vk.qm_1, expected_qm); + + assert_eq!(vk.s1_1, expected_s1); + assert_eq!(vk.s2_1, expected_s2); + assert_eq!(vk.s3_1, expected_s3); + } +} diff --git a/provers/plonk/src/test_utils/circuit_1.rs b/provers/plonk/src/test_utils/circuit_1.rs new file mode 100644 index 000000000..bc95c8962 --- /dev/null +++ b/provers/plonk/src/test_utils/circuit_1.rs @@ -0,0 +1,116 @@ +use 
super::utils::{ + generate_domain, generate_permutation_coefficients, ORDER_R_MINUS_1_ROOT_UNITY, +}; +use crate::setup::{CommonPreprocessedInput, Witness}; +use lambdaworks_math::fft::polynomial::FFTPoly; +use lambdaworks_math::{ + elliptic_curve::short_weierstrass::curves::bls12_381::default_types::{FrElement, FrField}, + field::{element::FieldElement, traits::IsFFTField}, + polynomial::Polynomial, +}; + +pub const ORDER_4_ROOT_UNITY: FrElement = + FrElement::from_hex_unchecked("8d51ccce760304d0ec030002760300000001000000000000"); // order 4 + +/* Test circuit for the program: + public input x + public input y + private input e + z = x * e + assert y == z +*/ +pub fn test_common_preprocessed_input_1() -> CommonPreprocessedInput { + let n = 4; + let omega = FrField::get_primitive_root_of_unity(2).unwrap(); + let domain = generate_domain(&omega, n); + let permuted = generate_permutation_coefficients( + &omega, + n, + &[11, 3, 0, 1, 2, 4, 6, 10, 5, 8, 7, 9], + &ORDER_R_MINUS_1_ROOT_UNITY, + ); + + let s1_lagrange: Vec = permuted[..4].to_vec(); + let s2_lagrange: Vec = permuted[4..8].to_vec(); + let s3_lagrange: Vec = permuted[8..].to_vec(); + + CommonPreprocessedInput { + n, + omega, + domain, + k1: ORDER_R_MINUS_1_ROOT_UNITY, + // domain: domain.clone(), + ql: Polynomial::interpolate_fft(&[ + -FieldElement::one(), + -FieldElement::one(), + FieldElement::zero(), + FieldElement::one(), + ]) + .unwrap(), + + qr: Polynomial::interpolate_fft(&[ + FieldElement::zero(), + FieldElement::zero(), + FieldElement::zero(), + -FieldElement::one(), + ]) + .unwrap(), + + qo: Polynomial::interpolate_fft(&[ + FieldElement::zero(), + FieldElement::zero(), + -FieldElement::one(), + FieldElement::zero(), + ]) + .unwrap(), + + qm: Polynomial::interpolate_fft(&[ + FieldElement::zero(), + FieldElement::zero(), + FieldElement::one(), + FieldElement::zero(), + ]) + .unwrap(), + + qc: Polynomial::interpolate_fft(&[ + FieldElement::from(0_u64), + FieldElement::from(0_u64), + 
FieldElement::zero(), + FieldElement::zero(), + ]) + .unwrap(), + + s1: Polynomial::interpolate_fft(&s1_lagrange).unwrap(), + s2: Polynomial::interpolate_fft(&s2_lagrange).unwrap(), + s3: Polynomial::interpolate_fft(&s3_lagrange).unwrap(), + + s1_lagrange, + s2_lagrange, + s3_lagrange, + } +} + +pub fn test_witness_1(x: FrElement, e: FrElement) -> Witness { + let y = &x * &e; + let empty = x.clone(); + Witness { + a: vec![ + x.clone(), // Public input + y.clone(), // Public input + x.clone(), // LHS for multiplication + y, // LHS for == + ], + b: vec![ + empty.clone(), + empty.clone(), + e.clone(), // RHS for multiplication + &x * &e, // RHS for == + ], + c: vec![ + empty.clone(), + empty.clone(), + &x * &e, // Output of multiplication + empty, + ], + } +} diff --git a/provers/plonk/src/test_utils/circuit_2.rs b/provers/plonk/src/test_utils/circuit_2.rs new file mode 100644 index 000000000..b47eafbef --- /dev/null +++ b/provers/plonk/src/test_utils/circuit_2.rs @@ -0,0 +1,157 @@ +use super::utils::{ + generate_domain, generate_permutation_coefficients, ORDER_R_MINUS_1_ROOT_UNITY, +}; +use crate::setup::{CommonPreprocessedInput, Witness}; +use lambdaworks_math::{ + elliptic_curve::short_weierstrass::curves::bls12_381::default_types::{FrElement, FrField}, + field::{element::FieldElement, traits::IsFFTField}, + polynomial::Polynomial, +}; + +pub const ORDER_8_ROOT_UNITY: FrElement = FrElement::from_hex_unchecked( + "345766f603fa66e78c0625cd70d77ce2b38b21c28713b7007228fd3397743f7a", +); // order 8 + +/* Test circuit for the program: + public input x + public input y + private input e + z1 = x * e + z2 = z1 + 5 + assert y == z2 +*/ +pub fn test_common_preprocessed_input_2() -> CommonPreprocessedInput { + let n: usize = 8; + let omega = FrField::get_primitive_root_of_unity(3).unwrap(); + let domain = generate_domain(&omega, n); + let permutation = &[ + 23, 4, 0, 18, 1, 2, 5, 6, 7, 8, 10, 9, 19, 11, 13, 14, 15, 16, 3, 12, 17, 20, 21, 22, + ]; + let permuted = + 
generate_permutation_coefficients(&omega, n, permutation, &ORDER_R_MINUS_1_ROOT_UNITY); + + let s1_lagrange: Vec = permuted[..8].to_vec(); + let s2_lagrange: Vec = permuted[8..16].to_vec(); + let s3_lagrange: Vec = permuted[16..].to_vec(); + + CommonPreprocessedInput { + n, + omega, + k1: ORDER_R_MINUS_1_ROOT_UNITY, + domain: domain.clone(), + + ql: Polynomial::interpolate( + &domain, + &[ + -FieldElement::one(), + -FieldElement::one(), + FieldElement::zero(), + FieldElement::one(), + FieldElement::one(), + FieldElement::zero(), + FieldElement::zero(), + FieldElement::zero(), + ], + ) + .unwrap(), + qr: Polynomial::interpolate( + &domain, + &[ + FieldElement::zero(), + FieldElement::zero(), + FieldElement::zero(), + FieldElement::zero(), + -FieldElement::one(), + FieldElement::zero(), + FieldElement::zero(), + FieldElement::zero(), + ], + ) + .unwrap(), + qo: Polynomial::interpolate( + &domain, + &[ + FieldElement::zero(), + FieldElement::zero(), + -FieldElement::one(), + -FieldElement::one(), + FieldElement::zero(), + FieldElement::zero(), + FieldElement::zero(), + FieldElement::zero(), + ], + ) + .unwrap(), + qm: Polynomial::interpolate( + &domain, + &[ + FieldElement::zero(), + FieldElement::zero(), + FieldElement::one(), + FieldElement::zero(), + FieldElement::zero(), + FieldElement::zero(), + FieldElement::zero(), + FieldElement::zero(), + ], + ) + .unwrap(), + qc: Polynomial::interpolate( + &domain, + &[ + FieldElement::zero(), + FieldElement::zero(), + FieldElement::zero(), + FieldElement::from(5_u64), + FieldElement::zero(), + FieldElement::zero(), + FieldElement::zero(), + FieldElement::zero(), + ], + ) + .unwrap(), + + s1: Polynomial::interpolate(&domain, &s1_lagrange).unwrap(), + s2: Polynomial::interpolate(&domain, &s2_lagrange).unwrap(), + s3: Polynomial::interpolate(&domain, &s3_lagrange).unwrap(), + + s1_lagrange, + s2_lagrange, + s3_lagrange, + } +} + +pub fn test_witness_2(x: FrElement, e: FrElement) -> Witness { + Witness { + a: vec![ + x.clone(), 
+ &x * &e + FieldElement::from(5_u64), + x.clone(), + &x * &e, + &x * &e + FieldElement::from(5_u64), + x.clone(), + x.clone(), + x.clone(), + ], + b: vec![ + x.clone(), + x.clone(), + e.clone(), + x.clone(), + &x * &e + FieldElement::from(5_u64), + x.clone(), + x.clone(), + x.clone(), + ], + c: vec![ + x.clone(), + x.clone(), + &x * &e, + &x * &e + FieldElement::from(5_u64), + x.clone(), + x.clone(), + x.clone(), + x, + ], + } +} diff --git a/provers/plonk/src/test_utils/circuit_json.rs b/provers/plonk/src/test_utils/circuit_json.rs new file mode 100644 index 000000000..13aea759e --- /dev/null +++ b/provers/plonk/src/test_utils/circuit_json.rs @@ -0,0 +1,185 @@ +use super::utils::{ + generate_domain, generate_permutation_coefficients, ORDER_R_MINUS_1_ROOT_UNITY, +}; +use crate::setup::{CommonPreprocessedInput, Witness}; +use lambdaworks_math::fft::polynomial::FFTPoly; +use lambdaworks_math::field::traits::IsFFTField; +use lambdaworks_math::{ + elliptic_curve::short_weierstrass::curves::bls12_381::default_types::{FrElement, FrField}, + polynomial::Polynomial, +}; +use serde::{Deserialize, Serialize}; + +// The json exported in go comes with Uppercase in the first letter. 
+#[allow(non_snake_case)] +#[derive(Serialize, Deserialize)] +struct JsonPlonkCircuit { + N: usize, + N_Padded: usize, + Input: Vec, + Ql: Vec, + Qr: Vec, + Qm: Vec, + Qo: Vec, + Qc: Vec, + A: Vec, + B: Vec, + C: Vec, + Permutation: Vec, +} + +pub fn common_preprocessed_input_from_json( + json_string: &str, +) -> ( + Witness, + CommonPreprocessedInput, + Vec, +) { + let json_input: JsonPlonkCircuit = serde_json::from_str(json_string).unwrap(); + let n = json_input.N_Padded; + let omega = FrField::get_primitive_root_of_unity(n.trailing_zeros() as u64).unwrap(); + let domain = generate_domain(&omega, n); + let permuted = generate_permutation_coefficients( + &omega, + n, + &json_input.Permutation, + &ORDER_R_MINUS_1_ROOT_UNITY, + ); + + let pad = FrElement::from_hex_unchecked(&json_input.Input[0]); + + let s1_lagrange: Vec = permuted[..n].to_vec(); + let s2_lagrange: Vec = permuted[n..2 * n].to_vec(); + let s3_lagrange: Vec = permuted[2 * n..].to_vec(); + ( + Witness { + a: process_vector(json_input.A, &pad, n), + b: process_vector(json_input.B, &pad, n), + c: process_vector(json_input.C, &pad, n), + }, + CommonPreprocessedInput { + n, + domain, + omega, + k1: ORDER_R_MINUS_1_ROOT_UNITY, + ql: Polynomial::interpolate_fft(&process_vector(json_input.Ql, &FrElement::zero(), n)) + .unwrap(), + qr: Polynomial::interpolate_fft(&process_vector(json_input.Qr, &FrElement::zero(), n)) + .unwrap(), + qo: Polynomial::interpolate_fft(&process_vector(json_input.Qo, &FrElement::zero(), n)) + .unwrap(), + qm: Polynomial::interpolate_fft(&process_vector(json_input.Qm, &FrElement::zero(), n)) + .unwrap(), + qc: Polynomial::interpolate_fft(&process_vector(json_input.Qc, &FrElement::zero(), n)) + .unwrap(), + s1: Polynomial::interpolate_fft(&s1_lagrange).unwrap(), + s2: Polynomial::interpolate_fft(&s2_lagrange).unwrap(), + s3: Polynomial::interpolate_fft(&s3_lagrange).unwrap(), + s1_lagrange, + s2_lagrange, + s3_lagrange, + }, + convert_str_vec_to_frelement_vec(json_input.Input), + ) +} 
+ +pub fn pad_vector<'a>( + v: &'a mut Vec, + p: &FrElement, + target_size: usize, +) -> &'a mut Vec { + v.append(&mut vec![p.clone(); target_size - v.len()]); + v +} + +fn convert_str_vec_to_frelement_vec(ss: Vec) -> Vec { + ss.iter() + .map(|s| FrElement::from_hex_unchecked(s)) + .collect() +} + +fn process_vector(vector: Vec, pad: &FrElement, n: usize) -> Vec { + pad_vector(&mut convert_str_vec_to_frelement_vec(vector), pad, n).to_owned() +} + +#[cfg(test)] +mod tests { + use super::common_preprocessed_input_from_json; + + #[test] + fn test_import_gnark_circuit_from_json() { + common_preprocessed_input_from_json( + r#"{ + "N": 4, + "N_Padded": 4, + "Omega": "8d51ccce760304d0ec030002760300000001000000000000", + "Input": [ + "2", + "4" + ], + "Ql": [ + "73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000", + "73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000", + "0", + "1" + ], + "Qr": [ + "0", + "0", + "0", + "73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000" + ], + "Qm": [ + "0", + "0", + "1", + "0" + ], + "Qo": [ + "0", + "0", + "73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000", + "0" + ], + "Qc": [ + "0", + "0", + "0", + "0" + ], + "A": [ + "2", + "4", + "2", + "4" + ], + "B": [ + "2", + "2", + "2", + "4" + ], + "C": [ + "2", + "2", + "4", + "2" + ], + "Permutation": [ + 11, + 3, + 2, + 1, + 0, + 4, + 5, + 10, + 6, + 8, + 7, + 9 + ] +}"#, + ); + } +} diff --git a/provers/plonk/src/test_utils/mod.rs b/provers/plonk/src/test_utils/mod.rs new file mode 100644 index 000000000..891980d8e --- /dev/null +++ b/provers/plonk/src/test_utils/mod.rs @@ -0,0 +1,8 @@ +/// A test circuit +pub mod circuit_1; +/// A test circuit +pub mod circuit_2; +/// Deserialize json to generate test circuits +pub mod circuit_json; +/// Useful tools to test plonk over different circuits +pub mod utils; diff --git a/provers/plonk/src/test_utils/utils.rs b/provers/plonk/src/test_utils/utils.rs new file mode 100644 index 
000000000..cecd0f1b0 --- /dev/null +++ b/provers/plonk/src/test_utils/utils.rs @@ -0,0 +1,93 @@ +use lambdaworks_crypto::commitments::kzg::KateZaveruchaGoldberg; +use lambdaworks_crypto::commitments::kzg::StructuredReferenceString; +use lambdaworks_math::elliptic_curve::short_weierstrass::curves::bls12_381::default_types::FrElement; +use lambdaworks_math::elliptic_curve::short_weierstrass::curves::bls12_381::default_types::FrField; +use lambdaworks_math::elliptic_curve::short_weierstrass::curves::bls12_381::pairing::BLS12381AtePairing; +use lambdaworks_math::field::traits::IsField; +use lambdaworks_math::{ + cyclic_group::IsGroup, + elliptic_curve::{ + short_weierstrass::curves::bls12_381::{ + curve::BLS12381Curve, field_extension::BLS12381PrimeField, twist::BLS12381TwistCurve, + }, + traits::IsEllipticCurve, + }, + field::element::FieldElement, + traits::IsRandomFieldElementGenerator, +}; + +pub type Curve = BLS12381Curve; +pub type TwistedCurve = BLS12381TwistCurve; +pub type FpField = BLS12381PrimeField; +pub type FpElement = FieldElement; +pub type Pairing = BLS12381AtePairing; +pub type KZG = KateZaveruchaGoldberg; +pub const ORDER_R_MINUS_1_ROOT_UNITY: FrElement = FrElement::from_hex_unchecked("7"); + +pub type G1Point = ::PointRepresentation; +pub type G2Point = ::PointRepresentation; + +/// Generates a test SRS for the BLS12381 curve +/// n is the number of constraints in the system. 
+pub fn test_srs(n: usize) -> StructuredReferenceString { + let s = FrElement::from(2); + let g1 = ::generator(); + let g2 = ::generator(); + + let powers_main_group: Vec = (0..n + 3) + .map(|exp| g1.operate_with_self(s.pow(exp as u64).representative())) + .collect(); + let powers_secondary_group = [g2.clone(), g2.operate_with_self(s.representative())]; + + StructuredReferenceString::new(&powers_main_group, &powers_secondary_group) +} + +/// Generates a domain to interpolate: 1, omega, omega², ..., omega^size +pub fn generate_domain(omega: &FieldElement, size: usize) -> Vec> { + (1..size).fold(vec![FieldElement::one()], |mut acc, _| { + acc.push(acc.last().unwrap() * omega); + acc + }) +} + +/// Generates the permutation coefficients for the copy constraints. +/// polynomials S1, S2, S3. +pub fn generate_permutation_coefficients( + omega: &FieldElement, + n: usize, + permutation: &[usize], + order_r_minus_1_root_unity: &FieldElement, +) -> Vec> { + let identity = identity_permutation(omega, n, order_r_minus_1_root_unity); + let permuted: Vec> = (0..n * 3) + .map(|i| identity[permutation[i]].clone()) + .collect(); + permuted +} + +/// The identity permutation, auxiliary function to generate the copy constraints. +fn identity_permutation( + w: &FieldElement, + n: usize, + order_r_minus_1_root_unity: &FieldElement, +) -> Vec> { + let u = order_r_minus_1_root_unity; + let mut result: Vec> = vec![]; + for index_column in 0..=2 { + for index_row in 0..n { + result.push(w.pow(index_row) * u.pow(index_column as u64)); + } + } + result +} + +/// A mock of a random number generator, to have deterministic tests. +/// When set to zero, there is no zero knowledge applied, because it is used +/// to get random numbers to blind polynomials. 
+#[derive(Clone)] +pub struct TestRandomFieldGenerator; +impl IsRandomFieldElementGenerator for TestRandomFieldGenerator { + fn generate(&self) -> FrElement { + FrElement::zero() + } +} diff --git a/provers/plonk/src/verifier.rs b/provers/plonk/src/verifier.rs new file mode 100644 index 000000000..6da8e83d0 --- /dev/null +++ b/provers/plonk/src/verifier.rs @@ -0,0 +1,423 @@ +use lambdaworks_crypto::commitments::traits::IsCommitmentScheme; +use lambdaworks_crypto::fiat_shamir::transcript::Transcript; +use lambdaworks_math::cyclic_group::IsGroup; +use lambdaworks_math::field::element::FieldElement; +use lambdaworks_math::field::traits::{IsFFTField, IsField, IsPrimeField}; +use lambdaworks_math::traits::{ByteConversion, Serializable}; +use std::marker::PhantomData; + +use crate::prover::Proof; +use crate::setup::{new_strong_fiat_shamir_transcript, CommonPreprocessedInput, VerificationKey}; + +pub struct Verifier> { + commitment_scheme: CS, + phantom: PhantomData, +} + +impl> Verifier { + pub fn new(commitment_scheme: CS) -> Self { + Self { + commitment_scheme, + phantom: PhantomData, + } + } + + fn compute_challenges( + &self, + p: &Proof, + vk: &VerificationKey, + public_input: &[FieldElement], + ) -> [FieldElement; 5] + where + F: IsField, + CS: IsCommitmentScheme, + CS::Commitment: Serializable, + FieldElement: ByteConversion, + { + let mut transcript = new_strong_fiat_shamir_transcript::(vk, public_input); + + transcript.append(&p.a_1.serialize()); + transcript.append(&p.b_1.serialize()); + transcript.append(&p.c_1.serialize()); + let beta = FieldElement::from_bytes_be(&transcript.challenge()).unwrap(); + let gamma = FieldElement::from_bytes_be(&transcript.challenge()).unwrap(); + + transcript.append(&p.z_1.serialize()); + let alpha = FieldElement::from_bytes_be(&transcript.challenge()).unwrap(); + + transcript.append(&p.t_lo_1.serialize()); + transcript.append(&p.t_mid_1.serialize()); + transcript.append(&p.t_hi_1.serialize()); + let zeta = 
FieldElement::from_bytes_be(&transcript.challenge()).unwrap(); + + transcript.append(&p.a_zeta.to_bytes_be()); + transcript.append(&p.b_zeta.to_bytes_be()); + transcript.append(&p.c_zeta.to_bytes_be()); + transcript.append(&p.s1_zeta.to_bytes_be()); + transcript.append(&p.s2_zeta.to_bytes_be()); + transcript.append(&p.z_zeta_omega.to_bytes_be()); + let upsilon = FieldElement::from_bytes_be(&transcript.challenge()).unwrap(); + + [beta, gamma, alpha, zeta, upsilon] + } + + pub fn verify( + &self, + p: &Proof, + public_input: &[FieldElement], + input: &CommonPreprocessedInput, + vk: &VerificationKey, + ) -> bool + where + F: IsPrimeField + IsFFTField, + CS: IsCommitmentScheme, + CS::Commitment: Serializable + IsGroup, + FieldElement: ByteConversion, + { + // TODO: First three steps are validations: belonging to main subgroup, belonging to prime field. + let [beta, gamma, alpha, zeta, upsilon] = self.compute_challenges(p, vk, public_input); + let zh_zeta = zeta.pow(input.n) - FieldElement::one(); + + let k1 = &input.k1; + let k2 = k1 * k1; + + let l1_zeta = (zeta.pow(input.n as u64) - FieldElement::one()) + / (&zeta - FieldElement::one()) + / FieldElement::from(input.n as u64); + + // Use the following equality to compute PI(ζ) + // without interpolating: + // Lᵢ₊₁ = ω Lᵢ (X − ωⁱ) / (X − ωⁱ⁺¹) + // Here Lᵢ is the i-th polynomial of the Lagrange basis. 
+ let p_pi_zeta = if public_input.is_empty() { + FieldElement::zero() + } else { + let mut p_pi_zeta = &l1_zeta * &public_input[0]; + let mut li_zeta = l1_zeta.clone(); + for (i, value) in public_input.iter().enumerate().skip(1) { + li_zeta = &input.omega + * &li_zeta + * ((&zeta - &input.domain[i - 1]) / (&zeta - &input.domain[i])); + p_pi_zeta = &p_pi_zeta + value * &li_zeta; + } + p_pi_zeta + }; + + let mut p_constant_zeta = &alpha + * &p.z_zeta_omega + * (&p.c_zeta + &gamma) + * (&p.a_zeta + &beta * &p.s1_zeta + &gamma) + * (&p.b_zeta + &beta * &p.s2_zeta + &gamma); + p_constant_zeta = p_constant_zeta - &l1_zeta * &alpha * α + p_constant_zeta += p_pi_zeta; + + let p_zeta = p_constant_zeta + &p.p_non_constant_zeta; + + let constraints_check = p_zeta - (&zh_zeta * &p.t_zeta) == FieldElement::zero(); + + // Compute commitment of partial evaluation of t (p = zh * t) + let partial_t_1 = p + .t_lo_1 + .operate_with( + &p.t_mid_1 + .operate_with_self(zeta.pow(input.n + 2).representative()), + ) + .operate_with( + &p.t_hi_1 + .operate_with_self(zeta.pow(2 * input.n + 4).representative()), + ); + + // Compute commitment of the non constant part of the linearization of p + // The first term corresponds to the gates constraints + let mut first_term = vk + .qm_1 + .operate_with_self((&p.a_zeta * &p.b_zeta).representative()); + first_term = first_term.operate_with(&vk.ql_1.operate_with_self(p.a_zeta.representative())); + first_term = first_term.operate_with(&vk.qr_1.operate_with_self(p.b_zeta.representative())); + first_term = first_term.operate_with(&vk.qo_1.operate_with_self(p.c_zeta.representative())); + first_term = first_term.operate_with(&vk.qc_1); + + // Second and third terms correspond to copy constraints + // + α*((l(ζ)+β*s1(ζ)+γ)*(r(ζ)+β*s2(ζ)+γ)*Z(μζ)*β*s3(X) - Z(X)*(l(ζ)+β*id1(ζ)+γ)*(r(ζ)+β*id2(ζ)+γ)*(o(ζ)+β*id3(ζ)+γ)) + let z_coefficient = -(&p.a_zeta + &beta * &zeta + &gamma) + * (&p.b_zeta + &beta * k1 * &zeta + &gamma) + * (&p.c_zeta + &beta * k2 * &zeta + 
&gamma); + let s3_coefficient = (&p.a_zeta + &beta * &p.s1_zeta + &gamma) + * (&p.b_zeta + &beta * &p.s2_zeta + &gamma) + * beta + * &p.z_zeta_omega; + let second_term = p + .z_1 + .operate_with_self(z_coefficient.representative()) + .operate_with(&vk.s3_1.operate_with_self(s3_coefficient.representative())) + .operate_with_self(alpha.representative()); + // α²*L₁(ζ)*Z(X) + let third_term = p + .z_1 + .operate_with_self((&alpha * &alpha * l1_zeta).representative()); + + let p_non_constant_1 = first_term + .operate_with(&second_term) + .operate_with(&third_term); + + let ys = [ + p.t_zeta.clone(), + p.p_non_constant_zeta.clone(), + p.a_zeta.clone(), + p.b_zeta.clone(), + p.c_zeta.clone(), + p.s1_zeta.clone(), + p.s2_zeta.clone(), + ]; + let commitments = [ + partial_t_1, + p_non_constant_1, + p.a_1.clone(), + p.b_1.clone(), + p.c_1.clone(), + vk.s1_1.clone(), + vk.s2_1.clone(), + ]; + let batch_openings_check = + self.commitment_scheme + .verify_batch(&zeta, &ys, &commitments, &p.w_zeta_1, &upsilon); + + let single_opening_check = self.commitment_scheme.verify( + &(zeta * &input.omega), + &p.z_zeta_omega, + &p.z_1, + &p.w_zeta_omega_1, + ); + + constraints_check && batch_openings_check && single_opening_check + } +} + +#[cfg(test)] +mod tests { + use lambdaworks_math::traits::Deserializable; + + use super::*; + + use crate::{ + prover::Prover, + setup::setup, + test_utils::circuit_1::{test_common_preprocessed_input_1, test_witness_1}, + test_utils::circuit_2::{test_common_preprocessed_input_2, test_witness_2}, + test_utils::circuit_json::common_preprocessed_input_from_json, + test_utils::utils::{test_srs, TestRandomFieldGenerator, KZG}, + }; + + #[test] + fn test_happy_path_for_circuit_1() { + // This is the circuit for x * e == y + let common_preprocessed_input = test_common_preprocessed_input_1(); + let srs = test_srs(common_preprocessed_input.n); + + // Public input + let x = FieldElement::from(4_u64); + let y = FieldElement::from(12_u64); + + // Private variable 
+ let e = FieldElement::from(3_u64); + + let public_input = vec![x.clone(), y]; + let witness = test_witness_1(x, e); + + let kzg = KZG::new(srs); + let verifying_key = setup(&common_preprocessed_input, &kzg); + let random_generator = TestRandomFieldGenerator {}; + + let prover = Prover::new(kzg.clone(), random_generator); + let proof = prover.prove( + &witness, + &public_input, + &common_preprocessed_input, + &verifying_key, + ); + + let verifier = Verifier::new(kzg); + assert!(verifier.verify( + &proof, + &public_input, + &common_preprocessed_input, + &verifying_key + )); + } + + #[test] + fn test_happy_path_for_circuit_2() { + // This is the circuit for x * e + 5 == y + let common_preprocessed_input = test_common_preprocessed_input_2(); + let srs = test_srs(common_preprocessed_input.n); + + // Public input + let x = FieldElement::from(2_u64); + let y = FieldElement::from(11_u64); + + // Private variable + let e = FieldElement::from(3_u64); + + let public_input = vec![x.clone(), y]; + let witness = test_witness_2(x, e); + + let kzg = KZG::new(srs); + let verifying_key = setup(&common_preprocessed_input, &kzg); + let random_generator = TestRandomFieldGenerator {}; + + let prover = Prover::new(kzg.clone(), random_generator); + let proof = prover.prove( + &witness, + &public_input, + &common_preprocessed_input, + &verifying_key, + ); + + let verifier = Verifier::new(kzg); + assert!(verifier.verify( + &proof, + &public_input, + &common_preprocessed_input, + &verifying_key + )); + } + + #[test] + fn test_happy_path_from_json() { + let (witness, common_preprocessed_input, public_input) = + common_preprocessed_input_from_json( + r#"{ + "N": 4, + "N_Padded": 4, + "Omega": "8d51ccce760304d0ec030002760300000001000000000000", + "Input": [ + "2", + "4" + ], + "Ql": [ + "73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000", + "73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000", + "0", + "1" + ], + "Qr": [ + "0", + "0", + "0", + 
"73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000" + ], + "Qm": [ + "0", + "0", + "1", + "0" + ], + "Qo": [ + "0", + "0", + "73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000000", + "0" + ], + "Qc": [ + "0", + "0", + "0", + "0" + ], + "A": [ + "2", + "4", + "2", + "4" + ], + "B": [ + "2", + "2", + "2", + "4" + ], + "C": [ + "2", + "2", + "4", + "2" + ], + "Permutation": [ + 11, + 3, + 2, + 1, + 0, + 4, + 5, + 10, + 6, + 8, + 7, + 9 + ] + }"#, + ); + let srs = test_srs(common_preprocessed_input.n); + + let kzg = KZG::new(srs); + let verifying_key = setup(&common_preprocessed_input, &kzg); + let random_generator = TestRandomFieldGenerator {}; + + let prover = Prover::new(kzg.clone(), random_generator); + let proof = prover.prove( + &witness, + &public_input, + &common_preprocessed_input, + &verifying_key, + ); + + let verifier = Verifier::new(kzg); + assert!(verifier.verify( + &proof, + &public_input, + &common_preprocessed_input, + &verifying_key + )); + } + + #[test] + fn test_serialize_proof() { + // This is the circuit for x * e == y + let common_preprocessed_input = test_common_preprocessed_input_1(); + let srs = test_srs(common_preprocessed_input.n); + + // Public input + let x = FieldElement::from(4_u64); + let y = FieldElement::from(12_u64); + + // Private variable + let e = FieldElement::from(3_u64); + + let public_input = vec![x.clone(), y]; + let witness = test_witness_1(x, e); + + let kzg = KZG::new(srs); + let verifying_key = setup(&common_preprocessed_input, &kzg); + let random_generator = TestRandomFieldGenerator {}; + + let prover = Prover::new(kzg.clone(), random_generator); + let proof = prover.prove( + &witness, + &public_input, + &common_preprocessed_input, + &verifying_key, + ); + + let serialized_proof = proof.serialize(); + let deserialized_proof = Proof::deserialize(&serialized_proof).unwrap(); + + let verifier = Verifier::new(kzg); + assert!(verifier.verify( + &deserialized_proof, + &public_input, + 
&common_preprocessed_input, + &verifying_key + )); + } +} From c8170d5ce7924bfec8e5b489d0be12e72b0b5cf4 Mon Sep 17 00:00:00 2001 From: Mauro Toscano <12560266+MauroToscano@users.noreply.github.com> Date: Tue, 5 Sep 2023 12:06:31 -0300 Subject: [PATCH 2/3] Remove cuda from CI (#547) * Remove cuda from CI * Fmt ci --- .github/workflows/ci.yaml | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index ee1b5bf6d..6d1792ffc 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -112,25 +112,3 @@ jobs: run: make clippy-metal - name: Run tests run: make test-metal - - test_nvidia: - name: Test (Ubuntu, NVIDIA GPU) - runs-on: [self-hosted, cuda] - env: - CARGO_TERM_COLOR: always - steps: - - uses: actions/checkout@v3 - - name: Rustup toolchain install - uses: dtolnay/rust-toolchain@stable - with: - toolchain: stable - components: clippy - - - name: Add CUDA binaries to PATH - run: | - echo /usr/local/cuda/bin >> $GITHUB_PATH - - - name: Run clippy - run: make clippy-cuda - - name: Run tests - run: cargo test -F cuda From 541323016339aca0575501fa9556dd8f32a7f654 Mon Sep 17 00:00:00 2001 From: 0xKitetsu <105421120+0xKitetsu-smdk@users.noreply.github.com> Date: Tue, 5 Sep 2023 21:47:20 +0530 Subject: [PATCH 3/3] Impl Bandersnatch curve (#513) * bandersnatch first version without tests * test impl * switching from SW to TE * set a hex to -5 over modulus * fix * fixed * tests pass * Added some tests * clippy linting * renamed field_extension to field in bandersnatch curve --------- Co-authored-by: MatteoMer Co-authored-by: Dragan Pilipovic Co-authored-by: Matteo <30910760+MatteoMer@users.noreply.github.com> Co-authored-by: Mauro Toscano <12560266+MauroToscano@users.noreply.github.com> --- .../edwards/curves/bandersnatch/curve.rs | 129 ++++++++++++++++++ .../edwards/curves/bandersnatch/field.rs | 28 ++++ .../edwards/curves/bandersnatch/mod.rs | 2 + 
math/src/elliptic_curve/edwards/curves/mod.rs | 1 + 4 files changed, 160 insertions(+) create mode 100644 math/src/elliptic_curve/edwards/curves/bandersnatch/curve.rs create mode 100644 math/src/elliptic_curve/edwards/curves/bandersnatch/field.rs create mode 100644 math/src/elliptic_curve/edwards/curves/bandersnatch/mod.rs diff --git a/math/src/elliptic_curve/edwards/curves/bandersnatch/curve.rs b/math/src/elliptic_curve/edwards/curves/bandersnatch/curve.rs new file mode 100644 index 000000000..85211713a --- /dev/null +++ b/math/src/elliptic_curve/edwards/curves/bandersnatch/curve.rs @@ -0,0 +1,129 @@ +pub use super::field::FqField; +use crate::elliptic_curve::edwards::point::EdwardsProjectivePoint; +use crate::elliptic_curve::traits::IsEllipticCurve; +use crate::{elliptic_curve::edwards::traits::IsEdwards, field::element::FieldElement}; + +pub type BaseBandersnatchFieldElement = FqField; + +#[derive(Clone, Debug)] +pub struct BandersnatchCurve; + +impl IsEllipticCurve for BandersnatchCurve { + type BaseField = BaseBandersnatchFieldElement; + type PointRepresentation = EdwardsProjectivePoint; + + // Values are from https://github.com/arkworks-rs/curves/blob/5a41d7f27a703a7ea9c48512a4148443ec6c747e/ed_on_bls12_381_bandersnatch/src/curves/mod.rs#L120 + // Converted to Hex + fn generator() -> Self::PointRepresentation { + Self::PointRepresentation::new([ + FieldElement::::new_base( + "29C132CC2C0B34C5743711777BBE42F32B79C022AD998465E1E71866A252AE18", + ), + FieldElement::::new_base( + "2A6C669EDA123E0F157D8B50BADCD586358CAD81EEE464605E3167B6CC974166", + ), + FieldElement::one(), + ]) + } +} + +impl IsEdwards for BandersnatchCurve { + fn a() -> FieldElement { + FieldElement::::new_base( + "73EDA753299D7D483339D80809A1D80553BDA402FFFE5BFEFFFFFFFEFFFFFFFC", + ) + } + + fn d() -> FieldElement { + FieldElement::::new_base( + "6389C12633C267CBC66E3BF86BE3B6D8CB66677177E54F92B369F2F5188D58E7", + ) + } +} + +#[cfg(test)] +mod tests { + + use super::*; + use crate::{ + 
cyclic_group::IsGroup, elliptic_curve::traits::EllipticCurveError, + field::element::FieldElement, unsigned_integer::element::U256, + }; + + #[allow(clippy::upper_case_acronyms)] + type FEE = FieldElement; + + fn point_1() -> EdwardsProjectivePoint { + let x = FEE::new_base("29C132CC2C0B34C5743711777BBE42F32B79C022AD998465E1E71866A252AE18"); + let y = FEE::new_base("2A6C669EDA123E0F157D8B50BADCD586358CAD81EEE464605E3167B6CC974166"); + + BandersnatchCurve::create_point_from_affine(x, y).unwrap() + } + + #[test] + fn test_scalar_mul() { + let g = BandersnatchCurve::generator(); + let result1 = g.operate_with_self(5u16); + + assert_eq!( + result1.x().clone(), + FEE::new_base("68CBECE0B8FB55450410CBC058928A567EED293D168FAEF44BFDE25F943AABE0") + ); + + let scalar = + U256::from_hex("1CFB69D4CA675F520CCE760202687600FF8F87007419047174FD06B52876E7E6") + .unwrap(); + let result2 = g.operate_with_self(scalar); + + assert_eq!( + result2.x().clone(), + FEE::new_base("68CBECE0B8FB55450410CBC058928A567EED293D168FAEF44BFDE25F943AABE0") + ); + } + + #[test] + fn test_create_valid_point_works() { + let p = BandersnatchCurve::generator(); + + assert_eq!(p, p.clone()); + } + + #[test] + fn create_valid_point_works() { + let p = point_1(); + assert_eq!( + *p.x(), + FEE::new_base("29C132CC2C0B34C5743711777BBE42F32B79C022AD998465E1E71866A252AE18") + ); + assert_eq!( + *p.y(), + FEE::new_base("2A6C669EDA123E0F157D8B50BADCD586358CAD81EEE464605E3167B6CC974166") + ); + assert_eq!(*p.z(), FEE::new_base("1")); + } + + #[test] + fn create_invalid_points_panics() { + assert_eq!( + BandersnatchCurve::create_point_from_affine(FEE::from(1), FEE::from(1)).unwrap_err(), + EllipticCurveError::InvalidPoint + ) + } + + #[test] + fn equality_works() { + let g = BandersnatchCurve::generator(); + let g2 = g.operate_with(&g); + assert_ne!(&g2, &g); + assert_eq!(&g, &g); + } + + #[test] + fn operate_with_self_works_1() { + let g = BandersnatchCurve::generator(); + assert_eq!( + 
g.operate_with(&g).operate_with(&g), + g.operate_with_self(3_u16) + ); + } +} diff --git a/math/src/elliptic_curve/edwards/curves/bandersnatch/field.rs b/math/src/elliptic_curve/edwards/curves/bandersnatch/field.rs new file mode 100644 index 000000000..1634df248 --- /dev/null +++ b/math/src/elliptic_curve/edwards/curves/bandersnatch/field.rs @@ -0,0 +1,28 @@ +//! Base field of bandersnatch -- which is also the scalar field of BLS12-381 curve. + +use crate::{ + field::{ + element::FieldElement, + fields::montgomery_backed_prime_fields::{IsModulus, MontgomeryBackendPrimeField}, + }, + unsigned_integer::element::U256, +}; + +pub const BANDERSNATCH_PRIME_FIELD_ORDER: U256 = + U256::from_hex_unchecked("73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001"); + +#[derive(Clone, Debug)] +pub struct FqConfig; + +impl IsModulus for FqConfig { + const MODULUS: U256 = BANDERSNATCH_PRIME_FIELD_ORDER; +} + +pub type FqField = MontgomeryBackendPrimeField; + +impl FieldElement { + pub fn new_base(a_hex: &str) -> Self { + Self::new(U256::from(a_hex)) + } +} +pub type FqElement = FieldElement; diff --git a/math/src/elliptic_curve/edwards/curves/bandersnatch/mod.rs b/math/src/elliptic_curve/edwards/curves/bandersnatch/mod.rs new file mode 100644 index 000000000..f773c8d78 --- /dev/null +++ b/math/src/elliptic_curve/edwards/curves/bandersnatch/mod.rs @@ -0,0 +1,2 @@ +pub mod curve; +pub mod field; diff --git a/math/src/elliptic_curve/edwards/curves/mod.rs b/math/src/elliptic_curve/edwards/curves/mod.rs index 55679082c..34aec6d60 100644 --- a/math/src/elliptic_curve/edwards/curves/mod.rs +++ b/math/src/elliptic_curve/edwards/curves/mod.rs @@ -1 +1,2 @@ +pub mod bandersnatch; pub mod tiny_jub_jub;