diff --git a/CHANGELOG.md b/CHANGELOG.md index a0bda13c4..0ea011d4c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,30 @@ ## Pending -- (`ark-poly`) Reduce the number of field multiplications performed by `SparseMultilinearExtension::evaluate` and `DenseMultilinearExtension::evaluate` +### Breaking changes + +- [\#577](https://github.com/arkworks-rs/algebra/pull/577) (`ark-ff`, `ark-ec`) Add `AdditiveGroup`, a trait for additive groups (equipped with scalar field). +- [\#593](https://github.com/arkworks-rs/algebra/pull/593) (`ark-ec`) Change `AffineRepr::xy()` to return owned values. + +### Features + +### Improvements + +### Bugfixes + +## v0.4.2 + +### Breaking changes + +### Features + +### Improvements + +### Bugfixes + +- [\#610](https://github.com/arkworks-rs/algebra/pull/610) (`ark-ec`) Fix panic in `final_exponentiation` step for MNT4/6 curves if inverse does not exist. + +## v0.4.1 ### Breaking changes @@ -10,6 +33,8 @@ ### Improvements +- [\#603](https://github.com/arkworks-rs/algebra/pull/603) (`ark-poly`) Reduce the number of field multiplications performed by `SparseMultilinearExtension::evaluate` and `DenseMultilinearExtension::evaluate` + ### Bugfixes ## v0.4.0 diff --git a/README.md b/README.md index a81196d4e..b7f2c3010 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,7 @@ This repository contains several Rust crates: * [`ark-poly`](poly): Interfaces for univariate, multivariate, and multilinear polynomials, and FFTs over finite fields * [`ark-serialize`](serialize): Efficient interfaces for serialization and point compression for finite fields and elliptic curves -In addition, the [`curves`](https://github.com/arkworks-rs/curves) repository contains concrete implementations of popular elliptic curves; see [here](https://github.com/arkworks-rs/curves/README.md) for details. 
+In addition, the [`curves`](https://github.com/arkworks-rs/curves) repository contains concrete implementations of popular elliptic curves; see [here](https://github.com/arkworks-rs/curves/blob/master/README.md) for details. ## Build guide diff --git a/bench-templates/Cargo.toml b/bench-templates/Cargo.toml index fa60dd7d5..914692d7f 100644 --- a/bench-templates/Cargo.toml +++ b/bench-templates/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ark-algebra-bench-templates" -version = "0.4.0" +version = "0.4.2" authors = [ "arkworks contributors" ] description = "A benchmark library for finite fields and elliptic curves" homepage = "https://arkworks.rs" @@ -18,9 +18,9 @@ rust-version = "1.63" [dependencies] criterion = { version = "0.4.0", features = [ "html_reports" ] } ark-std = { version = "0.4.0", default-features = false } -ark-ec = { version = "0.4.0", path = "../ec", default-features = false } -ark-ff = { version = "0.4.0", path = "../ff", default-features = false } -ark-serialize = { version = "0.4.0", path = "../serialize", default-features = false } +ark-ec = { version = "0.4.2", path = "../ec", default-features = false } +ark-ff = { version = "0.4.2", path = "../ff", default-features = false } +ark-serialize = { version = "0.4.2", path = "../serialize", default-features = false } paste = { version = "1.0" } [features] diff --git a/bench-templates/src/macros/ec.rs b/bench-templates/src/macros/ec.rs index d561fcfc9..d8abde7f2 100644 --- a/bench-templates/src/macros/ec.rs +++ b/bench-templates/src/macros/ec.rs @@ -3,10 +3,10 @@ macro_rules! ec_bench { ($curve_name:expr, $Group:ident) => { $crate::paste! { mod [<$Group:lower>] { - use ark_ec::Group; + use ark_ec::PrimeGroup; use super::*; - type Scalar = <$Group as Group>::ScalarField; + type Scalar = <$Group as PrimeGroup>::ScalarField; fn rand(c: &mut $crate::criterion::Criterion) { let name = format!("{}::{}", $curve_name, stringify!($Group)); use ark_std::UniformRand; @@ -18,11 +18,12 @@ macro_rules! 
ec_bench { } fn arithmetic(c: &mut $crate::criterion::Criterion) { - use ark_ec::{CurveGroup, Group}; + use ark_ff::AdditiveGroup; + use ark_ec::{CurveGroup, PrimeGroup}; use ark_std::UniformRand; let name = format!("{}::{}", $curve_name, stringify!($Group)); - type Scalar = <$Group as Group>::ScalarField; + type Scalar = <$Group as PrimeGroup>::ScalarField; const SAMPLES: usize = 1000; let mut rng = ark_std::test_rng(); let mut arithmetic = @@ -214,8 +215,10 @@ macro_rules! ec_bench { let name = format!("{}::{}", $curve_name, stringify!($Group)); let mut rng = ark_std::test_rng(); - let g = <$Group>::rand(&mut rng).into_affine(); - let v: Vec<_> = (0..SAMPLES).map(|_| g).collect(); + let v: Vec<_> = (0..SAMPLES) + .map(|_| <$Group>::rand(&mut rng)) + .collect(); + let v = <$Group>::normalize_batch(&v); let scalars: Vec<_> = (0..SAMPLES) .map(|_| Scalar::rand(&mut rng).into_bigint()) .collect(); diff --git a/bench-templates/src/macros/field.rs b/bench-templates/src/macros/field.rs index 585b5000a..03391f51b 100644 --- a/bench-templates/src/macros/field.rs +++ b/bench-templates/src/macros/field.rs @@ -63,6 +63,8 @@ macro_rules! f_bench { macro_rules! 
field_common { ($bench_group_name:expr, $F:ident) => { fn arithmetic(c: &mut $crate::criterion::Criterion) { + use ark_ff::AdditiveGroup; + let name = format!("{}::{}", $bench_group_name, stringify!($F)); const SAMPLES: usize = 1000; let mut rng = ark_std::test_rng(); diff --git a/ec/Cargo.toml b/ec/Cargo.toml index a820e760a..039d33fea 100644 --- a/ec/Cargo.toml +++ b/ec/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ark-ec" -version = "0.4.0" +version = "0.4.2" authors = [ "arkworks contributors" ] description = "A library for elliptic curves and pairings" homepage = "https://arkworks.rs" @@ -8,25 +8,26 @@ repository = "https://github.com/arkworks-rs/algebra" documentation = "https://docs.rs/ark-ec/" keywords = ["cryptography", "elliptic-curves", "pairing"] categories = ["cryptography"] -include = ["Cargo.toml", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] +include = ["Cargo.toml", "src", "doc", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] license = "MIT/Apache-2.0" edition = "2021" rust-version = "1.63" [dependencies] ark-std = { version = "0.4.0", default-features = false } -ark-serialize = { version = "0.4.0", path = "../serialize", default-features = false } -ark-ff = { version = "0.4.0", path = "../ff", default-features = false } -ark-poly = { version = "0.4.0", path = "../poly", default-features = false } +ark-serialize = { version = "0.4.2", path = "../serialize", default-features = false } +ark-ff = { version = "0.4.2", path = "../ff", default-features = false } +ark-poly = { version = "0.4.2", path = "../poly", default-features = false } derivative = { version = "2", features = ["use_core"] } num-traits = { version = "0.2", default-features = false } rayon = { version = "1", optional = true } zeroize = { version = "1", default-features = false, features = ["zeroize_derive"] } hashbrown = "0.13.1" itertools = { version = "0.10", default-features = false } +num-bigint = "0.4.3" [dev-dependencies] -ark-test-curves = { version = "0.4.0", path = 
"../test-curves", default-features = false, features = ["bls12_381_curve"] } +ark-test-curves = { version = "0.4.2", path = "../test-curves", default-features = false, features = ["bls12_381_curve"] } sha2 = { version = "0.10", default-features = false } libtest-mimic = "0.6.0" serde = "1.0.110" diff --git a/ec/README.md b/ec/README.md index eec8bcfd1..abbdab761 100644 --- a/ec/README.md +++ b/ec/README.md @@ -7,16 +7,16 @@

`ark-ec` defines traits and algorithms for working with different kinds of additive groups, with a focus on groups arising from elliptic curves. It further provides concrete instantiations of these traits for various elliptic curve models, including popular families of pairing-friendly curves such as the BLS12 family of curves. -Implementations of particular curves using these curve models can be found in [`arkworks-rs/curves`](https://github.com/arkworks-rs/curves/README.md). +Implementations of particular curves using these curve models can be found in [`arkworks-rs/curves`](https://github.com/arkworks-rs/curves/blob/master/README.md). ## Usage ### The `Group` trait -Many cryptographic protocols use as core building-blocks prime-order groups. The [`Group`](https://github.com/arkworks-rs/algebra/blob/master/ec/src/lib.rs) trait is an abstraction that represents elements of such abelian prime-order groups. It provides methods for performing common operations on group elements: +Many cryptographic protocols use as core building-blocks prime-order groups. The [`PrimeGroup`](https://github.com/arkworks-rs/algebra/blob/master/ec/src/lib.rs) trait is an abstraction that represents elements of such abelian prime-order groups. It provides methods for performing common operations on group elements: ```rust -use ark_ec::Group; +use ark_ec::{AdditiveGroup, PrimeGroup}; use ark_ff::{PrimeField, Field}; // We'll use the BLS12-381 G1 curve for this example. // This group has a prime order `r`, and is associated with a prime field `Fr`. @@ -49,12 +49,12 @@ assert_eq!(f, c); ## Scalar multiplication -While the `Group` trait already produces scalar multiplication routines, in many cases one can take advantage of +While the `PrimeGroup` trait already produces scalar multiplication routines, in many cases one can take advantage of the group structure to perform scalar multiplication more efficiently. 
To allow such specialization, `ark-ec` provides the `ScalarMul` and `VariableBaseMSM` traits. The latter trait computes an "inner product" between a vector of scalars `s` and a vector of group elements `g`. That is, it computes `s.iter().zip(g).map(|(s, g)| g * s).sum()`. ```rust -use ark_ec::{Group, VariableBaseMSM}; +use ark_ec::{PrimeGroup, VariableBaseMSM}; use ark_ff::{PrimeField, Field}; // We'll use the BLS12-381 G1 curve for this example. // This group has a prime order `r`, and is associated with a prime field `Fr`. @@ -72,7 +72,7 @@ let s2 = ScalarField::rand(&mut rng); // Note that we're using the `GAffine` type here, as opposed to `G`. // This is because MSMs are more efficient when the group elements are in affine form. (See below for why.) // -// The `VariableBaseMSM` trait allows specializing the input group element representation to allow +// The `VariableBaseMSM` trait allows specializing the input group element representation to allow // for more efficient implementations. let r = G::msm(&[a, b], &[s1, s2]).unwrap(); assert_eq!(r, a * s1 + b * s2); @@ -90,7 +90,7 @@ but is slower for most arithmetic operations. Let's explore how and when to use these: ```rust -use ark_ec::{AffineRepr, Group, CurveGroup, VariableBaseMSM}; +use ark_ec::{AdditiveGroup, AffineRepr, PrimeGroup, CurveGroup, VariableBaseMSM}; use ark_ff::{PrimeField, Field}; use ark_test_curves::bls12_381::{G1Projective as G, G1Affine as GAffine, Fr as ScalarField}; use ark_std::{Zero, UniformRand}; @@ -105,9 +105,9 @@ assert_eq!(a_aff, a); // We can also convert back to the `CurveGroup` representation: assert_eq!(a, a_aff.into_group()); -// As a general rule, most group operations are slower when elements -// are represented as `AffineRepr`. However, adding an `AffineRepr` -// point to a `CurveGroup` one is usually slightly more efficient than +// As a general rule, most group operations are slower when elements +// are represented as `AffineRepr`. 
However, adding an `AffineRepr` +// point to a `CurveGroup` one is usually slightly more efficient than // adding two `CurveGroup` points. let d = a + a_aff; assert_eq!(d, a.double()); diff --git a/ec/src/hashing/curve_maps/wb/mod.rs b/ec/src/hashing/curve_maps/wb/mod.rs index 4e2644009..10ddbb0f7 100644 --- a/ec/src/hashing/curve_maps/wb/mod.rs +++ b/ec/src/hashing/curve_maps/wb/mod.rs @@ -53,10 +53,10 @@ where let y_num = DensePolynomial::from_coefficients_slice(self.y_map_numerator); let y_den = DensePolynomial::from_coefficients_slice(self.y_map_denominator); - let mut v: [BaseField; 2] = [x_den.evaluate(x), y_den.evaluate(x)]; + let mut v: [BaseField; 2] = [x_den.evaluate(&x), y_den.evaluate(&x)]; batch_inversion(&mut v); - let img_x = x_num.evaluate(x) * v[0]; - let img_y = (y_num.evaluate(x) * y) * v[1]; + let img_x = x_num.evaluate(&x) * v[0]; + let img_y = (y_num.evaluate(&x) * y) * v[1]; Ok(Affine::::new_unchecked(img_x, img_y)) }, None => Ok(Affine::identity()), diff --git a/ec/src/hashing/map_to_curve_hasher.rs b/ec/src/hashing/map_to_curve_hasher.rs index 59fe4bbf5..4cdfb017f 100644 --- a/ec/src/hashing/map_to_curve_hasher.rs +++ b/ec/src/hashing/map_to_curve_hasher.rs @@ -42,10 +42,10 @@ where }) } - // Produce a hash of the message, using the hash to field and map to curve - // traits. This uses the IETF hash to curve's specification for Random - // oracle encoding (hash_to_curve) defined by combining these components. - // See https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-09#section-3 + /// Produce a hash of the message, using the hash to field and map to curve + /// traits. This uses the IETF hash to curve's specification for Random + /// oracle encoding (hash_to_curve) defined by combining these components. 
+ /// See fn hash(&self, msg: &[u8]) -> Result { // IETF spec of hash_to_curve, from hash_to_field and map_to_curve // sub-components diff --git a/ec/src/lib.rs b/ec/src/lib.rs index daf169566..423680e0b 100644 --- a/ec/src/lib.rs +++ b/ec/src/lib.rs @@ -28,13 +28,14 @@ use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use ark_std::{ fmt::{Debug, Display}, hash::Hash, - ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign}, + ops::{Add, AddAssign, Mul, MulAssign}, vec::Vec, }; -use num_traits::Zero; pub use scalar_mul::{variable_base::VariableBaseMSM, ScalarMul}; use zeroize::Zeroize; +pub use ark_ff::AdditiveGroup; + pub mod models; pub use self::models::*; @@ -47,39 +48,7 @@ pub mod hashing; pub mod pairing; /// Represents (elements of) a group of prime order `r`. -pub trait Group: - Eq - + 'static - + Sized - + CanonicalSerialize - + CanonicalDeserialize - + Copy - + Clone - + Default - + Send - + Sync - + Hash - + Debug - + Display - + UniformRand - + Zeroize - + Zero - + Neg - + Add - + Sub - + Mul<::ScalarField, Output = Self> - + AddAssign - + SubAssign - + MulAssign<::ScalarField> - + for<'a> Add<&'a Self, Output = Self> - + for<'a> Sub<&'a Self, Output = Self> - + for<'a> Mul<&'a ::ScalarField, Output = Self> - + for<'a> AddAssign<&'a Self> - + for<'a> SubAssign<&'a Self> - + for<'a> MulAssign<&'a ::ScalarField> - + core::iter::Sum - + for<'a> core::iter::Sum<&'a Self> -{ +pub trait PrimeGroup: AdditiveGroup { /// The scalar field `F_r`, where `r` is the order of this group. type ScalarField: PrimeField; @@ -87,17 +56,6 @@ pub trait Group: #[must_use] fn generator() -> Self; - /// Doubles `self`. - #[must_use] - fn double(&self) -> Self { - let mut copy = *self; - copy.double_in_place(); - copy - } - - /// Double `self` in place. - fn double_in_place(&mut self) -> &mut Self; - /// Performs scalar multiplication of this element. 
fn mul_bigint(&self, other: impl AsRef<[u64]>) -> Self; @@ -121,7 +79,7 @@ pub trait Group: /// /// The point is guaranteed to be in the correct prime order subgroup. pub trait CurveGroup: - Group + PrimeGroup + Add + AddAssign // + for<'a> Add<&'a Self::Affine, Output = Self> @@ -205,15 +163,15 @@ pub trait AffineRepr: + MulAssign; // needed due to https://github.com/rust-lang/rust/issues/69640 /// Returns the x and y coordinates of this affine point. - fn xy(&self) -> Option<(&Self::BaseField, &Self::BaseField)>; + fn xy(&self) -> Option<(Self::BaseField, Self::BaseField)>; /// Returns the x coordinate of this affine point. - fn x(&self) -> Option<&Self::BaseField> { + fn x(&self) -> Option { self.xy().map(|(x, _)| x) } /// Returns the y coordinate of this affine point. - fn y(&self) -> Option<&Self::BaseField> { + fn y(&self) -> Option { self.xy().map(|(_, y)| y) } @@ -278,7 +236,7 @@ where Self::E2: MulAssign<::BaseField>, { type E1: CurveGroup< - BaseField = ::ScalarField, + BaseField = ::ScalarField, ScalarField = ::BaseField, >; type E2: CurveGroup; @@ -289,12 +247,12 @@ pub trait PairingFriendlyCycle: CurveCycle { type Engine1: pairing::Pairing< G1 = Self::E1, G1Affine = ::Affine, - ScalarField = ::ScalarField, + ScalarField = ::ScalarField, >; type Engine2: pairing::Pairing< G1 = Self::E2, G1Affine = ::Affine, - ScalarField = ::ScalarField, + ScalarField = ::ScalarField, >; } diff --git a/ec/src/models/bls12/g2.rs b/ec/src/models/bls12/g2.rs index 661486351..629ddcda4 100644 --- a/ec/src/models/bls12/g2.rs +++ b/ec/src/models/bls12/g2.rs @@ -1,4 +1,4 @@ -use ark_ff::{BitIteratorBE, Field, Fp2}; +use ark_ff::{AdditiveGroup, BitIteratorBE, Field, Fp2}; use ark_serialize::*; use ark_std::{vec::Vec, One}; @@ -20,8 +20,8 @@ pub type G2Projective

= Projective<

::G2Config>; Eq(bound = "P: Bls12Config") )] pub struct G2Prepared { - // Stores the coefficients of the line evaluations as calculated in - // https://eprint.iacr.org/2013/722.pdf + /// Stores the coefficients of the line evaluations as calculated in + /// pub ell_coeffs: Vec>, pub infinity: bool, } @@ -57,7 +57,7 @@ impl From> for G2Prepared

{ ell_coeffs: vec![], infinity: true, }; - q.xy().map_or(zero, |(&q_x, &q_y)| { + q.xy().map_or(zero, |(q_x, q_y)| { let mut ell_coeffs = vec![]; let mut r = G2HomProjective::

{ x: q_x, @@ -133,7 +133,7 @@ impl G2HomProjective

{ } fn add_in_place(&mut self, q: &G2Affine

) -> EllCoeff

{ - let (&qx, &qy) = q.xy().unwrap(); + let (qx, qy) = q.xy().unwrap(); // Formula for line function when working with // homogeneous projective coordinates. let theta = self.y - &(qy * &self.z); diff --git a/ec/src/models/bls12/mod.rs b/ec/src/models/bls12/mod.rs index ca8a66351..f5275f697 100644 --- a/ec/src/models/bls12/mod.rs +++ b/ec/src/models/bls12/mod.rs @@ -178,13 +178,13 @@ impl Bls12

{ match P::TWIST_TYPE { TwistType::M => { - c2.mul_assign_by_fp(py); - c1.mul_assign_by_fp(px); + c2.mul_assign_by_fp(&py); + c1.mul_assign_by_fp(&px); f.mul_by_014(&c0, &c1, &c2); }, TwistType::D => { - c0.mul_assign_by_fp(py); - c1.mul_assign_by_fp(px); + c0.mul_assign_by_fp(&py); + c1.mul_assign_by_fp(&px); f.mul_by_034(&c0, &c1, &c2); }, } diff --git a/ec/src/models/bn/g2.rs b/ec/src/models/bn/g2.rs index 231b74402..281064496 100644 --- a/ec/src/models/bn/g2.rs +++ b/ec/src/models/bn/g2.rs @@ -1,4 +1,7 @@ -use ark_ff::fields::{Field, Fp2}; +use ark_ff::{ + fields::{Field, Fp2}, + AdditiveGroup, +}; use ark_serialize::*; use ark_std::vec::Vec; use num_traits::One; @@ -21,8 +24,8 @@ pub type G2Projective

= Projective<

::G2Config>; Eq(bound = "P: BnConfig") )] pub struct G2Prepared { - // Stores the coefficients of the line evaluations as calculated in - // https://eprint.iacr.org/2013/722.pdf + /// Stores the coefficients of the line evaluations as calculated in + /// pub ell_coeffs: Vec>, pub infinity: bool, } diff --git a/ec/src/models/bw6/g2.rs b/ec/src/models/bw6/g2.rs index 02430bead..7614e2c5b 100644 --- a/ec/src/models/bw6/g2.rs +++ b/ec/src/models/bw6/g2.rs @@ -1,4 +1,4 @@ -use ark_ff::{BitIteratorBE, Field}; +use ark_ff::{AdditiveGroup, BitIteratorBE, Field}; use ark_serialize::*; use ark_std::vec::Vec; use num_traits::One; @@ -21,8 +21,8 @@ pub type G2Projective

= Projective<

::G2Config>; Eq(bound = "P: BW6Config") )] pub struct G2Prepared { - // Stores the coefficients of the line evaluations as calculated in - // https://eprint.iacr.org/2013/722.pdf + /// Stores the coefficients of the line evaluations as calculated in + /// pub ell_coeffs_1: Vec<(P::Fp, P::Fp, P::Fp)>, pub ell_coeffs_2: Vec<(P::Fp, P::Fp, P::Fp)>, pub infinity: bool, @@ -127,7 +127,8 @@ impl G2Prepared

{ impl G2HomProjective

{ fn double_in_place(&mut self) -> (P::Fp, P::Fp, P::Fp) { // Formula for line function when working with - // homogeneous projective coordinates, as described in https://eprint.iacr.org/2013/722.pdf. + // homogeneous projective coordinates, as described in + // . let a = self.x * &self.y; let b = self.y.square(); diff --git a/ec/src/models/mnt4/mod.rs b/ec/src/models/mnt4/mod.rs index dc292ba73..c9a618c9c 100644 --- a/ec/src/models/mnt4/mod.rs +++ b/ec/src/models/mnt4/mod.rs @@ -5,7 +5,7 @@ use crate::{ use ark_ff::{ fp2::{Fp2, Fp2Config}, fp4::{Fp4, Fp4Config}, - CyclotomicMultSubgroup, Field, PrimeField, + AdditiveGroup, CyclotomicMultSubgroup, Field, PrimeField, }; use itertools::Itertools; use num_traits::{One, Zero}; @@ -60,7 +60,7 @@ pub trait MNT4Config: 'static + Sized { fn final_exponentiation(f: MillerLoopOutput>) -> Option>> { let value = f.0; - let value_inv = value.inverse().unwrap(); + let value_inv = value.inverse()?; let value_to_first_chunk = MNT4::::final_exponentiation_first_chunk(&value, &value_inv); let value_inv_to_first_chunk = diff --git a/ec/src/models/mnt6/g2.rs b/ec/src/models/mnt6/g2.rs index 0a79a270a..cf2639c90 100644 --- a/ec/src/models/mnt6/g2.rs +++ b/ec/src/models/mnt6/g2.rs @@ -1,5 +1,3 @@ -use core::ops::Neg; - use crate::{ mnt6::MNT6Config, models::mnt6::MNT6, @@ -8,7 +6,7 @@ use crate::{ }; use ark_ff::fields::{Field, Fp3}; use ark_serialize::*; -use ark_std::vec::Vec; +use ark_std::{ops::Neg, vec::Vec}; use num_traits::One; pub type G2Affine

= Affine<

::G2Config>; diff --git a/ec/src/models/mnt6/mod.rs b/ec/src/models/mnt6/mod.rs index 53d4f2ca8..47b0f70c5 100644 --- a/ec/src/models/mnt6/mod.rs +++ b/ec/src/models/mnt6/mod.rs @@ -5,7 +5,7 @@ use crate::{ use ark_ff::{ fp3::{Fp3, Fp3Config}, fp6_2over3::{Fp6, Fp6Config}, - CyclotomicMultSubgroup, Field, PrimeField, + AdditiveGroup, CyclotomicMultSubgroup, Field, PrimeField, }; use itertools::Itertools; use num_traits::{One, Zero}; @@ -61,7 +61,7 @@ pub trait MNT6Config: 'static + Sized { fn final_exponentiation(f: MillerLoopOutput>) -> Option>> { let value = f.0; - let value_inv = value.inverse().unwrap(); + let value_inv = value.inverse()?; let value_to_first_chunk = MNT6::::final_exponentiation_first_chunk(&value, &value_inv); let value_inv_to_first_chunk = diff --git a/ec/src/models/short_weierstrass/affine.rs b/ec/src/models/short_weierstrass/affine.rs index b1340de3a..1d1d9a04d 100644 --- a/ec/src/models/short_weierstrass/affine.rs +++ b/ec/src/models/short_weierstrass/affine.rs @@ -14,7 +14,7 @@ use ark_std::{ One, Zero, }; -use ark_ff::{fields::Field, PrimeField, ToConstraintField, UniformRand}; +use ark_ff::{fields::Field, AdditiveGroup, PrimeField, ToConstraintField, UniformRand}; use zeroize::Zeroize; @@ -205,8 +205,8 @@ impl AffineRepr for Affine

{ type ScalarField = P::ScalarField; type Group = Projective

; - fn xy(&self) -> Option<(&Self::BaseField, &Self::BaseField)> { - (!self.infinity).then(|| (&self.x, &self.y)) + fn xy(&self) -> Option<(Self::BaseField, Self::BaseField)> { + (!self.infinity).then(|| (self.x, self.y)) } #[inline] diff --git a/ec/src/models/short_weierstrass/group.rs b/ec/src/models/short_weierstrass/group.rs index ad043f892..58e5026f7 100644 --- a/ec/src/models/short_weierstrass/group.rs +++ b/ec/src/models/short_weierstrass/group.rs @@ -15,7 +15,7 @@ use ark_std::{ One, Zero, }; -use ark_ff::{fields::Field, PrimeField, ToConstraintField, UniformRand}; +use ark_ff::{fields::Field, AdditiveGroup, PrimeField, ToConstraintField, UniformRand}; use zeroize::Zeroize; @@ -25,7 +25,7 @@ use rayon::prelude::*; use super::{Affine, SWCurveConfig}; use crate::{ scalar_mul::{variable_base::VariableBaseMSM, ScalarMul}, - AffineRepr, CurveGroup, Group, + AffineRepr, CurveGroup, PrimeGroup, }; /// Jacobian coordinates for a point on an elliptic curve in short Weierstrass @@ -160,13 +160,11 @@ impl Zero for Projective

{ } } -impl Group for Projective

{ - type ScalarField = P::ScalarField; +impl AdditiveGroup for Projective

{ + type Scalar = P::ScalarField; - #[inline] - fn generator() -> Self { - Affine::generator().into() - } + const ZERO: Self = + Self::new_unchecked(P::BaseField::ONE, P::BaseField::ONE, P::BaseField::ZERO); /// Sets `self = 2 * self`. Note that Jacobian formulae are incomplete, and /// so doubling cannot be computed as `self + self`. Instead, this @@ -273,6 +271,15 @@ impl Group for Projective

{ self } } +} + +impl PrimeGroup for Projective

{ + type ScalarField = P::ScalarField; + + #[inline] + fn generator() -> Self { + Affine::generator().into() + } #[inline] fn mul_bigint(&self, other: impl AsRef<[u64]>) -> Self { @@ -330,10 +337,10 @@ impl Neg for Projective

{ } impl>> AddAssign for Projective

{ - /// Using http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl + /// Using fn add_assign(&mut self, other: T) { let other = other.borrow(); - if let Some((&other_x, &other_y)) = other.xy() { + if let Some((other_x, other_y)) = other.xy() { if self.is_zero() { self.x = other_x; self.y = other_y; @@ -564,7 +571,7 @@ impl> Mul for Projective

{ impl From> for Projective

{ #[inline] fn from(p: Affine

) -> Projective

{ - p.xy().map_or(Projective::zero(), |(&x, &y)| Self { + p.xy().map_or(Projective::zero(), |(x, y)| Self { x, y, z: P::BaseField::one(), diff --git a/ec/src/models/short_weierstrass/mod.rs b/ec/src/models/short_weierstrass/mod.rs index 965cbb83f..20bf7029c 100644 --- a/ec/src/models/short_weierstrass/mod.rs +++ b/ec/src/models/short_weierstrass/mod.rs @@ -4,10 +4,14 @@ use ark_serialize::{ }; use ark_std::io::{Read, Write}; -use ark_ff::fields::Field; - -use crate::{scalar_mul::variable_base::VariableBaseMSM, AffineRepr, Group}; +use ark_ff::{fields::Field, AdditiveGroup}; +use crate::{ + scalar_mul::{ + sw_double_and_add_affine, sw_double_and_add_projective, variable_base::VariableBaseMSM, + }, + AffineRepr, +}; use num_traits::Zero; mod affine; @@ -61,7 +65,7 @@ pub trait SWCurveConfig: super::CurveConfig { /// Check if the provided curve point is in the prime-order subgroup. /// /// The default implementation multiplies `item` by the order `r` of the - /// prime-order subgroup, and checks if the result is one. + /// prime-order subgroup, and checks if the result is zero. /// Implementors can choose to override this default impl /// if the given curve has faster methods /// for performing this check (for example, via leveraging curve @@ -80,29 +84,13 @@ pub trait SWCurveConfig: super::CurveConfig { /// Default implementation of group multiplication for projective /// coordinates fn mul_projective(base: &Projective, scalar: &[u64]) -> Projective { - let mut res = Projective::::zero(); - for b in ark_ff::BitIteratorBE::without_leading_zeros(scalar) { - res.double_in_place(); - if b { - res += base; - } - } - - res + sw_double_and_add_projective(base, scalar) } /// Default implementation of group multiplication for affine /// coordinates. 
fn mul_affine(base: &Affine, scalar: &[u64]) -> Projective { - let mut res = Projective::::zero(); - for b in ark_ff::BitIteratorBE::without_leading_zeros(scalar) { - res.double_in_place(); - if b { - res += base - } - } - - res + sw_double_and_add_affine(base, scalar) } /// Default implementation for multi scalar multiplication diff --git a/ec/src/models/short_weierstrass/serialization_flags.rs b/ec/src/models/short_weierstrass/serialization_flags.rs index 13eb1c8de..c2c9c0b41 100644 --- a/ec/src/models/short_weierstrass/serialization_flags.rs +++ b/ec/src/models/short_weierstrass/serialization_flags.rs @@ -5,11 +5,11 @@ use ark_serialize::Flags; /// The default flags (empty) should not change the binary representation. #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub enum SWFlags { - /// Represents a point with positive y-coordinate by setting the MSB to 1. + /// Represents a point with positive y-coordinate by setting all bits to 0. YIsPositive = 0, /// Represents the point at infinity by setting the setting the last-but-one bit to 1. PointAtInfinity = 1 << 6, - /// Represents a point with negative y-coordinate by setting all bits to 0. + /// Represents a point with negative y-coordinate by setting the MSB to 1. YIsNegative = 1 << 7, } diff --git a/ec/src/models/twisted_edwards/affine.rs b/ec/src/models/twisted_edwards/affine.rs index a6c908e31..2b168a809 100644 --- a/ec/src/models/twisted_edwards/affine.rs +++ b/ec/src/models/twisted_edwards/affine.rs @@ -15,7 +15,7 @@ use ark_std::{ use num_traits::{One, Zero}; use zeroize::Zeroize; -use ark_ff::{fields::Field, PrimeField, ToConstraintField, UniformRand}; +use ark_ff::{fields::Field, AdditiveGroup, PrimeField, ToConstraintField, UniformRand}; use super::{Projective, TECurveConfig, TEFlags}; use crate::AffineRepr; @@ -166,8 +166,8 @@ impl AffineRepr for Affine

{ type ScalarField = P::ScalarField; type Group = Projective

; - fn xy(&self) -> Option<(&Self::BaseField, &Self::BaseField)> { - (!self.is_zero()).then(|| (&self.x, &self.y)) + fn xy(&self) -> Option<(Self::BaseField, Self::BaseField)> { + (!self.is_zero()).then(|| (self.x, self.y)) } fn generator() -> Self { diff --git a/ec/src/models/twisted_edwards/group.rs b/ec/src/models/twisted_edwards/group.rs index d82d70a2e..88bf8128c 100644 --- a/ec/src/models/twisted_edwards/group.rs +++ b/ec/src/models/twisted_edwards/group.rs @@ -15,7 +15,7 @@ use ark_std::{ One, Zero, }; -use ark_ff::{fields::Field, PrimeField, ToConstraintField, UniformRand}; +use ark_ff::{fields::Field, AdditiveGroup, PrimeField, ToConstraintField, UniformRand}; use zeroize::Zeroize; @@ -25,7 +25,7 @@ use rayon::prelude::*; use super::{Affine, MontCurveConfig, TECurveConfig}; use crate::{ scalar_mul::{variable_base::VariableBaseMSM, ScalarMul}, - AffineRepr, CurveGroup, Group, + AffineRepr, CurveGroup, PrimeGroup, }; /// `Projective` implements Extended Twisted Edwards Coordinates @@ -150,12 +150,15 @@ impl Zero for Projective

{ } } -impl Group for Projective

{ - type ScalarField = P::ScalarField; +impl AdditiveGroup for Projective

{ + type Scalar = P::ScalarField; - fn generator() -> Self { - Affine::generator().into() - } + const ZERO: Self = Self::new_unchecked( + P::BaseField::ZERO, + P::BaseField::ONE, + P::BaseField::ZERO, + P::BaseField::ONE, + ); fn double_in_place(&mut self) -> &mut Self { // See "Twisted Edwards Curves Revisited" @@ -190,6 +193,14 @@ impl Group for Projective

{ self } +} + +impl PrimeGroup for Projective

{ + type ScalarField = P::ScalarField; + + fn generator() -> Self { + Affine::generator().into() + } #[inline] fn mul_bigint(&self, other: impl AsRef<[u64]>) -> Self { diff --git a/ec/src/models/twisted_edwards/mod.rs b/ec/src/models/twisted_edwards/mod.rs index 67402ed69..6d326b3a2 100644 --- a/ec/src/models/twisted_edwards/mod.rs +++ b/ec/src/models/twisted_edwards/mod.rs @@ -4,10 +4,10 @@ use ark_serialize::{ }; use ark_std::io::{Read, Write}; -use crate::{scalar_mul::variable_base::VariableBaseMSM, AffineRepr, Group}; +use crate::{scalar_mul::variable_base::VariableBaseMSM, AffineRepr}; use num_traits::Zero; -use ark_ff::fields::Field; +use ark_ff::{fields::Field, AdditiveGroup}; mod affine; pub use affine::*; diff --git a/ec/src/pairing.rs b/ec/src/pairing.rs index 05071a593..90012301e 100644 --- a/ec/src/pairing.rs +++ b/ec/src/pairing.rs @@ -1,4 +1,4 @@ -use ark_ff::{CyclotomicMultSubgroup, Field, One, PrimeField}; +use ark_ff::{AdditiveGroup, CyclotomicMultSubgroup, Field, One, PrimeField}; use ark_serialize::{ CanonicalDeserialize, CanonicalSerialize, Compress, SerializationError, Valid, Validate, }; @@ -16,7 +16,7 @@ use ark_std::{ }; use zeroize::Zeroize; -use crate::{AffineRepr, CurveGroup, Group, VariableBaseMSM}; +use crate::{AffineRepr, CurveGroup, PrimeGroup, VariableBaseMSM}; /// Collection of types (mainly fields and curves) that together describe /// how to compute a pairing over a pairing-friendly curve. @@ -265,7 +265,18 @@ impl Distribution> for Standard { } } -impl Group for PairingOutput

{ +impl AdditiveGroup for PairingOutput

{ + type Scalar = P::ScalarField; + + const ZERO: Self = Self(P::TargetField::ONE); + + fn double_in_place(&mut self) -> &mut Self { + self.0.cyclotomic_square_in_place(); + self + } +} + +impl PrimeGroup for PairingOutput

{ type ScalarField = P::ScalarField; fn generator() -> Self { @@ -277,11 +288,6 @@ impl Group for PairingOutput

{ P::pairing(g1.into(), g2.into()) } - fn double_in_place(&mut self) -> &mut Self { - self.0.cyclotomic_square_in_place(); - self - } - fn mul_bigint(&self, other: impl AsRef<[u64]>) -> Self { Self(self.0.cyclotomic_exp(other.as_ref())) } diff --git a/ec/src/scalar_mul/glv.rs b/ec/src/scalar_mul/glv.rs index 87f86176a..21ef70077 100644 --- a/ec/src/scalar_mul/glv.rs +++ b/ec/src/scalar_mul/glv.rs @@ -1,62 +1,148 @@ -use crate::{CurveConfig, CurveGroup}; +use crate::AdditiveGroup; +use crate::{ + short_weierstrass::{Affine, Projective, SWCurveConfig}, + CurveGroup, +}; +use ark_ff::{PrimeField, Zero}; +use num_bigint::{BigInt, BigUint, Sign}; +use num_traits::Signed; /// The GLV parameters for computing the endomorphism and scalar decomposition. -pub trait GLVConfig: Send + Sync + 'static + CurveConfig { - /// A representation of curve points that enables efficient arithmetic by - /// avoiding inversions. - type Curve: CurveGroup; - - // Constants that are used to calculate `phi(G) := lambda*G`. - - /// Coefficient `a_1` of `f(y) = a_1 * (y + a_2) * (y + a_3)`. - const COEFF_A1: Self::BaseField; - /// Coefficient `a_2` of `f(y) = a_1 * (y + a_2) * (y + a_3)`. - const COEFF_A2: Self::BaseField; - /// Coefficient `a_3` of `f(y) = a_1 * (y + a_2) * (y + a_3)`. - const COEFF_A3: Self::BaseField; - - /// Coefficient `b_1` of `g(y) = b_1 * (y + b_2) * (y + b_3)`. - const COEFF_B1: Self::BaseField; - /// Coefficient `b_2` of `g(y) = b_1 * (y + b_2) * (y + b_3)`. - const COEFF_B2: Self::BaseField; - /// Coefficient `b_3` of `g(y) = b_1 * (y + b_2) * (y + b_3)`. - const COEFF_B3: Self::BaseField; - - /// Coefficient `c_1` of `h(y) = (y + c_1) * (y + c_2)`. - const COEFF_C1: Self::BaseField; - /// Coefficient `c_2` of `h(y) = (y + c_1) * (y + c_2)`. - const COEFF_C2: Self::BaseField; - - // Constants for scalar decomposition. - // This is a 2x2 matrix, which is practically the LLL-reduced bases. - - /// The first element of the matrix for scalar decomposition. 
- const COEFF_N11: Self::ScalarField; - /// The second element of the matrix for scalar decomposition. - const COEFF_N12: Self::ScalarField; - /// The third element of the matrix for scalar decomposition. - const COEFF_N21: Self::ScalarField; - /// The forth element of the matrix for the scalar decomposition. - const COEFF_N22: Self::ScalarField; - - /// Maps a point G to phi(G):= lambda G where psi is the endomorphism. - // On an affine curve, the function takes the following steps: - // f(y) = a_1 * (y + a_2) * (y + a_3) - // g(y) = b_1 * (y + b_2) * (y + b_3) - // h(y) = (y + c_1) * (y + c_2) - // return (x',y') where - // x' = x * f(y) / y - // y' = g(y) / h(y) - fn endomorphism( - base: &::Affine, - ) -> ::Affine; +pub trait GLVConfig: Send + Sync + 'static + SWCurveConfig { + /// Constants that are used to calculate `phi(G) := lambda*G`. + + /// The coefficients of the endomorphism + const ENDO_COEFFS: &'static [Self::BaseField]; + + /// The eigenvalue corresponding to the endomorphism. + const LAMBDA: Self::ScalarField; + + /// A 4-element vector representing a 2x2 matrix of coefficients the for scalar decomposition, s.t. k-th entry in the vector is at col i, row j in the matrix, with ij = BE binary decomposition of k. + /// The entries are the LLL-reduced bases. + /// The determinant of this matrix must equal `ScalarField::characteristic()`. + const SCALAR_DECOMP_COEFFS: [(bool, ::BigInt); 4]; /// Decomposes a scalar s into k1, k2, s.t. 
s = k1 + lambda k2, - fn scalar_decomposition(k: &Self::ScalarField) -> (Self::ScalarField, Self::ScalarField); + fn scalar_decomposition( + k: Self::ScalarField, + ) -> ((bool, Self::ScalarField), (bool, Self::ScalarField)) { + let scalar: BigInt = k.into_bigint().into().into(); + + let coeff_bigints: [BigInt; 4] = Self::SCALAR_DECOMP_COEFFS.map(|x| { + BigInt::from_biguint(x.0.then_some(Sign::Plus).unwrap_or(Sign::Minus), x.1.into()) + }); + + let [n11, n12, n21, n22] = coeff_bigints; + + let r = BigInt::from(Self::ScalarField::MODULUS.into()); + + // beta = vector([k,0]) * self.curve.N_inv + // The inverse of N is 1/r * Matrix([[n22, -n12], [-n21, n11]]). + // so β = (k*n22, -k*n12)/r + + let beta_1 = &scalar * &n22 / &r; + let beta_2 = &scalar * &n12 / &r; + + // b = vector([int(beta[0]), int(beta[1])]) * self.curve.N + // b = (β1N11 + β2N21, β1N12 + β2N22) with the signs! + // = (b11 + b12 , b21 + b22) with the signs! + + // b1 + let b11 = &beta_1 * &n11; + let b12 = &beta_2 * &n21; + let b1 = b11 + b12; + + // b2 + let b21 = &beta_1 * &n12; + let b22 = &beta_2 * &n22; + let b2 = b21 + b22; + + let k1 = &scalar - b1; + let k1_abs = BigUint::try_from(k1.abs()).unwrap(); + + // k2 + let k2 = -b2; + let k2_abs = BigUint::try_from(k2.abs()).unwrap(); + + ( + (k1.sign() == Sign::Plus, Self::ScalarField::from(k1_abs)), + (k2.sign() == Sign::Plus, Self::ScalarField::from(k2_abs)), + ) + } + + fn endomorphism(p: &Projective) -> Projective; + + fn endomorphism_affine(p: &Affine) -> Affine; + + fn glv_mul_projective(p: Projective, k: Self::ScalarField) -> Projective { + let ((sgn_k1, k1), (sgn_k2, k2)) = Self::scalar_decomposition(k); + + let mut b1 = p; + let mut b2 = Self::endomorphism(&p); + + if !sgn_k1 { + b1 = -b1; + } + if !sgn_k2 { + b2 = -b2; + } + + let b1b2 = b1 + b2; + + let iter_k1 = ark_ff::BitIteratorBE::new(k1.into_bigint()); + let iter_k2 = ark_ff::BitIteratorBE::new(k2.into_bigint()); + + let mut res = Projective::::zero(); + let mut skip_zeros = true; 
+ for pair in iter_k1.zip(iter_k2) { + if skip_zeros && pair == (false, false) { + skip_zeros = false; + continue; + } + res.double_in_place(); + match pair { + (true, false) => res += b1, + (false, true) => res += b2, + (true, true) => res += b1b2, + (false, false) => {}, + } + } + res + } + + fn glv_mul_affine(p: Affine, k: Self::ScalarField) -> Affine { + let ((sgn_k1, k1), (sgn_k2, k2)) = Self::scalar_decomposition(k); + + let mut b1 = p; + let mut b2 = Self::endomorphism_affine(&p); + + if !sgn_k1 { + b1 = -b1; + } + if !sgn_k2 { + b2 = -b2; + } + + let b1b2 = b1 + b2; + + let iter_k1 = ark_ff::BitIteratorBE::new(k1.into_bigint()); + let iter_k2 = ark_ff::BitIteratorBE::new(k2.into_bigint()); - /// Performs GLV multiplication. - fn glv_mul( - base: &::Affine, - scalar: &Self::ScalarField, - ) -> Self::Curve; + let mut res = Projective::::zero(); + let mut skip_zeros = true; + for pair in iter_k1.zip(iter_k2) { + if skip_zeros && pair == (false, false) { + skip_zeros = false; + continue; + } + res.double_in_place(); + match pair { + (true, false) => res += b1, + (false, true) => res += b2, + (true, true) => res += b1b2, + (false, false) => {}, + } + } + res.into_affine() + } } diff --git a/ec/src/scalar_mul/mod.rs b/ec/src/scalar_mul/mod.rs index 9f81b75c7..1ae3a1e99 100644 --- a/ec/src/scalar_mul/mod.rs +++ b/ec/src/scalar_mul/mod.rs @@ -4,7 +4,9 @@ pub mod wnaf; pub mod fixed_base; pub mod variable_base; -use crate::Group; +use crate::PrimeGroup; +use crate::short_weierstrass::{Affine, Projective, SWCurveConfig}; +use ark_ff::{AdditiveGroup, Zero}; use ark_std::{ ops::{Add, AddAssign, Mul, Neg, Sub, SubAssign}, vec::Vec, @@ -19,8 +21,42 @@ fn ln_without_floats(a: usize) -> usize { (ark_std::log2(a) * 69 / 100) as usize } +/// Standard double-and-add method for multiplication by a scalar. +#[inline(always)] +pub fn sw_double_and_add_affine( + base: &Affine

, + scalar: impl AsRef<[u64]>, +) -> Projective

{ + let mut res = Projective::

::zero(); + for b in ark_ff::BitIteratorBE::without_leading_zeros(scalar) { + res.double_in_place(); + if b { + res += base + } + } + + res +} + +/// Standard double-and-add method for multiplication by a scalar. +#[inline(always)] +pub fn sw_double_and_add_projective( + base: &Projective

, + scalar: impl AsRef<[u64]>, +) -> Projective

{ + let mut res = Projective::

::zero(); + for b in ark_ff::BitIteratorBE::without_leading_zeros(scalar) { + res.double_in_place(); + if b { + res += base + } + } + + res +} + pub trait ScalarMul: - Group + PrimeGroup + Add + AddAssign + for<'a> Add<&'a Self::MulBase, Output = Self> diff --git a/ec/src/scalar_mul/variable_base/mod.rs b/ec/src/scalar_mul/variable_base/mod.rs index bc4719c8f..6e485c9a3 100644 --- a/ec/src/scalar_mul/variable_base/mod.rs +++ b/ec/src/scalar_mul/variable_base/mod.rs @@ -24,7 +24,7 @@ pub trait VariableBaseMSM: ScalarMul { Self::msm_bigint(bases, &bigints) } - /// Performs multi-scalar multiplication, without checking that `bases.len() == scalars.len()`. + /// Performs multi-scalar multiplication. /// /// # Warning /// diff --git a/ec/src/scalar_mul/wnaf.rs b/ec/src/scalar_mul/wnaf.rs index d3e0b5437..0003538c5 100644 --- a/ec/src/scalar_mul/wnaf.rs +++ b/ec/src/scalar_mul/wnaf.rs @@ -1,4 +1,4 @@ -use crate::Group; +use crate::PrimeGroup; use ark_ff::{BigInteger, PrimeField}; use ark_std::vec::Vec; @@ -20,7 +20,7 @@ impl WnafContext { Self { window_size } } - pub fn table(&self, mut base: G) -> Vec { + pub fn table(&self, mut base: G) -> Vec { let mut table = Vec::with_capacity(1 << (self.window_size - 1)); let dbl = base.double(); @@ -37,7 +37,7 @@ impl WnafContext { /// multiplication; first, it uses `Self::table` to calculate an /// appropriate table of multiples of `g`, and then uses the wNAF /// algorithm to compute the scalar multiple. - pub fn mul(&self, g: G, scalar: &G::ScalarField) -> G { + pub fn mul(&self, g: G, scalar: &G::ScalarField) -> G { let table = self.table(g); self.mul_with_table(&table, scalar).unwrap() } @@ -48,7 +48,11 @@ impl WnafContext { /// `G::ScalarField`. /// /// Returns `None` if the table is too small. 
- pub fn mul_with_table(&self, base_table: &[G], scalar: &G::ScalarField) -> Option { + pub fn mul_with_table( + &self, + base_table: &[G], + scalar: &G::ScalarField, + ) -> Option { if 1 << (self.window_size - 1) > base_table.len() { return None; } diff --git a/ff-asm/Cargo.toml b/ff-asm/Cargo.toml index 980020941..4e2680073 100644 --- a/ff-asm/Cargo.toml +++ b/ff-asm/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ark-ff-asm" -version = "0.4.0" +version = "0.4.2" authors = [ "arkworks contributors" ] description = "A library for generating x86-64 assembly for finite field multiplication" homepage = "https://arkworks.rs" diff --git a/ff-macros/Cargo.toml b/ff-macros/Cargo.toml index ca96782ff..2bd7dd464 100644 --- a/ff-macros/Cargo.toml +++ b/ff-macros/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ark-ff-macros" -version = "0.4.0" +version = "0.4.2" authors = [ "arkworks contributors" ] description = "A library for generating x86-64 assembly for finite field multiplication" homepage = "https://arkworks.rs" diff --git a/ff-macros/src/lib.rs b/ff-macros/src/lib.rs index d642d1a79..9d90d5840 100644 --- a/ff-macros/src/lib.rs +++ b/ff-macros/src/lib.rs @@ -131,6 +131,47 @@ fn fetch_attr(name: &str, attrs: &[syn::Attribute]) -> Option { #[test] fn test_str_to_limbs() { + use num_bigint::Sign::*; + for i in 0..100 { + for sign in [Plus, Minus] { + let number = 1i128 << i; + let signed_number = match sign { + Minus => -number, + Plus | _ => number, + }; + for base in [2, 8, 16, 10] { + let mut string = match base { + 2 => format!("{:#b}", number), + 8 => format!("{:#o}", number), + 16 => format!("{:#x}", number), + 10 => format!("{}", number), + _ => unreachable!(), + }; + if sign == Minus { + string.insert(0, '-'); + } + let (is_positive, limbs) = utils::str_to_limbs(&format!("{}", string)); + assert_eq!( + limbs[0], + format!("{}u64", signed_number.abs() as u64), + "{signed_number}, {i}" + ); + if i > 63 { + assert_eq!( + limbs[1], + format!("{}u64", (signed_number.abs() 
>> 64) as u64), + "{signed_number}, {i}" + ); + } + + assert_eq!(is_positive, sign == Plus); + } + } + } + let (is_positive, limbs) = utils::str_to_limbs("0"); + assert!(is_positive); + assert_eq!(&limbs, &["0u64".to_string()]); + let (is_positive, limbs) = utils::str_to_limbs("-5"); assert!(!is_positive); assert_eq!(&limbs, &["5u64".to_string()]); diff --git a/ff-macros/src/utils.rs b/ff-macros/src/utils.rs index dde73e1a9..055fbf79f 100644 --- a/ff-macros/src/utils.rs +++ b/ff-macros/src/utils.rs @@ -1,6 +1,7 @@ use std::str::FromStr; use num_bigint::{BigInt, Sign}; +use num_traits::Num; use proc_macro::TokenStream; use syn::{Expr, Lit}; @@ -26,9 +27,25 @@ pub fn str_to_limbs(num: &str) -> (bool, Vec) { } pub fn str_to_limbs_u64(num: &str) -> (bool, Vec) { - let (sign, digits) = BigInt::from_str(num) - .expect("could not parse to bigint") - .to_radix_le(16); + let is_negative = num.starts_with('-'); + let num = if is_negative { &num[1..] } else { num }; + let number = if num.starts_with("0x") || num.starts_with("0X") { + // We are in hexadecimal + BigInt::from_str_radix(&num[2..], 16) + } else if num.starts_with("0o") || num.starts_with("0O") { + // We are in octal + BigInt::from_str_radix(&num[2..], 8) + } else if num.starts_with("0b") || num.starts_with("0B") { + // We are in binary + BigInt::from_str_radix(&num[2..], 2) + } else { + // We are in decimal + BigInt::from_str(num) + } + .expect("could not parse to bigint"); + let number = if is_negative { -number } else { number }; + let (sign, digits) = number.to_radix_le(16); + let limbs = digits .chunks(16) .map(|chunk| { diff --git a/ff/Cargo.toml b/ff/Cargo.toml index 29e5bb7da..69eeeafdb 100644 --- a/ff/Cargo.toml +++ b/ff/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ark-ff" -version = "0.4.0" +version = "0.4.2" authors = [ "arkworks contributors" ] description = "A library for finite fields" homepage = "https://arkworks.rs" @@ -8,16 +8,16 @@ repository = "https://github.com/arkworks-rs/algebra" 
documentation = "https://docs.rs/ark-ff/" keywords = ["cryptography", "finite-fields" ] categories = ["cryptography"] -include = ["Cargo.toml", "build.rs", "src", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] +include = ["Cargo.toml", "build.rs", "src", "doc", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] license = "MIT/Apache-2.0" edition = "2021" rust-version = "1.63" [dependencies] -ark-ff-asm = { version = "0.4.0", path = "../ff-asm" } -ark-ff-macros = { version = "0.4.0", path = "../ff-macros" } +ark-ff-asm = { version = "0.4.2", path = "../ff-asm" } +ark-ff-macros = { version = "0.4.2", path = "../ff-macros" } ark-std = { version = "0.4.0", default-features = false } -ark-serialize = { version = "0.4.0", path = "../serialize", default-features = false } +ark-serialize = { version = "0.4.2", path = "../serialize", default-features = false } derivative = { version = "2", features = ["use_core"] } num-traits = { version = "0.2", default-features = false } paste = "1.0" @@ -28,7 +28,7 @@ digest = { version = "0.10", default-features = false, features = ["alloc"] } itertools = { version = "0.10", default-features = false } [dev-dependencies] -ark-test-curves = { version = "0.4.0", path = "../test-curves", default-features = false, features = [ "bls12_381_curve", "mnt6_753", "secp256k1"] } +ark-test-curves = { version = "0.4.2", path = "../test-curves", default-features = false, features = [ "bls12_381_curve", "mnt6_753", "secp256k1"] } blake2 = { version = "0.10", default-features = false } sha3 = { version = "0.10", default-features = false } sha2 = { version = "0.10", default-features = false } diff --git a/ff/README.md b/ff/README.md index ce246ca73..4d50a2af2 100644 --- a/ff/README.md +++ b/ff/README.md @@ -7,7 +7,7 @@

This crate defines Finite Field traits and useful abstraction models that follow these traits. -Implementations of concrete finite fields for some popular elliptic curves can be found in [`arkworks-rs/curves`](https://github.com/arkworks-rs/curves/README.md) under `arkworks-rs/curves//src/fields/`. +Implementations of concrete finite fields for some popular elliptic curves can be found in [`arkworks-rs/curves`](https://github.com/arkworks-rs/curves/blob/master/README.md) under `arkworks-rs/curves//src/fields/`. This crate contains two types of traits: @@ -16,6 +16,7 @@ This crate contains two types of traits: The available field traits are: +- [`AdditiveGroup`](/ff/src/lib.rs) - Interface for additive groups that have a "scalar multiplication" operation with respect to the `Scalar` associated type. This applies to to prime-order fields, field extensions, and elliptic-curve groups used in cryptography. - [`Field`](https://github.com/arkworks-rs/algebra/blob/master/ff/src/fields/mod.rs#L66) - Interface for a generic finite field. - [`FftField`](https://github.com/arkworks-rs/algebra/blob/master/ff/src/fields/mod.rs#L419) - Exposes methods that allow for performing efficient FFTs on field elements. - [`PrimeField`](https://github.com/arkworks-rs/algebra/blob/master/ff/src/fields/mod.rs#L523) - Field with a prime `p` number of elements, also referred to as `Fp`. 
@@ -28,7 +29,7 @@ The models implemented are: - [`Cubic Extension`](https://github.com/arkworks-rs/algebra/blob/master/ff/src/fields/models/cubic_extension.rs) - [`CubicExtField`](https://github.com/arkworks-rs/algebra/blob/master/ff/src/fields/models/cubic_extension.rs#L72) - Struct representing a cubic extension field, holds three base field elements - [`CubicExtConfig`](https://github.com/arkworks-rs/algebra/blob/master/ff/src/fields/models/cubic_extension.rs#L27) - Trait defining the necessary parameters needed to instantiate a Cubic Extension Field - + The above two models serve as abstractions for constructing the extension fields `Fp^m` directly (i.e. `m` equal 2 or 3) or for creating extension towers to arrive at higher `m`. The latter is done by applying the extensions iteratively, e.g. cubic extension over a quadratic extension field. - [`Fp2`](https://github.com/arkworks-rs/algebra/blob/master/ff/src/fields/models/fp2.rs#L103) - Quadratic extension directly on the prime field, i.e. `BaseField == BasePrimeField` @@ -42,14 +43,48 @@ The above two models serve as abstractions for constructing the extension fields There are two important traits when working with finite fields: [`Field`], and [`PrimeField`]. Let's explore these via examples. -### [`Field`] +### [`AdditiveGroup`][additive_group] -The [`Field`] trait provides a generic interface for any finite field. -Types implementing [`Field`] support common field operations -such as addition, subtraction, multiplication, and inverses. +The [`AdditiveGroup`][additive_group] trait provides a generic interface for additive groups that have an associated scalar multiplication operations. Types implementing this trait support common group operations such as addition, subtraction, negation, as well as scalar multiplication by the [`Scalar`][group_scalar_type] associated type. 
```rust -use ark_ff::Field; +use ark_ff::AdditiveGroup; +// We'll use a field associated with the BLS12-381 pairing-friendly +// group for this example. +use ark_test_curves::bls12_381::Fq2 as F; +// `ark-std` is a utility crate that enables `arkworks` libraries +// to easily support `std` and `no_std` workloads, and also re-exports +// useful crates that should be common across the entire ecosystem, such as `rand`. +use ark_std::{One, UniformRand}; + +let mut rng = ark_std::test_rng(); +// Let's sample uniformly random field elements: +let a = F::rand(&mut rng); +let b = F::rand(&mut rng); +let c = ::Scalar::rand(&mut rng); + +// We can add... +let c = a + b; +// ... subtract ... +let d = a - b; +// ... double elements ... +assert_eq!(c + d, a.double()); +// ... negate them ... +assert_ne!(d, -d); + +// ... and multiply them by scalars: +let e = d * c; +``` + +### [`Field`][field] + +The [`Field`][field] trait provides a generic interface for any finite field. +Types implementing [`Field`][field] support common field operations +such as addition, subtraction, multiplication, and inverses, and are required +to be [`AdditiveGroup`][additive_group]s too. + +```rust +use ark_ff::{AdditiveGroup, Field}; // We'll use a field associated with the BLS12-381 pairing-friendly // group for this example. use ark_test_curves::bls12_381::Fq2 as F; @@ -63,6 +98,7 @@ let mut rng = ark_std::test_rng(); let a = F::rand(&mut rng); let b = F::rand(&mut rng); +// We can perform all the operations from the `AdditiveGroup` trait: // We can add... let c = a + b; // ... subtract ... @@ -107,10 +143,10 @@ if a.legendre().is_qr() { } ``` -### [`PrimeField`] +### [`PrimeField`][prime_field] If the field is of prime order, then users can choose -to implement the [`PrimeField`] trait for it. This provides access to the following +to implement the [`PrimeField`][prime_field] trait for it. 
This provides access to the following additional APIs: ```rust @@ -133,3 +169,8 @@ assert_eq!(one, num_bigint::BigUint::one()); let n = F::from_le_bytes_mod_order(&modulus.to_bytes_le()); assert_eq!(n, F::zero()); ``` + +[additive_group]: https://docs.rs/ark-ff/latest/ark_ff/fields/trait.AdditiveGroup.html +[group_scalar_type]: https://docs.rs/ark-ff/latest/ark_ff/fields/trait.AdditiveGroup.html#associatedtype.Scalar +[field]: https://docs.rs/ark-ff/latest/ark_ff/fields/trait.Field.html +[prime_field]: https://docs.rs/ark-ff/latest/ark_ff/fields/trait.PrimeField.html diff --git a/ff/src/fields/field_hashers/expander/mod.rs b/ff/src/fields/field_hashers/expander/mod.rs index 4f927aa82..8b1ef0a12 100644 --- a/ff/src/fields/field_hashers/expander/mod.rs +++ b/ff/src/fields/field_hashers/expander/mod.rs @@ -77,9 +77,9 @@ impl Expander for ExpanderXmd { let dst_prime = self.construct_dst_prime(); let z_pad: Vec = vec![0; self.block_size]; - // // Represent `len_in_bytes` as a 2-byte array. - // // As per I2OSP method outlined in https://tools.ietf.org/pdf/rfc8017.pdf, - // // The program should abort if integer that we're trying to convert is too large. + // Represent `len_in_bytes` as a 2-byte array. + // As per I2OSP method outlined in https://tools.ietf.org/pdf/rfc8017.pdf, + // The program should abort if integer that we're trying to convert is too large. assert!(n < (1 << 16), "Length should be smaller than 2^16"); let lib_str: [u8; 2] = (n as u16).to_be_bytes(); diff --git a/ff/src/fields/mod.rs b/ff/src/fields/mod.rs index 4463953dd..08e3ebc45 100644 --- a/ff/src/fields/mod.rs +++ b/ff/src/fields/mod.rs @@ -1,3 +1,5 @@ +use core::iter::Product; + use crate::UniformRand; use ark_serialize::{ CanonicalDeserialize, CanonicalDeserializeWithFlags, CanonicalSerialize, @@ -42,14 +44,78 @@ use ark_std::cmp::max; #[cfg(feature = "parallel")] use rayon::prelude::*; -/// The interface for a generic field. 
+pub trait AdditiveGroup: + Eq + + 'static + + Sized + + CanonicalSerialize + + CanonicalDeserialize + + Copy + + Clone + + Default + + Send + + Sync + + Hash + + Debug + + Display + + UniformRand + + Zeroize + + Zero + + Neg + + Add + + Sub + + Mul<::Scalar, Output = Self> + + AddAssign + + SubAssign + + MulAssign<::Scalar> + + for<'a> Add<&'a Self, Output = Self> + + for<'a> Sub<&'a Self, Output = Self> + + for<'a> Mul<&'a ::Scalar, Output = Self> + + for<'a> AddAssign<&'a Self> + + for<'a> SubAssign<&'a Self> + + for<'a> MulAssign<&'a ::Scalar> + + for<'a> Add<&'a mut Self, Output = Self> + + for<'a> Sub<&'a mut Self, Output = Self> + + for<'a> Mul<&'a mut ::Scalar, Output = Self> + + for<'a> AddAssign<&'a mut Self> + + for<'a> SubAssign<&'a mut Self> + + for<'a> MulAssign<&'a mut ::Scalar> + + ark_std::iter::Sum + + for<'a> ark_std::iter::Sum<&'a Self> +{ + type Scalar: Field; + + /// The additive identity of the field. + const ZERO: Self; + + /// Doubles `self`. + #[must_use] + fn double(&self) -> Self { + let mut copy = *self; + copy.double_in_place(); + copy + } + /// Doubles `self` in place. + fn double_in_place(&mut self) -> &mut Self { + self.add_assign(*self); + self + } + + /// Negates `self` in place. + fn neg_in_place(&mut self) -> &mut Self { + *self = -(*self); + self + } +} + +/// The interface for a generic field. /// Types implementing [`Field`] support common field operations such as addition, subtraction, multiplication, and inverses. /// /// ## Defining your own field /// To demonstrate the various field operations, we can first define a prime ordered field $\mathbb{F}_{p}$ with $p = 17$. When defining a field $\mathbb{F}_p$, we need to provide the modulus(the $p$ in $\mathbb{F}_p$) and a generator. Recall that a generator $g \in \mathbb{F}_p$ is a field element whose powers comprise the entire field: $\mathbb{F}_p =\\{g, g^1, \ldots, g^{p-1}\\}$. 
/// We can then manually construct the field element associated with an integer with `Fp::from` and perform field addition, subtraction, multiplication, and inversion on it. /// ```rust -/// use ark_ff::fields::{Field, Fp64, MontBackend, MontConfig}; +/// use ark_ff::{AdditiveGroup, fields::{Field, Fp64, MontBackend, MontConfig}}; /// /// #[derive(MontConfig)] /// #[modulus = "17"] @@ -74,7 +140,7 @@ use rayon::prelude::*; /// ## Using pre-defined fields /// In the following example, we’ll use the field associated with the BLS12-381 pairing-friendly group. /// ```rust -/// use ark_ff::Field; +/// use ark_ff::{AdditiveGroup, Field}; /// use ark_test_curves::bls12_381::Fq as F; /// use ark_std::{One, UniformRand, test_rng}; /// @@ -113,33 +179,13 @@ pub trait Field: + CanonicalSerializeWithFlags + CanonicalDeserialize + CanonicalDeserializeWithFlags - + Add - + Sub - + Mul + + AdditiveGroup + Div - + AddAssign - + SubAssign - + MulAssign + DivAssign - + for<'a> Add<&'a Self, Output = Self> - + for<'a> Sub<&'a Self, Output = Self> - + for<'a> Mul<&'a Self, Output = Self> + for<'a> Div<&'a Self, Output = Self> - + for<'a> AddAssign<&'a Self> - + for<'a> SubAssign<&'a Self> - + for<'a> MulAssign<&'a Self> + for<'a> DivAssign<&'a Self> - + for<'a> Add<&'a mut Self, Output = Self> - + for<'a> Sub<&'a mut Self, Output = Self> - + for<'a> Mul<&'a mut Self, Output = Self> + for<'a> Div<&'a mut Self, Output = Self> - + for<'a> AddAssign<&'a mut Self> - + for<'a> SubAssign<&'a mut Self> - + for<'a> MulAssign<&'a mut Self> + for<'a> DivAssign<&'a mut Self> - + core::iter::Sum - + for<'a> core::iter::Sum<&'a Self> - + core::iter::Product + for<'a> core::iter::Product<&'a Self> + From + From @@ -147,6 +193,7 @@ pub trait Field: + From + From + From + + Product { type BasePrimeField: PrimeField; @@ -155,8 +202,6 @@ pub trait Field: /// Determines the algorithm for computing square roots. const SQRT_PRECOMP: Option>; - /// The additive identity of the field. 
- const ZERO: Self; /// The multiplicative identity of the field. const ONE: Self; @@ -186,16 +231,6 @@ pub trait Field: /// ``` fn from_base_prime_field(elem: Self::BasePrimeField) -> Self; - /// Returns `self + self`. - #[must_use] - fn double(&self) -> Self; - - /// Doubles `self` in place. - fn double_in_place(&mut self) -> &mut Self; - - /// Negates `self` in place. - fn neg_in_place(&mut self) -> &mut Self; - /// Attempt to deserialize a field element. Returns `None` if the /// deserialization fails. /// @@ -402,7 +437,10 @@ mod no_std_tests { // TODO: only Fr & FrConfig should need to be imported. // The rest of imports are caused by cargo not resolving the deps properly // from this crate and from ark_test_curves - use ark_test_curves::{batch_inversion, batch_inversion_and_mul, bls12_381::Fr, PrimeField}; + use ark_test_curves::{ + ark_ff::{batch_inversion, batch_inversion_and_mul, PrimeField}, + bls12_381::Fr, + }; #[test] fn test_batch_inversion() { @@ -457,7 +495,7 @@ mod no_std_tests { // TODO: Eventually generate all the test vector bytes via computation with the // modulus use ark_std::{rand::Rng, string::ToString}; - use ark_test_curves::BigInteger; + use ark_test_curves::ark_ff::BigInteger; use num_bigint::BigUint; let ref_modulus = BigUint::from_bytes_be(&Fr::MODULUS.to_bytes_be()); diff --git a/ff/src/fields/models/cubic_extension.rs b/ff/src/fields/models/cubic_extension.rs index ff4203f03..8369706be 100644 --- a/ff/src/fields/models/cubic_extension.rs +++ b/ff/src/fields/models/cubic_extension.rs @@ -21,7 +21,7 @@ use ark_std::rand::{ use crate::{ fields::{Field, PrimeField}, - LegendreSymbol, SqrtPrecomputation, ToConstraintField, UniformRand, + AdditiveGroup, LegendreSymbol, SqrtPrecomputation, ToConstraintField, UniformRand, }; /// Defines a Cubic extension field from a cubic non-residue. @@ -164,6 +164,32 @@ impl One for CubicExtField

{ } } +impl AdditiveGroup for CubicExtField

{ + type Scalar = Self; + + const ZERO: Self = Self::new(P::BaseField::ZERO, P::BaseField::ZERO, P::BaseField::ZERO); + + fn double(&self) -> Self { + let mut result = *self; + result.double_in_place(); + result + } + + fn double_in_place(&mut self) -> &mut Self { + self.c0.double_in_place(); + self.c1.double_in_place(); + self.c2.double_in_place(); + self + } + + fn neg_in_place(&mut self) -> &mut Self { + self.c0.neg_in_place(); + self.c1.neg_in_place(); + self.c2.neg_in_place(); + self + } +} + type BaseFieldIter

= <

::BaseField as Field>::BasePrimeFieldIter; impl Field for CubicExtField

{ type BasePrimeField = P::BasePrimeField; @@ -171,8 +197,6 @@ impl Field for CubicExtField

{ const SQRT_PRECOMP: Option> = P::SQRT_PRECOMP; - const ZERO: Self = Self::new(P::BaseField::ZERO, P::BaseField::ZERO, P::BaseField::ZERO); - const ONE: Self = Self::new(P::BaseField::ONE, P::BaseField::ZERO, P::BaseField::ZERO); fn extension_degree() -> u64 { @@ -205,26 +229,6 @@ impl Field for CubicExtField

{ )) } - fn double(&self) -> Self { - let mut result = *self; - result.double_in_place(); - result - } - - fn double_in_place(&mut self) -> &mut Self { - self.c0.double_in_place(); - self.c1.double_in_place(); - self.c2.double_in_place(); - self - } - - fn neg_in_place(&mut self) -> &mut Self { - self.c0.neg_in_place(); - self.c1.neg_in_place(); - self.c2.neg_in_place(); - self - } - #[inline] fn from_random_bytes_with_flags(bytes: &[u8]) -> Option<(Self, F)> { let split_at = bytes.len() / 3; @@ -700,9 +704,9 @@ mod cube_ext_tests { use super::*; use ark_std::test_rng; use ark_test_curves::{ + ark_ff::Field, bls12_381::{Fq, Fq2, Fq6}, mnt6_753::Fq3, - Field, }; #[test] diff --git a/ff/src/fields/models/fp/mod.rs b/ff/src/fields/models/fp/mod.rs index 7f417ed9a..1113c1222 100644 --- a/ff/src/fields/models/fp/mod.rs +++ b/ff/src/fields/models/fp/mod.rs @@ -18,7 +18,10 @@ use ark_std::{ mod montgomery_backend; pub use montgomery_backend::*; -use crate::{BigInt, BigInteger, FftField, Field, LegendreSymbol, PrimeField, SqrtPrecomputation}; +use crate::{ + AdditiveGroup, BigInt, BigInteger, FftField, Field, LegendreSymbol, PrimeField, + SqrtPrecomputation, +}; /// A trait that specifies the configuration of a prime field. /// Also specifies how to perform arithmetic on field elements. 
pub trait FpConfig: Send + Sync + 'static + Sized { @@ -186,12 +189,35 @@ impl, const N: usize> One for Fp { } } +impl, const N: usize> AdditiveGroup for Fp { + type Scalar = Self; + const ZERO: Self = P::ZERO; + + #[inline] + fn double(&self) -> Self { + let mut temp = *self; + temp.double_in_place(); + temp + } + + #[inline] + fn double_in_place(&mut self) -> &mut Self { + P::double_in_place(self); + self + } + + #[inline] + fn neg_in_place(&mut self) -> &mut Self { + P::neg_in_place(self); + self + } +} + impl, const N: usize> Field for Fp { type BasePrimeField = Self; type BasePrimeFieldIter = iter::Once; const SQRT_PRECOMP: Option> = P::SQRT_PRECOMP; - const ZERO: Self = P::ZERO; const ONE: Self = P::ONE; fn extension_degree() -> u64 { @@ -213,25 +239,6 @@ impl, const N: usize> Field for Fp { Some(elems[0]) } - #[inline] - fn double(&self) -> Self { - let mut temp = *self; - temp.double_in_place(); - temp - } - - #[inline] - fn double_in_place(&mut self) -> &mut Self { - P::double_in_place(self); - self - } - - #[inline] - fn neg_in_place(&mut self) -> &mut Self { - P::neg_in_place(self); - self - } - #[inline] fn characteristic() -> &'static [u64] { P::MODULUS.as_ref() diff --git a/ff/src/fields/models/fp/montgomery_backend.rs b/ff/src/fields/models/fp/montgomery_backend.rs index 4b3a80219..2caefca11 100644 --- a/ff/src/fields/models/fp/montgomery_backend.rs +++ b/ff/src/fields/models/fp/montgomery_backend.rs @@ -574,9 +574,9 @@ pub const fn sqrt_precomputation>( /// # Usage /// /// ```rust -/// # use ark_test_curves::{MontFp, One}; +/// # use ark_test_curves::MontFp; /// # use ark_test_curves::bls12_381 as ark_bls12_381; -/// # use ark_std::str::FromStr; +/// # use ark_std::{One, str::FromStr}; /// use ark_bls12_381::Fq; /// const ONE: Fq = MontFp!("1"); /// const NEG_ONE: Fq = MontFp!("-1"); diff --git a/ff/src/fields/models/fp12_2over3over2.rs b/ff/src/fields/models/fp12_2over3over2.rs index 7350a0c0c..16f0e2ba0 100644 --- 
a/ff/src/fields/models/fp12_2over3over2.rs +++ b/ff/src/fields/models/fp12_2over3over2.rs @@ -3,7 +3,7 @@ use ark_std::Zero; use super::quadratic_extension::*; use crate::{ fields::{fp6_3over2::*, Field, Fp2, Fp2Config as Fp2ConfigTrait}, - CyclotomicMultSubgroup, + AdditiveGroup, CyclotomicMultSubgroup, }; use core::{ marker::PhantomData, diff --git a/ff/src/fields/models/quadratic_extension.rs b/ff/src/fields/models/quadratic_extension.rs index f62c7d353..5d7f6de96 100644 --- a/ff/src/fields/models/quadratic_extension.rs +++ b/ff/src/fields/models/quadratic_extension.rs @@ -22,7 +22,7 @@ use ark_std::rand::{ use crate::{ biginteger::BigInteger, fields::{Field, LegendreSymbol, PrimeField}, - SqrtPrecomputation, ToConstraintField, UniformRand, + AdditiveGroup, SqrtPrecomputation, ToConstraintField, UniformRand, }; /// Defines a Quadratic extension field from a quadratic non-residue. @@ -193,6 +193,30 @@ impl One for QuadExtField

{ } } +impl AdditiveGroup for QuadExtField

{ + type Scalar = Self; + + const ZERO: Self = Self::new(P::BaseField::ZERO, P::BaseField::ZERO); + + fn double(&self) -> Self { + let mut result = *self; + result.double_in_place(); + result + } + + fn double_in_place(&mut self) -> &mut Self { + self.c0.double_in_place(); + self.c1.double_in_place(); + self + } + + fn neg_in_place(&mut self) -> &mut Self { + self.c0.neg_in_place(); + self.c1.neg_in_place(); + self + } +} + type BaseFieldIter

= <

::BaseField as Field>::BasePrimeFieldIter; impl Field for QuadExtField

{ type BasePrimeField = P::BasePrimeField; @@ -201,7 +225,6 @@ impl Field for QuadExtField

{ const SQRT_PRECOMP: Option> = None; - const ZERO: Self = Self::new(P::BaseField::ZERO, P::BaseField::ZERO); const ONE: Self = Self::new(P::BaseField::ONE, P::BaseField::ZERO); fn extension_degree() -> u64 { @@ -230,24 +253,6 @@ impl Field for QuadExtField

{ )) } - fn double(&self) -> Self { - let mut result = *self; - result.double_in_place(); - result - } - - fn double_in_place(&mut self) -> &mut Self { - self.c0.double_in_place(); - self.c1.double_in_place(); - self - } - - fn neg_in_place(&mut self) -> &mut Self { - self.c0.neg_in_place(); - self.c1.neg_in_place(); - self - } - fn square(&self) -> Self { let mut result = *self; result.square_in_place(); @@ -772,8 +777,8 @@ mod quad_ext_tests { use super::*; use ark_std::test_rng; use ark_test_curves::{ + ark_ff::Field, bls12_381::{Fq, Fq2}, - Field, }; #[test] diff --git a/ff/src/lib.rs b/ff/src/lib.rs index 5a1b71096..fce601ab8 100644 --- a/ff/src/lib.rs +++ b/ff/src/lib.rs @@ -18,7 +18,10 @@ extern crate derivative; #[macro_use] pub mod biginteger; -pub use self::biginteger::*; +pub use biginteger::{ + signed_mod_reduction, BigInt, BigInteger, BigInteger128, BigInteger256, BigInteger320, + BigInteger384, BigInteger448, BigInteger64, BigInteger768, BigInteger832, +}; #[macro_use] pub mod fields; diff --git a/poly/Cargo.toml b/poly/Cargo.toml index e1d414ff8..111066840 100644 --- a/poly/Cargo.toml +++ b/poly/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ark-poly" -version = "0.4.0" +version = "0.4.2" authors = [ "arkworks contributors" ] description = "A library for efficient polynomial arithmetic via FFTs over finite fields" homepage = "https://arkworks.rs" @@ -14,8 +14,8 @@ edition = "2021" rust-version = "1.63" [dependencies] -ark-ff = { version = "0.4.0", path = "../ff", default-features = false } -ark-serialize = { version = "0.4.0", path = "../serialize", default-features = false, features = ["derive"] } +ark-ff = { version = "0.4.2", path = "../ff", default-features = false } +ark-serialize = { version = "0.4.2", path = "../serialize", default-features = false, features = ["derive"] } ark-std = { version = "0.4.0", default-features = false } rayon = { version = "1", optional = true } derivative = { version = "2", default-features = false, features = [ 
"use_core" ] } diff --git a/poly/src/domain/general.rs b/poly/src/domain/general.rs index b58aefd5e..a6ef06327 100644 --- a/poly/src/domain/general.rs +++ b/poly/src/domain/general.rs @@ -22,6 +22,29 @@ use ark_std::{ /// Defines a domain over which finite field (I)FFTs can be performed. /// Generally tries to build a radix-2 domain and falls back to a mixed-radix /// domain if the radix-2 multiplicative subgroup is too small. +/// +/// # Examples +/// +/// ``` +/// use ark_poly::{GeneralEvaluationDomain, EvaluationDomain}; +/// use ark_poly::{univariate::DensePolynomial, Polynomial, DenseUVPolynomial}; +/// use ark_ff::FftField; +/// +/// // The field we are using is FFT-friendly, with 2-adicity of 32. +/// // We can efficiently evaluate polynomials over this field on up to 2^32 points. +/// use ark_test_curves::bls12_381::Fr; +/// +/// let small_domain = GeneralEvaluationDomain::::new(4).unwrap(); +/// let evals = vec![Fr::from(1u8), Fr::from(2u8), Fr::from(3u8), Fr::from(4u8)]; +/// // From a vector of evaluations, we can recover the polynomial. +/// let coeffs = small_domain.ifft(&evals); +/// let poly = DensePolynomial::from_coefficients_vec(coeffs.clone()); +/// assert_eq!(poly.degree(), 3); +/// +/// // We could also evaluate this polynomial at a large number of points efficiently, e.g. for Reed-Solomon encoding. 
+/// let large_domain = GeneralEvaluationDomain::::new(1<<10).unwrap(); +/// let new_evals = large_domain.fft(&coeffs); +/// ``` #[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)] pub enum GeneralEvaluationDomain { /// Radix-2 domain diff --git a/poly/src/polynomial/univariate/dense.rs b/poly/src/polynomial/univariate/dense.rs index d2b3f3606..345b790b3 100644 --- a/poly/src/polynomial/univariate/dense.rs +++ b/poly/src/polynomial/univariate/dense.rs @@ -353,8 +353,8 @@ impl<'a, F: Field> AddAssign<&'a DensePolynomial> for DensePolynomial { .for_each(|(a, b)| { *a += b; }); - self.truncate_leading_zeros(); } + self.truncate_leading_zeros(); } } @@ -584,7 +584,7 @@ impl<'a, 'b, F: FftField> Mul<&'a DensePolynomial> for &'b DensePolynomial if self.is_zero() || other.is_zero() { DensePolynomial::zero() } else { - let domain = GeneralEvaluationDomain::new(self.coeffs.len() + other.coeffs.len()) + let domain = GeneralEvaluationDomain::new(self.coeffs.len() + other.coeffs.len() - 1) .expect("field is not smooth enough to construct domain"); let mut self_evals = self.evaluate_over_domain_by_ref(domain); let other_evals = other.evaluate_over_domain_by_ref(domain); diff --git a/serialize-derive/Cargo.toml b/serialize-derive/Cargo.toml index 71915b17d..50182e67e 100644 --- a/serialize-derive/Cargo.toml +++ b/serialize-derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ark-serialize-derive" -version = "0.4.0" +version = "0.4.2" authors = [ "arkworks Contributors" ] description = "A library for deriving serialization traits for the arkworks ecosystem" homepage = "https://arkworks.rs" diff --git a/serialize/Cargo.toml b/serialize/Cargo.toml index b1d4b2bce..27f1bdff2 100644 --- a/serialize/Cargo.toml +++ b/serialize/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ark-serialize" -version = "0.4.0" +version = "0.4.2" authors = [ "arkworks contributors" ] description = "A library for serializing types in the arkworks ecosystem" homepage = "https://arkworks.rs" @@ -14,7 +14,7 @@ 
edition = "2021" rust-version = "1.63" [dependencies] -ark-serialize-derive = { version = "0.4.0", path = "../serialize-derive", optional = true } +ark-serialize-derive = { version = "0.4.2", path = "../serialize-derive", optional = true } ark-std = { version = "0.4.0", default-features = false } digest = { version = "0.10", default-features = false } num-bigint = { version = "0.4", default-features = false } @@ -23,7 +23,7 @@ num-bigint = { version = "0.4", default-features = false } sha2 = { version = "0.10", default-features = false} sha3 = { version = "0.10", default-features = false} blake2 = { version = "0.10", default-features = false} -ark-test-curves = { version = "0.4.0", path = "../test-curves", default-features = false, features = [ "bls12_381_curve"] } +ark-test-curves = { version = "0.4.2", path = "../test-curves", default-features = false, features = [ "bls12_381_curve"] } [features] diff --git a/serialize/README.md b/serialize/README.md index b95e6003c..d38517240 100644 --- a/serialize/README.md +++ b/serialize/README.md @@ -14,13 +14,13 @@ Most types in `arkworks-rs` implement these traits. 
To use `ark-serialize`, add the following to your `Cargo.toml`: ```toml -ark_serialize = "0.4" +ark-serialize = "0.4" ``` If you additionally want to derive implementations of the `CanonicalSerialize` and `CanonicalDeserialize` traits for your own types, you can enable the `derive` feature: ```toml -ark_serialize = { version = "0.4", features = ["derive"] } +ark-serialize = { version = "0.4", features = ["derive"] } ``` ### Examples diff --git a/serialize/src/impls.rs b/serialize/src/impls.rs index c0ad80e94..c71349f1b 100644 --- a/serialize/src/impls.rs +++ b/serialize/src/impls.rs @@ -507,8 +507,10 @@ impl CanonicalDeserialize for Vec { compress: Compress, validate: Validate, ) -> Result { - let len = u64::deserialize_with_mode(&mut reader, compress, validate)?; - let mut values = Vec::new(); + let len = u64::deserialize_with_mode(&mut reader, compress, validate)? + .try_into() + .map_err(|_| SerializationError::NotEnoughSpace)?; + let mut values = Vec::with_capacity(len); for _ in 0..len { values.push(T::deserialize_with_mode( &mut reader, diff --git a/serialize/src/lib.rs b/serialize/src/lib.rs index e92b693a5..1baf85a12 100644 --- a/serialize/src/lib.rs +++ b/serialize/src/lib.rs @@ -18,6 +18,9 @@ pub use ark_std::io::{Read, Write}; pub use error::*; pub use flags::*; +#[cfg(test)] +mod test; + #[cfg(feature = "derive")] #[doc(hidden)] pub use ark_serialize_derive::*; @@ -221,266 +224,3 @@ pub fn buffer_bit_byte_size(modulus_bits: usize) -> (usize, usize) { pub const fn buffer_byte_size(modulus_bits: usize) -> usize { (modulus_bits + 7) / 8 } - -#[cfg(test)] -mod test { - use super::*; - use ark_std::{ - collections::{BTreeMap, BTreeSet}, - rand::RngCore, - string::String, - vec, - vec::Vec, - }; - use num_bigint::BigUint; - - #[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Debug)] - struct Dummy; - - impl CanonicalSerialize for Dummy { - #[inline] - fn serialize_with_mode( - &self, - mut writer: W, - compress: Compress, - ) -> Result<(), 
SerializationError> { - match compress { - Compress::Yes => 100u8.serialize_compressed(&mut writer), - Compress::No => [100u8, 200u8].serialize_compressed(&mut writer), - } - } - - fn serialized_size(&self, compress: Compress) -> usize { - match compress { - Compress::Yes => 1, - Compress::No => 2, - } - } - } - - impl Valid for Dummy { - fn check(&self) -> Result<(), SerializationError> { - Ok(()) - } - } - impl CanonicalDeserialize for Dummy { - #[inline] - fn deserialize_with_mode( - reader: R, - compress: Compress, - _validate: Validate, - ) -> Result { - match compress { - Compress::Yes => assert_eq!(u8::deserialize_compressed(reader)?, 100u8), - Compress::No => { - assert_eq!(<[u8; 2]>::deserialize_compressed(reader)?, [100u8, 200u8]) - }, - } - Ok(Dummy) - } - } - - fn test_serialize< - T: PartialEq + core::fmt::Debug + CanonicalSerialize + CanonicalDeserialize, - >( - data: T, - ) { - for compress in [Compress::Yes, Compress::No] { - for validate in [Validate::Yes, Validate::No] { - let mut serialized = vec![0; data.serialized_size(compress)]; - data.serialize_with_mode(&mut serialized[..], compress) - .unwrap(); - let de = T::deserialize_with_mode(&serialized[..], compress, validate).unwrap(); - assert_eq!(data, de); - } - } - } - - fn test_hash(data: T) { - let h1 = data.hash::(); - - let mut hash = H::new(); - let mut serialized = vec![0; data.serialized_size(Compress::Yes)]; - data.serialize_compressed(&mut serialized[..]).unwrap(); - hash.update(&serialized); - let h2 = hash.finalize(); - - assert_eq!(h1, h2); - - let h3 = data.hash_uncompressed::(); - - let mut hash = H::new(); - serialized = vec![0; data.uncompressed_size()]; - data.serialize_uncompressed(&mut serialized[..]).unwrap(); - hash.update(&serialized); - let h4 = hash.finalize(); - - assert_eq!(h3, h4); - } - - // Serialize T, randomly mutate the data, and deserialize it. - // Ensure it fails. 
- // Up to the caller to provide a valid mutation criterion - // to ensure that this test always fails. - // This method requires a concrete instance of the data to be provided, - // to get the serialized size. - fn ensure_non_malleable_encoding< - T: PartialEq + core::fmt::Debug + CanonicalSerialize + CanonicalDeserialize, - >( - data: T, - valid_mutation: fn(&[u8]) -> bool, - ) { - let mut r = ark_std::test_rng(); - let mut serialized = vec![0; data.compressed_size()]; - r.fill_bytes(&mut serialized); - while !valid_mutation(&serialized) { - r.fill_bytes(&mut serialized); - } - let de = T::deserialize_compressed(&serialized[..]); - assert!(de.is_err()); - - let mut serialized = vec![0; data.uncompressed_size()]; - r.fill_bytes(&mut serialized); - while !valid_mutation(&serialized) { - r.fill_bytes(&mut serialized); - } - let de = T::deserialize_uncompressed(&serialized[..]); - assert!(de.is_err()); - } - - #[test] - fn test_array() { - test_serialize([1u64, 2, 3, 4, 5]); - test_serialize([1u8; 33]); - } - - #[test] - fn test_vec() { - test_serialize(vec![1u64, 2, 3, 4, 5]); - test_serialize(Vec::::new()); - } - - #[test] - fn test_uint() { - test_serialize(192830918usize); - test_serialize(192830918u64); - test_serialize(192830918u32); - test_serialize(22313u16); - test_serialize(123u8); - } - - #[test] - fn test_string() { - test_serialize(String::from("arkworks")); - } - - #[test] - fn test_tuple() { - test_serialize(()); - test_serialize((123u64, Dummy)); - test_serialize((123u64, 234u32, Dummy)); - } - - #[test] - fn test_tuple_vec() { - test_serialize(vec![ - (Dummy, Dummy, Dummy), - (Dummy, Dummy, Dummy), - (Dummy, Dummy, Dummy), - ]); - test_serialize(vec![ - (86u8, 98u64, Dummy), - (86u8, 98u64, Dummy), - (86u8, 98u64, Dummy), - ]); - } - - #[test] - fn test_option() { - test_serialize(Some(Dummy)); - test_serialize(None::); - - test_serialize(Some(10u64)); - test_serialize(None::); - } - - #[test] - fn test_bool() { - test_serialize(true); - 
test_serialize(false); - - let valid_mutation = |data: &[u8]| -> bool { data.len() == 1 && data[0] > 1 }; - for _ in 0..10 { - ensure_non_malleable_encoding(true, valid_mutation); - ensure_non_malleable_encoding(false, valid_mutation); - } - } - - #[test] - fn test_btreemap() { - let mut map = BTreeMap::new(); - map.insert(0u64, Dummy); - map.insert(5u64, Dummy); - test_serialize(map); - let mut map = BTreeMap::new(); - map.insert(10u64, vec![1u8, 2u8, 3u8]); - map.insert(50u64, vec![4u8, 5u8, 6u8]); - test_serialize(map); - } - - #[test] - fn test_btreeset() { - let mut set = BTreeSet::new(); - set.insert(Dummy); - set.insert(Dummy); - test_serialize(set); - let mut set = BTreeSet::new(); - set.insert(vec![1u8, 2u8, 3u8]); - set.insert(vec![4u8, 5u8, 6u8]); - test_serialize(set); - } - - #[test] - fn test_phantomdata() { - test_serialize(core::marker::PhantomData::); - } - - #[test] - fn test_sha2() { - test_hash::<_, sha2::Sha256>(Dummy); - test_hash::<_, sha2::Sha512>(Dummy); - } - - #[test] - fn test_blake2() { - test_hash::<_, blake2::Blake2b512>(Dummy); - test_hash::<_, blake2::Blake2s256>(Dummy); - } - - #[test] - fn test_sha3() { - test_hash::<_, sha3::Sha3_256>(Dummy); - test_hash::<_, sha3::Sha3_512>(Dummy); - } - - #[test] - fn test_biguint() { - let biguint = BigUint::from(123456u64); - test_serialize(biguint.clone()); - - let mut expected = (biguint.to_bytes_le().len() as u64).to_le_bytes().to_vec(); - expected.extend_from_slice(&biguint.to_bytes_le()); - - let mut bytes = Vec::new(); - biguint - .serialize_with_mode(&mut bytes, Compress::Yes) - .unwrap(); - assert_eq!(bytes, expected); - - let mut bytes = Vec::new(); - biguint - .serialize_with_mode(&mut bytes, Compress::No) - .unwrap(); - assert_eq!(bytes, expected); - } -} diff --git a/serialize/src/test.rs b/serialize/src/test.rs new file mode 100644 index 000000000..25977357c --- /dev/null +++ b/serialize/src/test.rs @@ -0,0 +1,257 @@ +use super::*; +use ark_std::{ + collections::{BTreeMap, 
BTreeSet}, + rand::RngCore, + string::String, + vec, + vec::Vec, +}; +use num_bigint::BigUint; + +#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Debug)] +struct Dummy; + +impl CanonicalSerialize for Dummy { + #[inline] + fn serialize_with_mode( + &self, + mut writer: W, + compress: Compress, + ) -> Result<(), SerializationError> { + match compress { + Compress::Yes => 100u8.serialize_compressed(&mut writer), + Compress::No => [100u8, 200u8].serialize_compressed(&mut writer), + } + } + + fn serialized_size(&self, compress: Compress) -> usize { + match compress { + Compress::Yes => 1, + Compress::No => 2, + } + } +} + +impl Valid for Dummy { + fn check(&self) -> Result<(), SerializationError> { + Ok(()) + } +} +impl CanonicalDeserialize for Dummy { + #[inline] + fn deserialize_with_mode( + reader: R, + compress: Compress, + _validate: Validate, + ) -> Result { + match compress { + Compress::Yes => assert_eq!(u8::deserialize_compressed(reader)?, 100u8), + Compress::No => { + assert_eq!(<[u8; 2]>::deserialize_compressed(reader)?, [100u8, 200u8]) + }, + } + Ok(Dummy) + } +} + +fn test_serialize( + data: T, +) { + for compress in [Compress::Yes, Compress::No] { + for validate in [Validate::Yes, Validate::No] { + let mut serialized = vec![0; data.serialized_size(compress)]; + data.serialize_with_mode(&mut serialized[..], compress) + .unwrap(); + let de = T::deserialize_with_mode(&serialized[..], compress, validate).unwrap(); + assert_eq!(data, de); + } + } +} + +fn test_hash(data: T) { + let h1 = data.hash::(); + + let mut hash = H::new(); + let mut serialized = vec![0; data.serialized_size(Compress::Yes)]; + data.serialize_compressed(&mut serialized[..]).unwrap(); + hash.update(&serialized); + let h2 = hash.finalize(); + + assert_eq!(h1, h2); + + let h3 = data.hash_uncompressed::(); + + let mut hash = H::new(); + serialized = vec![0; data.uncompressed_size()]; + data.serialize_uncompressed(&mut serialized[..]).unwrap(); + hash.update(&serialized); + let h4 = 
hash.finalize(); + + assert_eq!(h3, h4); +} + +// Serialize T, randomly mutate the data, and deserialize it. +// Ensure it fails. +// Up to the caller to provide a valid mutation criterion +// to ensure that this test always fails. +// This method requires a concrete instance of the data to be provided, +// to get the serialized size. +fn ensure_non_malleable_encoding< + T: PartialEq + core::fmt::Debug + CanonicalSerialize + CanonicalDeserialize, +>( + data: T, + valid_mutation: fn(&[u8]) -> bool, +) { + let mut r = ark_std::test_rng(); + let mut serialized = vec![0; data.compressed_size()]; + r.fill_bytes(&mut serialized); + while !valid_mutation(&serialized) { + r.fill_bytes(&mut serialized); + } + let de = T::deserialize_compressed(&serialized[..]); + assert!(de.is_err()); + + let mut serialized = vec![0; data.uncompressed_size()]; + r.fill_bytes(&mut serialized); + while !valid_mutation(&serialized) { + r.fill_bytes(&mut serialized); + } + let de = T::deserialize_uncompressed(&serialized[..]); + assert!(de.is_err()); +} + +#[test] +fn test_array() { + test_serialize([1u64, 2, 3, 4, 5]); + test_serialize([1u8; 33]); +} + +#[test] +fn test_vec() { + test_serialize(vec![1u64, 2, 3, 4, 5]); + test_serialize(Vec::::new()); +} + +#[test] +fn test_uint() { + test_serialize(192830918usize); + test_serialize(192830918u64); + test_serialize(192830918u32); + test_serialize(22313u16); + test_serialize(123u8); +} + +#[test] +fn test_string() { + test_serialize(String::from("arkworks")); +} + +#[test] +fn test_tuple() { + test_serialize(()); + test_serialize((123u64, Dummy)); + test_serialize((123u64, 234u32, Dummy)); +} + +#[test] +fn test_tuple_vec() { + test_serialize(vec![ + (Dummy, Dummy, Dummy), + (Dummy, Dummy, Dummy), + (Dummy, Dummy, Dummy), + ]); + test_serialize(vec![ + (86u8, 98u64, Dummy), + (86u8, 98u64, Dummy), + (86u8, 98u64, Dummy), + ]); +} + +#[test] +fn test_option() { + test_serialize(Some(Dummy)); + test_serialize(None::); + + 
test_serialize(Some(10u64)); + test_serialize(None::); +} + +#[test] +fn test_bool() { + test_serialize(true); + test_serialize(false); + + let valid_mutation = |data: &[u8]| -> bool { data.len() == 1 && data[0] > 1 }; + for _ in 0..10 { + ensure_non_malleable_encoding(true, valid_mutation); + ensure_non_malleable_encoding(false, valid_mutation); + } +} + +#[test] +fn test_btreemap() { + let mut map = BTreeMap::new(); + map.insert(0u64, Dummy); + map.insert(5u64, Dummy); + test_serialize(map); + let mut map = BTreeMap::new(); + map.insert(10u64, vec![1u8, 2u8, 3u8]); + map.insert(50u64, vec![4u8, 5u8, 6u8]); + test_serialize(map); +} + +#[test] +fn test_btreeset() { + let mut set = BTreeSet::new(); + set.insert(Dummy); + set.insert(Dummy); + test_serialize(set); + let mut set = BTreeSet::new(); + set.insert(vec![1u8, 2u8, 3u8]); + set.insert(vec![4u8, 5u8, 6u8]); + test_serialize(set); +} + +#[test] +fn test_phantomdata() { + test_serialize(core::marker::PhantomData::); +} + +#[test] +fn test_sha2() { + test_hash::<_, sha2::Sha256>(Dummy); + test_hash::<_, sha2::Sha512>(Dummy); +} + +#[test] +fn test_blake2() { + test_hash::<_, blake2::Blake2b512>(Dummy); + test_hash::<_, blake2::Blake2s256>(Dummy); +} + +#[test] +fn test_sha3() { + test_hash::<_, sha3::Sha3_256>(Dummy); + test_hash::<_, sha3::Sha3_512>(Dummy); +} + +#[test] +fn test_biguint() { + let biguint = BigUint::from(123456u64); + test_serialize(biguint.clone()); + + let mut expected = (biguint.to_bytes_le().len() as u64).to_le_bytes().to_vec(); + expected.extend_from_slice(&biguint.to_bytes_le()); + + let mut bytes = Vec::new(); + biguint + .serialize_with_mode(&mut bytes, Compress::Yes) + .unwrap(); + assert_eq!(bytes, expected); + + let mut bytes = Vec::new(); + biguint + .serialize_with_mode(&mut bytes, Compress::No) + .unwrap(); + assert_eq!(bytes, expected); +} diff --git a/test-curves/Cargo.toml b/test-curves/Cargo.toml index 01221fdfa..358fdc2ba 100644 --- a/test-curves/Cargo.toml +++ 
b/test-curves/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ark-test-curves" -version = "0.4.0" +version = "0.4.2" authors = [ "arkworks contributors" ] description = "A library for testing ark-ec & ark-poly" homepage = "https://arkworks.rs" @@ -15,13 +15,13 @@ rust-version = "1.63" [dependencies] ark-std = { version = "0.4.0", default-features = false } -ark-ff = { version = "0.4.0", path = "../ff", default-features = false } -ark-ec = { version = "0.4.0", path = "../ec", default-features = false } +ark-ff = { version = "0.4.2", path = "../ff", default-features = false } +ark-ec = { version = "0.4.2", path = "../ec", default-features = false } [dev-dependencies] -ark-serialize = { version = "0.4.0", path = "../serialize", default-features = false } -ark-algebra-test-templates = { version = "0.4.0", path = "../test-templates", default-features = false } -ark-algebra-bench-templates = { version = "0.4.0", path = "../bench-templates", default-features = false } +ark-serialize = { version = "0.4.2", path = "../serialize", default-features = false } +ark-algebra-test-templates = { version = "0.4.2", path = "../test-templates", default-features = false } +ark-algebra-bench-templates = { version = "0.4.2", path = "../bench-templates", default-features = false } [features] default = [] diff --git a/test-curves/src/bls12_381/g1.rs b/test-curves/src/bls12_381/g1.rs index f653e6f7a..ff05bb6aa 100644 --- a/test-curves/src/bls12_381/g1.rs +++ b/test-curves/src/bls12_381/g1.rs @@ -2,9 +2,10 @@ use crate::bls12_381::*; use ark_ec::{ hashing::curve_maps::wb::{IsogenyMap, WBConfig}, models::CurveConfig, + scalar_mul::glv::GLVConfig, short_weierstrass::{self, *}, }; -use ark_ff::{MontFp, Zero}; +use ark_ff::{BigInt, MontFp, PrimeField, Zero}; pub type G1Affine = Affine; pub type G1Projective = Projective; @@ -41,6 +42,11 @@ impl short_weierstrass::SWCurveConfig for Config { Self::BaseField::zero() } + fn mul_projective(p: &G1Projective, scalar: &[u64]) -> G1Projective { + let s = 
Self::ScalarField::from_sign_and_limbs(true, scalar); + GLVConfig::glv_mul_projective(*p, s) + } + #[inline] fn clear_cofactor(p: &G1Affine) -> G1Affine { // Using the effective cofactor, as explained in @@ -71,6 +77,38 @@ pub const G1_GENERATOR_X: Fq = MontFp!("3685416753713387016781088315183077757961 #[rustfmt::skip] pub const G1_GENERATOR_Y: Fq = MontFp!("1339506544944476473020471379941921221584933875938349620426543736416511423956333506472724655353366534992391756441569"); +impl GLVConfig for Config { + const ENDO_COEFFS: &'static[Self::BaseField] = &[ + MontFp!("793479390729215512621379701633421447060886740281060493010456487427281649075476305620758731620350") + ]; + + const LAMBDA: Self::ScalarField = + MontFp!("52435875175126190479447740508185965837461563690374988244538805122978187051009"); + + /// Optimal decomposition as per Ch. 6.3.2: Decompositions for the k = 12 BLS Family, + /// from Guide to Pairing Based Cryptography by El Mrabet + const SCALAR_DECOMP_COEFFS: [(bool, ::BigInt); 4] = [ + // v_2 = (X^2, 1) + (true, BigInt!("228988810152649578064853576960394133504")), + (true, BigInt!("1")), + // v_1 = (-1, X^2-1) + (false, BigInt!("1")), + (true, BigInt!("228988810152649578064853576960394133503")), + ]; + + fn endomorphism(p: &Projective) -> Projective { + let mut res = *p; + res.x *= Self::ENDO_COEFFS[0]; + res + } + + fn endomorphism_affine(p: &Affine) -> Affine { + let mut res = *p; + res.x *= Self::ENDO_COEFFS[0]; + res + } +} + #[cfg(test)] mod test { use super::*; diff --git a/test-curves/src/bls12_381/g2.rs b/test-curves/src/bls12_381/g2.rs index f4c970910..4b0065eb1 100644 --- a/test-curves/src/bls12_381/g2.rs +++ b/test-curves/src/bls12_381/g2.rs @@ -6,9 +6,9 @@ use ark_ec::{ hashing::curve_maps::wb::{IsogenyMap, WBConfig}, models::CurveConfig, short_weierstrass::{self, *}, - AffineRepr, CurveGroup, Group, + AffineRepr, CurveGroup, PrimeGroup, }; -use ark_ff::{BigInt, Field, MontFp, Zero}; +use ark_ff::{AdditiveGroup, BigInt, Field, MontFp, 
Zero}; pub type G2Affine = bls12::G2Affine; pub type G2Projective = bls12::G2Projective; diff --git a/test-curves/src/bls12_381/g2_swu_iso.rs b/test-curves/src/bls12_381/g2_swu_iso.rs index 0790cf99f..988e30ef1 100644 --- a/test-curves/src/bls12_381/g2_swu_iso.rs +++ b/test-curves/src/bls12_381/g2_swu_iso.rs @@ -80,27 +80,27 @@ pub const ISOGENY_MAP_TO_G2 : IsogenyMap<'_, SwuIsoConfig, g2::Config> = Isogen x_map_numerator: &[ Fq2::new( MontFp!("889424345604814976315064405719089812568196182208668418962679585805340366775741747653930584250892369786198727235542"), - MontFp!("889424345604814976315064405719089812568196182208668418962679585805340366775741747653930584250892369786198727235542")), + MontFp!("889424345604814976315064405719089812568196182208668418962679585805340366775741747653930584250892369786198727235542")), Fq2::new( MontFp!("0"), MontFp!("2668273036814444928945193217157269437704588546626005256888038757416021100327225242961791752752677109358596181706522")), Fq2::new( MontFp!("2668273036814444928945193217157269437704588546626005256888038757416021100327225242961791752752677109358596181706526"), - MontFp!("1334136518407222464472596608578634718852294273313002628444019378708010550163612621480895876376338554679298090853261")), + MontFp!("1334136518407222464472596608578634718852294273313002628444019378708010550163612621480895876376338554679298090853261")), Fq2::new( - MontFp!("3557697382419259905260257622876359250272784728834673675850718343221361467102966990615722337003569479144794908942033"), + MontFp!("3557697382419259905260257622876359250272784728834673675850718343221361467102966990615722337003569479144794908942033"), MontFp!("0")), ], x_map_denominator: &[ Fq2::new( - MontFp!("0"), - MontFp!("4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559715")), + MontFp!("0"), + MontFp!("4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559715")), 
Fq2::new( - MontFp!("12"), - MontFp!("4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559775")), + MontFp!("12"), + MontFp!("4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559775")), Fq2::new( - MontFp!("1"), + MontFp!("1"), MontFp!("0")), ], diff --git a/test-curves/src/bls12_381/tests.rs b/test-curves/src/bls12_381/tests.rs index 863be4afc..edd460be3 100644 --- a/test-curves/src/bls12_381/tests.rs +++ b/test-curves/src/bls12_381/tests.rs @@ -17,6 +17,8 @@ test_group!(g2; G2Projective; sw); #[cfg(feature = "bls12_381_curve")] test_group!(pairing_output; ark_ec::pairing::PairingOutput; msm); #[cfg(feature = "bls12_381_curve")] +test_group!(glv; G1Projective; glv); +#[cfg(feature = "bls12_381_curve")] test_pairing!(pairing; crate::bls12_381::Bls12_381); #[cfg(feature = "bls12_381_curve")] test_h2c!(g1_h2c; "./src/testdata"; "BLS12381G1"; crate::bls12_381::g1::Config; crate::bls12_381::Fq; crate::bls12_381::Fq; 1); diff --git a/test-curves/src/bn384_small_two_adicity/tests.rs b/test-curves/src/bn384_small_two_adicity/tests.rs index 1313324c4..a5a835553 100644 --- a/test-curves/src/bn384_small_two_adicity/tests.rs +++ b/test-curves/src/bn384_small_two_adicity/tests.rs @@ -1,5 +1,7 @@ #![allow(unused_imports)] -use ark_ec::{models::short_weierstrass::SWCurveConfig, pairing::Pairing, AffineRepr, CurveGroup}; +use ark_ec::{ + models::short_weierstrass::SWCurveConfig, pairing::Pairing, AffineRepr, CurveGroup, PrimeGroup, +}; use ark_ff::{Field, One, UniformRand, Zero}; use ark_std::{rand::Rng, test_rng}; diff --git a/test-curves/src/lib.rs b/test-curves/src/lib.rs index 388864139..9337089fa 100644 --- a/test-curves/src/lib.rs +++ b/test-curves/src/lib.rs @@ -1,15 +1,15 @@ #![no_std] -extern crate ark_ff; -pub use ark_ff::*; +pub use ark_ff; +pub use ark_ff::{fields::models::*, FftField, Field, LegendreSymbol, MontFp, PrimeField}; -extern crate ark_ec; 
+pub use ark_ec; pub use ark_ec::*; #[cfg(any(feature = "bls12_381_scalar_field", feature = "bls12_381_curve"))] pub mod bls12_381; -#[cfg(feature = "ed_on_bls12_381")] +#[cfg(any(feature = "bls12_381_scalar_field", feature = "ed_on_bls12_381"))] pub mod ed_on_bls12_381; #[cfg(feature = "mnt6_753")] diff --git a/test-curves/src/secp256k1/g1.rs b/test-curves/src/secp256k1/g1.rs index 9e7e4de24..1613b481f 100644 --- a/test-curves/src/secp256k1/g1.rs +++ b/test-curves/src/secp256k1/g1.rs @@ -1,6 +1,6 @@ use crate::secp256k1::{Fq, Fr}; use ark_ec::{models::CurveConfig, short_weierstrass::*}; -use ark_ff::{Field, MontFp, Zero}; +use ark_ff::{AdditiveGroup, Field, MontFp, Zero}; pub type G1Affine = Affine; pub type G1Projective = Projective; diff --git a/test-templates/Cargo.toml b/test-templates/Cargo.toml index be273039e..ccd12fecd 100644 --- a/test-templates/Cargo.toml +++ b/test-templates/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ark-algebra-test-templates" -version = "0.4.0" +version = "0.4.2" authors = [ "arkworks contributors" ] description = "A library for tests for finite fields, elliptic curves, and pairings" homepage = "https://arkworks.rs" @@ -15,9 +15,9 @@ rust-version = "1.63" [dependencies] ark-std = { version = "0.4.0", default-features = false } -ark-serialize = { version = "0.4.0", path = "../serialize", default-features = false } -ark-ff = { version = "0.4.0", path = "../ff", default-features = false } -ark-ec = { version = "0.4.0", path = "../ec", default-features = false } +ark-serialize = { version = "0.4.2", path = "../serialize", default-features = false } +ark-ff = { version = "0.4.2", path = "../ff", default-features = false } +ark-ec = { version = "0.4.2", path = "../ec", default-features = false } num-bigint = { version = "0.4", default-features = false } num-integer = { version = "0.1", default-features = false } num-traits = { version = "0.2", default-features = false } diff --git a/test-templates/src/fields.rs 
b/test-templates/src/fields.rs index 15bb18005..568de6abf 100644 --- a/test-templates/src/fields.rs +++ b/test-templates/src/fields.rs @@ -2,6 +2,7 @@ #![allow(clippy::eq_op)] use ark_std::rand::Rng; + #[derive(Default, Clone, Copy, Debug)] pub struct DummyFlags; @@ -141,6 +142,8 @@ macro_rules! __test_field { #[test] fn test_add_properties() { use ark_std::UniformRand; + use ark_ff::AdditiveGroup; + let mut rng = test_rng(); let zero = <$field>::zero(); assert_eq!(-zero, zero); @@ -258,9 +261,9 @@ macro_rules! __test_field { assert_eq!(a * (b + c), a * b + a * c, "Distributivity failed"); assert_eq!(b * (a + c), b * a + b * c, "Distributivity failed"); assert_eq!(c * (a + b), c * a + c * b, "Distributivity failed"); - assert_eq!((a + b).square(), a.square() + b.square() + a * b.double(), "Distributivity for square failed"); - assert_eq!((b + c).square(), c.square() + b.square() + c * b.double(), "Distributivity for square failed"); - assert_eq!((c + a).square(), a.square() + c.square() + a * c.double(), "Distributivity for square failed"); + assert_eq!((a + b).square(), a.square() + b.square() + a * ark_ff::AdditiveGroup::double(&b), "Distributivity for square failed"); + assert_eq!((b + c).square(), c.square() + b.square() + c * ark_ff::AdditiveGroup::double(&b), "Distributivity for square failed"); + assert_eq!((c + a).square(), a.square() + c.square() + a * ark_ff::AdditiveGroup::double(&c), "Distributivity for square failed"); } } @@ -389,7 +392,8 @@ macro_rules! 
__test_field { #[test] fn test_sum_of_products_edge_case() { - use ark_ff::BigInteger; + use ark_ff::{AdditiveGroup, BigInteger}; + let mut a_max = <$field>::ZERO.into_bigint(); for (i, limb) in a_max.as_mut().iter_mut().enumerate() { if i == <$field as PrimeField>::BigInt::NUM_LIMBS - 1 { diff --git a/test-templates/src/glv.rs b/test-templates/src/glv.rs new file mode 100644 index 000000000..866594faf --- /dev/null +++ b/test-templates/src/glv.rs @@ -0,0 +1,67 @@ +use std::ops::Mul; + +use ark_ec::{ + scalar_mul::{glv::GLVConfig, sw_double_and_add_affine, sw_double_and_add_projective}, + short_weierstrass::{Affine, Projective}, + AffineRepr, CurveGroup, PrimeGroup, +}; +use ark_ff::PrimeField; +use ark_std::UniformRand; + +pub fn glv_scalar_decomposition() { + let mut rng = ark_std::test_rng(); + for _i in 0..100 { + let k = P::ScalarField::rand(&mut rng); + + let ((is_k1_positive, k1), (is_k2_positive, k2)) = +
<P as GLVConfig>
::scalar_decomposition(k); + + if is_k1_positive && is_k2_positive { + assert_eq!(k1 + k2 * P::LAMBDA, k); + } + if is_k1_positive && !is_k2_positive { + assert_eq!(k1 - k2 * P::LAMBDA, k); + } + if !is_k1_positive && is_k2_positive { + assert_eq!(-k1 + k2 * P::LAMBDA, k); + } + if !is_k1_positive && !is_k2_positive { + assert_eq!(-k1 - k2 * P::LAMBDA, k); + } + // could be nice to check if k1 and k2 are indeed small. + } +} + +pub fn glv_endomorphism_eigenvalue() { + let g = Projective::
<P>
::generator(); + let endo_g =
<P as GLVConfig>
::endomorphism(&g); + assert_eq!(endo_g, g.mul(P::LAMBDA)); +} + +pub fn glv_projective() { + // check that glv_mul indeed computes the scalar multiplication + let mut rng = ark_std::test_rng(); + + let g = Projective::
<P>
::generator(); + for _i in 0..100 { + let k = P::ScalarField::rand(&mut rng); + + let k_g =
<P as GLVConfig>
::glv_mul_projective(g, k); + let k_g_2 = sw_double_and_add_projective(&g, &k.into_bigint()); + assert_eq!(k_g, k_g_2); + } +} + +pub fn glv_affine() { + // check that glv_mul indeed computes the scalar multiplication + let mut rng = ark_std::test_rng(); + + let g = Affine::
<P>
::generator(); + for _i in 0..100 { + let k = P::ScalarField::rand(&mut rng); + + let k_g =
<P as GLVConfig>
::glv_mul_affine(g, k); + let k_g_2 = sw_double_and_add_affine(&g, &k.into_bigint()).into_affine(); + assert_eq!(k_g, k_g_2); + } +} diff --git a/test-templates/src/groups.rs b/test-templates/src/groups.rs index b223443d2..a17634956 100644 --- a/test-templates/src/groups.rs +++ b/test-templates/src/groups.rs @@ -2,7 +2,7 @@ #[doc(hidden)] macro_rules! __test_group { ($group: ty) => { - type ScalarField = <$group as Group>::ScalarField; + type ScalarField = <$group as PrimeGroup>::ScalarField; #[test] fn test_add_properties() { let mut rng = &mut ark_std::test_rng(); @@ -122,6 +122,8 @@ macro_rules! __test_group { let mut rng = ark_std::test_rng(); + // Test that serializing and deserializing random elements + // works. for _ in 0..ITERATIONS { let a = <$group>::rand(&mut rng); { @@ -133,29 +135,37 @@ macro_rules! __test_group { let b = <$group>::deserialize_with_mode(&mut cursor, compress, validate).unwrap(); assert_eq!(a, b); } + } - { - let a = <$group>::zero(); - let mut serialized = vec![0; buf_size]; - let mut cursor = Cursor::new(&mut serialized[..]); - a.serialize_with_mode(&mut cursor, compress).unwrap(); - let mut cursor = Cursor::new(&serialized[..]); - let b = <$group>::deserialize_with_mode(&mut cursor, compress, validate).unwrap(); - assert_eq!(a, b); - } + // Test that serializing and deserializing the identity element + // works. 
+ { + let a = <$group>::zero(); + let mut serialized = vec![0; buf_size]; + let mut cursor = Cursor::new(&mut serialized[..]); + a.serialize_with_mode(&mut cursor, compress).unwrap(); + let mut cursor = Cursor::new(&serialized[..]); + let b = <$group>::deserialize_with_mode(&mut cursor, compress, validate).unwrap(); + assert_eq!(a, b); + } - { - let a = <$group>::zero(); - let mut serialized = vec![0; buf_size - 1]; - let mut cursor = Cursor::new(&mut serialized[..]); - a.serialize_with_mode(&mut cursor, compress).unwrap_err(); - } + // Test that serializing the identity point into a buffer that + // is not big enough will yield an error. + { + let a = <$group>::zero(); + let mut serialized = vec![0; buf_size - 1]; + let mut cursor = Cursor::new(&mut serialized[..]); + a.serialize_with_mode(&mut cursor, compress).unwrap_err(); + } - { - let serialized = vec![0; buf_size - 1]; - let mut cursor = Cursor::new(&serialized[..]); - <$group>::deserialize_with_mode(&mut cursor, compress, validate).unwrap_err(); - } + // Test that deserializing from a buffer that is not big enough + // will yield and error. + // This test does not explicitly check that the error is due to + // a buffer that is not big enough. + { + let serialized = vec![0; buf_size - 1]; + let mut cursor = Cursor::new(&serialized[..]); + <$group>::deserialize_with_mode(&mut cursor, compress, validate).unwrap_err(); } } } @@ -357,6 +367,27 @@ macro_rules! __test_group { assert_eq!(a, ::COEFF_A); assert_eq!(b, ::COEFF_B); } + }; + ($group:ty; glv) => { + type Config = <$group as CurveGroup>::Config; + + #[test] + fn test_scalar_decomposition() + { + $crate::glv::glv_scalar_decomposition::(); + } + + + #[test] + fn test_endomorphism_eigenvalue() { + $crate::glv::glv_endomorphism_eigenvalue::(); + } + + #[test] + fn test_glv_mul() { + $crate::glv::glv_projective::(); + $crate::glv::glv_affine::(); + } } } @@ -366,7 +397,7 @@ macro_rules! 
test_group { mod $mod_name { use super::*; use ark_ff::*; - use ark_ec::{Group, CurveGroup, ScalarMul, AffineRepr, CurveConfig, short_weierstrass::SWCurveConfig, twisted_edwards::TECurveConfig, scalar_mul::{*, wnaf::*}}; + use ark_ec::{PrimeGroup, CurveGroup, ScalarMul, AffineRepr, CurveConfig, short_weierstrass::SWCurveConfig, twisted_edwards::TECurveConfig, scalar_mul::{*, wnaf::*}}; use ark_serialize::*; use ark_std::{io::Cursor, rand::Rng, vec::Vec, test_rng, vec, Zero, One, UniformRand}; const ITERATIONS: usize = 500; diff --git a/test-templates/src/lib.rs b/test-templates/src/lib.rs index 37fa548fb..185058935 100644 --- a/test-templates/src/lib.rs +++ b/test-templates/src/lib.rs @@ -2,6 +2,7 @@ pub mod groups; #[macro_use] pub mod fields; +pub mod glv; pub mod msm; #[macro_use] pub mod pairing; diff --git a/test-templates/src/pairing.rs b/test-templates/src/pairing.rs index 8386969ca..ec4fd4e3e 100644 --- a/test-templates/src/pairing.rs +++ b/test-templates/src/pairing.rs @@ -3,7 +3,7 @@ macro_rules! test_pairing { ($mod_name: ident; $Pairing: ty) => { mod $mod_name { pub const ITERATIONS: usize = 100; - use ark_ec::{pairing::*, CurveGroup, Group}; + use ark_ec::{pairing::*, CurveGroup, PrimeGroup}; use ark_ff::{Field, PrimeField}; use ark_std::{test_rng, One, UniformRand, Zero}; #[test]