diff --git a/stwo_cairo_verifier/src/fields/cm31.cairo b/stwo_cairo_verifier/src/fields/cm31.cairo
index fef2aa65..0daef42f 100644
--- a/stwo_cairo_verifier/src/fields/cm31.cairo
+++ b/stwo_cairo_verifier/src/fields/cm31.cairo
@@ -1,5 +1,6 @@
 use core::num::traits::{One, Zero};
-use super::m31::{M31, m31, M31Trait};
+use core::ops::{AddAssign, MulAssign, SubAssign};
+use super::m31::{M31, M31Impl, m31, M31Trait};
 
 #[derive(Copy, Drop, Debug, PartialEq)]
 pub struct CM31 {
@@ -14,34 +15,104 @@ pub impl CM31Impl of CM31Trait {
         let denom_inverse: M31 = (self.a * self.a + self.b * self.b).inverse();
         CM31 { a: self.a * denom_inverse, b: -self.b * denom_inverse }
     }
+
+    /// Computes all `1/arr[i]` with a single call to `inverse()` using Montgomery batch inversion.
+    fn batch_inverse(arr: Array<CM31>) -> Array<CM31> {
+        // Construct array `1, z, zy, ..., zy..b`.
+        let mut prefix_product_rev = array![];
+        let mut cumulative_product: CM31 = One::one();
+
+        let mut i = arr.len();
+        while i != 0 {
+            i -= 1;
+            prefix_product_rev.append(cumulative_product);
+            cumulative_product *= *arr[i];
+        };
+
+        // Compute `1/zy..a`.
+        let mut cumulative_product_inv = cumulative_product.inverse();
+
+        // Compute all `1/a = zy..b/zy..a, 1/b = zy..c/zy..b, ...`.
+        let mut inverses = array![];
+
+        let mut i = prefix_product_rev.len();
+        for v in arr {
+            i -= 1;
+            inverses.append(cumulative_product_inv * *prefix_product_rev[i]);
+            cumulative_product_inv *= v;
+        };
+
+        inverses
+    }
+
+    // TODO(andrew): When associated types are supported, support `Mul`.
+    #[inline]
+    fn mul_m31(self: CM31, rhs: M31) -> CM31 {
+        CM31 { a: self.a * rhs, b: self.b * rhs }
+    }
+
+    // TODO(andrew): When associated types are supported, support `Sub`.
+    #[inline]
+    fn sub_m31(self: CM31, rhs: M31) -> CM31 {
+        CM31 { a: self.a - rhs, b: self.b }
+    }
 }
 
 pub impl CM31Add of core::traits::Add<CM31> {
+    #[inline]
     fn add(lhs: CM31, rhs: CM31) -> CM31 {
         CM31 { a: lhs.a + rhs.a, b: lhs.b + rhs.b }
     }
 }
+
 pub impl CM31Sub of core::traits::Sub<CM31> {
+    #[inline]
     fn sub(lhs: CM31, rhs: CM31) -> CM31 {
         CM31 { a: lhs.a - rhs.a, b: lhs.b - rhs.b }
     }
 }
+
 pub impl CM31Mul of core::traits::Mul<CM31> {
+    #[inline]
     fn mul(lhs: CM31, rhs: CM31) -> CM31 {
         CM31 { a: lhs.a * rhs.a - lhs.b * rhs.b, b: lhs.a * rhs.b + lhs.b * rhs.a }
     }
 }
+
+pub impl CM31AddAssign of AddAssign<CM31, CM31> {
+    #[inline]
+    fn add_assign(ref self: CM31, rhs: CM31) {
+        self = self + rhs
+    }
+}
+
+pub impl CM31SubAssign of SubAssign<CM31, CM31> {
+    #[inline]
+    fn sub_assign(ref self: CM31, rhs: CM31) {
+        self = self - rhs
+    }
+}
+
+pub impl CM31MulAssign of MulAssign<CM31, CM31> {
+    #[inline]
+    fn mul_assign(ref self: CM31, rhs: CM31) {
+        self = self * rhs
+    }
+}
+
 pub impl CM31Zero of Zero<CM31> {
     fn zero() -> CM31 {
         cm31(0, 0)
     }
+
     fn is_zero(self: @CM31) -> bool {
         (*self).a.is_zero() && (*self).b.is_zero()
     }
+
     fn is_non_zero(self: @CM31) -> bool {
         (*self).a.is_non_zero() || (*self).b.is_non_zero()
     }
 }
+
 pub impl CM31One of One<CM31> {
     fn one() -> CM31 {
         cm31(1, 0)
@@ -53,17 +124,28 @@ pub impl CM31One of One<CM31> {
         (*self).a.is_non_one() || (*self).b.is_non_zero()
     }
 }
+
 pub impl M31IntoCM31 of core::traits::Into<M31, CM31> {
+    #[inline]
     fn into(self: M31) -> CM31 {
         CM31 { a: self, b: m31(0) }
     }
 }
+
 pub impl CM31Neg of Neg<CM31> {
+    #[inline]
     fn neg(a: CM31) -> CM31 {
         CM31 { a: -a.a, b: -a.b }
     }
 }
 
+impl CM31PartialOrd of PartialOrd<CM31> {
+    fn lt(lhs: CM31, rhs: CM31) -> bool {
+        lhs.a < rhs.a || (lhs.a == rhs.a && lhs.b < rhs.b)
+    }
+}
+
+#[inline]
 pub fn cm31(a: u32, b: u32) -> CM31 {
     CM31 { a: m31(a), b: m31(b) }
 }
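The `batch_inverse` added above is Montgomery batch inversion: the reverse pass accumulates suffix products of the input, a single `inverse()` is taken of the product of all elements, and the forward pass then recovers each `1/arr[i]` with two multiplications per element, so n inversions cost one field inversion plus roughly 3n multiplications. A minimal sanity check of that behaviour, assuming the module is exposed as `stwo_cairo_verifier::fields::cm31` (path and test are illustrative, not part of this patch):

```cairo
use core::num::traits::One;
use stwo_cairo_verifier::fields::cm31::{CM31, cm31, CM31Impl};

#[test]
fn test_batch_inverse_agrees_with_inverse() {
    // Build the same non-zero values twice, since `batch_inverse` consumes its input array.
    let values = array![cm31(1, 2), cm31(974, 0), cm31(12345, 67890)];
    let inverses = CM31Impl::batch_inverse(array![cm31(1, 2), cm31(974, 0), cm31(12345, 67890)]);

    let one: CM31 = One::one();
    let mut i = 0;
    while i != values.len() {
        // Each element times its batch inverse must equal the multiplicative identity.
        assert_eq!(*values[i] * *inverses[i], one);
        i += 1;
    };
}
```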
diff --git a/stwo_cairo_verifier/src/fields/m31.cairo b/stwo_cairo_verifier/src/fields/m31.cairo
index b01ba972..a3f819c5 100644
--- a/stwo_cairo_verifier/src/fields/m31.cairo
+++ b/stwo_cairo_verifier/src/fields/m31.cairo
@@ -1,11 +1,20 @@
 use core::num::traits::{WideMul, CheckedSub};
+use core::ops::{AddAssign, MulAssign, SubAssign};
 use core::option::OptionTrait;
 use core::traits::TryInto;
 
+/// Equals `2^31 - 1`.
 pub const P: u32 = 0x7fffffff;
+
+/// Equals `2^31 - 1`.
 const P32NZ: NonZero<u32> = 0x7fffffff;
+
+/// Equals `2^31 - 1`.
 const P64NZ: NonZero<u64> = 0x7fffffff;
 
+/// Equals `2^31 - 1`.
+const P128NZ: NonZero<u128> = 0x7fffffff;
+
 #[derive(Copy, Drop, Debug, PartialEq)]
 pub struct M31 {
     pub inner: u32
@@ -13,16 +22,24 @@ pub struct M31 {
 
 #[generate_trait]
 pub impl M31Impl of M31Trait {
+    #[inline]
     fn reduce_u32(val: u32) -> M31 {
         let (_, res) = core::integer::u32_safe_divmod(val, P32NZ);
         M31 { inner: res.try_into().unwrap() }
     }
 
+    #[inline]
     fn reduce_u64(val: u64) -> M31 {
         let (_, res) = core::integer::u64_safe_divmod(val, P64NZ);
         M31 { inner: res.try_into().unwrap() }
     }
 
+    #[inline]
+    fn reduce_u128(val: u128) -> M31 {
+        let (_, res) = core::integer::u128_safe_divmod(val, P128NZ);
+        M31 { inner: res.try_into().unwrap() }
+    }
+
+    #[inline]
     fn sqn(v: M31, n: usize) -> M31 {
         if n == 0 {
@@ -43,45 +60,81 @@ pub impl M31Impl of M31Trait {
     }
 }
 
 pub impl M31Add of core::traits::Add<M31> {
+    #[inline]
     fn add(lhs: M31, rhs: M31) -> M31 {
         let res = lhs.inner + rhs.inner;
         let res = res.checked_sub(P).unwrap_or(res);
         M31 { inner: res }
     }
 }
+
 pub impl M31Sub of core::traits::Sub<M31> {
+    #[inline]
     fn sub(lhs: M31, rhs: M31) -> M31 {
         lhs + (-rhs)
     }
 }
+
 pub impl M31Mul of core::traits::Mul<M31> {
+    #[inline]
     fn mul(lhs: M31, rhs: M31) -> M31 {
         M31Impl::reduce_u64(lhs.inner.wide_mul(rhs.inner))
     }
 }
+
+pub impl M31AddAssign of AddAssign<M31, M31> {
+    #[inline]
+    fn add_assign(ref self: M31, rhs: M31) {
+        self = self + rhs
+    }
+}
+
+pub impl M31SubAssign of SubAssign<M31, M31> {
+    #[inline]
+    fn sub_assign(ref self: M31, rhs: M31) {
+        self = self - rhs
+    }
+}
+
+pub impl M31MulAssign of MulAssign<M31, M31> {
+    #[inline]
+    fn mul_assign(ref self: M31, rhs: M31) {
+        self = self * rhs
+    }
+}
+
 pub impl M31Zero of core::num::traits::Zero<M31> {
+    #[inline]
     fn zero() -> M31 {
         M31 { inner: 0 }
     }
+
     fn is_zero(self: @M31) -> bool {
         *self.inner == 0
     }
+
     fn is_non_zero(self: @M31) -> bool {
         *self.inner != 0
     }
 }
+
 pub impl M31One of core::num::traits::One<M31> {
+    #[inline]
     fn one() -> M31 {
         M31 { inner: 1 }
     }
+
     fn is_one(self: @M31) -> bool {
         *self.inner == 1
     }
+
     fn is_non_one(self: @M31) -> bool {
         *self.inner != 1
     }
 }
+
 pub impl M31Neg of Neg<M31> {
+    #[inline]
     fn neg(a: M31) -> M31 {
         if a.inner == 0 {
             M31 { inner: 0 }
@@ -90,16 +143,55 @@ pub impl M31Neg of Neg<M31> {
         }
     }
 }
+
 impl M31IntoFelt252 of Into<M31, felt252> {
+    #[inline]
     fn into(self: M31) -> felt252 {
         self.inner.into()
     }
 }
 
+impl M31PartialOrd of PartialOrd<M31> {
+    fn ge(lhs: M31, rhs: M31) -> bool {
+        lhs.inner >= rhs.inner
+    }
+
+    fn lt(lhs: M31, rhs: M31) -> bool {
+        lhs.inner < rhs.inner
+    }
+}
+
+#[inline]
 pub fn m31(val: u32) -> M31 {
     M31Impl::reduce_u32(val)
 }
 
+#[derive(Copy, Drop, Debug)]
+pub struct UnreducedM31 {
+    pub inner: felt252,
+}
+
+pub impl UnreducedM31Sub of Sub<UnreducedM31> {
+    #[inline]
+    fn sub(lhs: UnreducedM31, rhs: UnreducedM31) -> UnreducedM31 {
+        UnreducedM31 { inner: lhs.inner - rhs.inner }
+    }
+}
+
+pub impl UnreducedM31Add of Add<UnreducedM31> {
+    #[inline]
+    fn add(lhs: UnreducedM31, rhs: UnreducedM31) -> UnreducedM31 {
+        UnreducedM31 { inner: lhs.inner + rhs.inner }
+    }
+}
+
+impl M31IntoUnreducedM31 of Into<M31, UnreducedM31> {
+    #[inline]
+    fn into(self: M31) -> UnreducedM31 {
+        UnreducedM31 { inner: self.inner.into() }
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::{m31, P, M31Trait};
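`reduce_u128` complements the existing `reduce_u32`/`reduce_u64`: the new unreduced representations (`UnreducedM31` here, and the unreduced/packed QM31 types in the next file) accumulate sums and products as `felt252` and only reduce at the end, so the values handed back to M31 can be far wider than 64 bits (up to roughly 124 bits per half in the packed CM31 case). A small check of the wrap-around behaviour, assuming the module is exposed as `stwo_cairo_verifier::fields::m31` (illustrative, not part of this patch):

```cairo
use stwo_cairo_verifier::fields::m31::{m31, M31Impl, P};

#[test]
fn test_reduce_u128_wraps_mod_p() {
    // (q * P + r) mod P == r, with a quotient far beyond the u32/u64 range.
    let p: u128 = P.into();
    let q: u128 = 0x123456789abcdef0123;
    let r: u128 = 12345;
    assert_eq!(M31Impl::reduce_u128(q * p + r), m31(12345));
}
```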
diff --git a/stwo_cairo_verifier/src/fields/qm31.cairo b/stwo_cairo_verifier/src/fields/qm31.cairo
index f8eed1ff..549259ea 100644
--- a/stwo_cairo_verifier/src/fields/qm31.cairo
+++ b/stwo_cairo_verifier/src/fields/qm31.cairo
@@ -1,7 +1,8 @@
 use core::num::traits::one::One;
 use core::num::traits::zero::Zero;
+use core::ops::{AddAssign, MulAssign, SubAssign};
 use super::cm31::{CM31, cm31, CM31Trait};
-use super::m31::{M31, M31Impl};
+use super::m31::{M31, M31Impl, UnreducedM31};
 
 /// Equals `(2^31 - 1)^4`.
 pub const P4: u128 = 21267647892944572736998860269687930881;
@@ -36,6 +37,7 @@ pub impl QM31Impl of QM31Trait {
         QM31 { a: self.a * denom_inverse, b: -self.b * denom_inverse }
     }
 
+    #[inline]
     fn mul_m31(self: QM31, multiplier: M31) -> QM31 {
         QM31 {
             a: CM31 { a: self.a.a * multiplier, b: self.a.b * multiplier },
@@ -43,75 +45,389 @@
         }
     }
 
+    // TODO(andrew): When associated types are supported, support `Mul`.
+    #[inline]
+    fn mul_cm31(self: QM31, rhs: CM31) -> QM31 {
+        QM31 { a: self.a * rhs, b: self.b * rhs }
+    }
+
     fn complex_conjugate(self: QM31) -> QM31 {
         QM31 { a: self.a, b: -self.b }
     }
+
+    /// Returns a fused multiply-subtract i.e. returns `a * b - c`.
+    #[inline]
+    fn fms(a: QM31, b: QM31, c: QM31) -> QM31 {
+        let res: UnreducedQM31 = a.into() * b.into() - c.into();
+        QM31 {
+            a: CM31 {
+                a: M31Impl::reduce_u128(res.a.try_into().unwrap()),
+                b: M31Impl::reduce_u128(res.b.try_into().unwrap())
+            },
+            b: CM31 {
+                a: M31Impl::reduce_u128(res.c.try_into().unwrap()),
+                b: M31Impl::reduce_u128(res.d.try_into().unwrap())
+            }
+        }
+    }
+
+    /// Returns a fused multiply-add i.e. returns `a * b + c`.
+    #[inline]
+    fn fma(a: QM31, b: QM31, c: QM31) -> QM31 {
+        let a: UnreducedQM31 = a.into();
+        let b: UnreducedQM31 = b.into();
+        let res = a * b + c.into();
+        QM31 {
+            a: CM31 {
+                a: M31Impl::reduce_u128(res.a.try_into().unwrap()),
+                b: M31Impl::reduce_u128(res.b.try_into().unwrap())
+            },
+            b: CM31 {
+                a: M31Impl::reduce_u128(res.c.try_into().unwrap()),
+                b: M31Impl::reduce_u128(res.d.try_into().unwrap())
+            }
+        }
+    }
 }
 
 pub impl QM31Add of core::traits::Add<QM31> {
+    #[inline]
     fn add(lhs: QM31, rhs: QM31) -> QM31 {
         QM31 { a: lhs.a + rhs.a, b: lhs.b + rhs.b }
     }
 }
+
 pub impl QM31Sub of core::traits::Sub<QM31> {
     fn sub(lhs: QM31, rhs: QM31) -> QM31 {
         QM31 { a: lhs.a - rhs.a, b: lhs.b - rhs.b }
     }
 }
+
 pub impl QM31Mul of core::traits::Mul<QM31> {
+    #[inline]
     fn mul(lhs: QM31, rhs: QM31) -> QM31 {
         // (a + bu) * (c + du) = (ac + rbd) + (ad + bc)u.
         QM31 { a: lhs.a * rhs.a + R * lhs.b * rhs.b, b: lhs.a * rhs.b + lhs.b * rhs.a }
     }
 }
+
+pub impl QM31AddAssign of AddAssign<QM31, QM31> {
+    #[inline]
+    fn add_assign(ref self: QM31, rhs: QM31) {
+        self = self + rhs
+    }
+}
+
+pub impl QM31SubAssign of SubAssign<QM31, QM31> {
+    #[inline]
+    fn sub_assign(ref self: QM31, rhs: QM31) {
+        self = self - rhs
+    }
+}
+
+pub impl QM31MulAssign of MulAssign<QM31, QM31> {
+    #[inline]
+    fn mul_assign(ref self: QM31, rhs: QM31) {
+        self = self * rhs
+    }
+}
+
 pub impl QM31Zero of Zero<QM31> {
+    #[inline]
     fn zero() -> QM31 {
         QM31 { a: Zero::zero(), b: Zero::zero() }
     }
+
     fn is_zero(self: @QM31) -> bool {
         (*self).a.is_zero() && (*self).b.is_zero()
     }
+
     fn is_non_zero(self: @QM31) -> bool {
         (*self).a.is_non_zero() || (*self).b.is_non_zero()
     }
 }
+
 pub impl QM31One of One<QM31> {
+    #[inline]
     fn one() -> QM31 {
         QM31 { a: One::one(), b: Zero::zero() }
     }
+
     fn is_one(self: @QM31) -> bool {
         (*self).a.is_one() && (*self).b.is_zero()
     }
+
     fn is_non_one(self: @QM31) -> bool {
         (*self).a.is_non_one() || (*self).b.is_non_zero()
     }
 }
+
 pub impl M31IntoQM31 of core::traits::Into<M31, QM31> {
+    #[inline]
     fn into(self: M31) -> QM31 {
         QM31 { a: self.into(), b: Zero::zero() }
     }
 }
+
 pub impl CM31IntoQM31 of core::traits::Into<CM31, QM31> {
+    #[inline]
     fn into(self: CM31) -> QM31 {
         QM31 { a: self, b: Zero::zero() }
     }
 }
+
 pub impl QM31Neg of Neg<QM31> {
+    #[inline]
     fn neg(a: QM31) -> QM31 {
         QM31 { a: -a.a, b: -a.b }
     }
 }
 
+impl QM31PartialOrd of PartialOrd<QM31> {
+    #[inline]
+    fn lt(lhs: QM31, rhs: QM31) -> bool {
+        lhs.a < rhs.a || (lhs.a == rhs.a && lhs.b < rhs.b)
+    }
+}
+
+#[inline]
 pub fn qm31(a: u32, b: u32, c: u32, d: u32) -> QM31 {
     QM31 { a: cm31(a, b), b: cm31(c, d) }
 }
+
+/// Stores an unreduced [`QM31`] with each coordinate stored as a `felt252`.
+#[derive(Copy, Drop, Debug)]
+struct UnreducedQM31 {
+    pub a: felt252,
+    pub b: felt252,
+    pub c: felt252,
+    pub d: felt252,
+}
+
+#[generate_trait]
+impl UnreducedQM31Impl of UnreducedQM31Trait {
+    #[inline]
+    fn reduce(self: UnreducedQM31) -> QM31 {
+        QM31 {
+            a: CM31 {
+                a: M31Impl::reduce_u128(self.a.try_into().unwrap()),
+                b: M31Impl::reduce_u128(self.b.try_into().unwrap())
+            },
+            b: CM31 {
+                a: M31Impl::reduce_u128(self.c.try_into().unwrap()),
+                b: M31Impl::reduce_u128(self.d.try_into().unwrap())
+            },
+        }
+    }
+}
+
+impl UnreducedQM31Sub of Sub<UnreducedQM31> {
+    #[inline]
+    fn sub(lhs: UnreducedQM31, rhs: UnreducedQM31) -> UnreducedQM31 {
+        UnreducedQM31 { a: lhs.a - rhs.a, b: lhs.b - rhs.b, c: lhs.c - rhs.c, d: lhs.d - rhs.d }
+    }
+}
+
+impl UnreducedQM31Add of Add<UnreducedQM31> {
+    #[inline]
+    fn add(lhs: UnreducedQM31, rhs: UnreducedQM31) -> UnreducedQM31 {
+        UnreducedQM31 { a: lhs.a + rhs.a, b: lhs.b + rhs.b, c: lhs.c + rhs.c, d: lhs.d + rhs.d }
+    }
+}
+
+impl UnreducedQM31Mul of Mul<UnreducedQM31> {
+    /// Returns `lhs * rhs`. Assumes input coordinates are in the range `[0, P)`.
+    ///
+    /// Output coordinates are returned in the range `[P * P, P * P * 13)`.
+    // TODO(andrew): May be net worse performance doing unreduced arithmetic due to all felt252
+    // multiplications (which are expensive for the M31 prover to simulate). Measure overall
+    // prove+verify performance differences with unreduced felt252 vs reduced u32. If prover
+    // performance is an issue consider Karatsuba.
+    #[inline]
+    fn mul(lhs: UnreducedQM31, rhs: UnreducedQM31) -> UnreducedQM31 {
+        /// Equals `P * P * 16`.
+        const PP16: felt252 = 0x3fffffff000000010;
+
+        // `lhs` 1st CM31 coordinate.
+        let lhs_aa = lhs.a;
+        let lhs_ab = lhs.b;
+
+        // `lhs` 2nd CM31 coordinate.
+        let lhs_ba = lhs.c;
+        let lhs_bb = lhs.d;
+
+        // `rhs` 1st CM31 coordinate.
+        let rhs_aa = rhs.a;
+        let rhs_ab = rhs.b;
+
+        // `rhs` 2nd CM31 coordinate.
+        let rhs_ba = rhs.c;
+        let rhs_bb = rhs.d;
+
+        // lhs.a * rhs.a
+        let (aa_t_ba_a, aa_t_ba_b) = {
+            let res_a = lhs_aa * rhs_aa - lhs_ab * rhs_ab;
+            let res_b = lhs_aa * rhs_ab + lhs_ab * rhs_aa;
+            (res_a, res_b)
+        };
+
+        // R * lhs.b * rhs.b
+        let (r_t_ab_t_bb_a, r_t_ab_t_bb_b) = {
+            let res_a = lhs_ba * rhs_ba - lhs_bb * rhs_bb;
+            let res_b = lhs_ba * rhs_bb + lhs_bb * rhs_ba;
+            (res_a + res_a - res_b, res_a + res_b + res_b)
+        };
+
+        // lhs.a * rhs.b
+        let (aa_t_bb_a, aa_t_bb_b) = {
+            let res_a = lhs_aa * rhs_ba - lhs_ab * rhs_bb;
+            let res_b = lhs_aa * rhs_bb + lhs_ab * rhs_ba;
+            (res_a, res_b)
+        };
+
+        // lhs.b * rhs.a
+        let (ab_t_ba_a, ab_t_ba_b) = {
+            let res_a = lhs_ba * rhs_aa - lhs_bb * rhs_ab;
+            let res_b = lhs_ba * rhs_ab + lhs_bb * rhs_aa;
+            (res_a, res_b)
+        };
+
+        UnreducedQM31 {
+            a: PP16 + aa_t_ba_a + r_t_ab_t_bb_a,
+            b: PP16 + aa_t_ba_b + r_t_ab_t_bb_b,
+            c: PP16 + aa_t_bb_a + ab_t_ba_a,
+            d: PP16 + aa_t_bb_b + ab_t_ba_b
+        }
+    }
+}
+
+impl QM31IntoUnreducedQM31 of Into<QM31, UnreducedQM31> {
+    #[inline]
+    fn into(self: QM31) -> UnreducedQM31 {
+        UnreducedQM31 {
+            a: self.a.a.inner.into(),
+            b: self.a.b.inner.into(),
+            c: self.b.a.inner.into(),
+            d: self.b.b.inner.into()
+        }
+    }
+}
+
+/// Stores an unreduced [`QM31`] packed into two `felt252`.
+// TODO: Determine if performance difference between UnreducedQM31 and PackedUnreducedQM31 is worth
+// keeping around both types.
+#[derive(Copy, Drop, Debug)]
+pub struct PackedUnreducedQM31 {
+    pub a: PackedUnreducedCM31,
+    pub b: PackedUnreducedCM31,
+}
+
+#[generate_trait]
+pub impl PackedUnreducedQM31Impl of PackedUnreducedQM31Trait {
+    #[inline]
+    fn mul_m31(self: PackedUnreducedQM31, rhs: UnreducedM31) -> PackedUnreducedQM31 {
+        PackedUnreducedQM31 { a: self.a.mul_m31(rhs), b: self.b.mul_m31(rhs) }
+    }
+
+    /// Returns a zero element with each coordinate set to `P*P*P`.
+    #[inline]
+    fn large_zero() -> PackedUnreducedQM31 {
+        PackedUnreducedQM31 {
+            a: PackedUnreducedCM31Impl::large_zero(), b: PackedUnreducedCM31Impl::large_zero(),
+        }
+    }
+
+    #[inline]
+    fn reduce(self: PackedUnreducedQM31) -> QM31 {
+        QM31 { a: self.a.reduce(), b: self.b.reduce() }
+    }
+}
+
+pub impl PackedUnreducedQM31AddAssign of AddAssign<PackedUnreducedQM31, PackedUnreducedQM31> {
+    #[inline]
+    fn add_assign(ref self: PackedUnreducedQM31, rhs: PackedUnreducedQM31) {
+        self = self + rhs
+    }
+}
+
+pub impl PackedUnreducedQM31Add of Add<PackedUnreducedQM31> {
+    #[inline]
+    fn add(lhs: PackedUnreducedQM31, rhs: PackedUnreducedQM31) -> PackedUnreducedQM31 {
+        PackedUnreducedQM31 { a: lhs.a + rhs.a, b: lhs.b + rhs.b }
+    }
+}
+
+pub impl PackedUnreducedQM31Sub of Sub<PackedUnreducedQM31> {
+    #[inline]
+    fn sub(lhs: PackedUnreducedQM31, rhs: PackedUnreducedQM31) -> PackedUnreducedQM31 {
+        PackedUnreducedQM31 { a: lhs.a - rhs.a, b: lhs.b - rhs.b }
+    }
+}
+
+pub impl QM31IntoPackedUnreducedQM31 of Into<QM31, PackedUnreducedQM31> {
+    #[inline]
+    fn into(self: QM31) -> PackedUnreducedQM31 {
+        PackedUnreducedQM31 { a: self.a.into(), b: self.b.into() }
+    }
+}
+
+/// An unreduced [`CM31`] packed into a single `felt252`.
+#[derive(Copy, Drop, Debug)]
+pub struct PackedUnreducedCM31 {
+    /// Stores a 128 bit and 124 bit unreduced M31 packed into a felt252 i.e. `a + (b << 128)`.
+    pub inner: felt252,
+}
+
+#[generate_trait]
+pub impl PackedUnreducedCM31Impl of PackedUnreducedCM31Trait {
+    #[inline]
+    fn mul_m31(self: PackedUnreducedCM31, rhs: UnreducedM31) -> PackedUnreducedCM31 {
+        PackedUnreducedCM31 { inner: self.inner * rhs.inner }
+    }
+
+    /// Returns a zero element with each coordinate set to `P*P*P`.
+    #[inline]
+    fn large_zero() -> PackedUnreducedCM31 {
+        // Stores `P*P*P + (P*P*P << 128)`.
+        const PPP_PPP: felt252 = 0x1fffffff400000017fffffff000000001fffffff400000017fffffff;
+        PackedUnreducedCM31 { inner: PPP_PPP }
+    }
+
+    #[inline]
+    fn reduce(self: PackedUnreducedCM31) -> CM31 {
+        let u256 { low: a, high: b } = self.inner.into();
+        CM31 { a: M31Impl::reduce_u128(a), b: M31Impl::reduce_u128(b) }
+    }
+}
+
+pub impl PackedUnreducedCM31Add of Add<PackedUnreducedCM31> {
+    #[inline]
+    fn add(lhs: PackedUnreducedCM31, rhs: PackedUnreducedCM31) -> PackedUnreducedCM31 {
+        PackedUnreducedCM31 { inner: lhs.inner + rhs.inner }
+    }
+}
+
+pub impl PackedUnreducedCM31Sub of Sub<PackedUnreducedCM31> {
+    #[inline]
+    fn sub(lhs: PackedUnreducedCM31, rhs: PackedUnreducedCM31) -> PackedUnreducedCM31 {
+        PackedUnreducedCM31 { inner: lhs.inner - rhs.inner }
+    }
+}
+
+pub impl CM31IntoPackedUnreducedCM31 of Into<CM31, PackedUnreducedCM31> {
+    #[inline]
+    fn into(self: CM31) -> PackedUnreducedCM31 {
+        const POW2_128: felt252 = 0x100000000000000000000000000000000;
+        let a_felt: felt252 = self.a.into();
+        let b_felt: felt252 = self.b.into();
+        PackedUnreducedCM31 { inner: a_felt + b_felt * POW2_128 }
+    }
+}
 
 #[cfg(test)]
 mod tests {
-    use super::CM31;
     use super::super::m31::{m31, P, M31Trait};
-    use super::{QM31, qm31, QM31Trait, QM31Impl};
+    use super::{QM31, qm31, QM31Trait, QM31Impl, UnreducedQM31, UnreducedQM31Impl, QM31IntoPackedUnreducedQM31, PackedUnreducedQM31Impl};
 
     #[test]
     fn test_QM31() {
@@ -132,4 +448,26 @@ mod tests {
         assert_eq!(qm1 * m.inverse().into(), qm1 * qm.inverse());
         assert_eq!(qm1.mul_m31(m), qm1 * m.into());
     }
+
+    #[test]
+    fn test_unreduced_qm31() {
+        let a = qm31(P - 4, P - 90, 958, P - 1);
+        let b = qm31(P - 183, 75, P - 921, P - 6124);
+
+        let res_unreduced: UnreducedQM31 = a.into() * b.into();
+        let res = res_unreduced.reduce();
+
+        assert_eq!(res, a * b);
+    }
+
+    #[test]
+    fn test_packed_unreduced_qm31() {
+        let a = qm31(P - 4, P - 90, 958, P - 1);
+        let b = m31(P - 183);
+
+        let res_unreduced = QM31IntoPackedUnreducedQM31::into(a).mul_m31(b.into());
+        let res = res_unreduced.reduce();
+
+        assert_eq!(res, a.mul_m31(b));
+    }
 }
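A note on the two hex constants introduced in qm31.cairo: `felt252` has no native sign, so a negative intermediate such as `res_a - res_b` would wrap to a value near the felt252 prime and could no longer be split into small u128 limbs before reduction. `UnreducedQM31Mul` therefore adds `P * P * 16` to every output coordinate, and `large_zero` starts each packed coordinate at `P * P * P`; both are multiples of `P`, so they vanish after reduction while keeping the unreduced coordinates non-negative in the ranges these routines work with. The constants are just those multiples written out in hex, which can be checked directly (illustrative sketch, not part of this patch):

```cairo
#[test]
fn test_unreduced_offset_constants() {
    let p: felt252 = 0x7fffffff; // P = 2^31 - 1.
    let pow2_128: felt252 = 0x100000000000000000000000000000000;

    // `PP16` in `UnreducedQM31Mul::mul` equals P * P * 16.
    assert_eq!(p * p * 16, 0x3fffffff000000010);

    // `PPP_PPP` in `PackedUnreducedCM31Impl::large_zero` equals P^3 + (P^3 << 128).
    assert_eq!(p * p * p * (pow2_128 + 1), 0x1fffffff400000017fffffff000000001fffffff400000017fffffff);
}
```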