diff --git a/mlkem/src/indcpa.c b/mlkem/src/indcpa.c index 0b4315e01d..ba34c75a13 100644 --- a/mlkem/src/indcpa.c +++ b/mlkem/src/indcpa.c @@ -39,6 +39,8 @@ #define mlk_pack_ciphertext MLK_ADD_PARAM_SET(mlk_pack_ciphertext) #define mlk_unpack_ciphertext MLK_ADD_PARAM_SET(mlk_unpack_ciphertext) #define mlk_matvec_mul MLK_ADD_PARAM_SET(mlk_matvec_mul) +#define mlk_polyvec_permute_bitrev_to_custom \ + MLK_ADD_PARAM_SET(mlk_polyvec_permute_bitrev_to_custom) #define mlk_polymat_permute_bitrev_to_custom \ MLK_ADD_PARAM_SET(mlk_polymat_permute_bitrev_to_custom) /* End of parameter set namespacing */ @@ -59,10 +61,11 @@ * Implements @[FIPS203, Algorithm 13 (K-PKE.KeyGen), L19] * **************************************************/ -static void mlk_pack_pk(uint8_t r[MLKEM_INDCPA_PUBLICKEYBYTES], mlk_polyvec pk, +static void mlk_pack_pk(uint8_t r[MLKEM_INDCPA_PUBLICKEYBYTES], + const mlk_polyvec *pk, const uint8_t seed[MLKEM_SYMBYTES]) { - mlk_assert_bound_2d(pk, MLKEM_K, MLKEM_N, 0, MLKEM_Q); + mlk_assert_bound_2d(pk->vec, MLKEM_K, MLKEM_N, 0, MLKEM_Q); mlk_polyvec_tobytes(r, pk); mlk_memcpy(r + MLKEM_POLYVECBYTES, seed, MLKEM_SYMBYTES); } @@ -83,7 +86,7 @@ static void mlk_pack_pk(uint8_t r[MLKEM_INDCPA_PUBLICKEYBYTES], mlk_polyvec pk, * Implements @[FIPS203, Algorithm 14 (K-PKE.Encrypt), L2-3] * **************************************************/ -static void mlk_unpack_pk(mlk_polyvec pk, uint8_t seed[MLKEM_SYMBYTES], +static void mlk_unpack_pk(mlk_polyvec *pk, uint8_t seed[MLKEM_SYMBYTES], const uint8_t packedpk[MLKEM_INDCPA_PUBLICKEYBYTES]) { mlk_polyvec_frombytes(pk, packedpk); @@ -108,9 +111,10 @@ static void mlk_unpack_pk(mlk_polyvec pk, uint8_t seed[MLKEM_SYMBYTES], * Implements @[FIPS203, Algorithm 13 (K-PKE.KeyGen), L20] * **************************************************/ -static void mlk_pack_sk(uint8_t r[MLKEM_INDCPA_SECRETKEYBYTES], mlk_polyvec sk) +static void mlk_pack_sk(uint8_t r[MLKEM_INDCPA_SECRETKEYBYTES], + const mlk_polyvec *sk) { - mlk_assert_bound_2d(sk, MLKEM_K, MLKEM_N, 0, MLKEM_Q); + mlk_assert_bound_2d(sk->vec, MLKEM_K, MLKEM_N, 0, MLKEM_Q); mlk_polyvec_tobytes(r, sk); } @@ -128,7 +132,7 @@ static void mlk_pack_sk(uint8_t r[MLKEM_INDCPA_SECRETKEYBYTES], mlk_polyvec sk) * Implements @[FIPS203, Algorithm 15 (K-PKE.Decrypt), L5] * **************************************************/ -static void mlk_unpack_sk(mlk_polyvec sk, +static void mlk_unpack_sk(mlk_polyvec *sk, const uint8_t packedsk[MLKEM_INDCPA_SECRETKEYBYTES]) { mlk_polyvec_frombytes(sk, packedsk); @@ -149,8 +153,8 @@ static void mlk_unpack_sk(mlk_polyvec sk, * Implements @[FIPS203, Algorithm 14 (K-PKE.Encrypt), L22-23] * **************************************************/ -static void mlk_pack_ciphertext(uint8_t r[MLKEM_INDCPA_BYTES], mlk_polyvec b, - mlk_poly *v) +static void mlk_pack_ciphertext(uint8_t r[MLKEM_INDCPA_BYTES], + const mlk_polyvec *b, mlk_poly *v) { mlk_polyvec_compress_du(r, b); mlk_poly_compress_dv(r + MLKEM_POLYVECCOMPRESSEDBYTES_DU, v); @@ -170,7 +174,7 @@ static void mlk_pack_ciphertext(uint8_t r[MLKEM_INDCPA_BYTES], mlk_polyvec b, * Implements @[FIPS203, Algorithm 15 (K-PKE.Decrypt), L1-4] * **************************************************/ -static void mlk_unpack_ciphertext(mlk_polyvec b, mlk_poly *v, +static void mlk_unpack_ciphertext(mlk_polyvec *b, mlk_poly *v, const uint8_t c[MLKEM_INDCPA_BYTES]) { mlk_polyvec_decompress_du(b, c); @@ -183,35 +187,57 @@ static void mlk_unpack_ciphertext(mlk_polyvec b, mlk_poly *v, * * We don't inline this into gen_matrix to avoid having to split the CBMC 
* proof for gen_matrix based on MLK_USE_NATIVE_NTT_CUSTOM_ORDER. */ -static void mlk_polymat_permute_bitrev_to_custom(mlk_polymat a) +static void mlk_polyvec_permute_bitrev_to_custom(mlk_polyvec *v) __contract__( /* We don't specify that this should be a permutation, but only * that it does not change the bound established at the end of mlk_gen_matrix. */ - requires(memory_no_alias(a, sizeof(mlk_polymat))) - requires(forall(x, 0, MLKEM_K * MLKEM_K, - array_bound(a[x].coeffs, 0, MLKEM_N, 0, MLKEM_Q))) - assigns(object_whole(a)) - ensures(forall(x, 0, MLKEM_K * MLKEM_K, - array_bound(a[x].coeffs, 0, MLKEM_N, 0, MLKEM_Q)))) + requires(memory_no_alias(v, sizeof(mlk_polyvec))) + requires(forall(x, 0, MLKEM_K, + array_bound(v->vec[x].coeffs, 0, MLKEM_N, 0, MLKEM_Q))) + assigns(memory_slice(v, sizeof(mlk_polyvec))) + ensures(forall(x, 0, MLKEM_K, + array_bound(v->vec[x].coeffs, 0, MLKEM_N, 0, MLKEM_Q)))) { #if defined(MLK_USE_NATIVE_NTT_CUSTOM_ORDER) unsigned i; - for (i = 0; i < MLKEM_K * MLKEM_K; i++) + for (i = 0; i < MLKEM_K; i++) __loop__( - assigns(i, object_whole(a)) - invariant(i <= MLKEM_K * MLKEM_K) - invariant(forall(x, 0, MLKEM_K * MLKEM_K, - array_bound(a[x].coeffs, 0, MLKEM_N, 0, MLKEM_Q))) - ) + assigns(i, memory_slice(v, sizeof(mlk_polyvec))) + invariant(i <= MLKEM_K) + invariant(forall(x, 0, MLKEM_K, + array_bound(v->vec[x].coeffs, 0, MLKEM_N, 0, MLKEM_Q)))) { - mlk_poly_permute_bitrev_to_custom(a[i].coeffs); + mlk_poly_permute_bitrev_to_custom(v->vec[i].coeffs); } #else /* MLK_USE_NATIVE_NTT_CUSTOM_ORDER */ /* Nothing to do */ - (void)a; + (void)v; #endif /* !MLK_USE_NATIVE_NTT_CUSTOM_ORDER */ } +static void mlk_polymat_permute_bitrev_to_custom(mlk_polymat *a) +__contract__( + /* We don't specify that this should be a permutation, but only + * that it does not change the bound established at the end of mlk_gen_matrix. */ + requires(memory_no_alias(a, sizeof(mlk_polymat))) + requires(forall(x, 0, MLKEM_K, forall(y, 0, MLKEM_K, + array_bound(a->vec[x].vec[y].coeffs, 0, MLKEM_N, 0, MLKEM_Q)))) + assigns(object_whole(a)) + ensures(forall(x, 0, MLKEM_K, forall(y, 0, MLKEM_K, + array_bound(a->vec[x].vec[y].coeffs, 0, MLKEM_N, 0, MLKEM_Q))))) +{ + unsigned i; + for (i = 0; i < MLKEM_K; i++) + __loop__( + assigns(i, object_whole(a)) + invariant(i <= MLKEM_K) + invariant(forall(x, 0, MLKEM_K, forall(y, 0, MLKEM_K, + array_bound(a->vec[x].vec[y].coeffs, 0, MLKEM_N, 0, MLKEM_Q))))) + { + mlk_polyvec_permute_bitrev_to_custom(&a->vec[i]); + } +} + /* Reference: `gen_matrix()` in the reference implementation @[REF]. * - We use a special subroutine to generate 4 polynomials * at a time, to be able to leverage batched Keccak-f1600 @@ -220,7 +246,7 @@ __contract__( * * Not static for benchmarking */ MLK_INTERNAL_API -void mlk_gen_matrix(mlk_polymat a, const uint8_t seed[MLKEM_SYMBYTES], +void mlk_gen_matrix(mlk_polymat *a, const uint8_t seed[MLKEM_SYMBYTES], int transposed) { unsigned i, j; @@ -253,7 +279,11 @@ void mlk_gen_matrix(mlk_polymat a, const uint8_t seed[MLKEM_SYMBYTES], } } - mlk_poly_rej_uniform_x4(&a[i], &a[i + 1], &a[i + 2], &a[i + 3], seed_ext); + mlk_poly_rej_uniform_x4(&a->vec[i / MLKEM_K].vec[i % MLKEM_K], + &a->vec[(i + 1) / MLKEM_K].vec[(i + 1) % MLKEM_K], + &a->vec[(i + 2) / MLKEM_K].vec[(i + 2) % MLKEM_K], + &a->vec[(i + 3) / MLKEM_K].vec[(i + 3) % MLKEM_K], + seed_ext); } #else /* !MLK_CONFIG_SERIAL_FIPS202_ONLY */ /* When using serial FIPS202, sample all entries individually. 
*/ @@ -281,7 +311,7 @@ void mlk_gen_matrix(mlk_polymat a, const uint8_t seed[MLKEM_SYMBYTES], seed_ext[0][MLKEM_SYMBYTES + 1] = x; } - mlk_poly_rej_uniform(&a[i], seed_ext[0]); + mlk_poly_rej_uniform(&a->vec[i / MLKEM_K].vec[i % MLKEM_K], seed_ext[0]); } mlk_assert(i == MLKEM_K * MLKEM_K); @@ -314,15 +344,16 @@ void mlk_gen_matrix(mlk_polymat a, const uint8_t seed[MLKEM_SYMBYTES], * Specification: Implements @[FIPS203, Section 2.4.7, Eq (2.12), (2.13)] * **************************************************/ -static void mlk_matvec_mul(mlk_polyvec out, const mlk_polymat a, - const mlk_polyvec v, const mlk_polyvec_mulcache vc) +static void mlk_matvec_mul(mlk_polyvec *out, const mlk_polymat *a, + const mlk_polyvec *v, const mlk_polyvec_mulcache *vc) __contract__( requires(memory_no_alias(out, sizeof(mlk_polyvec))) requires(memory_no_alias(a, sizeof(mlk_polymat))) requires(memory_no_alias(v, sizeof(mlk_polyvec))) requires(memory_no_alias(vc, sizeof(mlk_polyvec_mulcache))) - requires(forall(k0, 0, MLKEM_K * MLKEM_K, - array_bound(a[k0].coeffs, 0, MLKEM_N, 0, MLKEM_UINT12_LIMIT))) + requires(forall(k0, 0, MLKEM_K, + forall(k1, 0, MLKEM_K, + array_bound(a->vec[k0].vec[k1].coeffs, 0, MLKEM_N, 0, MLKEM_UINT12_LIMIT)))) assigns(object_whole(out))) { unsigned i; @@ -331,7 +362,7 @@ __contract__( assigns(i, object_whole(out)) invariant(i <= MLKEM_K)) { - mlk_polyvec_basemul_acc_montgomery_cached(&out[i], &a[MLKEM_K * i], v, vc); + mlk_polyvec_basemul_acc_montgomery_cached(&out->vec[i], &a->vec[i], v, vc); } } @@ -370,47 +401,49 @@ void mlk_indcpa_keypair_derand(uint8_t pk[MLKEM_INDCPA_PUBLICKEYBYTES], */ MLK_CT_TESTING_DECLASSIFY(publicseed, MLKEM_SYMBYTES); - mlk_gen_matrix(a, publicseed, 0 /* no transpose */); + mlk_gen_matrix(&a, publicseed, 0 /* no transpose */); #if MLKEM_K == 2 - mlk_poly_getnoise_eta1_4x(&skpv[0], &skpv[1], &e[0], &e[1], noiseseed, 0, 1, - 2, 3); + mlk_poly_getnoise_eta1_4x(&skpv.vec[0], &skpv.vec[1], &e.vec[0], &e.vec[1], + noiseseed, 0, 1, 2, 3); #elif MLKEM_K == 3 /* * Only the first three output buffers are needed. * The last parameter is a dummy that's overwritten later.
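The sampling loops above address the matrix through a flat index i; a minimal sketch of that row-major mapping, using a hypothetical helper name (mlk_polymat_entry) that is not part of this patch:

#include "poly_k.h"

/* Illustration only: entry (row, col) of the K x K matrix A sampled by
 * mlk_gen_matrix lives at a->vec[row].vec[col], so a flat index
 * 0 <= i < MLKEM_K * MLKEM_K selects row i / MLKEM_K and column i % MLKEM_K. */
static mlk_poly *mlk_polymat_entry(mlk_polymat *a, unsigned i)
{
  return &a->vec[i / MLKEM_K].vec[i % MLKEM_K];
}

Under that reading, the batched call above fills four consecutive row-major entries A[i/K][i%K] through A[(i+3)/K][(i+3)%K] per Keccak-f1600 batch, and the serial path fills one entry per iteration.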
*/ - mlk_poly_getnoise_eta1_4x(&skpv[0], &skpv[1], &skpv[2], - &pkpv[0] /* irrelevant */, noiseseed, 0, 1, 2, + mlk_poly_getnoise_eta1_4x(&skpv.vec[0], &skpv.vec[1], &skpv.vec[2], + &pkpv.vec[0] /* irrelevant */, noiseseed, 0, 1, 2, 0xFF /* irrelevant */); /* Same here */ - mlk_poly_getnoise_eta1_4x(&e[0], &e[1], &e[2], &pkpv[0] /* irrelevant */, - noiseseed, 3, 4, 5, 0xFF /* irrelevant */); + mlk_poly_getnoise_eta1_4x(&e.vec[0], &e.vec[1], &e.vec[2], + &pkpv.vec[0] /* irrelevant */, noiseseed, 3, 4, 5, + 0xFF /* irrelevant */); #elif MLKEM_K == 4 - mlk_poly_getnoise_eta1_4x(&skpv[0], &skpv[1], &skpv[2], &skpv[3], noiseseed, - 0, 1, 2, 3); - mlk_poly_getnoise_eta1_4x(&e[0], &e[1], &e[2], &e[3], noiseseed, 4, 5, 6, 7); -#endif + mlk_poly_getnoise_eta1_4x(&skpv.vec[0], &skpv.vec[1], &skpv.vec[2], + &skpv.vec[3], noiseseed, 0, 1, 2, 3); + mlk_poly_getnoise_eta1_4x(&e.vec[0], &e.vec[1], &e.vec[2], &e.vec[3], + noiseseed, 4, 5, 6, 7); +#endif /* MLKEM_K == 4 */ - mlk_polyvec_ntt(skpv); - mlk_polyvec_ntt(e); + mlk_polyvec_ntt(&skpv); + mlk_polyvec_ntt(&e); - mlk_polyvec_mulcache_compute(skpv_cache, skpv); - mlk_matvec_mul(pkpv, a, skpv, skpv_cache); - mlk_polyvec_tomont(pkpv); + mlk_polyvec_mulcache_compute(&skpv_cache, &skpv); + mlk_matvec_mul(&pkpv, &a, &skpv, &skpv_cache); + mlk_polyvec_tomont(&pkpv); - mlk_polyvec_add(pkpv, e); - mlk_polyvec_reduce(pkpv); - mlk_polyvec_reduce(skpv); + mlk_polyvec_add(&pkpv, &e); + mlk_polyvec_reduce(&pkpv); + mlk_polyvec_reduce(&skpv); - mlk_pack_sk(sk, skpv); - mlk_pack_pk(pk, pkpv, publicseed); + mlk_pack_sk(sk, &skpv); + mlk_pack_pk(pk, &pkpv, publicseed); /* Specification: Partially implements * @[FIPS203, Section 3.3, Destruction of intermediate values] */ mlk_zeroize(buf, sizeof(buf)); mlk_zeroize(coins_with_domain_separator, sizeof(coins_with_domain_separator)); - mlk_zeroize(a, sizeof(a)); + mlk_zeroize(&a, sizeof(a)); mlk_zeroize(&e, sizeof(e)); mlk_zeroize(&skpv, sizeof(skpv)); mlk_zeroize(&skpv_cache, sizeof(skpv_cache)); @@ -436,7 +469,7 @@ void mlk_indcpa_enc(uint8_t c[MLKEM_INDCPA_BYTES], mlk_poly v, k, epp; mlk_polyvec_mulcache sp_cache; - mlk_unpack_pk(pkpv, seed, pk); + mlk_unpack_pk(&pkpv, seed, pk); mlk_poly_frommsg(&k, m); /* @@ -447,44 +480,47 @@ void mlk_indcpa_enc(uint8_t c[MLKEM_INDCPA_BYTES], */ MLK_CT_TESTING_DECLASSIFY(seed, MLKEM_SYMBYTES); - mlk_gen_matrix(at, seed, 1 /* transpose */); + mlk_gen_matrix(&at, seed, 1 /* transpose */); #if MLKEM_K == 2 - mlk_poly_getnoise_eta1122_4x(&sp[0], &sp[1], &ep[0], &ep[1], coins, 0, 1, 2, - 3); + mlk_poly_getnoise_eta1122_4x(&sp.vec[0], &sp.vec[1], &ep.vec[0], &ep.vec[1], + coins, 0, 1, 2, 3); mlk_poly_getnoise_eta2(&epp, coins, 4); #elif MLKEM_K == 3 /* * In this call, only the first three output buffers are needed. * The last parameter is a dummy that's overwritten later. */ - mlk_poly_getnoise_eta1_4x(&sp[0], &sp[1], &sp[2], &b[0], coins, 0, 1, 2, - 0xFF); + mlk_poly_getnoise_eta1_4x(&sp.vec[0], &sp.vec[1], &sp.vec[2], &b.vec[0], + coins, 0, 1, 2, 0xFF); /* The fourth output buffer in this call _is_ used. 
*/ - mlk_poly_getnoise_eta2_4x(&ep[0], &ep[1], &ep[2], &epp, coins, 3, 4, 5, 6); + mlk_poly_getnoise_eta2_4x(&ep.vec[0], &ep.vec[1], &ep.vec[2], &epp, coins, 3, + 4, 5, 6); #elif MLKEM_K == 4 - mlk_poly_getnoise_eta1_4x(&sp[0], &sp[1], &sp[2], &sp[3], coins, 0, 1, 2, 3); - mlk_poly_getnoise_eta2_4x(&ep[0], &ep[1], &ep[2], &ep[3], coins, 4, 5, 6, 7); + mlk_poly_getnoise_eta1_4x(&sp.vec[0], &sp.vec[1], &sp.vec[2], &sp.vec[3], + coins, 0, 1, 2, 3); + mlk_poly_getnoise_eta2_4x(&ep.vec[0], &ep.vec[1], &ep.vec[2], &ep.vec[3], + coins, 4, 5, 6, 7); mlk_poly_getnoise_eta2(&epp, coins, 8); -#endif +#endif /* MLKEM_K == 4 */ - mlk_polyvec_ntt(sp); + mlk_polyvec_ntt(&sp); - mlk_polyvec_mulcache_compute(sp_cache, sp); - mlk_matvec_mul(b, at, sp, sp_cache); - mlk_polyvec_basemul_acc_montgomery_cached(&v, pkpv, sp, sp_cache); + mlk_polyvec_mulcache_compute(&sp_cache, &sp); + mlk_matvec_mul(&b, &at, &sp, &sp_cache); + mlk_polyvec_basemul_acc_montgomery_cached(&v, &pkpv, &sp, &sp_cache); - mlk_polyvec_invntt_tomont(b); + mlk_polyvec_invntt_tomont(&b); mlk_poly_invntt_tomont(&v); - mlk_polyvec_add(b, ep); + mlk_polyvec_add(&b, &ep); mlk_poly_add(&v, &epp); mlk_poly_add(&v, &k); - mlk_polyvec_reduce(b); + mlk_polyvec_reduce(&b); mlk_poly_reduce(&v); - mlk_pack_ciphertext(c, b, &v); + mlk_pack_ciphertext(c, &b, &v); /* Specification: Partially implements * @[FIPS203, Section 3.3, Destruction of intermediate values] */ @@ -493,7 +529,7 @@ void mlk_indcpa_enc(uint8_t c[MLKEM_INDCPA_BYTES], mlk_zeroize(&sp_cache, sizeof(sp_cache)); mlk_zeroize(&b, sizeof(b)); mlk_zeroize(&v, sizeof(v)); - mlk_zeroize(at, sizeof(at)); + mlk_zeroize(&at, sizeof(at)); mlk_zeroize(&k, sizeof(k)); mlk_zeroize(&ep, sizeof(ep)); mlk_zeroize(&epp, sizeof(epp)); @@ -511,12 +547,12 @@ void mlk_indcpa_dec(uint8_t m[MLKEM_INDCPA_MSGBYTES], mlk_poly v, sb; mlk_polyvec_mulcache b_cache; - mlk_unpack_ciphertext(b, &v, c); - mlk_unpack_sk(skpv, sk); + mlk_unpack_ciphertext(&b, &v, c); + mlk_unpack_sk(&skpv, sk); - mlk_polyvec_ntt(b); - mlk_polyvec_mulcache_compute(b_cache, b); - mlk_polyvec_basemul_acc_montgomery_cached(&sb, skpv, b, b_cache); + mlk_polyvec_ntt(&b); + mlk_polyvec_mulcache_compute(&b_cache, &b); + mlk_polyvec_basemul_acc_montgomery_cached(&sb, &skpv, &b, &b_cache); mlk_poly_invntt_tomont(&sb); mlk_poly_sub(&v, &sb); @@ -542,4 +578,5 @@ void mlk_indcpa_dec(uint8_t m[MLKEM_INDCPA_MSGBYTES], #undef mlk_pack_ciphertext #undef mlk_unpack_ciphertext #undef mlk_matvec_mul +#undef mlk_polyvec_permute_bitrev_to_custom #undef mlk_polymat_permute_bitrev_to_custom diff --git a/mlkem/src/indcpa.h b/mlkem/src/indcpa.h index 4c44d0d411..dcc2ac7d13 100644 --- a/mlkem/src/indcpa.h +++ b/mlkem/src/indcpa.h @@ -39,15 +39,15 @@ * **************************************************/ MLK_INTERNAL_API -void mlk_gen_matrix(mlk_polymat a, const uint8_t seed[MLKEM_SYMBYTES], +void mlk_gen_matrix(mlk_polymat *a, const uint8_t seed[MLKEM_SYMBYTES], int transposed) __contract__( requires(memory_no_alias(a, sizeof(mlk_polymat))) requires(memory_no_alias(seed, MLKEM_SYMBYTES)) requires(transposed == 0 || transposed == 1) assigns(object_whole(a)) - ensures(forall(x, 0, MLKEM_K * MLKEM_K, - array_bound(a[x].coeffs, 0, MLKEM_N, 0, MLKEM_Q))) + ensures(forall(x, 0, MLKEM_K, forall(y, 0, MLKEM_K, + array_bound(a->vec[x].vec[y].coeffs, 0, MLKEM_N, 0, MLKEM_Q)))) ); #define mlk_indcpa_keypair_derand MLK_NAMESPACE_K(indcpa_keypair_derand) diff --git a/mlkem/src/kem.c b/mlkem/src/kem.c index 57882c0c57..41d33fb4d1 100644 --- a/mlkem/src/kem.c +++ b/mlkem/src/kem.c @@ 
-47,9 +47,9 @@ int crypto_kem_check_pk(const uint8_t pk[MLKEM_INDCCA_PUBLICKEYBYTES]) mlk_polyvec p; uint8_t p_reencoded[MLKEM_POLYVECBYTES]; - mlk_polyvec_frombytes(p, pk); - mlk_polyvec_reduce(p); - mlk_polyvec_tobytes(p_reencoded, p); + mlk_polyvec_frombytes(&p, pk); + mlk_polyvec_reduce(&p); + mlk_polyvec_tobytes(p_reencoded, &p); /* We use a constant-time memcmp here to avoid having to * declassify the PK before the PCT has succeeded. */ diff --git a/mlkem/src/poly_k.c b/mlkem/src/poly_k.c index 15d916b627..f83fa35962 100644 --- a/mlkem/src/poly_k.c +++ b/mlkem/src/poly_k.c @@ -47,29 +47,29 @@ * in the range (-MLKEM_Q+1,...,MLKEM_Q-1). */ MLK_INTERNAL_API void mlk_polyvec_compress_du(uint8_t r[MLKEM_POLYVECCOMPRESSEDBYTES_DU], - const mlk_polyvec a) + const mlk_polyvec *a) { unsigned i; - mlk_assert_bound_2d(a, MLKEM_K, MLKEM_N, 0, MLKEM_Q); + mlk_assert_bound_2d(a->vec, MLKEM_K, MLKEM_N, 0, MLKEM_Q); for (i = 0; i < MLKEM_K; i++) { - mlk_poly_compress_du(r + i * MLKEM_POLYCOMPRESSEDBYTES_DU, &a[i]); + mlk_poly_compress_du(r + i * MLKEM_POLYCOMPRESSEDBYTES_DU, &a->vec[i]); } } /* Reference: `polyvec_decompress()` in the reference implementation @[REF]. */ MLK_INTERNAL_API -void mlk_polyvec_decompress_du(mlk_polyvec r, +void mlk_polyvec_decompress_du(mlk_polyvec *r, const uint8_t a[MLKEM_POLYVECCOMPRESSEDBYTES_DU]) { unsigned i; for (i = 0; i < MLKEM_K; i++) { - mlk_poly_decompress_du(&r[i], a + i * MLKEM_POLYCOMPRESSEDBYTES_DU); + mlk_poly_decompress_du(&r->vec[i], a + i * MLKEM_POLYCOMPRESSEDBYTES_DU); } - mlk_assert_bound_2d(r, MLKEM_K, MLKEM_N, 0, MLKEM_Q); + mlk_assert_bound_2d(r->vec, MLKEM_K, MLKEM_N, 0, MLKEM_Q); } /* Reference: `polyvec_tobytes()` in the reference implementation @[REF]. @@ -78,10 +78,10 @@ void mlk_polyvec_decompress_du(mlk_polyvec r, * The reference implementation works with coefficients * in the range (-MLKEM_Q+1,...,MLKEM_Q-1). */ MLK_INTERNAL_API -void mlk_polyvec_tobytes(uint8_t r[MLKEM_POLYVECBYTES], const mlk_polyvec a) +void mlk_polyvec_tobytes(uint8_t r[MLKEM_POLYVECBYTES], const mlk_polyvec *a) { unsigned i; - mlk_assert_bound_2d(a, MLKEM_K, MLKEM_N, 0, MLKEM_Q); + mlk_assert_bound_2d(a->vec, MLKEM_K, MLKEM_N, 0, MLKEM_Q); for (i = 0; i < MLKEM_K; i++) __loop__( @@ -89,34 +89,34 @@ void mlk_polyvec_tobytes(uint8_t r[MLKEM_POLYVECBYTES], const mlk_polyvec a) invariant(i <= MLKEM_K) ) { - mlk_poly_tobytes(&r[i * MLKEM_POLYBYTES], &a[i]); + mlk_poly_tobytes(&r[i * MLKEM_POLYBYTES], &a->vec[i]); } } /* Reference: `polyvec_frombytes()` in the reference implementation @[REF]. */ MLK_INTERNAL_API -void mlk_polyvec_frombytes(mlk_polyvec r, const uint8_t a[MLKEM_POLYVECBYTES]) +void mlk_polyvec_frombytes(mlk_polyvec *r, const uint8_t a[MLKEM_POLYVECBYTES]) { unsigned i; for (i = 0; i < MLKEM_K; i++) { - mlk_poly_frombytes(&r[i], a + i * MLKEM_POLYBYTES); + mlk_poly_frombytes(&r->vec[i], a + i * MLKEM_POLYBYTES); } - mlk_assert_bound_2d(r, MLKEM_K, MLKEM_N, 0, MLKEM_UINT12_LIMIT); + mlk_assert_bound_2d(r->vec, MLKEM_K, MLKEM_N, 0, MLKEM_UINT12_LIMIT); } /* Reference: `polyvec_ntt()` in the reference implementation @[REF]. */ MLK_INTERNAL_API -void mlk_polyvec_ntt(mlk_polyvec r) +void mlk_polyvec_ntt(mlk_polyvec *r) { unsigned i; for (i = 0; i < MLKEM_K; i++) { - mlk_poly_ntt(&r[i]); + mlk_poly_ntt(&r->vec[i]); } - mlk_assert_abs_bound_2d(r, MLKEM_K, MLKEM_N, MLK_NTT_BOUND); + mlk_assert_abs_bound_2d(r->vec, MLKEM_K, MLKEM_N, MLK_NTT_BOUND); } /* Reference: `polyvec_invntt_tomont()` in the reference implementation @[REF]. 
@@ -125,15 +125,15 @@ void mlk_polyvec_ntt(mlk_polyvec r) * the end. This allows us to drop a call to `poly_reduce()` * from the base multiplication. */ MLK_INTERNAL_API -void mlk_polyvec_invntt_tomont(mlk_polyvec r) +void mlk_polyvec_invntt_tomont(mlk_polyvec *r) { unsigned i; for (i = 0; i < MLKEM_K; i++) { - mlk_poly_invntt_tomont(&r[i]); + mlk_poly_invntt_tomont(&r->vec[i]); } - mlk_assert_abs_bound_2d(r, MLKEM_K, MLKEM_N, MLK_INVNTT_BOUND); + mlk_assert_abs_bound_2d(r->vec, MLKEM_K, MLKEM_N, MLK_INVNTT_BOUND); } /* Reference: `polyvec_basemul_acc_montgomery()` in the @@ -148,20 +148,20 @@ void mlk_polyvec_invntt_tomont(mlk_polyvec r) * more modular reductions since it reduces after every modular * multiplication. */ MLK_STATIC_TESTABLE void mlk_polyvec_basemul_acc_montgomery_cached_c( - mlk_poly *r, const mlk_polyvec a, const mlk_polyvec b, - const mlk_polyvec_mulcache b_cache) + mlk_poly *r, const mlk_polyvec *a, const mlk_polyvec *b, + const mlk_polyvec_mulcache *b_cache) __contract__( requires(memory_no_alias(r, sizeof(mlk_poly))) requires(memory_no_alias(a, sizeof(mlk_polyvec))) requires(memory_no_alias(b, sizeof(mlk_polyvec))) requires(memory_no_alias(b_cache, sizeof(mlk_polyvec_mulcache))) requires(forall(k1, 0, MLKEM_K, - array_bound(a[k1].coeffs, 0, MLKEM_N, 0, MLKEM_UINT12_LIMIT))) + array_bound(a->vec[k1].coeffs, 0, MLKEM_N, 0, MLKEM_UINT12_LIMIT))) assigns(object_whole(r)) ) { unsigned i; - mlk_assert_bound_2d(a, MLKEM_K, MLKEM_N, 0, MLKEM_UINT12_LIMIT); + mlk_assert_bound_2d(a->vec, MLKEM_K, MLKEM_N, 0, MLKEM_UINT12_LIMIT); for (i = 0; i < MLKEM_N / 2; i++) __loop__(invariant(i <= MLKEM_N / 2)) @@ -176,10 +176,10 @@ __contract__( t[1] <= ((int32_t) k * 2 * MLKEM_UINT12_LIMIT * 32768) && t[1] >= - ((int32_t) k * 2 * MLKEM_UINT12_LIMIT * 32768))) { - t[0] += (int32_t)a[k].coeffs[2 * i + 1] * b_cache[k].coeffs[i]; - t[0] += (int32_t)a[k].coeffs[2 * i] * b[k].coeffs[2 * i]; - t[1] += (int32_t)a[k].coeffs[2 * i] * b[k].coeffs[2 * i + 1]; - t[1] += (int32_t)a[k].coeffs[2 * i + 1] * b[k].coeffs[2 * i]; + t[0] += (int32_t)a->vec[k].coeffs[2 * i + 1] * b_cache->vec[k].coeffs[i]; + t[0] += (int32_t)a->vec[k].coeffs[2 * i] * b->vec[k].coeffs[2 * i]; + t[1] += (int32_t)a->vec[k].coeffs[2 * i] * b->vec[k].coeffs[2 * i + 1]; + t[1] += (int32_t)a->vec[k].coeffs[2 * i + 1] * b->vec[k].coeffs[2 * i]; } r->coeffs[2 * i + 0] = mlk_montgomery_reduce(t[0]); r->coeffs[2 * i + 1] = mlk_montgomery_reduce(t[1]); @@ -188,8 +188,8 @@ __contract__( MLK_INTERNAL_API void mlk_polyvec_basemul_acc_montgomery_cached( - mlk_poly *r, const mlk_polyvec a, const mlk_polyvec b, - const mlk_polyvec_mulcache b_cache) + mlk_poly *r, const mlk_polyvec *a, const mlk_polyvec *b, + const mlk_polyvec_mulcache *b_cache) { #if defined(MLK_USE_NATIVE_POLYVEC_BASEMUL_ACC_MONTGOMERY_CACHED) { @@ -225,12 +225,12 @@ void mlk_polyvec_basemul_acc_montgomery_cached( * multiplication cache ('mulcache'). This idea originates * from @[NeonNTT] and is used at the C level here. */ MLK_INTERNAL_API -void mlk_polyvec_mulcache_compute(mlk_polyvec_mulcache x, const mlk_polyvec a) +void mlk_polyvec_mulcache_compute(mlk_polyvec_mulcache *x, const mlk_polyvec *a) { unsigned i; for (i = 0; i < MLKEM_K; i++) { - mlk_poly_mulcache_compute(&x[i], &a[i]); + mlk_poly_mulcache_compute(&x->vec[i], &a->vec[i]); } } @@ -242,41 +242,41 @@ void mlk_polyvec_mulcache_compute(mlk_polyvec_mulcache x, const mlk_polyvec a) * This conditional addition is then dropped from all * polynomial compression functions instead (see `compress.c`). 
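As the mulcache comment above indicates (the idea comes from @[NeonNTT]), the t[0]/t[1] accumulation in mlk_polyvec_basemul_acc_montgomery_cached_c corresponds to degree-1 multiplications modulo X^2 - zeta. A minimal sketch of one coefficient pair, assuming the cache entry holds the odd coefficient of b premultiplied by the twiddle factor; the function name and types are illustrative only:

#include <stdint.h>

/* Sketch: (a0 + a1*X) * (b0 + b1*X) mod (X^2 - zeta)
 *       = (a0*b0 + a1*(b1*zeta)) + (a0*b1 + a1*b0) * X
 * Assumption: b1_zeta_cached is the precomputed b1 * zeta term the mulcache
 * is assumed to provide; t[0]/t[1] mirror the accumulators above and would
 * still need a Montgomery reduction afterwards. */
static void basemul_pair_sketch(int32_t t[2], int16_t a0, int16_t a1,
                                int16_t b0, int16_t b1,
                                int16_t b1_zeta_cached)
{
  t[0] = (int32_t)a1 * b1_zeta_cached + (int32_t)a0 * b0; /* constant term */
  t[1] = (int32_t)a0 * b1 + (int32_t)a1 * b0;             /* X coefficient */
}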
*/ MLK_INTERNAL_API -void mlk_polyvec_reduce(mlk_polyvec r) +void mlk_polyvec_reduce(mlk_polyvec *r) { unsigned i; for (i = 0; i < MLKEM_K; i++) { - mlk_poly_reduce(&r[i]); + mlk_poly_reduce(&r->vec[i]); } - mlk_assert_bound_2d(r, MLKEM_K, MLKEM_N, 0, MLKEM_Q); + mlk_assert_bound_2d(r->vec, MLKEM_K, MLKEM_N, 0, MLKEM_Q); } /* Reference: `polyvec_add()` in the reference implementation @[REF]. * - We use destructive version (output=first input) to avoid * reasoning about aliasing in the CBMC specification */ MLK_INTERNAL_API -void mlk_polyvec_add(mlk_polyvec r, const mlk_polyvec b) +void mlk_polyvec_add(mlk_polyvec *r, const mlk_polyvec *b) { unsigned i; for (i = 0; i < MLKEM_K; i++) { - mlk_poly_add(&r[i], &b[i]); + mlk_poly_add(&r->vec[i], &b->vec[i]); } } /* Reference: `polyvec_tomont()` in the reference implementation @[REF]. */ MLK_INTERNAL_API -void mlk_polyvec_tomont(mlk_polyvec r) +void mlk_polyvec_tomont(mlk_polyvec *r) { unsigned i; for (i = 0; i < MLKEM_K; i++) { - mlk_poly_tomont(&r[i]); + mlk_poly_tomont(&r->vec[i]); } - mlk_assert_abs_bound_2d(r, MLKEM_K, MLKEM_N, MLKEM_Q); + mlk_assert_abs_bound_2d(r->vec, MLKEM_K, MLKEM_N, MLKEM_Q); } diff --git a/mlkem/src/poly_k.h b/mlkem/src/poly_k.h index db9c856d86..30847cd29f 100644 --- a/mlkem/src/poly_k.h +++ b/mlkem/src/poly_k.h @@ -29,9 +29,20 @@ #define mlk_polyvec_mulcache MLK_ADD_PARAM_SET(mlk_polyvec_mulcache) /* End of parameter set namespacing */ -typedef mlk_poly mlk_polyvec[MLKEM_K]; -typedef mlk_poly mlk_polymat[MLKEM_K * MLKEM_K]; -typedef mlk_poly_mulcache mlk_polyvec_mulcache[MLKEM_K]; +typedef struct +{ + mlk_poly vec[MLKEM_K]; +} MLK_ALIGN mlk_polyvec; + +typedef struct +{ + mlk_polyvec vec[MLKEM_K]; +} MLK_ALIGN mlk_polymat; + +typedef struct +{ + mlk_poly_mulcache vec[MLKEM_K]; +} MLK_ALIGN mlk_polyvec_mulcache; #define mlk_poly_compress_du MLK_NAMESPACE_K(poly_compress_du) /************************************************* @@ -200,12 +211,12 @@ __contract__( **************************************************/ MLK_INTERNAL_API void mlk_polyvec_compress_du(uint8_t r[MLKEM_POLYVECCOMPRESSEDBYTES_DU], - const mlk_polyvec a) + const mlk_polyvec *a) __contract__( requires(memory_no_alias(r, MLKEM_POLYVECCOMPRESSEDBYTES_DU)) requires(memory_no_alias(a, sizeof(mlk_polyvec))) requires(forall(k0, 0, MLKEM_K, - array_bound(a[k0].coeffs, 0, MLKEM_N, 0, MLKEM_Q))) + array_bound(a->vec[k0].coeffs, 0, MLKEM_N, 0, MLKEM_Q))) assigns(object_whole(r)) ); @@ -228,14 +239,14 @@ __contract__( * **************************************************/ MLK_INTERNAL_API -void mlk_polyvec_decompress_du(mlk_polyvec r, +void mlk_polyvec_decompress_du(mlk_polyvec *r, const uint8_t a[MLKEM_POLYVECCOMPRESSEDBYTES_DU]) __contract__( requires(memory_no_alias(a, MLKEM_POLYVECCOMPRESSEDBYTES_DU)) requires(memory_no_alias(r, sizeof(mlk_polyvec))) assigns(object_whole(r)) ensures(forall(k0, 0, MLKEM_K, - array_bound(r[k0].coeffs, 0, MLKEM_N, 0, MLKEM_Q))) + array_bound(r->vec[k0].coeffs, 0, MLKEM_N, 0, MLKEM_Q))) ); #define mlk_polyvec_tobytes MLK_NAMESPACE_K(polyvec_tobytes) @@ -256,12 +267,12 @@ __contract__( * **************************************************/ MLK_INTERNAL_API -void mlk_polyvec_tobytes(uint8_t r[MLKEM_POLYVECBYTES], const mlk_polyvec a) +void mlk_polyvec_tobytes(uint8_t r[MLKEM_POLYVECBYTES], const mlk_polyvec *a) __contract__( requires(memory_no_alias(a, sizeof(mlk_polyvec))) requires(memory_no_alias(r, MLKEM_POLYVECBYTES)) requires(forall(k0, 0, MLKEM_K, - array_bound(a[k0].coeffs, 0, MLKEM_N, 0, MLKEM_Q))) + 
array_bound(a->vec[k0].coeffs, 0, MLKEM_N, 0, MLKEM_Q))) assigns(object_whole(r)) ); @@ -284,13 +295,13 @@ __contract__( * **************************************************/ MLK_INTERNAL_API -void mlk_polyvec_frombytes(mlk_polyvec r, const uint8_t a[MLKEM_POLYVECBYTES]) +void mlk_polyvec_frombytes(mlk_polyvec *r, const uint8_t a[MLKEM_POLYVECBYTES]) __contract__( requires(memory_no_alias(r, sizeof(mlk_polyvec))) requires(memory_no_alias(a, MLKEM_POLYVECBYTES)) assigns(object_whole(r)) ensures(forall(k0, 0, MLKEM_K, - array_bound(r[k0].coeffs, 0, MLKEM_N, 0, MLKEM_UINT12_LIMIT))) + array_bound(r->vec[k0].coeffs, 0, MLKEM_N, 0, MLKEM_UINT12_LIMIT))) ); #define mlk_polyvec_ntt MLK_NAMESPACE_K(polyvec_ntt) @@ -313,14 +324,14 @@ __contract__( * **************************************************/ MLK_INTERNAL_API -void mlk_polyvec_ntt(mlk_polyvec r) +void mlk_polyvec_ntt(mlk_polyvec *r) __contract__( requires(memory_no_alias(r, sizeof(mlk_polyvec))) requires(forall(j, 0, MLKEM_K, - array_abs_bound(r[j].coeffs, 0, MLKEM_N, MLKEM_Q))) + array_abs_bound(r->vec[j].coeffs, 0, MLKEM_N, MLKEM_Q))) assigns(object_whole(r)) ensures(forall(j, 0, MLKEM_K, - array_abs_bound(r[j].coeffs, 0, MLKEM_N, MLK_NTT_BOUND))) + array_abs_bound(r->vec[j].coeffs, 0, MLKEM_N, MLK_NTT_BOUND))) ); #define mlk_polyvec_invntt_tomont MLK_NAMESPACE_K(polyvec_invntt_tomont) @@ -344,12 +355,12 @@ __contract__( * **************************************************/ MLK_INTERNAL_API -void mlk_polyvec_invntt_tomont(mlk_polyvec r) +void mlk_polyvec_invntt_tomont(mlk_polyvec *r) __contract__( requires(memory_no_alias(r, sizeof(mlk_polyvec))) assigns(object_whole(r)) ensures(forall(j, 0, MLKEM_K, - array_abs_bound(r[j].coeffs, 0, MLKEM_N, MLK_INVNTT_BOUND))) + array_abs_bound(r->vec[j].coeffs, 0, MLKEM_N, MLK_INVNTT_BOUND))) ); #define mlk_polyvec_basemul_acc_montgomery_cached \ @@ -380,15 +391,15 @@ __contract__( **************************************************/ MLK_INTERNAL_API void mlk_polyvec_basemul_acc_montgomery_cached( - mlk_poly *r, const mlk_polyvec a, const mlk_polyvec b, - const mlk_polyvec_mulcache b_cache) + mlk_poly *r, const mlk_polyvec *a, const mlk_polyvec *b, + const mlk_polyvec_mulcache *b_cache) __contract__( requires(memory_no_alias(r, sizeof(mlk_poly))) requires(memory_no_alias(a, sizeof(mlk_polyvec))) requires(memory_no_alias(b, sizeof(mlk_polyvec))) requires(memory_no_alias(b_cache, sizeof(mlk_polyvec_mulcache))) requires(forall(k1, 0, MLKEM_K, - array_bound(a[k1].coeffs, 0, MLKEM_N, 0, MLKEM_UINT12_LIMIT))) + array_bound(a->vec[k1].coeffs, 0, MLKEM_N, 0, MLKEM_UINT12_LIMIT))) assigns(object_whole(r)) ); @@ -423,7 +434,7 @@ __contract__( * higher level safety proofs, and thus not part of the spec. */ MLK_INTERNAL_API -void mlk_polyvec_mulcache_compute(mlk_polyvec_mulcache x, const mlk_polyvec a) +void mlk_polyvec_mulcache_compute(mlk_polyvec_mulcache *x, const mlk_polyvec *a) __contract__( requires(memory_no_alias(x, sizeof(mlk_polyvec_mulcache))) requires(memory_no_alias(a, sizeof(mlk_polyvec))) @@ -453,12 +464,12 @@ __contract__( * use of mlk_poly_reduce() in the context of (de)serialization. 
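The contract rewrites above follow from the new struct-based typedefs in poly_k.h. A minimal before/after sketch of a call site under those typedefs; the variable names are illustrative, and the zero initializer is only there so the coefficient bounds in the preconditions hold:

#include "poly_k.h"

/* Before the change, mlk_polyvec was an array type and decayed to mlk_poly*:
 *   mlk_polyvec v;
 *   mlk_polyvec_ntt(v);
 *   v[0].coeffs[0] = 1;
 * With the struct wrapper, callers pass an explicit pointer and index via .vec: */
static void call_site_sketch(void)
{
  mlk_polyvec v = {0};       /* zero coefficients satisfy the ntt precondition */
  mlk_polyvec_ntt(&v);       /* pass by pointer instead of relying on array decay */
  v.vec[0].coeffs[0] = 1;    /* element access goes through the .vec member */
}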
*/ MLK_INTERNAL_API -void mlk_polyvec_reduce(mlk_polyvec r) +void mlk_polyvec_reduce(mlk_polyvec *r) __contract__( requires(memory_no_alias(r, sizeof(mlk_polyvec))) assigns(object_whole(r)) ensures(forall(k0, 0, MLKEM_K, - array_bound(r[k0].coeffs, 0, MLKEM_N, 0, MLKEM_Q))) + array_bound(r->vec[k0].coeffs, 0, MLKEM_N, 0, MLKEM_Q))) ); #define mlk_polyvec_add MLK_NAMESPACE_K(polyvec_add) @@ -485,16 +496,16 @@ __contract__( * **************************************************/ MLK_INTERNAL_API -void mlk_polyvec_add(mlk_polyvec r, const mlk_polyvec b) +void mlk_polyvec_add(mlk_polyvec *r, const mlk_polyvec *b) __contract__( requires(memory_no_alias(r, sizeof(mlk_polyvec))) requires(memory_no_alias(b, sizeof(mlk_polyvec))) requires(forall(j0, 0, MLKEM_K, forall(k0, 0, MLKEM_N, - (int32_t)r[j0].coeffs[k0] + b[j0].coeffs[k0] <= INT16_MAX))) + (int32_t)r->vec[j0].coeffs[k0] + b->vec[j0].coeffs[k0] <= INT16_MAX))) requires(forall(j1, 0, MLKEM_K, forall(k1, 0, MLKEM_N, - (int32_t)r[j1].coeffs[k1] + b[j1].coeffs[k1] >= INT16_MIN))) + (int32_t)r->vec[j1].coeffs[k1] + b->vec[j1].coeffs[k1] >= INT16_MIN))) assigns(object_whole(r)) ); @@ -514,13 +525,13 @@ __contract__( * **************************************************/ MLK_INTERNAL_API -void mlk_polyvec_tomont(mlk_polyvec r) +void mlk_polyvec_tomont(mlk_polyvec *r) __contract__( requires(memory_no_alias(r, sizeof(mlk_polyvec))) assigns(memory_slice(r, sizeof(mlk_polyvec))) assigns(object_whole(r)) ensures(forall(j, 0, MLKEM_K, - array_abs_bound(r[j].coeffs, 0, MLKEM_N, MLKEM_Q))) + array_abs_bound(r->vec[j].coeffs, 0, MLKEM_N, MLKEM_Q))) ); #define mlk_poly_getnoise_eta1_4x MLK_NAMESPACE_K(poly_getnoise_eta1_4x) diff --git a/proofs/cbmc/gen_matrix/gen_matrix_harness.c b/proofs/cbmc/gen_matrix/gen_matrix_harness.c index b6005322de..1eb9f2b851 100644 --- a/proofs/cbmc/gen_matrix/gen_matrix_harness.c +++ b/proofs/cbmc/gen_matrix/gen_matrix_harness.c @@ -7,7 +7,7 @@ void harness(void) { - mlk_poly *a; + mlk_polymat *a; uint8_t *seed; int transposed; mlk_gen_matrix(a, seed, transposed); diff --git a/proofs/cbmc/gen_matrix_serial/gen_matrix_serial_harness.c b/proofs/cbmc/gen_matrix_serial/gen_matrix_serial_harness.c index b6005322de..1eb9f2b851 100644 --- a/proofs/cbmc/gen_matrix_serial/gen_matrix_serial_harness.c +++ b/proofs/cbmc/gen_matrix_serial/gen_matrix_serial_harness.c @@ -7,7 +7,7 @@ void harness(void) { - mlk_poly *a; + mlk_polymat *a; uint8_t *seed; int transposed; mlk_gen_matrix(a, seed, transposed); diff --git a/proofs/cbmc/matvec_mul/matvec_mul_harness.c b/proofs/cbmc/matvec_mul/matvec_mul_harness.c index 286e47e4ba..04b537afaa 100644 --- a/proofs/cbmc/matvec_mul/matvec_mul_harness.c +++ b/proofs/cbmc/matvec_mul/matvec_mul_harness.c @@ -6,12 +6,13 @@ #include "poly_k.h" #define mlk_matvec_mul MLK_NAMESPACE(matvec_mul) -void mlk_matvec_mul(mlk_polyvec out, const mlk_polymat a, mlk_polyvec const v, - mlk_polyvec_mulcache const vc); +void mlk_matvec_mul(mlk_polyvec *out, const mlk_polymat *a, + const mlk_polyvec *v, const mlk_polyvec_mulcache *vc); void harness(void) { - mlk_poly *out, *a, *v; - mlk_poly_mulcache *vc; + mlk_polyvec *out, *v; + mlk_polymat *a; + mlk_polyvec_mulcache *vc; mlk_matvec_mul(out, a, v, vc); } diff --git a/proofs/cbmc/polymat_permute_bitrev_to_custom/polymat_permute_bitrev_to_custom_harness.c b/proofs/cbmc/polymat_permute_bitrev_to_custom/polymat_permute_bitrev_to_custom_harness.c index c661b46012..9c78c53957 100644 --- a/proofs/cbmc/polymat_permute_bitrev_to_custom/polymat_permute_bitrev_to_custom_harness.c +++ 
b/proofs/cbmc/polymat_permute_bitrev_to_custom/polymat_permute_bitrev_to_custom_harness.c @@ -5,10 +5,10 @@ #include #include "poly_k.h" -void mlk_polymat_permute_bitrev_to_custom(mlk_polymat a); +void mlk_polymat_permute_bitrev_to_custom(mlk_polymat *a); void harness(void) { - mlk_poly *a; + mlk_polymat *a; mlk_polymat_permute_bitrev_to_custom(a); } diff --git a/proofs/cbmc/polyvec_add/polyvec_add_harness.c b/proofs/cbmc/polyvec_add/polyvec_add_harness.c index 4a6cdeca90..5923c177ff 100644 --- a/proofs/cbmc/polyvec_add/polyvec_add_harness.c +++ b/proofs/cbmc/polyvec_add/polyvec_add_harness.c @@ -6,6 +6,6 @@ void harness(void) { - mlk_poly *r, *b; + mlk_polyvec *r, *b; mlk_polyvec_add(r, b); } diff --git a/proofs/cbmc/polyvec_basemul_acc_montgomery_cached/polyvec_basemul_acc_montgomery_cached_harness.c b/proofs/cbmc/polyvec_basemul_acc_montgomery_cached/polyvec_basemul_acc_montgomery_cached_harness.c index 869e078421..0f8d7355bb 100644 --- a/proofs/cbmc/polyvec_basemul_acc_montgomery_cached/polyvec_basemul_acc_montgomery_cached_harness.c +++ b/proofs/cbmc/polyvec_basemul_acc_montgomery_cached/polyvec_basemul_acc_montgomery_cached_harness.c @@ -7,8 +7,8 @@ void harness(void) { mlk_poly *r; - mlk_poly *a, *b; - mlk_poly_mulcache *b_cached; + mlk_polyvec *a, *b; + mlk_polyvec_mulcache *b_cached; mlk_polyvec_basemul_acc_montgomery_cached(r, a, b, b_cached); } diff --git a/proofs/cbmc/polyvec_basemul_acc_montgomery_cached_c/polyvec_basemul_acc_montgomery_cached_c_harness.c b/proofs/cbmc/polyvec_basemul_acc_montgomery_cached_c/polyvec_basemul_acc_montgomery_cached_c_harness.c index a7fb4d0c0e..c9b806c857 100644 --- a/proofs/cbmc/polyvec_basemul_acc_montgomery_cached_c/polyvec_basemul_acc_montgomery_cached_c_harness.c +++ b/proofs/cbmc/polyvec_basemul_acc_montgomery_cached_c/polyvec_basemul_acc_montgomery_cached_c_harness.c @@ -7,13 +7,14 @@ #define mlk_polyvec_basemul_acc_montgomery_cached_c \ MLK_ADD_PARAM_SET(mlk_polyvec_basemul_acc_montgomery_cached_c) void mlk_polyvec_basemul_acc_montgomery_cached_c( - mlk_poly *r, const mlk_polyvec a, const mlk_polyvec b, - const mlk_polyvec_mulcache b_cache); + mlk_poly *r, const mlk_polyvec *a, const mlk_polyvec *b, + const mlk_polyvec_mulcache *b_cache); void harness(void) { - mlk_poly *r, *a, *b; - mlk_poly_mulcache *b_cache; + mlk_poly *r; + mlk_polyvec *a, *b; + mlk_polyvec_mulcache *b_cache; mlk_polyvec_basemul_acc_montgomery_cached_c(r, a, b, b_cache); } diff --git a/proofs/cbmc/polyvec_basemul_acc_montgomery_cached_native/polyvec_basemul_acc_montgomery_cached_native_harness.c b/proofs/cbmc/polyvec_basemul_acc_montgomery_cached_native/polyvec_basemul_acc_montgomery_cached_native_harness.c index 869e078421..0f8d7355bb 100644 --- a/proofs/cbmc/polyvec_basemul_acc_montgomery_cached_native/polyvec_basemul_acc_montgomery_cached_native_harness.c +++ b/proofs/cbmc/polyvec_basemul_acc_montgomery_cached_native/polyvec_basemul_acc_montgomery_cached_native_harness.c @@ -7,8 +7,8 @@ void harness(void) { mlk_poly *r; - mlk_poly *a, *b; - mlk_poly_mulcache *b_cached; + mlk_polyvec *a, *b; + mlk_polyvec_mulcache *b_cached; mlk_polyvec_basemul_acc_montgomery_cached(r, a, b, b_cached); } diff --git a/proofs/cbmc/polyvec_compress_du/polyvec_compress_du_harness.c b/proofs/cbmc/polyvec_compress_du/polyvec_compress_du_harness.c index 5b7df4dd11..5fd0899be0 100644 --- a/proofs/cbmc/polyvec_compress_du/polyvec_compress_du_harness.c +++ b/proofs/cbmc/polyvec_compress_du/polyvec_compress_du_harness.c @@ -7,7 +7,7 @@ void harness(void) { - mlk_poly *r; + mlk_polyvec *r; 
uint8_t *a; mlk_polyvec_compress_du(a, r); diff --git a/proofs/cbmc/polyvec_decompress_du/polyvec_decompress_du_harness.c b/proofs/cbmc/polyvec_decompress_du/polyvec_decompress_du_harness.c index 853d555046..9b2dbc6da1 100644 --- a/proofs/cbmc/polyvec_decompress_du/polyvec_decompress_du_harness.c +++ b/proofs/cbmc/polyvec_decompress_du/polyvec_decompress_du_harness.c @@ -7,7 +7,7 @@ void harness(void) { - mlk_poly *a; + mlk_polyvec *a; uint8_t *r; mlk_polyvec_decompress_du(a, r); diff --git a/proofs/cbmc/polyvec_frombytes/polyvec_frombytes_harness.c b/proofs/cbmc/polyvec_frombytes/polyvec_frombytes_harness.c index 02e9d3d576..9a200adf63 100644 --- a/proofs/cbmc/polyvec_frombytes/polyvec_frombytes_harness.c +++ b/proofs/cbmc/polyvec_frombytes/polyvec_frombytes_harness.c @@ -6,7 +6,7 @@ void harness(void) { - mlk_poly *a; + mlk_polyvec *a; uint8_t *r; mlk_polyvec_frombytes(a, r); } diff --git a/proofs/cbmc/polyvec_invntt_tomont/polyvec_invntt_tomont_harness.c b/proofs/cbmc/polyvec_invntt_tomont/polyvec_invntt_tomont_harness.c index b86aff4f77..e8bf6e6546 100644 --- a/proofs/cbmc/polyvec_invntt_tomont/polyvec_invntt_tomont_harness.c +++ b/proofs/cbmc/polyvec_invntt_tomont/polyvec_invntt_tomont_harness.c @@ -6,6 +6,6 @@ void harness(void) { - mlk_poly *r; + mlk_polyvec *r; mlk_polyvec_invntt_tomont(r); } diff --git a/proofs/cbmc/polyvec_mulcache_compute/polyvec_mulcache_compute_harness.c b/proofs/cbmc/polyvec_mulcache_compute/polyvec_mulcache_compute_harness.c index 53d4178406..b560a52706 100644 --- a/proofs/cbmc/polyvec_mulcache_compute/polyvec_mulcache_compute_harness.c +++ b/proofs/cbmc/polyvec_mulcache_compute/polyvec_mulcache_compute_harness.c @@ -6,8 +6,8 @@ void harness(void) { - mlk_poly_mulcache *x; - mlk_poly *a; + mlk_polyvec_mulcache *x; + mlk_polyvec *a; mlk_polyvec_mulcache_compute(x, a); } diff --git a/proofs/cbmc/polyvec_ntt/polyvec_ntt_harness.c b/proofs/cbmc/polyvec_ntt/polyvec_ntt_harness.c index 3c4d15c5f4..03690ecc01 100644 --- a/proofs/cbmc/polyvec_ntt/polyvec_ntt_harness.c +++ b/proofs/cbmc/polyvec_ntt/polyvec_ntt_harness.c @@ -6,6 +6,6 @@ void harness(void) { - mlk_poly *r; + mlk_polyvec *r; mlk_polyvec_ntt(r); } diff --git a/proofs/cbmc/polyvec_permute_bitrev_to_custom/Makefile b/proofs/cbmc/polyvec_permute_bitrev_to_custom/Makefile new file mode 100644 index 0000000000..023b4db8f6 --- /dev/null +++ b/proofs/cbmc/polyvec_permute_bitrev_to_custom/Makefile @@ -0,0 +1,53 @@ +# Copyright (c) The mlkem-native project authors +# SPDX-License-Identifier: Apache-2.0 OR ISC OR MIT + +include ../Makefile_params.common + +HARNESS_ENTRY = harness +HARNESS_FILE = polyvec_permute_bitrev_to_custom_harness + +# This should be a unique identifier for this proof, and will appear on the +# Litani dashboard. It can be human-readable and contain spaces if you wish. +PROOF_UID = mlk_polyvec_permute_bitrev_to_custom + +DEFINES += +INCLUDES += +UNWINDSET += + +REMOVE_FUNCTION_BODY += + +PROOF_SOURCES += $(PROOFDIR)/$(HARNESS_FILE).c +PROJECT_SOURCES += $(SRCDIR)/mlkem/src/indcpa.c + +CHECK_FUNCTION_CONTRACTS=mlk_polyvec_permute_bitrev_to_custom +USE_FUNCTION_CONTRACTS= +APPLY_LOOP_CONTRACTS=on +USE_DYNAMIC_FRAMES=1 + +# Disable any setting of EXTERNAL_SAT_SOLVER, and choose SMT backend instead +EXTERNAL_SAT_SOLVER= +CBMCFLAGS=--smt2 + +FUNCTION_NAME = mlk_polyvec_permute_bitrev_to_custom + +# If this proof is found to consume huge amounts of RAM, you can set the +# EXPENSIVE variable. 
With new enough versions of the proof tools, this will +# restrict the number of EXPENSIVE CBMC jobs running at once. See the +# documentation in Makefile.common under the "Job Pools" heading for details. +# EXPENSIVE = true +CBMC_OBJECT_BITS = 10 + +# If you require access to a file-local ("static") function or object to conduct +# your proof, set the following (and do not include the original source file +# ("mlkem/src/poly.c") in PROJECT_SOURCES). +# REWRITTEN_SOURCES = $(PROOFDIR)/<__SOURCE_FILE_BASENAME__>.i +# include ../Makefile.common +# $(PROOFDIR)/<__SOURCE_FILE_BASENAME__>.i_SOURCE = $(SRCDIR)/mlkem/src/poly.c +# $(PROOFDIR)/<__SOURCE_FILE_BASENAME__>.i_FUNCTIONS = foo bar +# $(PROOFDIR)/<__SOURCE_FILE_BASENAME__>.i_OBJECTS = baz +# Care is required with variables on the left-hand side: REWRITTEN_SOURCES must +# be set before including Makefile.common, but any use of variables on the +# left-hand side requires those variables to be defined. Hence, _SOURCE, +# _FUNCTIONS, _OBJECTS is set after including Makefile.common. + +include ../Makefile.common diff --git a/proofs/cbmc/polymat_permute_bitrev_to_custom_native/polymat_permute_bitrev_to_custom_native_harness.c b/proofs/cbmc/polyvec_permute_bitrev_to_custom/polyvec_permute_bitrev_to_custom_harness.c similarity index 64% rename from proofs/cbmc/polymat_permute_bitrev_to_custom_native/polymat_permute_bitrev_to_custom_native_harness.c rename to proofs/cbmc/polyvec_permute_bitrev_to_custom/polyvec_permute_bitrev_to_custom_harness.c index c661b46012..a51b65b742 100644 --- a/proofs/cbmc/polymat_permute_bitrev_to_custom_native/polymat_permute_bitrev_to_custom_native_harness.c +++ b/proofs/cbmc/polyvec_permute_bitrev_to_custom/polyvec_permute_bitrev_to_custom_harness.c @@ -5,10 +5,10 @@ #include #include "poly_k.h" -void mlk_polymat_permute_bitrev_to_custom(mlk_polymat a); +void mlk_polyvec_permute_bitrev_to_custom(mlk_polyvec *v); void harness(void) { - mlk_poly *a; - mlk_polymat_permute_bitrev_to_custom(a); + mlk_polyvec *v; + mlk_polyvec_permute_bitrev_to_custom(v); } diff --git a/proofs/cbmc/polymat_permute_bitrev_to_custom_native/Makefile b/proofs/cbmc/polyvec_permute_bitrev_to_custom_native/Makefile similarity index 70% rename from proofs/cbmc/polymat_permute_bitrev_to_custom_native/Makefile rename to proofs/cbmc/polyvec_permute_bitrev_to_custom_native/Makefile index ffc285b3fc..cf9352db10 100644 --- a/proofs/cbmc/polymat_permute_bitrev_to_custom_native/Makefile +++ b/proofs/cbmc/polyvec_permute_bitrev_to_custom_native/Makefile @@ -4,21 +4,22 @@ include ../Makefile_params.common HARNESS_ENTRY = harness -HARNESS_FILE = polymat_permute_bitrev_to_custom_native_harness +HARNESS_FILE = polyvec_permute_bitrev_to_custom_native_harness # This should be a unique identifier for this proof, and will appear on the # Litani dashboard. It can be human-readable and contain spaces if you wish. 
-PROOF_UID = mlk_polymat_permute_bitrev_to_custom_native +PROOF_UID = mlk_polyvec_permute_bitrev_to_custom_native DEFINES += -DMLK_CONFIG_USE_NATIVE_BACKEND_ARITH -DMLK_CONFIG_ARITH_BACKEND_FILE="\"dummy_backend.h\"" INCLUDES += +UNWINDSET += REMOVE_FUNCTION_BODY += PROOF_SOURCES += $(PROOFDIR)/$(HARNESS_FILE).c PROJECT_SOURCES += $(SRCDIR)/mlkem/src/indcpa.c -CHECK_FUNCTION_CONTRACTS=mlk_polymat_permute_bitrev_to_custom +CHECK_FUNCTION_CONTRACTS=mlk_polyvec_permute_bitrev_to_custom USE_FUNCTION_CONTRACTS= mlk_poly_permute_bitrev_to_custom APPLY_LOOP_CONTRACTS=on USE_DYNAMIC_FRAMES=1 @@ -27,25 +28,14 @@ USE_DYNAMIC_FRAMES=1 EXTERNAL_SAT_SOLVER= CBMCFLAGS=--smt2 -# For this proof we tell CBMC to -# - not decompose arrays into their individual cells -# - to slice constraints that are not in the cone of influence of the proof obligations -# These options simplify the modelling of arrays and produce much more compact -# SMT files, leaving all array-type reasoning to the SMT solver. -# -# For functions that use large and multi-dimensional arrays, this yields -# a substantial improvement in proof performance. -# CBMCFLAGS += --no-array-field-sensitivity -# CBMCFLAGS += --slice-formula - -FUNCTION_NAME = mlk_polymat_permute_bitrev_to_custom_native +FUNCTION_NAME = mlk_polyvec_permute_bitrev_to_custom_native # If this proof is found to consume huge amounts of RAM, you can set the # EXPENSIVE variable. With new enough versions of the proof tools, this will # restrict the number of EXPENSIVE CBMC jobs running at once. See the # documentation in Makefile.common under the "Job Pools" heading for details. # EXPENSIVE = true -CBMC_OBJECT_BITS = 11 +CBMC_OBJECT_BITS = 10 # If you require access to a file-local ("static") function or object to conduct # your proof, set the following (and do not include the original source file diff --git a/proofs/cbmc/polyvec_permute_bitrev_to_custom_native/polyvec_permute_bitrev_to_custom_native_harness.c b/proofs/cbmc/polyvec_permute_bitrev_to_custom_native/polyvec_permute_bitrev_to_custom_native_harness.c new file mode 100644 index 0000000000..a51b65b742 --- /dev/null +++ b/proofs/cbmc/polyvec_permute_bitrev_to_custom_native/polyvec_permute_bitrev_to_custom_native_harness.c @@ -0,0 +1,14 @@ +// Copyright (c) The mlkem-native project authors +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: MIT-0 + +#include +#include "poly_k.h" + +void mlk_polyvec_permute_bitrev_to_custom(mlk_polyvec *v); + +void harness(void) +{ + mlk_polyvec *v; + mlk_polyvec_permute_bitrev_to_custom(v); +} diff --git a/proofs/cbmc/polyvec_reduce/polyvec_reduce_harness.c b/proofs/cbmc/polyvec_reduce/polyvec_reduce_harness.c index a3d0fb4d83..26b38a3636 100644 --- a/proofs/cbmc/polyvec_reduce/polyvec_reduce_harness.c +++ b/proofs/cbmc/polyvec_reduce/polyvec_reduce_harness.c @@ -6,6 +6,6 @@ void harness(void) { - mlk_poly *a; + mlk_polyvec *a; mlk_polyvec_reduce(a); } diff --git a/proofs/cbmc/polyvec_tobytes/polyvec_tobytes_harness.c b/proofs/cbmc/polyvec_tobytes/polyvec_tobytes_harness.c index 9ccb5961ae..16616aa1aa 100644 --- a/proofs/cbmc/polyvec_tobytes/polyvec_tobytes_harness.c +++ b/proofs/cbmc/polyvec_tobytes/polyvec_tobytes_harness.c @@ -6,7 +6,7 @@ void harness(void) { - mlk_poly *a; + mlk_polyvec *a; uint8_t *r; mlk_polyvec_tobytes(r, a); } diff --git a/proofs/cbmc/polyvec_tomont/polyvec_tomont_harness.c b/proofs/cbmc/polyvec_tomont/polyvec_tomont_harness.c index 9561889df4..72ee9743b2 100644 --- a/proofs/cbmc/polyvec_tomont/polyvec_tomont_harness.c +++ b/proofs/cbmc/polyvec_tomont/polyvec_tomont_harness.c @@ -6,6 +6,6 @@ void harness(void) { - mlk_poly *a; + mlk_polyvec *a; mlk_polyvec_tomont(a); } diff --git a/test/bench_components_mlkem.c b/test/bench_components_mlkem.c index 26858d627c..2802bf3112 100644 --- a/test/bench_components_mlkem.c +++ b/test/bench_components_mlkem.c @@ -147,52 +147,52 @@ static int bench(void) /* mlk_polyvec */ /* mlk_polyvec_compress_du */ BENCH("mlk_polyvec_compress_du", - mlk_polyvec_compress_du((uint8_t *)data0, (const mlk_poly *)data1)) + mlk_polyvec_compress_du((uint8_t *)data0, (const mlk_polyvec *)data1)) /* mlk_polyvec_decompress_du */ BENCH("mlk_polyvec_decompress_du", - mlk_polyvec_decompress_du((mlk_poly *)data0, (uint8_t *)data1)) + mlk_polyvec_decompress_du((mlk_polyvec *)data0, (uint8_t *)data1)) /* mlk_polyvec_tobytes */ BENCH("mlk_polyvec_tobytes", - mlk_polyvec_tobytes((uint8_t *)data0, (const mlk_poly *)data1)) + mlk_polyvec_tobytes((uint8_t *)data0, (const mlk_polyvec *)data1)) /* mlk_polyvec_frombytes */ BENCH("mlk_polyvec_frombytes", - mlk_polyvec_frombytes((mlk_poly *)data0, (uint8_t *)data1)) + mlk_polyvec_frombytes((mlk_polyvec *)data0, (uint8_t *)data1)) /* mlk_polyvec_ntt */ - BENCH("mlk_polyvec_ntt", mlk_polyvec_ntt((mlk_poly *)data0)) + BENCH("mlk_polyvec_ntt", mlk_polyvec_ntt((mlk_polyvec *)data0)) /* mlk_polyvec_invntt_tomont */ BENCH("mlk_polyvec_invntt_tomont", - mlk_polyvec_invntt_tomont((mlk_poly *)data0)) + mlk_polyvec_invntt_tomont((mlk_polyvec *)data0)) /* mlk_polyvec_basemul_acc_montgomery_cached */ BENCH("mlk_polyvec_basemul_acc_montgomery_cached", mlk_polyvec_basemul_acc_montgomery_cached( - (mlk_poly *)data0, (const mlk_poly *)data1, (const mlk_poly *)data2, - (const mlk_poly_mulcache *)data3)) + (mlk_poly *)data0, (const mlk_polyvec *)data1, + (const mlk_polyvec *)data2, (const mlk_polyvec_mulcache *)data3)) /* mlk_polyvec_mulcache_compute */ BENCH("mlk_polyvec_mulcache_compute", - mlk_polyvec_mulcache_compute((mlk_poly_mulcache *)data0, - (const mlk_poly *)data1)) + mlk_polyvec_mulcache_compute((mlk_polyvec_mulcache *)data0, + (const mlk_polyvec *)data1)) /* mlk_polyvec_reduce */ - BENCH("mlk_polyvec_reduce", mlk_polyvec_reduce((mlk_poly *)data0)) + BENCH("mlk_polyvec_reduce", mlk_polyvec_reduce((mlk_polyvec *)data0)) /* mlk_polyvec_add */ BENCH("mlk_polyvec_add", - mlk_polyvec_add((mlk_poly 
*)data0, (const mlk_poly *)data1)) + mlk_polyvec_add((mlk_polyvec *)data0, (const mlk_polyvec *)data1)) /* mlk_polyvec_tomont */ - BENCH("mlk_polyvec_tomont", mlk_polyvec_tomont((mlk_poly *)data0)) + BENCH("mlk_polyvec_tomont", mlk_polyvec_tomont((mlk_polyvec *)data0)) /* indcpa */ /* mlk_gen_matrix */ BENCH("mlk_gen_matrix", - mlk_gen_matrix((mlk_poly *)data0, (uint8_t *)data1, 0)) + mlk_gen_matrix((mlk_polymat *)data0, (uint8_t *)data1, 0)) #if defined(MLK_ARITH_BACKEND_AARCH64) diff --git a/test/test_unit.c b/test/test_unit.c index 56ed1bafb7..82a011537b 100644 --- a/test/test_unit.c +++ b/test/test_unit.c @@ -29,8 +29,8 @@ void mlk_poly_invntt_tomont_c(mlk_poly *r); void mlk_poly_tobytes_c(uint8_t r[MLKEM_POLYBYTES], const mlk_poly *a); void mlk_poly_frombytes_c(mlk_poly *r, const uint8_t a[MLKEM_POLYBYTES]); void mlk_polyvec_basemul_acc_montgomery_cached_c( - mlk_poly *r, const mlk_polyvec a, const mlk_polyvec b, - const mlk_polyvec_mulcache b_cache); + mlk_poly *r, const mlk_polyvec *a, const mlk_polyvec *b, + const mlk_polyvec_mulcache *b_cache); void mlk_poly_mulcache_compute_c(mlk_poly_mulcache *x, const mlk_poly *a); #define CHECK(x) \ @@ -509,24 +509,24 @@ static int test_polyvec_basemul_core(const int16_t *a, const int16_t *b, /* Copy test data to structures */ for (i = 0; i < MLKEM_K; i++) { - memcpy(test_a[i].coeffs, &a[i * MLKEM_N], MLKEM_N * sizeof(int16_t)); - memcpy(test_b[i].coeffs, &b[i * MLKEM_N], MLKEM_N * sizeof(int16_t)); - memcpy(ref_a[i].coeffs, &a[i * MLKEM_N], MLKEM_N * sizeof(int16_t)); - memcpy(ref_b[i].coeffs, &b[i * MLKEM_N], MLKEM_N * sizeof(int16_t)); + memcpy(test_a.vec[i].coeffs, &a[i * MLKEM_N], MLKEM_N * sizeof(int16_t)); + memcpy(test_b.vec[i].coeffs, &b[i * MLKEM_N], MLKEM_N * sizeof(int16_t)); + memcpy(ref_a.vec[i].coeffs, &a[i * MLKEM_N], MLKEM_N * sizeof(int16_t)); + memcpy(ref_b.vec[i].coeffs, &b[i * MLKEM_N], MLKEM_N * sizeof(int16_t)); #ifdef MLK_USE_NATIVE_NTT_CUSTOM_ORDER - mlk_poly_permute_bitrev_to_custom(test_a[i].coeffs); - mlk_poly_permute_bitrev_to_custom(test_b[i].coeffs); + mlk_poly_permute_bitrev_to_custom(test_a.vec[i].coeffs); + mlk_poly_permute_bitrev_to_custom(test_b.vec[i].coeffs); #endif - mlk_poly_mulcache_compute_c(&ref_cache[i], &ref_b[i]); - mlk_poly_mulcache_compute(&test_cache[i], &test_b[i]); + mlk_poly_mulcache_compute_c(&ref_cache.vec[i], &ref_b.vec[i]); + mlk_poly_mulcache_compute(&test_cache.vec[i], &test_b.vec[i]); } - mlk_polyvec_basemul_acc_montgomery_cached(&test_result, test_a, test_b, - test_cache); - mlk_polyvec_basemul_acc_montgomery_cached_c(&ref_result, ref_a, ref_b, - ref_cache); + mlk_polyvec_basemul_acc_montgomery_cached(&test_result, &test_a, &test_b, + &test_cache); + mlk_polyvec_basemul_acc_montgomery_cached_c(&ref_result, &ref_a, &ref_b, + &ref_cache); #ifdef MLK_USE_NATIVE_NTT_CUSTOM_ORDER mlk_poly_permute_bitrev_to_custom(ref_result.coeffs);
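For reference, a minimal sketch of the updated calling convention for the cached base-multiplication pipeline, mirroring the sequence used in indcpa.c and in the unit test above. The all-zero inputs are used purely so the contract preconditions hold; real callers obtain the operands from sampling or decoding, and the function name is illustrative only:

#include "poly_k.h"

static void pipeline_sketch(void)
{
  mlk_polyvec a = {0}, b = {0};
  mlk_polyvec_mulcache b_cache;
  mlk_poly r;

  /* Every polyvec operation now takes pointers to the wrapper structs. */
  mlk_polyvec_ntt(&a);
  mlk_polyvec_ntt(&b);
  mlk_polyvec_mulcache_compute(&b_cache, &b);
  mlk_polyvec_basemul_acc_montgomery_cached(&r, &a, &b, &b_cache);
}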