ntt.rs
#[cfg(target_feature = "avx2")]
use std::arch::x86_64::*;

use crate::{
    arith::*,
    number_theory::*,
    params::*,
};
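/// Computes the first `2^poly_len_log2` powers of `root` modulo `modulus`,
/// stored in bit-reversed order (position `reverse_bits(i)` holds `root^i`),
/// which is the order the NTT butterflies consume them in.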
pub fn powers_of_primitive_root(root: u64, modulus: u64, poly_len_log2: usize) -> Vec<u64> {
    let poly_len = 1usize << poly_len_log2;
    let mut root_powers = vec![0u64; poly_len];
    let mut power = root;
    for i in 1..poly_len {
        let idx = reverse_bits(i as u64, poly_len_log2) as usize;
        root_powers[idx] = power;
        power = multiply_uint_mod(power, root, modulus);
    }
    root_powers[0] = 1;
    root_powers
}
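/// Precomputes `floor(inp[i] * 2^64 / modulus)` for each entry. These scaled
/// ("Shoup") root powers let a butterfly estimate the quotient of a modular
/// multiplication with one multiply and one shift instead of a division.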
pub fn scale_powers_u64(modulus: u64, poly_len: usize, inp: &[u64]) -> Vec<u64> {
    let mut scaled_powers = vec![0; poly_len];
    for i in 0..poly_len {
        let wide_val = (inp[i] as u128) << 64;
        let quotient = wide_val / (modulus as u128);
        scaled_powers[i] = quotient as u64;
    }
    scaled_powers
}
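/// 32-bit analogue of `scale_powers_u64`: computes
/// `floor(inp[i] * 2^32 / modulus)` for a modulus that fits in 32 bits,
/// returning each quotient widened back to `u64`.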
pub fn scale_powers_u32(modulus: u32, poly_len: usize, inp: &[u64]) -> Vec<u64> {
    let mut scaled_powers = vec![0; poly_len];
    for i in 0..poly_len {
        let wide_val = inp[i] << 32;
        let quotient = wide_val / (modulus as u64);
        scaled_powers[i] = (quotient as u32) as u64;
    }
    scaled_powers
}
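/// Builds the NTT tables for every CRT modulus. For each modulus this finds
/// the minimal primitive `2 * poly_len`-th root of unity and emits four
/// tables: `[root_powers, scaled_root_powers, inv_root_powers,
/// scaled_inv_root_powers]`. The inverse root powers are pre-divided by 2 so
/// that the `1/poly_len` normalization of the inverse transform is absorbed
/// one halving per level across its `log2(poly_len)` levels.
///
/// A minimal usage sketch, using the same parameters as the tests below:
///
/// ```ignore
/// let tables = build_ntt_tables(2048, &[268369921u64, 249561089u64]);
/// assert_eq!(tables.len(), 2); // one entry per modulus
/// assert_eq!(tables[0].len(), 4); // four tables per modulus
/// ```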
pub fn build_ntt_tables(poly_len: usize, moduli: &[u64]) -> Vec<Vec<Vec<u64>>> {
    let poly_len_log2 = log2(poly_len as u64) as usize;
    let mut output: Vec<Vec<Vec<u64>>> = vec![Vec::new(); moduli.len()];
    for coeff_mod in 0..moduli.len() {
        let modulus = moduli[coeff_mod];
        let modulus_as_u32 = modulus.try_into().unwrap();
        let root = get_minimal_primitive_root(2 * poly_len as u64, modulus).unwrap();
        let inv_root = invert_uint_mod(root, modulus).unwrap();

        let root_powers = powers_of_primitive_root(root, modulus, poly_len_log2);
        let scaled_root_powers = scale_powers_u32(modulus_as_u32, poly_len, root_powers.as_slice());
        let mut inv_root_powers = powers_of_primitive_root(inv_root, modulus, poly_len_log2);
        for i in 0..poly_len {
            inv_root_powers[i] = div2_uint_mod(inv_root_powers[i], modulus);
        }
        let scaled_inv_root_powers =
            scale_powers_u32(modulus_as_u32, poly_len, inv_root_powers.as_slice());

        output[coeff_mod] = vec![
            root_powers,
            scaled_root_powers,
            inv_root_powers,
            scaled_inv_root_powers,
        ];
    }
    output
}
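/// In-place forward NTT applied to each CRT component of `operand_overall`
/// (laid out as `crt_count` contiguous blocks of `poly_len` coefficients).
/// Butterflies use Harvey-style lazy reduction: `(y * w') >> 32` estimates
/// the quotient so no division is needed, intermediate values are only kept
/// below `4 * modulus`, and a final branchless pass reduces to `[0, modulus)`.
///
/// Round-trip sketch, mirroring the `ntt_correct` test below:
///
/// ```ignore
/// let params = Params::init(2048, vec![268369921u64, 249561089u64]);
/// let mut v = vec![0u64; params.crt_count * params.poly_len];
/// ntt_forward(&params, v.as_mut_slice());
/// ntt_inverse(&params, v.as_mut_slice()); // recovers the original v
/// ```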
#[cfg(not(target_feature = "avx2"))]
pub fn ntt_forward(params: &Params, operand_overall: &mut [u64]) {
    let log_n = params.poly_len_log2;
    let n = 1 << log_n;
    for coeff_mod in 0..params.crt_count {
        let operand = &mut operand_overall[coeff_mod * n..coeff_mod * n + n];
        let forward_table = params.get_ntt_forward_table(coeff_mod);
        let forward_table_prime = params.get_ntt_forward_prime_table(coeff_mod);
        let modulus_small = params.moduli[coeff_mod] as u32;
        let two_times_modulus_small: u32 = 2 * modulus_small;

        for mm in 0..log_n {
            let m = 1 << mm;
            let t = n >> (mm + 1);

            let mut it = operand.chunks_exact_mut(2 * t);
            for i in 0..m {
                let w = forward_table[m + i];
                let w_prime = forward_table_prime[m + i];

                let op = it.next().unwrap();
                for j in 0..t {
                    let x: u32 = op[j] as u32;
                    let y: u32 = op[t + j] as u32;
                    let curr_x: u32 =
                        x - (two_times_modulus_small * ((x >= two_times_modulus_small) as u32));
                    let q_tmp: u64 = ((y as u64) * (w_prime as u64)) >> 32;
                    let q_new = w * (y as u64) - q_tmp * (modulus_small as u64);
                    op[j] = curr_x as u64 + q_new;
                    op[t + j] = curr_x as u64 + ((two_times_modulus_small as u64) - q_new);
                }
            }
        }

        // Final branchless reduction to [0, modulus).
        for i in 0..n {
            operand[i] -= ((operand[i] >= two_times_modulus_small as u64) as u64)
                * two_times_modulus_small as u64;
            operand[i] -= ((operand[i] >= modulus_small as u64) as u64) * modulus_small as u64;
        }
    }
}
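/// AVX2 variant of `ntt_forward`: the same butterfly arithmetic, but four
/// 64-bit lanes are processed per iteration whenever the butterfly half-width
/// `t >= 4`; narrower widths fall back to the scalar code. Note that
/// `_mm256_mul_epu32` multiplies only the low 32 bits of each lane, which
/// suffices here because the transform keeps all values within 32 bits (the
/// scalar path likewise truncates operands to `u32`).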
#[cfg(target_feature = "avx2")]
pub fn ntt_forward(params: &Params, operand_overall: &mut [u64]) {
    let log_n = params.poly_len_log2;
    let n = 1 << log_n;
    for coeff_mod in 0..params.crt_count {
        let operand = &mut operand_overall[coeff_mod * n..coeff_mod * n + n];
        let forward_table = params.get_ntt_forward_table(coeff_mod);
        let forward_table_prime = params.get_ntt_forward_prime_table(coeff_mod);
        let modulus_small = params.moduli[coeff_mod] as u32;
        let two_times_modulus_small: u32 = 2 * modulus_small;

        for mm in 0..log_n {
            let m = 1 << mm;
            let t = n >> (mm + 1);

            let mut it = operand.chunks_exact_mut(2 * t);
            for i in 0..m {
                let w = forward_table[m + i];
                let w_prime = forward_table_prime[m + i];

                let op = it.next().unwrap();
                if t < 4 {
                    // Butterfly too narrow to vectorize; use the scalar path.
                    for j in 0..t {
                        let x: u32 = op[j] as u32;
                        let y: u32 = op[t + j] as u32;
                        let curr_x: u32 = x
                            - (two_times_modulus_small * ((x >= two_times_modulus_small) as u32));
                        let q_tmp: u64 = ((y as u64) * (w_prime as u64)) >> 32;
                        let q_new = w * (y as u64) - q_tmp * (modulus_small as u64);
                        op[j] = curr_x as u64 + q_new;
                        op[t + j] = curr_x as u64 + ((two_times_modulus_small as u64) - q_new);
                    }
                } else {
                    unsafe {
                        // AVX2: process four butterflies per iteration.
                        for j in (0..t).step_by(4) {
                            let p_x = &mut op[j] as *mut u64;
                            let p_y = &mut op[j + t] as *mut u64;
                            let x = _mm256_loadu_si256(p_x as *const __m256i);
                            let y = _mm256_loadu_si256(p_y as *const __m256i);

                            // curr_x = x, minus 2q when x exceeds 2q.
                            let cmp_val = _mm256_set1_epi64x(two_times_modulus_small as i64);
                            let gt_mask = _mm256_cmpgt_epi64(x, cmp_val);
                            let to_subtract = _mm256_and_si256(gt_mask, cmp_val);
                            let curr_x = _mm256_sub_epi64(x, to_subtract);

                            // q_val = (y * w_prime) >> 32, the quotient estimate.
                            let w_prime_vec = _mm256_set1_epi64x(w_prime as i64);
                            let product = _mm256_mul_epu32(y, w_prime_vec);
                            let q_val = _mm256_srli_epi64(product, 32);

                            // q_final = w * y - q_val * modulus_small.
                            let w_vec = _mm256_set1_epi64x(w as i64);
                            let w_times_y = _mm256_mul_epu32(y, w_vec);
                            let modulus_small_vec = _mm256_set1_epi64x(modulus_small as i64);
                            let q_scaled = _mm256_mul_epu32(q_val, modulus_small_vec);
                            let q_final = _mm256_sub_epi64(w_times_y, q_scaled);

                            let new_x = _mm256_add_epi64(curr_x, q_final);
                            let q_final_inverted = _mm256_sub_epi64(cmp_val, q_final);
                            let new_y = _mm256_add_epi64(curr_x, q_final_inverted);
                            _mm256_storeu_si256(p_x as *mut __m256i, new_x);
                            _mm256_storeu_si256(p_y as *mut __m256i, new_y);
                        }
                    }
                }
            }
        }

        // Final branchless reduction to [0, modulus), four lanes at a time.
        for i in (0..n).step_by(4) {
            unsafe {
                let p_x = &mut operand[i] as *mut u64;
                let cmp_val1 = _mm256_set1_epi64x(two_times_modulus_small as i64);
                let mut x = _mm256_loadu_si256(p_x as *const __m256i);
                let mut gt_mask = _mm256_cmpgt_epi64(x, cmp_val1);
                let mut to_subtract = _mm256_and_si256(gt_mask, cmp_val1);
                x = _mm256_sub_epi64(x, to_subtract);

                let cmp_val2 = _mm256_set1_epi64x(modulus_small as i64);
                gt_mask = _mm256_cmpgt_epi64(x, cmp_val2);
                to_subtract = _mm256_and_si256(gt_mask, cmp_val2);
                x = _mm256_sub_epi64(x, to_subtract);
                _mm256_storeu_si256(p_x as *mut __m256i, x);
            }
        }
    }
}
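/// In-place inverse NTT applied to each CRT component of `operand_overall`.
/// Uses the same lazy-reduction style as `ntt_forward`; since the inverse
/// root powers were pre-divided by 2 in `build_ntt_tables`, each of the
/// `log2(n)` levels halves the values, so no separate multiplication by `1/n`
/// is needed at the end.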
#[cfg(not(target_feature = "avx2"))]
pub fn ntt_inverse(params: &Params, operand_overall: &mut [u64]) {
    for coeff_mod in 0..params.crt_count {
        let n = params.poly_len;
        let operand = &mut operand_overall[coeff_mod * n..coeff_mod * n + n];
        let inverse_table = params.get_ntt_inverse_table(coeff_mod);
        let inverse_table_prime = params.get_ntt_inverse_prime_table(coeff_mod);
        let modulus = params.moduli[coeff_mod];
        let two_times_modulus: u64 = 2 * modulus;

        for mm in (0..params.poly_len_log2).rev() {
            let h = 1 << mm;
            let t = n >> (mm + 1);

            let mut it = operand.chunks_exact_mut(2 * t);
            for i in 0..h {
                let w = inverse_table[h + i];
                let w_prime = inverse_table_prime[h + i];

                let op = it.next().unwrap();
                for j in 0..t {
                    let x = op[j];
                    let y = op[t + j];
                    let t_tmp = two_times_modulus - y + x;
                    let curr_x = x + y - (two_times_modulus * (((x << 1) >= t_tmp) as u64));
                    let h_tmp = (t_tmp * w_prime) >> 32;
                    let res_x = (curr_x + (modulus * (t_tmp & 1))) >> 1;
                    let res_y = w * t_tmp - h_tmp * modulus;
                    op[j] = res_x;
                    op[t + j] = res_y;
                }
            }
        }

        // Final branchless reduction to [0, modulus).
        for i in 0..n {
            operand[i] -= ((operand[i] >= two_times_modulus) as u64) * two_times_modulus;
            operand[i] -= ((operand[i] >= modulus) as u64) * modulus;
        }
    }
}
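/// AVX2 variant of `ntt_inverse`: four 64-bit lanes per iteration whenever
/// the butterfly half-width `t >= 4`, with the scalar butterfly as the
/// fallback for narrower widths.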
#[cfg(target_feature = "avx2")]
pub fn ntt_inverse(params: &Params, operand_overall: &mut [u64]) {
    for coeff_mod in 0..params.crt_count {
        let n = params.poly_len;
        let operand = &mut operand_overall[coeff_mod * n..coeff_mod * n + n];
        let inverse_table = params.get_ntt_inverse_table(coeff_mod);
        let inverse_table_prime = params.get_ntt_inverse_prime_table(coeff_mod);
        let modulus = params.moduli[coeff_mod];
        let two_times_modulus: u64 = 2 * modulus;

        for mm in (0..params.poly_len_log2).rev() {
            let h = 1 << mm;
            let t = n >> (mm + 1);

            let mut it = operand.chunks_exact_mut(2 * t);
            for i in 0..h {
                let w = inverse_table[h + i];
                let w_prime = inverse_table_prime[h + i];

                let op = it.next().unwrap();
                if t < 4 {
                    // Butterfly too narrow to vectorize; use the scalar path.
                    for j in 0..t {
                        let x = op[j];
                        let y = op[t + j];
                        let t_tmp = two_times_modulus - y + x;
                        let curr_x = x + y - (two_times_modulus * (((x << 1) >= t_tmp) as u64));
                        let h_tmp = (t_tmp * w_prime) >> 32;
                        let res_x = (curr_x + (modulus * (t_tmp & 1))) >> 1;
                        let res_y = w * t_tmp - h_tmp * modulus;
                        op[j] = res_x;
                        op[t + j] = res_y;
                    }
                } else {
                    unsafe {
                        // AVX2: process four butterflies per iteration.
                        for j in (0..t).step_by(4) {
                            let p_x = &mut op[j] as *mut u64;
                            let p_y = &mut op[j + t] as *mut u64;
                            let x = _mm256_loadu_si256(p_x as *const __m256i);
                            let y = _mm256_loadu_si256(p_y as *const __m256i);
                            let modulus_vec = _mm256_set1_epi64x(modulus as i64);
                            let two_times_modulus_vec =
                                _mm256_set1_epi64x(two_times_modulus as i64);

                            // t_tmp = 2q - y + x.
                            let mut t_tmp = _mm256_set1_epi64x(two_times_modulus as i64);
                            t_tmp = _mm256_sub_epi64(t_tmp, y);
                            t_tmp = _mm256_add_epi64(t_tmp, x);

                            // curr_x = x + y, minus 2q when 2x exceeds t_tmp.
                            let gt_mask = _mm256_cmpgt_epi64(_mm256_slli_epi64(x, 1), t_tmp);
                            let to_subtract = _mm256_and_si256(gt_mask, two_times_modulus_vec);
                            let mut curr_x = _mm256_add_epi64(x, y);
                            curr_x = _mm256_sub_epi64(curr_x, to_subtract);

                            // h_tmp = (t_tmp * w_prime) >> 32, the quotient estimate.
                            let w_prime_vec = _mm256_set1_epi64x(w_prime as i64);
                            let mut h_tmp = _mm256_mul_epu32(t_tmp, w_prime_vec);
                            h_tmp = _mm256_srli_epi64(h_tmp, 32);

                            // res_x = (curr_x + q * (t_tmp & 1)) >> 1.
                            let and_mask = _mm256_set1_epi64x(1);
                            let eq_mask =
                                _mm256_cmpeq_epi64(_mm256_and_si256(t_tmp, and_mask), and_mask);
                            let to_add = _mm256_and_si256(eq_mask, modulus_vec);
                            let new_x = _mm256_srli_epi64(_mm256_add_epi64(curr_x, to_add), 1);

                            // res_y = w * t_tmp - h_tmp * q.
                            let w_vec = _mm256_set1_epi64x(w as i64);
                            let w_times_t_tmp = _mm256_mul_epu32(t_tmp, w_vec);
                            let h_tmp_times_modulus = _mm256_mul_epu32(h_tmp, modulus_vec);
                            let new_y = _mm256_sub_epi64(w_times_t_tmp, h_tmp_times_modulus);

                            _mm256_storeu_si256(p_x as *mut __m256i, new_x);
                            _mm256_storeu_si256(p_y as *mut __m256i, new_y);
                        }
                    }
                }
            }
        }

        // Final branchless reduction to [0, modulus).
        for i in 0..n {
            operand[i] -= ((operand[i] >= two_times_modulus) as u64) * two_times_modulus;
            operand[i] -= ((operand[i] >= modulus) as u64) * modulus;
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::util::*;
    use rand::Rng;

    fn get_params() -> Params {
        Params::init(2048, vec![268369921u64, 249561089u64])
    }

    const REF_VAL: u64 = 519370102;

    #[test]
    fn build_ntt_tables_correct() {
        let moduli = [268369921u64, 249561089u64];
        let poly_len = 2048usize;
        let res = build_ntt_tables(poly_len, moduli.as_slice());
        assert_eq!(res.len(), 2);
        assert_eq!(res[0].len(), 4);
        assert_eq!(res[0][0].len(), poly_len);
        assert_eq!(res[0][2][0], 134184961u64);
        assert_eq!(res[0][2][1], 96647580u64);
        let mut x1 = 0u64;
        for i in 0..res.len() {
            for j in 0..res[0].len() {
                for k in 0..res[0][0].len() {
                    x1 ^= res[i][j][k];
                }
            }
        }
        assert_eq!(x1, REF_VAL);
    }

    #[test]
    fn ntt_forward_correct() {
        let params = get_params();
        let mut v1 = vec![0; 2 * 2048];
        v1[0] = 100;
        v1[2048] = 100;
        ntt_forward(&params, v1.as_mut_slice());
        assert_eq!(v1[50], 100);
        assert_eq!(v1[2048 + 50], 100);
    }

    #[test]
    fn ntt_correct() {
        let params = get_params();
        let mut v1 = vec![0; params.crt_count * params.poly_len];
        let mut rng = rand::thread_rng();
        for i in 0..params.crt_count {
            for j in 0..params.poly_len {
                let idx = calc_index(&[i, j], &[params.crt_count, params.poly_len]);
                let val: u64 = rng.gen();
                v1[idx] = val % params.moduli[i];
            }
        }
        let mut v2 = v1.clone();
        ntt_forward(&params, v2.as_mut_slice());
        ntt_inverse(&params, v2.as_mut_slice());
        for i in 0..params.crt_count * params.poly_len {
            assert_eq!(v1[i], v2[i]);
        }
    }

    #[test]
    fn calc_index_correct() {
        assert_eq!(calc_index(&[2, 3, 4], &[10, 10, 100]), 2304);
        assert_eq!(calc_index(&[2, 3, 4], &[3, 5, 7]), 95);
    }
}