// server.rs
  1. #[cfg(target_feature = "avx2")]
  2. use std::arch::x86_64::*;
  3. #[cfg(target_feature = "avx2")]
  4. use crate::aligned_memory::*;
  5. use crate::arith::*;
  6. use crate::gadget::*;
  7. use crate::params::*;
  8. use crate::poly::*;
  9. use crate::util::*;
/// Expands the single packed query ciphertext in `v[0]` into `2^g`
/// ciphertexts, one per packed coefficient, using `g` rounds of
/// automorphism + key switching.
///
/// `v` is both input and output: round `r` reads ciphertexts `0..2^r` and
/// writes `0..2^(r+1)`. `v_w_left` / `v_w_right` hold the per-round key
/// switching matrices used for even- and odd-indexed outputs respectively,
/// and `v_neg1` holds per-round NTT'd monomials used to shift coefficients
/// into place (NOTE(review): exact monomial — confirm against the client's
/// `get_v_neg1`). When `stopround > 0`, odd-indexed ("right") outputs stop
/// being generated after round `stopround`; in round `stopround` itself only
/// the first `max_bits_to_gen_right` of them are produced.
pub fn coefficient_expansion(
    v: &mut Vec<PolyMatrixNTT>,
    g: usize,
    stopround: usize,
    params: &Params,
    v_w_left: &Vec<PolyMatrixNTT>,
    v_w_right: &Vec<PolyMatrixNTT>,
    v_neg1: &Vec<PolyMatrixNTT>,
    max_bits_to_gen_right: usize,
) {
    let poly_len = params.poly_len;

    // Scratch buffers, allocated once and reused across all rounds.
    let mut ct = PolyMatrixRaw::zero(params, 2, 1);
    let mut ct_auto = PolyMatrixRaw::zero(params, 2, 1);
    let mut ct_auto_1 = PolyMatrixRaw::zero(params, 1, 1);
    let mut ct_auto_1_ntt = PolyMatrixNTT::zero(params, 1, 1);
    let mut ginv_ct_left = PolyMatrixRaw::zero(params, params.t_exp_left, 1);
    let mut ginv_ct_left_ntt = PolyMatrixNTT::zero(params, params.t_exp_left, 1);
    let mut ginv_ct_right = PolyMatrixRaw::zero(params, params.t_exp_right, 1);
    let mut ginv_ct_right_ntt = PolyMatrixNTT::zero(params, params.t_exp_right, 1);
    let mut w_times_ginv_ct = PolyMatrixNTT::zero(params, 2, 1);

    for r in 0..g {
        let num_in = 1 << r;
        let num_out = 2 * num_in;

        // Automorphism power x -> x^t for this round.
        let t = (poly_len / (1 << r)) + 1;

        let neg1 = &v_neg1[r];

        for i in 0..num_out {
            // Skip "right" outputs that are not needed.
            // NOTE(review): by `&&`/`||` precedence this parses as
            //   (stopround > 0 && i % 2 == 1 && r > stopround)
            //     || (r == stopround && i / 2 >= max_bits_to_gen_right)
            // — the second clause is not guarded by `stopround > 0` or
            // `i % 2 == 1`; confirm that is intended when stopround == 0.
            if stopround > 0 && i % 2 == 1 && r > stopround
                || (r == stopround && i / 2 >= max_bits_to_gen_right)
            {
                continue;
            }

            // Even i uses the left expansion key/buffers; odd i the right.
            let (w, _gadget_dim, gi_ct, gi_ct_ntt) = match i % 2 {
                0 => (
                    &v_w_left[r],
                    params.t_exp_left,
                    &mut ginv_ct_left,
                    &mut ginv_ct_left_ntt,
                ),
                1 | _ => (
                    &v_w_right[r],
                    params.t_exp_right,
                    &mut ginv_ct_right,
                    &mut ginv_ct_right_ntt,
                ),
            };

            // Derive the second-half ciphertext i + num_in from ciphertext i
            // by multiplying with this round's monomial. split_at_mut lets us
            // borrow source (index i) and destination (index num_in + i)
            // simultaneously.
            if i < num_in {
                let (src, dest) = v.split_at_mut(num_in);
                scalar_multiply(&mut dest[i], neg1, &src[i]);
            }

            // Apply the automorphism in the coefficient domain.
            from_ntt(&mut ct, &v[i]);
            automorph(&mut ct_auto, &ct, t);

            // Gadget-decompose row 0 of the automorphed ciphertext; row 1 is
            // carried through unchanged (converted to NTT form below).
            gadget_invert_rdim(gi_ct, &ct_auto, 1);
            to_ntt_no_reduce(gi_ct_ntt, &gi_ct);
            ct_auto_1
                .data
                .as_mut_slice()
                .copy_from_slice(ct_auto.get_poly(1, 0));
            to_ntt(&mut ct_auto_1_ntt, &ct_auto_1);

            // Key-switch: w * G^{-1}(row 0 of ct_auto).
            multiply(&mut w_times_ginv_ct, w, &gi_ct_ntt);

            // Accumulate in place: v[i] += w * G^{-1}(...), and add the
            // carried row 1 into the second ciphertext row only (j == 1,
            // via the `j * ...` factor).
            let mut idx = 0;
            for j in 0..2 {
                for n in 0..params.crt_count {
                    for z in 0..poly_len {
                        let sum = v[i].data[idx]
                            + w_times_ginv_ct.data[idx]
                            + j * ct_auto_1_ntt.data[n * poly_len + z];
                        v[i].data[idx] = barrett_coeff_u64(params, sum, n);
                        idx += 1;
                    }
                }
            }
        }
    }
}
  84. pub fn regev_to_gsw<'a>(
  85. v_gsw: &mut Vec<PolyMatrixNTT<'a>>,
  86. v_inp: &Vec<PolyMatrixNTT<'a>>,
  87. v: &PolyMatrixNTT<'a>,
  88. params: &'a Params,
  89. idx_factor: usize,
  90. idx_offset: usize,
  91. ) {
  92. assert!(v.rows == 2);
  93. assert!(v.cols == 2 * params.t_conv);
  94. let mut ginv_c_inp = PolyMatrixRaw::zero(params, 2 * params.t_conv, 1);
  95. let mut ginv_c_inp_ntt = PolyMatrixNTT::zero(params, 2 * params.t_conv, 1);
  96. let mut tmp_ct_raw = PolyMatrixRaw::zero(params, 2, 1);
  97. let mut tmp_ct = PolyMatrixNTT::zero(params, 2, 1);
  98. for i in 0..params.db_dim_2 {
  99. let ct = &mut v_gsw[i];
  100. for j in 0..params.t_gsw {
  101. let idx_ct = i * params.t_gsw + j;
  102. let idx_inp = idx_factor * (idx_ct) + idx_offset;
  103. ct.copy_into(&v_inp[idx_inp], 0, 2 * j + 1);
  104. from_ntt(&mut tmp_ct_raw, &v_inp[idx_inp]);
  105. gadget_invert(&mut ginv_c_inp, &tmp_ct_raw);
  106. to_ntt(&mut ginv_c_inp_ntt, &ginv_c_inp);
  107. multiply(&mut tmp_ct, v, &ginv_c_inp_ntt);
  108. ct.copy_into(&tmp_ct, 0, 2 * j);
  109. }
  110. }
  111. }
// Maximum number of packed 64-bit terms accumulated in AVX2 registers before
// a Barrett reduction is required (see `multiply_reg_by_database`, which
// reduces every `MAX_SUMMED` terms "otherwise we will overflow").
pub const MAX_SUMMED: usize = 1 << 6;
// Bit offset at which the second packed value is stored in each u64 database
// word: low 32 bits hold one CRT limb, high 32 bits the other (see the
// packing in `generate_random_db_and_get_item`).
pub const PACKED_OFFSET_2: i32 = 32;
/// Computes the first-dimension product of the reoriented query ciphertexts
/// (`v_firstdim`) with the packed database (`db`) using AVX2 intrinsics,
/// writing `num_per` 2x1 NTT ciphertexts into `out`.
///
/// Both inputs pack two 32-bit CRT limbs into each u64 (low/high halves, see
/// `PACKED_OFFSET_2`); `_mm256_mul_epu32` multiplies the low halves, and a
/// 32-bit right shift exposes the high halves for the second limb. Partial
/// sums are Barrett-reduced every `MAX_SUMMED` terms to prevent 64-bit
/// overflow.
#[cfg(target_feature = "avx2")]
pub fn multiply_reg_by_database(
    out: &mut Vec<PolyMatrixNTT>,
    db: &[u64],
    v_firstdim: &[u64],
    params: &Params,
    dim0: usize,
    num_per: usize,
) {
    let ct_rows = 2;
    let ct_cols = 1;
    let pt_rows = 1;
    let pt_cols = 1;

    // Guarantees outer_limit >= 1 below.
    assert!(dim0 * ct_rows >= MAX_SUMMED);

    // 32-byte-aligned spill buffers for moving AVX2 accumulators to scalars.
    let mut sums_out_n0_u64 = AlignedMemory64::new(4);
    let mut sums_out_n2_u64 = AlignedMemory64::new(4);

    for z in 0..params.poly_len {
        let idx_a_base = z * (ct_cols * dim0 * ct_rows);
        // `idx_b_base` advances monotonically through db as (i, c) iterate.
        let mut idx_b_base = z * (num_per * pt_cols * dim0 * pt_rows);

        for i in 0..num_per {
            for c in 0..pt_cols {
                // Accumulate in chunks of MAX_SUMMED terms, reducing between
                // chunks.
                let inner_limit = MAX_SUMMED;
                let outer_limit = dim0 * ct_rows / inner_limit;

                let mut sums_out_n0_u64_acc = [0u64, 0, 0, 0];
                let mut sums_out_n2_u64_acc = [0u64, 0, 0, 0];

                for o_jm in 0..outer_limit {
                    unsafe {
                        // SAFETY / NOTE(review): the unchecked loads assume
                        // `db` and `v_firstdim` are sized consistently with
                        // `params`, `dim0`, and `num_per`, and that
                        // `v_firstdim` is 32-byte aligned for
                        // `_mm256_load_si256`; neither is verified here.
                        let mut sums_out_n0 = _mm256_setzero_si256();
                        let mut sums_out_n2 = _mm256_setzero_si256();

                        // Process 4 query words (2 db words) per iteration.
                        for i_jm in 0..inner_limit / 4 {
                            let jm = o_jm * inner_limit + (4 * i_jm);

                            let b_inp_1 = *db.get_unchecked(idx_b_base) as i64;
                            idx_b_base += 1;
                            let b_inp_2 = *db.get_unchecked(idx_b_base) as i64;
                            idx_b_base += 1;
                            // Each db word is paired with two query words.
                            let b = _mm256_set_epi64x(b_inp_2, b_inp_2, b_inp_1, b_inp_1);

                            let v_a = v_firstdim.get_unchecked(idx_a_base + jm) as *const u64;
                            let a = _mm256_load_si256(v_a as *const __m256i);

                            // Low 32 bits -> limb 0; shifted high 32 bits -> limb 1.
                            let a_lo = a;
                            let a_hi_hi = _mm256_srli_epi64(a, PACKED_OFFSET_2);
                            let b_lo = b;
                            let b_hi_hi = _mm256_srli_epi64(b, PACKED_OFFSET_2);

                            // 32x32 -> 64-bit multiply-accumulate per limb.
                            sums_out_n0 =
                                _mm256_add_epi64(sums_out_n0, _mm256_mul_epu32(a_lo, b_lo));
                            sums_out_n2 =
                                _mm256_add_epi64(sums_out_n2, _mm256_mul_epu32(a_hi_hi, b_hi_hi));
                        }

                        // reduce here, otherwise we will overflow
                        _mm256_store_si256(
                            sums_out_n0_u64.as_mut_ptr() as *mut __m256i,
                            sums_out_n0,
                        );
                        _mm256_store_si256(
                            sums_out_n2_u64.as_mut_ptr() as *mut __m256i,
                            sums_out_n2,
                        );

                        for idx in 0..4 {
                            let val = sums_out_n0_u64[idx];
                            sums_out_n0_u64_acc[idx] = barrett_coeff_u64(params, val + sums_out_n0_u64_acc[idx], 0);
                        }
                        for idx in 0..4 {
                            let val = sums_out_n2_u64[idx];
                            sums_out_n2_u64_acc[idx] = barrett_coeff_u64(params, val + sums_out_n2_u64_acc[idx], 1);
                        }
                    }
                }

                // Final reduction of every lane before combining.
                for idx in 0..4 {
                    sums_out_n0_u64_acc[idx] = barrett_coeff_u64(params, sums_out_n0_u64_acc[idx], 0);
                    sums_out_n2_u64_acc[idx] = barrett_coeff_u64(params, sums_out_n2_u64_acc[idx], 1);
                }

                // output n0
                // Lanes {0,2} and {1,3} belong to the two ciphertext rows;
                // fold pairs and write both rows of limb 0 at coefficient z.
                let (crt_count, poly_len) = (params.crt_count, params.poly_len);
                let mut n = 0;
                let mut idx_c = c * (crt_count * poly_len) + n * (poly_len) + z;
                out[i].data[idx_c] =
                    barrett_coeff_u64(params, sums_out_n0_u64_acc[0] + sums_out_n0_u64_acc[2], 0);
                idx_c += pt_cols * crt_count * poly_len;
                out[i].data[idx_c] =
                    barrett_coeff_u64(params, sums_out_n0_u64_acc[1] + sums_out_n0_u64_acc[3], 0);

                // output n1
                n = 1;
                idx_c = c * (crt_count * poly_len) + n * (poly_len) + z;
                out[i].data[idx_c] =
                    barrett_coeff_u64(params, sums_out_n2_u64_acc[0] + sums_out_n2_u64_acc[2], 1);
                idx_c += pt_cols * crt_count * poly_len;
                out[i].data[idx_c] =
                    barrett_coeff_u64(params, sums_out_n2_u64_acc[1] + sums_out_n2_u64_acc[3], 1);
            }
        }
    }
}
  205. pub fn generate_random_db_and_get_item<'a>(
  206. params: &'a Params,
  207. item_idx: usize,
  208. ) -> (PolyMatrixRaw<'a>, Vec<u64>) {
  209. let mut rng = get_seeded_rng();
  210. let trials = params.n * params.n;
  211. let dim0 = 1 << params.db_dim_1;
  212. let num_per = 1 << params.db_dim_2;
  213. let num_items = dim0 * num_per;
  214. let db_size_words = trials * num_items * params.poly_len;
  215. let mut v = vec![0u64; db_size_words];
  216. let mut item = PolyMatrixRaw::zero(params, params.n, params.n);
  217. for trial in 0..trials {
  218. for i in 0..num_items {
  219. let ii = i % num_per;
  220. let j = i / num_per;
  221. let mut db_item = PolyMatrixRaw::random_rng(params, 1, 1, &mut rng);
  222. db_item.reduce_mod(params.pt_modulus);
  223. if i == item_idx {
  224. item.copy_into(&db_item, trial / params.n, trial % params.n);
  225. }
  226. for z in 0..params.poly_len {
  227. db_item.data[z] = recenter_mod(db_item.data[z], params.pt_modulus, params.modulus);
  228. }
  229. let db_item_ntt = db_item.ntt();
  230. for z in 0..params.poly_len {
  231. let idx_dst = calc_index(
  232. &[trial, z, ii, j],
  233. &[trials, params.poly_len, num_per, dim0],
  234. );
  235. v[idx_dst] = db_item_ntt.data[z]
  236. | (db_item_ntt.data[params.poly_len + z] << PACKED_OFFSET_2);
  237. }
  238. }
  239. }
  240. (item, v)
  241. }
  242. pub fn fold_ciphertexts(
  243. params: &Params,
  244. v_cts: &mut Vec<PolyMatrixRaw>,
  245. v_folding: &Vec<PolyMatrixNTT>,
  246. v_folding_neg: &Vec<PolyMatrixNTT>
  247. ) {
  248. let further_dims = log2(v_cts.len() as u64) as usize;
  249. let ell = v_folding[0].cols / 2;
  250. let mut ginv_c = PolyMatrixRaw::zero(&params, 2 * ell, 1);
  251. let mut ginv_c_ntt = PolyMatrixNTT::zero(&params, 2 * ell, 1);
  252. let mut prod = PolyMatrixNTT::zero(&params, 2, 1);
  253. let mut sum = PolyMatrixNTT::zero(&params, 2, 1);
  254. let mut num_per = v_cts.len();
  255. for cur_dim in 0..further_dims {
  256. num_per = num_per / 2;
  257. for i in 0..num_per {
  258. gadget_invert(&mut ginv_c, &v_cts[i]);
  259. to_ntt(&mut ginv_c_ntt, &ginv_c);
  260. multiply(&mut prod, &v_folding_neg[further_dims - 1 - cur_dim], &ginv_c_ntt);
  261. gadget_invert(&mut ginv_c, &v_cts[num_per + i]);
  262. to_ntt(&mut ginv_c_ntt, &ginv_c);
  263. multiply(&mut sum, &v_folding[further_dims - 1 - cur_dim], &ginv_c_ntt);
  264. add_into(&mut sum, &prod);
  265. from_ntt(&mut v_cts[i], &sum);
  266. }
  267. }
  268. }
#[cfg(test)]
mod test {
    use super::*;
    use crate::{client::*};
    use rand::{prelude::StdRng, Rng};

    /// Shared test parameters: 2^6 x 2^2 database, wider right-expansion
    /// gadget.
    fn get_params() -> Params {
        let mut params = get_expansion_testing_params();
        params.db_dim_1 = 6;
        params.db_dim_2 = 2;
        params.t_exp_right = 8;
        params
    }

    /// Decrypts a Regev ciphertext and reduces its constant coefficient to a
    /// bit: 0 if it rounds to zero under `scale_k`, 1 otherwise.
    fn dec_reg<'a>(
        params: &'a Params,
        ct: &PolyMatrixNTT<'a>,
        client: &mut Client<'a, StdRng>,
        scale_k: u64,
    ) -> u64 {
        let dec = client.decrypt_matrix_reg(ct).raw();
        let mut val = dec.data[0] as i64;
        // Recenter from [0, q) into a signed range around zero.
        if val >= (params.modulus / 2) as i64 {
            val -= params.modulus as i64;
        }
        let val_rounded = f64::round(val as f64 / scale_k as f64) as i64;
        if val_rounded == 0 {
            0
        } else {
            1
        }
    }

    /// Decrypts a GSW ciphertext and classifies the coefficient at the
    /// highest gadget digit as a 0 or 1 bit.
    fn dec_gsw<'a>(
        params: &'a Params,
        ct: &PolyMatrixNTT<'a>,
        client: &mut Client<'a, StdRng>,
    ) -> u64 {
        let dec = client.decrypt_matrix_reg(ct).raw();
        let idx = (params.t_gsw - 1) * params.poly_len + params.poly_len; // this offset should encode a large value
        let mut val = dec.data[idx] as i64;
        if val >= (params.modulus / 2) as i64 {
            val -= params.modulus as i64;
        }
        // Loose threshold: a "1" digit decodes to a large magnitude.
        if val < 100 {
            0
        } else {
            1
        }
    }

    #[test]
    fn coefficient_expansion_is_correct() {
        let params = get_params();
        let v_neg1 = params.get_v_neg1();
        let mut seeded_rng = get_seeded_rng();
        let mut client = Client::init(&params, &mut seeded_rng);
        let public_params = client.generate_keys();

        // All-zero ciphertexts; only v[0] carries the packed query.
        let mut v = Vec::new();
        for _ in 0..(1 << (params.db_dim_1 + 1)) {
            v.push(PolyMatrixNTT::zero(&params, 2, 1));
        }

        // Pack a scaled 1 at coefficient `target`.
        let target = 7;
        let scale_k = params.modulus / params.pt_modulus;
        let mut sigma = PolyMatrixRaw::zero(&params, 1, 1);
        sigma.data[target] = scale_k;
        v[0] = client.encrypt_matrix_reg(&sigma.ntt());
        let test_ct = client.encrypt_matrix_reg(&sigma.ntt());

        let v_w_left = public_params.v_expansion_left.unwrap();
        let v_w_right = public_params.v_expansion_right.unwrap();
        coefficient_expansion(
            &mut v,
            client.g,
            client.stop_round,
            &params,
            &v_w_left,
            &v_w_right,
            &v_neg1,
            params.t_gsw * params.db_dim_2,
        );

        // Sanity check: sigma's constant coefficient is 0 (the 1 lives at
        // index `target`), so an untouched encryption decrypts to bit 0.
        assert_eq!(dec_reg(&params, &test_ct, &mut client, scale_k), 0);

        // After expansion, exactly slot `target` should encrypt 1.
        for i in 0..v.len() {
            if i == target {
                assert_eq!(dec_reg(&params, &v[i], &mut client, scale_k), 1);
            } else {
                assert_eq!(dec_reg(&params, &v[i], &mut client, scale_k), 0);
            }
        }
    }

    #[test]
    fn regev_to_gsw_is_correct() {
        let mut params = get_params();
        params.db_dim_2 = 1;
        let mut seeded_rng = get_seeded_rng();
        let mut client = Client::init(&params, &mut seeded_rng);
        let public_params = client.generate_keys();

        // Helper: encrypt a constant polynomial.
        let mut enc_constant = |val| {
            let mut sigma = PolyMatrixRaw::zero(&params, 1, 1);
            sigma.data[0] = val;
            client.encrypt_matrix_reg(&sigma.ntt())
        };

        let v = &public_params.v_conversion.unwrap()[0];

        // Build the t_gsw gadget-scaled encryptions of 1 (and of 0).
        let bits_per = get_bits_per(&params, params.t_gsw);
        let mut v_inp_1 = Vec::new();
        let mut v_inp_0 = Vec::new();
        for i in 0..params.t_gsw {
            let val = 1u64 << (bits_per * i);
            v_inp_1.push(enc_constant(val));
            v_inp_0.push(enc_constant(0));
        }

        let mut v_gsw = Vec::new();
        v_gsw.push(PolyMatrixNTT::zero(&params, 2, 2 * params.t_gsw));

        // Converting encryptions of 1 must yield a GSW encryption of 1 ...
        regev_to_gsw(&mut v_gsw, &v_inp_1, v, &params, 1, 0);
        assert_eq!(dec_gsw(&params, &v_gsw[0], &mut client), 1);

        // ... and encryptions of 0 a GSW encryption of 0.
        regev_to_gsw(&mut v_gsw, &v_inp_0, v, &params, 1, 0);
        assert_eq!(dec_gsw(&params, &v_gsw[0], &mut client), 0);
    }

    #[test]
    fn multiply_reg_by_database_is_correct() {
        let params = get_params();
        let mut seeded_rng = get_seeded_rng();

        let dim0 = 1 << params.db_dim_1;
        let num_per = 1 << params.db_dim_2;
        let scale_k = params.modulus / params.pt_modulus;

        // Pick a random database item to retrieve.
        let target_idx = seeded_rng.gen::<usize>() % (dim0 * num_per);
        let target_idx_dim0 = target_idx / num_per;
        let target_idx_num_per = target_idx % num_per;

        let mut client = Client::init(&params, &mut seeded_rng);
        _ = client.generate_keys();

        let (corr_item, db) = generate_random_db_and_get_item(&params, target_idx);

        // One Regev encryption per first-dimension index: scale_k at the
        // target, 0 elsewhere.
        let mut v_reg = Vec::new();
        for i in 0..dim0 {
            let val = if i == target_idx_dim0 { scale_k } else { 0 };
            let sigma = PolyMatrixRaw::single_value(&params, val).ntt();
            v_reg.push(client.encrypt_matrix_reg(&sigma));
        }

        let v_reg_sz = dim0 * 2 * params.poly_len;
        let mut v_reg_reoriented = AlignedMemory64::new(v_reg_sz);
        reorient_reg_ciphertexts(&params, v_reg_reoriented.as_mut_slice(), &v_reg);

        // NOTE(review): capacity is `num_per` but `dim0` entries are pushed
        // (dim0 >= num_per here); only the first `num_per` outputs are used
        // by multiply_reg_by_database — confirm the extra entries are
        // intentional slack.
        let mut out = Vec::with_capacity(num_per);
        for _ in 0..dim0 {
            out.push(PolyMatrixNTT::zero(&params, 2, 1));
        }
        multiply_reg_by_database(&mut out, db.as_slice(), v_reg_reoriented.as_slice(), &params, dim0, num_per);

        // decrypt
        let dec = client.decrypt_matrix_reg(&out[target_idx_num_per]).raw();
        let mut dec_rescaled = PolyMatrixRaw::zero(&params, 1, 1);
        for z in 0..params.poly_len {
            dec_rescaled.data[z] = rescale(dec.data[z], params.modulus, params.pt_modulus);
        }
        // The selected row must decrypt to the stored item exactly.
        for z in 0..params.poly_len {
            // println!("{:?} {:?}", dec_rescaled.data[z], corr_item.data[z]);
            assert_eq!(dec_rescaled.data[z], corr_item.data[z]);
        }
    }

    #[test]
    fn fold_ciphertexts_is_correct() {
        let params = get_params();
        let mut seeded_rng = get_seeded_rng();

        let dim0 = 1 << params.db_dim_1;
        let num_per = 1 << params.db_dim_2;
        let scale_k = params.modulus / params.pt_modulus;

        let target_idx = seeded_rng.gen::<usize>() % (dim0 * num_per);
        let target_idx_num_per = target_idx % num_per;

        let mut client = Client::init(&params, &mut seeded_rng);
        _ = client.generate_keys();

        // Regev encryptions selecting `target_idx_num_per` within the second
        // dimension.
        let mut v_reg = Vec::new();
        for i in 0..num_per {
            let val = if i == target_idx_num_per { scale_k } else { 0 };
            let sigma = PolyMatrixRaw::single_value(&params, val).ntt();
            v_reg.push(client.encrypt_matrix_reg(&sigma));
        }

        let mut v_reg_raw = Vec::new();
        for i in 0..num_per {
            v_reg_raw.push(v_reg[i].raw());
        }

        // Build one GSW encryption per second-dimension bit of the target.
        let bits_per = get_bits_per(&params, params.t_gsw);
        let mut v_folding = Vec::new();
        for i in 0..params.db_dim_2 {
            let bit = ((target_idx_num_per as u64) & (1 << (i as u64))) >> (i as u64);

            let mut ct_gsw = PolyMatrixNTT::zero(&params, 2, 2 * params.t_gsw);
            for j in 0..params.t_gsw {
                let value = (1u64 << (bits_per * j)) * bit;
                let sigma = PolyMatrixRaw::single_value(&params, value);
                let sigma_ntt = to_ntt_alloc(&sigma);
                // Odd columns: encryption of the gadget-scaled bit.
                let ct = client.encrypt_matrix_reg(&sigma_ntt);
                ct_gsw.copy_into(&ct, 0, 2 * j + 1);
                // Even columns: encryption of sk * sigma (secret-key part of
                // the GSW gadget).
                let prod = &to_ntt_alloc(&client.sk_reg) * &sigma_ntt;
                let ct = &client.encrypt_matrix_reg(&prod);
                ct_gsw.copy_into(&ct, 0, 2 * j);
            }

            v_folding.push(ct_gsw);
        }

        // v_folding_neg[i] = gadget + (-v_folding[i]): an encryption of the
        // complementary bit.
        let gadget_ntt = build_gadget(&params, 2, 2 * params.t_gsw).ntt();
        let mut v_folding_neg = Vec::new();
        let mut ct_gsw_inv = PolyMatrixRaw::zero(&params, 2, 2 * params.t_gsw);
        for i in 0..params.db_dim_2 {
            invert(&mut ct_gsw_inv, &v_folding[i].raw());
            let mut ct_gsw_neg = PolyMatrixNTT::zero(&params, 2, 2 * params.t_gsw);
            add(&mut ct_gsw_neg, &gadget_ntt, &ct_gsw_inv.ntt());
            v_folding_neg.push(ct_gsw_neg);
        }

        fold_ciphertexts(
            &params,
            &mut v_reg_raw,
            &v_folding,
            &v_folding_neg
        );

        // decrypt
        // After folding, slot 0 should encrypt 1.
        assert_eq!(dec_reg(&params, &v_reg_raw[0].ntt(), &mut client, scale_k), 1);
    }
}
  476. }