// server.rs

#[cfg(target_feature = "avx2")]
use std::arch::x86_64::*;

use crate::aligned_memory::*;
use crate::arith::*;
use crate::client::PublicParameters;
use crate::client::Query;
use crate::gadget::*;
use crate::params::*;
use crate::poly::*;
use crate::util::*;
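
/// Homomorphic query expansion: starting from the single packed ciphertext in
/// `v[0]`, each round `r` doubles the number of ciphertexts by applying the
/// automorphism x -> x^(poly_len / 2^r + 1), gadget-decomposing the result, and
/// key-switching with `v_w_left` (even outputs) or `v_w_right` (odd outputs).
/// After `g` rounds, even-indexed entries hold the first-dimension (Regev)
/// query and odd-indexed entries hold the inputs for GSW conversion;
/// `stop_round` and `max_bits_to_gen_right` bound how many odd outputs are produced.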
pub fn coefficient_expansion(
    v: &mut Vec<PolyMatrixNTT>,
    g: usize,
    stop_round: usize,
    params: &Params,
    v_w_left: &Vec<PolyMatrixNTT>,
    v_w_right: &Vec<PolyMatrixNTT>,
    v_neg1: &Vec<PolyMatrixNTT>,
    max_bits_to_gen_right: usize,
) {
    let poly_len = params.poly_len;

    let mut ct = PolyMatrixRaw::zero(params, 2, 1);
    let mut ct_auto = PolyMatrixRaw::zero(params, 2, 1);
    let mut ct_auto_1 = PolyMatrixRaw::zero(params, 1, 1);
    let mut ct_auto_1_ntt = PolyMatrixNTT::zero(params, 1, 1);
    let mut ginv_ct_left = PolyMatrixRaw::zero(params, params.t_exp_left, 1);
    let mut ginv_ct_left_ntt = PolyMatrixNTT::zero(params, params.t_exp_left, 1);
    let mut ginv_ct_right = PolyMatrixRaw::zero(params, params.t_exp_right, 1);
    let mut ginv_ct_right_ntt = PolyMatrixNTT::zero(params, params.t_exp_right, 1);
    let mut w_times_ginv_ct = PolyMatrixNTT::zero(params, 2, 1);

    for r in 0..g {
        let num_in = 1 << r;
        let num_out = 2 * num_in;

        let t = (poly_len / (1 << r)) + 1;

        let neg1 = &v_neg1[r];

        for i in 0..num_out {
            if (stop_round > 0 && r > stop_round && (i % 2) == 1)
                || (stop_round > 0
                    && r == stop_round
                    && (i % 2) == 1
                    && (i / 2) >= max_bits_to_gen_right)
            {
                continue;
            }
            let (w, _gadget_dim, gi_ct, gi_ct_ntt) = match i % 2 {
                0 => (
                    &v_w_left[r],
                    params.t_exp_left,
                    &mut ginv_ct_left,
                    &mut ginv_ct_left_ntt,
                ),
                1 | _ => (
                    &v_w_right[r],
                    params.t_exp_right,
                    &mut ginv_ct_right,
                    &mut ginv_ct_right_ntt,
                ),
            };

            if i < num_in {
                let (src, dest) = v.split_at_mut(num_in);
                scalar_multiply(&mut dest[i], neg1, &src[i]);
            }

            from_ntt(&mut ct, &v[i]);
            automorph(&mut ct_auto, &ct, t);
            gadget_invert_rdim(gi_ct, &ct_auto, 1);
            to_ntt_no_reduce(gi_ct_ntt, &gi_ct);
            ct_auto_1
                .data
                .as_mut_slice()
                .copy_from_slice(ct_auto.get_poly(1, 0));
            to_ntt(&mut ct_auto_1_ntt, &ct_auto_1);
            multiply(&mut w_times_ginv_ct, w, &gi_ct_ntt);

            let mut idx = 0;
            for j in 0..2 {
                for n in 0..params.crt_count {
                    for z in 0..poly_len {
                        let sum = v[i].data[idx]
                            + w_times_ginv_ct.data[idx]
                            + j * ct_auto_1_ntt.data[n * poly_len + z];
                        v[i].data[idx] = barrett_coeff_u64(params, sum, n);
                        idx += 1;
                    }
                }
            }
        }
    }
}
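
/// Converts groups of `t_gsw` Regev ciphertexts from `v_inp` into one GSW
/// ciphertext per folding dimension: odd columns of each output take the input
/// ciphertexts directly, and even columns take `v * G^{-1}(input)`, where `v`
/// is the client's 2 x (2 * t_conv) conversion key.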
pub fn regev_to_gsw<'a>(
    v_gsw: &mut Vec<PolyMatrixNTT<'a>>,
    v_inp: &Vec<PolyMatrixNTT<'a>>,
    v: &PolyMatrixNTT<'a>,
    params: &'a Params,
    idx_factor: usize,
    idx_offset: usize,
) {
    assert!(v.rows == 2);
    assert!(v.cols == 2 * params.t_conv);

    let mut ginv_c_inp = PolyMatrixRaw::zero(params, 2 * params.t_conv, 1);
    let mut ginv_c_inp_ntt = PolyMatrixNTT::zero(params, 2 * params.t_conv, 1);
    let mut tmp_ct_raw = PolyMatrixRaw::zero(params, 2, 1);
    let mut tmp_ct = PolyMatrixNTT::zero(params, 2, 1);

    for i in 0..params.db_dim_2 {
        let ct = &mut v_gsw[i];
        for j in 0..params.t_gsw {
            let idx_ct = i * params.t_gsw + j;
            let idx_inp = idx_factor * idx_ct + idx_offset;
            ct.copy_into(&v_inp[idx_inp], 0, 2 * j + 1);
            from_ntt(&mut tmp_ct_raw, &v_inp[idx_inp]);
            gadget_invert(&mut ginv_c_inp, &tmp_ct_raw);
            to_ntt(&mut ginv_c_inp_ntt, &ginv_c_inp);
            multiply(&mut tmp_ct, v, &ginv_c_inp_ntt);
            ct.copy_into(&tmp_ct, 0, 2 * j);
        }
    }
}
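
// MAX_SUMMED caps how many partial products the AVX2 kernel accumulates before
// applying a Barrett reduction (see the "reduce here" comment below);
// PACKED_OFFSET_2 is the bit position at which the second CRT residue is packed
// into each database word (see generate_random_db_and_get_item).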
pub const MAX_SUMMED: usize = 1 << 6;
pub const PACKED_OFFSET_2: i32 = 32;
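
/// First-dimension processing (AVX2 path): for every NTT coefficient `z` and
/// every second-dimension bucket, accumulates the inner product of the
/// reoriented query ciphertexts in `v_firstdim` with the database column in
/// `db`. The two CRT residues packed into each u64 are handled separately:
/// low 32 bits against modulus 0, high 32 bits against modulus 1.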
#[cfg(target_feature = "avx2")]
pub fn multiply_reg_by_database(
    out: &mut Vec<PolyMatrixNTT>,
    db: &[u64],
    v_firstdim: &[u64],
    params: &Params,
    dim0: usize,
    num_per: usize,
) {
    let ct_rows = 2;
    let ct_cols = 1;
    let pt_rows = 1;
    let pt_cols = 1;

    assert!(dim0 * ct_rows >= MAX_SUMMED);

    let mut sums_out_n0_u64 = AlignedMemory64::new(4);
    let mut sums_out_n2_u64 = AlignedMemory64::new(4);

    for z in 0..params.poly_len {
        let idx_a_base = z * (ct_cols * dim0 * ct_rows);
        let mut idx_b_base = z * (num_per * pt_cols * dim0 * pt_rows);

        for i in 0..num_per {
            for c in 0..pt_cols {
                let inner_limit = MAX_SUMMED;
                let outer_limit = dim0 * ct_rows / inner_limit;

                let mut sums_out_n0_u64_acc = [0u64, 0, 0, 0];
                let mut sums_out_n2_u64_acc = [0u64, 0, 0, 0];

                for o_jm in 0..outer_limit {
                    unsafe {
                        let mut sums_out_n0 = _mm256_setzero_si256();
                        let mut sums_out_n2 = _mm256_setzero_si256();

                        for i_jm in 0..inner_limit / 4 {
                            let jm = o_jm * inner_limit + (4 * i_jm);

                            let b_inp_1 = *db.get_unchecked(idx_b_base) as i64;
                            idx_b_base += 1;
                            let b_inp_2 = *db.get_unchecked(idx_b_base) as i64;
                            idx_b_base += 1;
                            let b = _mm256_set_epi64x(b_inp_2, b_inp_2, b_inp_1, b_inp_1);

                            let v_a = v_firstdim.get_unchecked(idx_a_base + jm) as *const u64;

                            let a = _mm256_load_si256(v_a as *const __m256i);
                            let a_lo = a;
                            let a_hi_hi = _mm256_srli_epi64(a, PACKED_OFFSET_2);
                            let b_lo = b;
                            let b_hi_hi = _mm256_srli_epi64(b, PACKED_OFFSET_2);

                            sums_out_n0 =
                                _mm256_add_epi64(sums_out_n0, _mm256_mul_epu32(a_lo, b_lo));
                            sums_out_n2 =
                                _mm256_add_epi64(sums_out_n2, _mm256_mul_epu32(a_hi_hi, b_hi_hi));
                        }

                        // reduce here, otherwise we will overflow
                        _mm256_store_si256(
                            sums_out_n0_u64.as_mut_ptr() as *mut __m256i,
                            sums_out_n0,
                        );
                        _mm256_store_si256(
                            sums_out_n2_u64.as_mut_ptr() as *mut __m256i,
                            sums_out_n2,
                        );

                        for idx in 0..4 {
                            let val = sums_out_n0_u64[idx];
                            sums_out_n0_u64_acc[idx] =
                                barrett_coeff_u64(params, val + sums_out_n0_u64_acc[idx], 0);
                        }
                        for idx in 0..4 {
                            let val = sums_out_n2_u64[idx];
                            sums_out_n2_u64_acc[idx] =
                                barrett_coeff_u64(params, val + sums_out_n2_u64_acc[idx], 1);
                        }
                    }
                }

                for idx in 0..4 {
                    sums_out_n0_u64_acc[idx] = barrett_coeff_u64(params, sums_out_n0_u64_acc[idx], 0);
                    sums_out_n2_u64_acc[idx] = barrett_coeff_u64(params, sums_out_n2_u64_acc[idx], 1);
                }

                // output n0
                let (crt_count, poly_len) = (params.crt_count, params.poly_len);
                let mut n = 0;
                let mut idx_c = c * (crt_count * poly_len) + n * (poly_len) + z;
                out[i].data[idx_c] =
                    barrett_coeff_u64(params, sums_out_n0_u64_acc[0] + sums_out_n0_u64_acc[2], 0);
                idx_c += pt_cols * crt_count * poly_len;
                out[i].data[idx_c] =
                    barrett_coeff_u64(params, sums_out_n0_u64_acc[1] + sums_out_n0_u64_acc[3], 0);

                // output n1
                n = 1;
                idx_c = c * (crt_count * poly_len) + n * (poly_len) + z;
                out[i].data[idx_c] =
                    barrett_coeff_u64(params, sums_out_n2_u64_acc[0] + sums_out_n2_u64_acc[2], 1);
                idx_c += pt_cols * crt_count * poly_len;
                out[i].data[idx_c] =
                    barrett_coeff_u64(params, sums_out_n2_u64_acc[1] + sums_out_n2_u64_acc[3], 1);
            }
        }
    }
}
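
/// Portable fallback for the first-dimension multiplication: the same
/// computation as the AVX2 path, but using u128 accumulators and a single
/// modular reduction per output instead of periodic Barrett reductions.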
#[cfg(not(target_feature = "avx2"))]
pub fn multiply_reg_by_database(
    out: &mut Vec<PolyMatrixNTT>,
    db: &[u64],
    v_firstdim: &[u64],
    params: &Params,
    dim0: usize,
    num_per: usize,
) {
    let ct_rows = 2;
    let ct_cols = 1;
    let pt_rows = 1;
    let pt_cols = 1;

    for z in 0..params.poly_len {
        let idx_a_base = z * (ct_cols * dim0 * ct_rows);
        let mut idx_b_base = z * (num_per * pt_cols * dim0 * pt_rows);

        for i in 0..num_per {
            for c in 0..pt_cols {
                let mut sums_out_n0_0 = 0u128;
                let mut sums_out_n0_1 = 0u128;
                let mut sums_out_n1_0 = 0u128;
                let mut sums_out_n1_1 = 0u128;

                for jm in 0..(dim0 * pt_rows) {
                    let b = db[idx_b_base];
                    idx_b_base += 1;

                    let v_a0 = v_firstdim[idx_a_base + jm * ct_rows];
                    let v_a1 = v_firstdim[idx_a_base + jm * ct_rows + 1];

                    let b_lo = b as u32;
                    let b_hi = (b >> 32) as u32;
                    let v_a0_lo = v_a0 as u32;
                    let v_a0_hi = (v_a0 >> 32) as u32;
                    let v_a1_lo = v_a1 as u32;
                    let v_a1_hi = (v_a1 >> 32) as u32;

                    // do n0
                    sums_out_n0_0 += ((v_a0_lo as u64) * (b_lo as u64)) as u128;
                    sums_out_n0_1 += ((v_a1_lo as u64) * (b_lo as u64)) as u128;

                    // do n1
                    sums_out_n1_0 += ((v_a0_hi as u64) * (b_hi as u64)) as u128;
                    sums_out_n1_1 += ((v_a1_hi as u64) * (b_hi as u64)) as u128;
                }

                // output n0
                let (crt_count, poly_len) = (params.crt_count, params.poly_len);
                let mut n = 0;
                let mut idx_c = c * (crt_count * poly_len) + n * (poly_len) + z;
                out[i].data[idx_c] = (sums_out_n0_0 % (params.moduli[0] as u128)) as u64;
                idx_c += pt_cols * crt_count * poly_len;
                out[i].data[idx_c] = (sums_out_n0_1 % (params.moduli[0] as u128)) as u64;

                // output n1
                n = 1;
                idx_c = c * (crt_count * poly_len) + n * (poly_len) + z;
                out[i].data[idx_c] = (sums_out_n1_0 % (params.moduli[1] as u128)) as u64;
                idx_c += pt_cols * crt_count * poly_len;
                out[i].data[idx_c] = (sums_out_n1_1 % (params.moduli[1] as u128)) as u64;
            }
        }
    }
}
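
/// Test helper: fills a database with random plaintext polynomials laid out in
/// the packed, reoriented format expected by `multiply_reg_by_database`
/// (both CRT residues of each NTT coefficient packed into one u64), and also
/// returns the plaintext matrix stored at `item_idx` for correctness checks.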
pub fn generate_random_db_and_get_item<'a>(
    params: &'a Params,
    item_idx: usize,
) -> (PolyMatrixRaw<'a>, AlignedMemory64) {
    let mut rng = get_seeded_rng();

    let trials = params.n * params.n;
    let dim0 = 1 << params.db_dim_1;
    let num_per = 1 << params.db_dim_2;
    let num_items = dim0 * num_per;
    let db_size_words = trials * num_items * params.poly_len;
    let mut v = AlignedMemory64::new(db_size_words);

    let mut item = PolyMatrixRaw::zero(params, params.n, params.n);

    for trial in 0..trials {
        for i in 0..num_items {
            let ii = i % num_per;
            let j = i / num_per;

            let mut db_item = PolyMatrixRaw::random_rng(params, 1, 1, &mut rng);
            db_item.reduce_mod(params.pt_modulus);

            if i == item_idx {
                item.copy_into(&db_item, trial / params.n, trial % params.n);
            }

            for z in 0..params.poly_len {
                db_item.data[z] =
                    recenter_mod(db_item.data[z], params.pt_modulus, params.modulus);
            }

            let db_item_ntt = db_item.ntt();
            for z in 0..params.poly_len {
                let idx_dst = calc_index(
                    &[trial, z, ii, j],
                    &[trials, params.poly_len, num_per, dim0],
                );
                v[idx_dst] = db_item_ntt.data[z]
                    | (db_item_ntt.data[params.poly_len + z] << PACKED_OFFSET_2);
            }
        }
    }
    (item, v)
}
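
/// Folds the list of Regev ciphertexts down to a single ciphertext in
/// `v_cts[0]`: each round halves the list by homomorphically combining the
/// lower half (multiplied by `v_folding_neg`, which encodes the complement of
/// the selection bit) with the upper half (multiplied by `v_folding`), with a
/// gadget decomposition before each GSW multiplication.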
pub fn fold_ciphertexts(
    params: &Params,
    v_cts: &mut Vec<PolyMatrixRaw>,
    v_folding: &Vec<PolyMatrixNTT>,
    v_folding_neg: &Vec<PolyMatrixNTT>,
) {
    let further_dims = log2(v_cts.len() as u64) as usize;
    let ell = v_folding[0].cols / 2;

    let mut ginv_c = PolyMatrixRaw::zero(&params, 2 * ell, 1);
    let mut ginv_c_ntt = PolyMatrixNTT::zero(&params, 2 * ell, 1);
    let mut prod = PolyMatrixNTT::zero(&params, 2, 1);
    let mut sum = PolyMatrixNTT::zero(&params, 2, 1);

    let mut num_per = v_cts.len();
    for cur_dim in 0..further_dims {
        num_per = num_per / 2;
        for i in 0..num_per {
            gadget_invert(&mut ginv_c, &v_cts[i]);
            to_ntt(&mut ginv_c_ntt, &ginv_c);
            multiply(
                &mut prod,
                &v_folding_neg[further_dims - 1 - cur_dim],
                &ginv_c_ntt,
            );
            gadget_invert(&mut ginv_c, &v_cts[num_per + i]);
            to_ntt(&mut ginv_c_ntt, &ginv_c);
            multiply(
                &mut sum,
                &v_folding[further_dims - 1 - cur_dim],
                &ginv_c_ntt,
            );
            add_into(&mut sum, &prod);
            from_ntt(&mut v_cts[i], &sum);
        }
    }
}
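
/// Packs `n * n` 2x1 Regev ciphertexts into a single (n+1) x n matrix
/// ciphertext using the packing keys `v_w`: for each output column, the second
/// polynomial of every input ciphertext is added at its row position and the
/// first polynomial is folded in through `w * G^{-1}(ct_1)`.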
pub fn pack<'a>(
    params: &'a Params,
    v_ct: &Vec<PolyMatrixRaw>,
    v_w: &Vec<PolyMatrixNTT>,
) -> PolyMatrixNTT<'a> {
    assert!(v_ct.len() >= params.n * params.n);
    assert!(v_w.len() == params.n);
    assert!(v_ct[0].rows == 2);
    assert!(v_ct[0].cols == 1);
    assert!(v_w[0].rows == (params.n + 1));
    assert!(v_w[0].cols == params.t_conv);

    let mut result = PolyMatrixNTT::zero(params, params.n + 1, params.n);

    let mut ginv = PolyMatrixRaw::zero(params, params.t_conv, 1);
    let mut ginv_nttd = PolyMatrixNTT::zero(params, params.t_conv, 1);
    let mut prod = PolyMatrixNTT::zero(params, params.n + 1, 1);
    let mut ct_1 = PolyMatrixRaw::zero(params, 1, 1);
    let mut ct_2 = PolyMatrixRaw::zero(params, 1, 1);
    let mut ct_2_ntt = PolyMatrixNTT::zero(params, 1, 1);

    for c in 0..params.n {
        let mut v_int = PolyMatrixNTT::zero(&params, params.n + 1, 1);
        for r in 0..params.n {
            let w = &v_w[r];
            let ct = &v_ct[r * params.n + c];
            ct_1.get_poly_mut(0, 0).copy_from_slice(ct.get_poly(0, 0));
            ct_2.get_poly_mut(0, 0).copy_from_slice(ct.get_poly(1, 0));
            to_ntt(&mut ct_2_ntt, &ct_2);
            gadget_invert(&mut ginv, &ct_1);
            to_ntt(&mut ginv_nttd, &ginv);
            multiply(&mut prod, &w, &ginv_nttd);
            add_into_at(&mut v_int, &ct_2_ntt, 1 + r, 0);
            add_into(&mut v_int, &prod);
        }
        result.copy_into(&v_int, 0, c);
    }

    result
}
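
/// Serializes the packed response: the first row of each instance is rescaled
/// from the ciphertext modulus to `q2` and the remaining rows to
/// `q1 = 4 * pt_modulus`, then all coefficients are bit-packed at `q2_bits` /
/// `q1_bits` each into a 64-bit-aligned byte buffer.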
pub fn encode(params: &Params, v_packed_ct: &Vec<PolyMatrixRaw>) -> Vec<u8> {
    let q1 = 4 * params.pt_modulus;
    let q1_bits = log2_ceil(q1) as usize;
    let q2 = Q2_VALUES[params.q2_bits as usize];
    let q2_bits = params.q2_bits as usize;

    let num_bits = params.instances
        * ((q2_bits * params.n * params.poly_len)
            + (q1_bits * params.n * params.n * params.poly_len));
    let round_to = 64;
    let num_bytes_rounded_up = ((num_bits + round_to - 1) / round_to) * round_to / 8;

    let mut result = vec![0u8; num_bytes_rounded_up];
    let mut bit_offs = 0;
    for instance in 0..params.instances {
        let packed_ct = &v_packed_ct[instance];

        let mut first_row = packed_ct.submatrix(0, 0, 1, packed_ct.cols);
        let mut rest_rows = packed_ct.submatrix(1, 0, packed_ct.rows - 1, packed_ct.cols);

        first_row.apply_func(|x| rescale(x, params.modulus, q2));
        rest_rows.apply_func(|x| rescale(x, params.modulus, q1));

        let data = result.as_mut_slice();
        for i in 0..params.n * params.poly_len {
            write_arbitrary_bits(data, first_row.data[i], bit_offs, q2_bits);
            bit_offs += q2_bits;
        }
        for i in 0..params.n * params.n * params.poly_len {
            write_arbitrary_bits(data, rest_rows.data[i], bit_offs, q1_bits);
            bit_offs += q1_bits;
        }
    }

    result
}
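
/// For each folding GSW ciphertext, builds the complementary ciphertext
/// `G - C` (gadget matrix minus the ciphertext), which effectively encodes the
/// negated selection bit and serves as the "lower half" operand in
/// `fold_ciphertexts`.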
pub fn get_v_folding_neg<'a>(
    params: &'a Params,
    v_folding: &Vec<PolyMatrixNTT>,
) -> Vec<PolyMatrixNTT<'a>> {
    let gadget_ntt = build_gadget(&params, 2, 2 * params.t_gsw).ntt(); // TODO: make this better

    let mut v_folding_neg = Vec::new();
    let mut ct_gsw_inv = PolyMatrixRaw::zero(&params, 2, 2 * params.t_gsw);
    for i in 0..params.db_dim_2 {
        invert(&mut ct_gsw_inv, &v_folding[i].raw());
        let mut ct_gsw_neg = PolyMatrixNTT::zero(&params, 2, 2 * params.t_gsw);
        add(&mut ct_gsw_neg, &gadget_ntt, &ct_gsw_inv.ntt());
        v_folding_neg.push(ct_gsw_neg);
    }
    v_folding_neg
}
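
/// Server-side query expansion: runs `coefficient_expansion` on the single
/// uploaded ciphertext, then splits the result into the first-dimension Regev
/// ciphertexts (reoriented into the flat layout consumed by
/// `multiply_reg_by_database`) and the GSW folding ciphertexts produced by
/// `regev_to_gsw`.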
pub fn expand_query<'a>(
    params: &'a Params,
    public_params: &PublicParameters<'a>,
    query: &Query<'a>,
) -> (AlignedMemory64, Vec<PolyMatrixNTT<'a>>) {
    let dim0 = 1 << params.db_dim_1;
    let further_dims = params.db_dim_2;

    let mut v_reg_reoriented;
    let mut v_folding;

    let num_bits_to_gen = params.t_gsw * further_dims + dim0;
    let g = log2_ceil_usize(num_bits_to_gen);
    let right_expanded = params.t_gsw * further_dims;
    let stop_round = log2_ceil_usize(right_expanded);

    let mut v = Vec::new();
    for _ in 0..(1 << g) {
        v.push(PolyMatrixNTT::zero(params, 2, 1));
    }
    v[0].copy_into(&query.ct.as_ref().unwrap().ntt(), 0, 0);

    let v_conversion = &public_params.v_conversion.as_ref().unwrap()[0];
    let v_w_left = public_params.v_expansion_left.as_ref().unwrap();
    let v_w_right = public_params.v_expansion_right.as_ref().unwrap();
    let v_neg1 = params.get_v_neg1();

    coefficient_expansion(
        &mut v,
        g,
        stop_round,
        params,
        &v_w_left,
        &v_w_right,
        &v_neg1,
        params.t_gsw * params.db_dim_2,
    );

    let mut v_reg_inp = Vec::with_capacity(dim0);
    for i in 0..dim0 {
        v_reg_inp.push(v[2 * i].clone());
    }
    let mut v_gsw_inp = Vec::with_capacity(right_expanded);
    for i in 0..right_expanded {
        v_gsw_inp.push(v[2 * i + 1].clone());
    }

    let v_reg_sz = dim0 * 2 * params.poly_len;
    v_reg_reoriented = AlignedMemory64::new(v_reg_sz);
    reorient_reg_ciphertexts(params, v_reg_reoriented.as_mut_slice(), &v_reg_inp);

    v_folding = Vec::new();
    for _ in 0..params.db_dim_2 {
        v_folding.push(PolyMatrixNTT::zero(params, 2, 2 * params.t_gsw));
    }
    regev_to_gsw(&mut v_folding, &v_gsw_inp, &v_conversion, params, 1, 0);

    (v_reg_reoriented, v_folding)
}
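
/// Top-level query processing: expand the query (or take the client-supplied
/// pre-expanded buffers when `expand_queries` is false), multiply against each
/// of the `n * n` database plaintext slots, fold over the second dimension,
/// pack the results into a single matrix ciphertext, and encode the response.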
pub fn process_query(
    params: &Params,
    public_params: &PublicParameters,
    query: &Query,
    db: &[u64],
) -> Vec<u8> {
    let dim0 = 1 << params.db_dim_1;
    let num_per = 1 << params.db_dim_2;
    let db_slice_sz = dim0 * num_per * params.poly_len;

    let v_packing = public_params.v_packing.as_ref();

    let mut v_reg_reoriented;
    let v_folding;
    if params.expand_queries {
        (v_reg_reoriented, v_folding) = expand_query(params, public_params, query);
    } else {
        v_reg_reoriented = AlignedMemory64::new(query.v_buf.as_ref().unwrap().len());
        v_reg_reoriented
            .as_mut_slice()
            .copy_from_slice(query.v_buf.as_ref().unwrap());

        v_folding = query
            .v_ct
            .as_ref()
            .unwrap()
            .clone()
            .iter()
            .map(|x| x.ntt())
            .collect();
    }

    let v_folding_neg = get_v_folding_neg(params, &v_folding);

    let mut intermediate = Vec::with_capacity(num_per);
    let mut intermediate_raw = Vec::with_capacity(num_per);
    for _ in 0..num_per {
        intermediate.push(PolyMatrixNTT::zero(params, 2, 1));
        intermediate_raw.push(PolyMatrixRaw::zero(params, 2, 1));
    }

    let mut v_ct = Vec::new();
    for trial in 0..(params.n * params.n) {
        let cur_db = &db[(db_slice_sz * trial)..(db_slice_sz * trial + db_slice_sz)];

        multiply_reg_by_database(
            &mut intermediate,
            cur_db,
            v_reg_reoriented.as_slice(),
            params,
            dim0,
            num_per,
        );

        for i in 0..intermediate.len() {
            from_ntt(&mut intermediate_raw[i], &intermediate[i]);
        }

        fold_ciphertexts(params, &mut intermediate_raw, &v_folding, &v_folding_neg);

        v_ct.push(intermediate_raw[0].clone());
    }

    let packed_ct = pack(params, &v_ct, &v_packing);

    let mut v_packed_ct = Vec::new();
    v_packed_ct.push(packed_ct.raw());

    encode(params, &v_packed_ct)
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::client::*;
    use rand::{prelude::StdRng, Rng};

    fn get_params() -> Params {
        let mut params = get_expansion_testing_params();
        params.db_dim_1 = 6;
        params.db_dim_2 = 2;
        params.t_exp_right = 8;
        params
    }
    fn dec_reg<'a>(
        params: &'a Params,
        ct: &PolyMatrixNTT<'a>,
        client: &mut Client<'a, StdRng>,
        scale_k: u64,
    ) -> u64 {
        let dec = client.decrypt_matrix_reg(ct).raw();
        let mut val = dec.data[0] as i64;
        if val >= (params.modulus / 2) as i64 {
            val -= params.modulus as i64;
        }
        let val_rounded = f64::round(val as f64 / scale_k as f64) as i64;
        if val_rounded == 0 {
            0
        } else {
            1
        }
    }

    fn dec_gsw<'a>(
        params: &'a Params,
        ct: &PolyMatrixNTT<'a>,
        client: &mut Client<'a, StdRng>,
    ) -> u64 {
        let dec = client.decrypt_matrix_reg(ct).raw();
        let idx = 2 * (params.t_gsw - 1) * params.poly_len + params.poly_len; // this offset should encode a large value
        let mut val = dec.data[idx] as i64;
        if val >= (params.modulus / 2) as i64 {
            val -= params.modulus as i64;
        }
        if i64::abs(val) < (1i64 << 10) {
            0
        } else {
            1
        }
    }
    #[test]
    fn coefficient_expansion_is_correct() {
        let params = get_params();
        let v_neg1 = params.get_v_neg1();
        let mut seeded_rng = get_seeded_rng();
        let mut client = Client::init(&params, &mut seeded_rng);
        let public_params = client.generate_keys();

        let mut v = Vec::new();
        for _ in 0..(1 << (params.db_dim_1 + 1)) {
            v.push(PolyMatrixNTT::zero(&params, 2, 1));
        }

        let target = 7;
        let scale_k = params.modulus / params.pt_modulus;
        let mut sigma = PolyMatrixRaw::zero(&params, 1, 1);
        sigma.data[target] = scale_k;
        v[0] = client.encrypt_matrix_reg(&sigma.ntt());
        let test_ct = client.encrypt_matrix_reg(&sigma.ntt());

        let v_w_left = public_params.v_expansion_left.unwrap();
        let v_w_right = public_params.v_expansion_right.unwrap();
        coefficient_expansion(
            &mut v,
            client.g,
            client.stop_round,
            &params,
            &v_w_left,
            &v_w_right,
            &v_neg1,
            params.t_gsw * params.db_dim_2,
        );

        assert_eq!(dec_reg(&params, &test_ct, &mut client, scale_k), 0);

        for i in 0..v.len() {
            if i == target {
                assert_eq!(dec_reg(&params, &v[i], &mut client, scale_k), 1);
            } else {
                assert_eq!(dec_reg(&params, &v[i], &mut client, scale_k), 0);
            }
        }
    }
    #[test]
    fn regev_to_gsw_is_correct() {
        let mut params = get_params();
        params.db_dim_2 = 1;
        let mut seeded_rng = get_seeded_rng();
        let mut client = Client::init(&params, &mut seeded_rng);
        let public_params = client.generate_keys();

        let mut enc_constant = |val| {
            let mut sigma = PolyMatrixRaw::zero(&params, 1, 1);
            sigma.data[0] = val;
            client.encrypt_matrix_reg(&sigma.ntt())
        };

        let v = &public_params.v_conversion.unwrap()[0];

        let bits_per = get_bits_per(&params, params.t_gsw);
        let mut v_inp_1 = Vec::new();
        let mut v_inp_0 = Vec::new();
        for i in 0..params.t_gsw {
            let val = 1u64 << (bits_per * i);
            v_inp_1.push(enc_constant(val));
            v_inp_0.push(enc_constant(0));
        }

        let mut v_gsw = Vec::new();
        v_gsw.push(PolyMatrixNTT::zero(&params, 2, 2 * params.t_gsw));

        regev_to_gsw(&mut v_gsw, &v_inp_1, v, &params, 1, 0);
        assert_eq!(dec_gsw(&params, &v_gsw[0], &mut client), 1);

        regev_to_gsw(&mut v_gsw, &v_inp_0, v, &params, 1, 0);
        assert_eq!(dec_gsw(&params, &v_gsw[0], &mut client), 0);
    }
    #[test]
    fn multiply_reg_by_database_is_correct() {
        let params = get_params();
        let mut seeded_rng = get_seeded_rng();

        let dim0 = 1 << params.db_dim_1;
        let num_per = 1 << params.db_dim_2;
        let scale_k = params.modulus / params.pt_modulus;

        let target_idx = seeded_rng.gen::<usize>() % (dim0 * num_per);
        let target_idx_dim0 = target_idx / num_per;
        let target_idx_num_per = target_idx % num_per;

        let mut client = Client::init(&params, &mut seeded_rng);
        _ = client.generate_keys();

        let (corr_item, db) = generate_random_db_and_get_item(&params, target_idx);

        let mut v_reg = Vec::new();
        for i in 0..dim0 {
            let val = if i == target_idx_dim0 { scale_k } else { 0 };
            let sigma = PolyMatrixRaw::single_value(&params, val).ntt();
            v_reg.push(client.encrypt_matrix_reg(&sigma));
        }

        let v_reg_sz = dim0 * 2 * params.poly_len;
        let mut v_reg_reoriented = AlignedMemory64::new(v_reg_sz);
        reorient_reg_ciphertexts(&params, v_reg_reoriented.as_mut_slice(), &v_reg);

        let mut out = Vec::with_capacity(num_per);
        for _ in 0..num_per {
            out.push(PolyMatrixNTT::zero(&params, 2, 1));
        }
        multiply_reg_by_database(
            &mut out,
            db.as_slice(),
            v_reg_reoriented.as_slice(),
            &params,
            dim0,
            num_per,
        );

        // decrypt
        let dec = client.decrypt_matrix_reg(&out[target_idx_num_per]).raw();
        let mut dec_rescaled = PolyMatrixRaw::zero(&params, 1, 1);
        for z in 0..params.poly_len {
            dec_rescaled.data[z] = rescale(dec.data[z], params.modulus, params.pt_modulus);
        }

        for z in 0..params.poly_len {
            // println!("{:?} {:?}", dec_rescaled.data[z], corr_item.data[z]);
            assert_eq!(dec_rescaled.data[z], corr_item.data[z]);
        }
    }
    #[test]
    fn fold_ciphertexts_is_correct() {
        let params = get_params();
        let mut seeded_rng = get_seeded_rng();

        let dim0 = 1 << params.db_dim_1;
        let num_per = 1 << params.db_dim_2;
        let scale_k = params.modulus / params.pt_modulus;

        let target_idx = seeded_rng.gen::<usize>() % (dim0 * num_per);
        let target_idx_num_per = target_idx % num_per;

        let mut client = Client::init(&params, &mut seeded_rng);
        _ = client.generate_keys();

        let mut v_reg = Vec::new();
        for i in 0..num_per {
            let val = if i == target_idx_num_per { scale_k } else { 0 };
            let sigma = PolyMatrixRaw::single_value(&params, val).ntt();
            v_reg.push(client.encrypt_matrix_reg(&sigma));
        }

        let mut v_reg_raw = Vec::new();
        for i in 0..num_per {
            v_reg_raw.push(v_reg[i].raw());
        }

        let bits_per = get_bits_per(&params, params.t_gsw);
        let mut v_folding = Vec::new();
        for i in 0..params.db_dim_2 {
            let bit = ((target_idx_num_per as u64) & (1 << (i as u64))) >> (i as u64);
            let mut ct_gsw = PolyMatrixNTT::zero(&params, 2, 2 * params.t_gsw);

            for j in 0..params.t_gsw {
                let value = (1u64 << (bits_per * j)) * bit;
                let sigma = PolyMatrixRaw::single_value(&params, value);
                let sigma_ntt = to_ntt_alloc(&sigma);
                let ct = client.encrypt_matrix_reg(&sigma_ntt);
                ct_gsw.copy_into(&ct, 0, 2 * j + 1);
                let prod = &to_ntt_alloc(&client.sk_reg) * &sigma_ntt;
                let ct = &client.encrypt_matrix_reg(&prod);
                ct_gsw.copy_into(&ct, 0, 2 * j);
            }

            v_folding.push(ct_gsw);
        }

        let gadget_ntt = build_gadget(&params, 2, 2 * params.t_gsw).ntt();
        let mut v_folding_neg = Vec::new();
        let mut ct_gsw_inv = PolyMatrixRaw::zero(&params, 2, 2 * params.t_gsw);
        for i in 0..params.db_dim_2 {
            invert(&mut ct_gsw_inv, &v_folding[i].raw());
            let mut ct_gsw_neg = PolyMatrixNTT::zero(&params, 2, 2 * params.t_gsw);
            add(&mut ct_gsw_neg, &gadget_ntt, &ct_gsw_inv.ntt());
            v_folding_neg.push(ct_gsw_neg);
        }

        fold_ciphertexts(&params, &mut v_reg_raw, &v_folding, &v_folding_neg);

        // decrypt
        assert_eq!(
            dec_reg(&params, &v_reg_raw[0].ntt(), &mut client, scale_k),
            1
        );
    }
    #[test]
    fn full_protocol_is_correct() {
        let params = get_params();
        let mut seeded_rng = get_seeded_rng();

        let target_idx = seeded_rng.gen::<usize>() % (1 << (params.db_dim_1 + params.db_dim_2));

        let mut client = Client::init(&params, &mut seeded_rng);
        let public_params = client.generate_keys();
        let query = client.generate_query(target_idx);

        let (corr_item, db) = generate_random_db_and_get_item(&params, target_idx);

        let response = process_query(&params, &public_params, &query, db.as_slice());

        let result = client.decode_response(response.as_slice());

        let p_bits = log2_ceil(params.pt_modulus) as usize;
        let corr_result = corr_item.to_vec(p_bits, params.poly_len);

        for z in 0..corr_result.len() {
            assert_eq!(result[z], corr_result[z]);
        }
    }
}