// server.rs

#[cfg(target_feature = "avx2")]
use std::arch::x86_64::*;

use crate::arith::*;
use crate::aligned_memory::*;
use crate::client::PublicParameters;
use crate::client::Query;
use crate::gadget::*;
use crate::params::*;
use crate::poly::*;
use crate::util::*;
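
/// Query expansion: homomorphically expands the ciphertext in `v[0]` into
/// `1 << g` ciphertexts, so that (roughly) each output slot encodes one
/// coefficient of the input polynomial. Each round `r` applies the
/// automorphism x -> x^t with t = poly_len / 2^r + 1 and key-switches using
/// `v_w_left` (even slots, which stay Regev ciphertexts for the first
/// dimension) or `v_w_right` (odd slots, later converted to GSW form).
/// `stop_round` and `max_bits_to_gen_right` limit how many odd-slot
/// ciphertexts are generated.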
pub fn coefficient_expansion(
    v: &mut Vec<PolyMatrixNTT>,
    g: usize,
    stop_round: usize,
    params: &Params,
    v_w_left: &Vec<PolyMatrixNTT>,
    v_w_right: &Vec<PolyMatrixNTT>,
    v_neg1: &Vec<PolyMatrixNTT>,
    max_bits_to_gen_right: usize,
) {
    let poly_len = params.poly_len;

    let mut ct = PolyMatrixRaw::zero(params, 2, 1);
    let mut ct_auto = PolyMatrixRaw::zero(params, 2, 1);
    let mut ct_auto_1 = PolyMatrixRaw::zero(params, 1, 1);
    let mut ct_auto_1_ntt = PolyMatrixNTT::zero(params, 1, 1);
    let mut ginv_ct_left = PolyMatrixRaw::zero(params, params.t_exp_left, 1);
    let mut ginv_ct_left_ntt = PolyMatrixNTT::zero(params, params.t_exp_left, 1);
    let mut ginv_ct_right = PolyMatrixRaw::zero(params, params.t_exp_right, 1);
    let mut ginv_ct_right_ntt = PolyMatrixNTT::zero(params, params.t_exp_right, 1);
    let mut w_times_ginv_ct = PolyMatrixNTT::zero(params, 2, 1);

    for r in 0..g {
        let num_in = 1 << r;
        let num_out = 2 * num_in;

        let t = (poly_len / (1 << r)) + 1;

        let neg1 = &v_neg1[r];

        for i in 0..num_out {
            if (stop_round > 0 && i % 2 == 1 && r > stop_round)
                || (r == stop_round && i / 2 >= max_bits_to_gen_right)
            {
                continue;
            }

            let (w, _gadget_dim, gi_ct, gi_ct_ntt) = match i % 2 {
                0 => (
                    &v_w_left[r],
                    params.t_exp_left,
                    &mut ginv_ct_left,
                    &mut ginv_ct_left_ntt,
                ),
                1 | _ => (
                    &v_w_right[r],
                    params.t_exp_right,
                    &mut ginv_ct_right,
                    &mut ginv_ct_right_ntt,
                ),
            };

            if i < num_in {
                let (src, dest) = v.split_at_mut(num_in);
                scalar_multiply(&mut dest[i], neg1, &src[i]);
            }

            from_ntt(&mut ct, &v[i]);
            automorph(&mut ct_auto, &ct, t);
            gadget_invert_rdim(gi_ct, &ct_auto, 1);
            to_ntt_no_reduce(gi_ct_ntt, &gi_ct);

            ct_auto_1
                .data
                .as_mut_slice()
                .copy_from_slice(ct_auto.get_poly(1, 0));
            to_ntt(&mut ct_auto_1_ntt, &ct_auto_1);
            multiply(&mut w_times_ginv_ct, w, &gi_ct_ntt);

            let mut idx = 0;
            for j in 0..2 {
                for n in 0..params.crt_count {
                    for z in 0..poly_len {
                        let sum = v[i].data[idx]
                            + w_times_ginv_ct.data[idx]
                            + j * ct_auto_1_ntt.data[n * poly_len + z];
                        v[i].data[idx] = barrett_coeff_u64(params, sum, n);
                        idx += 1;
                    }
                }
            }
        }
    }
}
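
/// Converts groups of `t_gsw` Regev ciphertexts from `v_inp` into GSW
/// ciphertexts in `v_gsw` using the conversion key `v` (2 x 2*t_conv).
/// For each output, odd columns take the input ciphertexts directly; even
/// columns are obtained by gadget-inverting the input and multiplying by `v`.
/// `idx_factor` and `idx_offset` select which inputs feed which output.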
pub fn regev_to_gsw<'a>(
    v_gsw: &mut Vec<PolyMatrixNTT<'a>>,
    v_inp: &Vec<PolyMatrixNTT<'a>>,
    v: &PolyMatrixNTT<'a>,
    params: &'a Params,
    idx_factor: usize,
    idx_offset: usize,
) {
    assert!(v.rows == 2);
    assert!(v.cols == 2 * params.t_conv);

    let mut ginv_c_inp = PolyMatrixRaw::zero(params, 2 * params.t_conv, 1);
    let mut ginv_c_inp_ntt = PolyMatrixNTT::zero(params, 2 * params.t_conv, 1);
    let mut tmp_ct_raw = PolyMatrixRaw::zero(params, 2, 1);
    let mut tmp_ct = PolyMatrixNTT::zero(params, 2, 1);

    for i in 0..params.db_dim_2 {
        let ct = &mut v_gsw[i];
        for j in 0..params.t_gsw {
            let idx_ct = i * params.t_gsw + j;
            let idx_inp = idx_factor * (idx_ct) + idx_offset;
            ct.copy_into(&v_inp[idx_inp], 0, 2 * j + 1);
            from_ntt(&mut tmp_ct_raw, &v_inp[idx_inp]);
            gadget_invert(&mut ginv_c_inp, &tmp_ct_raw);
            to_ntt(&mut ginv_c_inp_ntt, &ginv_c_inp);
            multiply(&mut tmp_ct, v, &ginv_c_inp_ntt);
            ct.copy_into(&tmp_ct, 0, 2 * j);
        }
    }
}
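
// Database words pack both CRT residues of a coefficient into one u64:
// the low 32 bits hold the residue mod moduli[0] and the high 32 bits the
// residue mod moduli[1]; PACKED_OFFSET_2 is the shift between the halves.
// MAX_SUMMED bounds how many products the AVX2 path accumulates before a
// Barrett reduction is needed to avoid overflowing 64-bit lanes.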
pub const MAX_SUMMED: usize = 1 << 6;
pub const PACKED_OFFSET_2: i32 = 32;
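
/// First-dimension processing (AVX2 path): multiplies the reoriented Regev
/// ciphertexts in `v_firstdim` by the packed database `db`, writing `num_per`
/// output ciphertexts into `out`. The two packed 32-bit CRT residues are
/// multiplied and accumulated separately with `_mm256_mul_epu32`; partial
/// sums are Barrett-reduced after each inner block so the 64-bit lanes never
/// overflow.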
#[cfg(target_feature = "avx2")]
pub fn multiply_reg_by_database(
    out: &mut Vec<PolyMatrixNTT>,
    db: &[u64],
    v_firstdim: &[u64],
    params: &Params,
    dim0: usize,
    num_per: usize,
) {
    let ct_rows = 2;
    let ct_cols = 1;
    let pt_rows = 1;
    let pt_cols = 1;

    assert!(dim0 * ct_rows >= MAX_SUMMED);

    let mut sums_out_n0_u64 = AlignedMemory64::new(4);
    let mut sums_out_n2_u64 = AlignedMemory64::new(4);

    for z in 0..params.poly_len {
        let idx_a_base = z * (ct_cols * dim0 * ct_rows);
        let mut idx_b_base = z * (num_per * pt_cols * dim0 * pt_rows);

        for i in 0..num_per {
            for c in 0..pt_cols {
                let inner_limit = MAX_SUMMED;
                let outer_limit = dim0 * ct_rows / inner_limit;

                let mut sums_out_n0_u64_acc = [0u64, 0, 0, 0];
                let mut sums_out_n2_u64_acc = [0u64, 0, 0, 0];

                for o_jm in 0..outer_limit {
                    unsafe {
                        let mut sums_out_n0 = _mm256_setzero_si256();
                        let mut sums_out_n2 = _mm256_setzero_si256();

                        for i_jm in 0..inner_limit / 4 {
                            let jm = o_jm * inner_limit + (4 * i_jm);

                            let b_inp_1 = *db.get_unchecked(idx_b_base) as i64;
                            idx_b_base += 1;
                            let b_inp_2 = *db.get_unchecked(idx_b_base) as i64;
                            idx_b_base += 1;
                            let b = _mm256_set_epi64x(b_inp_2, b_inp_2, b_inp_1, b_inp_1);

                            let v_a = v_firstdim.get_unchecked(idx_a_base + jm) as *const u64;

                            let a = _mm256_load_si256(v_a as *const __m256i);
                            let a_lo = a;
                            let a_hi_hi = _mm256_srli_epi64(a, PACKED_OFFSET_2);

                            let b_lo = b;
                            let b_hi_hi = _mm256_srli_epi64(b, PACKED_OFFSET_2);

                            sums_out_n0 =
                                _mm256_add_epi64(sums_out_n0, _mm256_mul_epu32(a_lo, b_lo));
                            sums_out_n2 =
                                _mm256_add_epi64(sums_out_n2, _mm256_mul_epu32(a_hi_hi, b_hi_hi));
                        }

                        // reduce here, otherwise we will overflow
                        _mm256_store_si256(
                            sums_out_n0_u64.as_mut_ptr() as *mut __m256i,
                            sums_out_n0,
                        );
                        _mm256_store_si256(
                            sums_out_n2_u64.as_mut_ptr() as *mut __m256i,
                            sums_out_n2,
                        );

                        for idx in 0..4 {
                            let val = sums_out_n0_u64[idx];
                            sums_out_n0_u64_acc[idx] =
                                barrett_coeff_u64(params, val + sums_out_n0_u64_acc[idx], 0);
                        }
                        for idx in 0..4 {
                            let val = sums_out_n2_u64[idx];
                            sums_out_n2_u64_acc[idx] =
                                barrett_coeff_u64(params, val + sums_out_n2_u64_acc[idx], 1);
                        }
                    }
                }

                for idx in 0..4 {
                    sums_out_n0_u64_acc[idx] =
                        barrett_coeff_u64(params, sums_out_n0_u64_acc[idx], 0);
                    sums_out_n2_u64_acc[idx] =
                        barrett_coeff_u64(params, sums_out_n2_u64_acc[idx], 1);
                }

                // output n0
                let (crt_count, poly_len) = (params.crt_count, params.poly_len);
                let mut n = 0;
                let mut idx_c = c * (crt_count * poly_len) + n * (poly_len) + z;
                out[i].data[idx_c] =
                    barrett_coeff_u64(params, sums_out_n0_u64_acc[0] + sums_out_n0_u64_acc[2], 0);
                idx_c += pt_cols * crt_count * poly_len;
                out[i].data[idx_c] =
                    barrett_coeff_u64(params, sums_out_n0_u64_acc[1] + sums_out_n0_u64_acc[3], 0);

                // output n1
                n = 1;
                idx_c = c * (crt_count * poly_len) + n * (poly_len) + z;
                out[i].data[idx_c] =
                    barrett_coeff_u64(params, sums_out_n2_u64_acc[0] + sums_out_n2_u64_acc[2], 1);
                idx_c += pt_cols * crt_count * poly_len;
                out[i].data[idx_c] =
                    barrett_coeff_u64(params, sums_out_n2_u64_acc[1] + sums_out_n2_u64_acc[3], 1);
            }
        }
    }
}
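
/// First-dimension processing (portable fallback): same computation as the
/// AVX2 path, but accumulating the 32x32-bit products in u128 sums and
/// reducing modulo each CRT modulus at the end.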
#[cfg(not(target_feature = "avx2"))]
pub fn multiply_reg_by_database(
    out: &mut Vec<PolyMatrixNTT>,
    db: &[u64],
    v_firstdim: &[u64],
    params: &Params,
    dim0: usize,
    num_per: usize,
) {
    let ct_rows = 2;
    let ct_cols = 1;
    let pt_rows = 1;
    let pt_cols = 1;

    for z in 0..params.poly_len {
        let idx_a_base = z * (ct_cols * dim0 * ct_rows);
        let mut idx_b_base = z * (num_per * pt_cols * dim0 * pt_rows);

        for i in 0..num_per {
            for c in 0..pt_cols {
                let mut sums_out_n0_0 = 0u128;
                let mut sums_out_n0_1 = 0u128;
                let mut sums_out_n1_0 = 0u128;
                let mut sums_out_n1_1 = 0u128;

                for jm in 0..(dim0 * pt_rows) {
                    let b = db[idx_b_base];
                    idx_b_base += 1;

                    let v_a0 = v_firstdim[idx_a_base + jm * ct_rows];
                    let v_a1 = v_firstdim[idx_a_base + jm * ct_rows + 1];

                    let b_lo = b as u32;
                    let b_hi = (b >> 32) as u32;
                    let v_a0_lo = v_a0 as u32;
                    let v_a0_hi = (v_a0 >> 32) as u32;
                    let v_a1_lo = v_a1 as u32;
                    let v_a1_hi = (v_a1 >> 32) as u32;

                    // do n0
                    sums_out_n0_0 += ((v_a0_lo as u64) * (b_lo as u64)) as u128;
                    sums_out_n0_1 += ((v_a1_lo as u64) * (b_lo as u64)) as u128;

                    // do n1
                    sums_out_n1_0 += ((v_a0_hi as u64) * (b_hi as u64)) as u128;
                    sums_out_n1_1 += ((v_a1_hi as u64) * (b_hi as u64)) as u128;
                }

                // output n0
                let (crt_count, poly_len) = (params.crt_count, params.poly_len);
                let mut n = 0;
                let mut idx_c = c * (crt_count * poly_len) + n * (poly_len) + z;
                out[i].data[idx_c] = (sums_out_n0_0 % (params.moduli[0] as u128)) as u64;
                idx_c += pt_cols * crt_count * poly_len;
                out[i].data[idx_c] = (sums_out_n0_1 % (params.moduli[0] as u128)) as u64;

                // output n1
                n = 1;
                idx_c = c * (crt_count * poly_len) + n * (poly_len) + z;
                out[i].data[idx_c] = (sums_out_n1_0 % (params.moduli[1] as u128)) as u64;
                idx_c += pt_cols * crt_count * poly_len;
                out[i].data[idx_c] = (sums_out_n1_1 % (params.moduli[1] as u128)) as u64;
            }
        }
    }
}
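
/// Builds a random test database, reduced mod the plaintext modulus and laid
/// out in the packed NTT format expected by `multiply_reg_by_database`, and
/// also returns the plaintext item at `item_idx` so callers can check
/// correctness against it.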
pub fn generate_random_db_and_get_item<'a>(
    params: &'a Params,
    item_idx: usize,
) -> (PolyMatrixRaw<'a>, AlignedMemory64) {
    let mut rng = get_seeded_rng();

    let instances = params.instances;
    let trials = params.n * params.n;
    let dim0 = 1 << params.db_dim_1;
    let num_per = 1 << params.db_dim_2;
    let num_items = dim0 * num_per;
    let db_size_words = instances * trials * num_items * params.poly_len;
    let mut v = AlignedMemory64::new(db_size_words);

    let mut tmp_item_ntt = PolyMatrixNTT::zero(params, 1, 1);

    let mut item = PolyMatrixRaw::zero(params, params.n, params.n);

    for instance in 0..instances {
        println!("Instance {:?}", instance);
        for trial in 0..trials {
            println!("Trial {:?}", trial);
            for i in 0..num_items {
                let ii = i % num_per;
                let j = i / num_per;

                let mut db_item = PolyMatrixRaw::random_rng(params, 1, 1, &mut rng);
                db_item.reduce_mod(params.pt_modulus);

                if i == item_idx && instance == 0 {
                    item.copy_into(&db_item, trial / params.n, trial % params.n);
                }

                for z in 0..params.poly_len {
                    db_item.data[z] =
                        recenter_mod(db_item.data[z], params.pt_modulus, params.modulus);
                }

                let db_item_ntt = db_item.ntt();
                for z in 0..params.poly_len {
                    let idx_dst = calc_index(
                        &[instance, trial, z, ii, j],
                        &[instances, trials, params.poly_len, num_per, dim0],
                    );

                    v[idx_dst] = db_item_ntt.data[z]
                        | (db_item_ntt.data[params.poly_len + z] << PACKED_OFFSET_2);
                }
            }
        }
    }
    (item, v)
}
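
/// Folds the first-dimension results down to a single ciphertext in
/// `v_cts[0]`. Each further dimension halves the count: entry `i` becomes
/// `v_folding_neg * G^{-1}(v_cts[i]) + v_folding * G^{-1}(v_cts[num_per + i])`,
/// a homomorphic select between the two halves controlled by the
/// GSW-encrypted query bit for that dimension.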
pub fn fold_ciphertexts(
    params: &Params,
    v_cts: &mut Vec<PolyMatrixRaw>,
    v_folding: &Vec<PolyMatrixNTT>,
    v_folding_neg: &Vec<PolyMatrixNTT>,
) {
    let further_dims = log2(v_cts.len() as u64) as usize;
    let ell = v_folding[0].cols / 2;

    let mut ginv_c = PolyMatrixRaw::zero(&params, 2 * ell, 1);
    let mut ginv_c_ntt = PolyMatrixNTT::zero(&params, 2 * ell, 1);
    let mut prod = PolyMatrixNTT::zero(&params, 2, 1);
    let mut sum = PolyMatrixNTT::zero(&params, 2, 1);

    let mut num_per = v_cts.len();
    for cur_dim in 0..further_dims {
        num_per = num_per / 2;
        for i in 0..num_per {
            gadget_invert(&mut ginv_c, &v_cts[i]);
            to_ntt(&mut ginv_c_ntt, &ginv_c);
            multiply(
                &mut prod,
                &v_folding_neg[further_dims - 1 - cur_dim],
                &ginv_c_ntt,
            );
            gadget_invert(&mut ginv_c, &v_cts[num_per + i]);
            to_ntt(&mut ginv_c_ntt, &ginv_c);
            multiply(
                &mut sum,
                &v_folding[further_dims - 1 - cur_dim],
                &ginv_c_ntt,
            );
            add_into(&mut sum, &prod);
            from_ntt(&mut v_cts[i], &sum);
        }
    }
}
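
/// Packs the n*n folded Regev ciphertexts in `v_ct` into a single
/// (n+1) x n matrix ciphertext: for each entry, the first row is
/// gadget-inverted and multiplied by the matching packing key in `v_w`,
/// and the second row is added into row `1 + r` of output column `c`.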
pub fn pack<'a>(
    params: &'a Params,
    v_ct: &Vec<PolyMatrixRaw>,
    v_w: &Vec<PolyMatrixNTT>,
) -> PolyMatrixNTT<'a> {
    assert!(v_ct.len() >= params.n * params.n);
    assert!(v_w.len() == params.n);
    assert!(v_ct[0].rows == 2);
    assert!(v_ct[0].cols == 1);
    assert!(v_w[0].rows == (params.n + 1));
    assert!(v_w[0].cols == params.t_conv);

    let mut result = PolyMatrixNTT::zero(params, params.n + 1, params.n);

    let mut ginv = PolyMatrixRaw::zero(params, params.t_conv, 1);
    let mut ginv_nttd = PolyMatrixNTT::zero(params, params.t_conv, 1);
    let mut prod = PolyMatrixNTT::zero(params, params.n + 1, 1);
    let mut ct_1 = PolyMatrixRaw::zero(params, 1, 1);
    let mut ct_2 = PolyMatrixRaw::zero(params, 1, 1);
    let mut ct_2_ntt = PolyMatrixNTT::zero(params, 1, 1);

    for c in 0..params.n {
        let mut v_int = PolyMatrixNTT::zero(&params, params.n + 1, 1);
        for r in 0..params.n {
            let w = &v_w[r];
            let ct = &v_ct[r * params.n + c];
            ct_1.get_poly_mut(0, 0).copy_from_slice(ct.get_poly(0, 0));
            ct_2.get_poly_mut(0, 0).copy_from_slice(ct.get_poly(1, 0));
            to_ntt(&mut ct_2_ntt, &ct_2);
            gadget_invert(&mut ginv, &ct_1);
            to_ntt(&mut ginv_nttd, &ginv);
            multiply(&mut prod, &w, &ginv_nttd);
            add_into_at(&mut v_int, &ct_2_ntt, 1 + r, 0);
            add_into(&mut v_int, &prod);
        }
        result.copy_into(&v_int, 0, c);
    }

    result
}
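
/// Serializes the packed response. The first row of each ciphertext is
/// rescaled from the full modulus down to q2 and the remaining rows down to
/// q1 = 4 * pt_modulus; the rescaled coefficients are then bit-packed into
/// the output byte vector.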
pub fn encode(params: &Params, v_packed_ct: &Vec<PolyMatrixRaw>) -> Vec<u8> {
    let q1 = 4 * params.pt_modulus;
    let q1_bits = log2_ceil(q1) as usize;
    let q2 = Q2_VALUES[params.q2_bits as usize];
    let q2_bits = params.q2_bits as usize;

    let num_bits = params.instances
        * ((q2_bits * params.n * params.poly_len)
            + (q1_bits * params.n * params.n * params.poly_len));
    let round_to = 64;
    let num_bytes_rounded_up = ((num_bits + round_to - 1) / round_to) * round_to / 8;

    let mut result = vec![0u8; num_bytes_rounded_up];
    let mut bit_offs = 0;
    for instance in 0..params.instances {
        let packed_ct = &v_packed_ct[instance];

        let mut first_row = packed_ct.submatrix(0, 0, 1, packed_ct.cols);
        let mut rest_rows = packed_ct.submatrix(1, 0, packed_ct.rows - 1, packed_ct.cols);

        first_row.apply_func(|x| rescale(x, params.modulus, q2));
        rest_rows.apply_func(|x| rescale(x, params.modulus, q1));

        let data = result.as_mut_slice();
        for i in 0..params.n * params.poly_len {
            write_arbitrary_bits(data, first_row.data[i], bit_offs, q2_bits);
            bit_offs += q2_bits;
        }
        for i in 0..params.n * params.n * params.poly_len {
            write_arbitrary_bits(data, rest_rows.data[i], bit_offs, q1_bits);
            bit_offs += q1_bits;
        }
    }

    result
}
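
/// For each GSW folding ciphertext C, computes the complementary ciphertext
/// G - C (gadget matrix minus C), which `fold_ciphertexts` uses to select the
/// "bit = 0" branch.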
pub fn get_v_folding_neg<'a>(
    params: &'a Params,
    v_folding: &Vec<PolyMatrixNTT>,
) -> Vec<PolyMatrixNTT<'a>> {
    let gadget_ntt = build_gadget(&params, 2, 2 * params.t_gsw).ntt(); // TODO: make this better

    let mut v_folding_neg = Vec::new();
    let mut ct_gsw_inv = PolyMatrixRaw::zero(&params, 2, 2 * params.t_gsw);
    for i in 0..params.db_dim_2 {
        invert(&mut ct_gsw_inv, &v_folding[i].raw());
        let mut ct_gsw_neg = PolyMatrixNTT::zero(&params, 2, 2 * params.t_gsw);
        add(&mut ct_gsw_neg, &gadget_ntt, &ct_gsw_inv.ntt());
        v_folding_neg.push(ct_gsw_neg);
    }
    v_folding_neg
}
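
/// Expands the client's single uploaded ciphertext into the full query:
/// runs `coefficient_expansion`, splits the even/odd outputs into Regev and
/// GSW inputs, reorients the Regev part for the first-dimension multiply,
/// and converts the GSW inputs into folding ciphertexts with `regev_to_gsw`.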
pub fn expand_query<'a>(
    params: &'a Params,
    public_params: &PublicParameters<'a>,
    query: &Query<'a>,
) -> (AlignedMemory64, Vec<PolyMatrixNTT<'a>>) {
    let dim0 = 1 << params.db_dim_1;
    let further_dims = params.db_dim_2;

    let mut v_reg_reoriented;
    let mut v_folding;

    let num_bits_to_gen = params.t_gsw * further_dims + dim0;
    let g = log2_ceil_usize(num_bits_to_gen);
    let right_expanded = params.t_gsw * further_dims;
    let stop_round = log2_ceil_usize(right_expanded);

    let mut v = Vec::new();
    for _ in 0..(1 << g) {
        v.push(PolyMatrixNTT::zero(params, 2, 1));
    }
    v[0].copy_into(&query.ct.as_ref().unwrap().ntt(), 0, 0);

    let v_conversion = &public_params.v_conversion.as_ref().unwrap()[0];
    let v_w_left = public_params.v_expansion_left.as_ref().unwrap();
    let v_w_right = public_params.v_expansion_right.as_ref().unwrap();
    let v_neg1 = params.get_v_neg1();

    coefficient_expansion(
        &mut v,
        g,
        stop_round,
        params,
        &v_w_left,
        &v_w_right,
        &v_neg1,
        params.t_gsw * params.db_dim_2,
    );

    let mut v_reg_inp = Vec::with_capacity(dim0);
    for i in 0..dim0 {
        v_reg_inp.push(v[2 * i].clone());
    }
    let mut v_gsw_inp = Vec::with_capacity(right_expanded);
    for i in 0..right_expanded {
        v_gsw_inp.push(v[2 * i + 1].clone());
    }

    let v_reg_sz = dim0 * 2 * params.poly_len;
    v_reg_reoriented = AlignedMemory64::new(v_reg_sz);
    reorient_reg_ciphertexts(params, v_reg_reoriented.as_mut_slice(), &v_reg_inp);

    v_folding = Vec::new();
    for _ in 0..params.db_dim_2 {
        v_folding.push(PolyMatrixNTT::zero(params, 2, 2 * params.t_gsw));
    }

    regev_to_gsw(&mut v_folding, &v_gsw_inp, &v_conversion, params, 1, 0);

    (v_reg_reoriented, v_folding)
}
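
/// Top-level server routine: obtains the expanded query (or reads it directly
/// when query expansion is disabled), then for each database instance and
/// plaintext matrix entry runs the first-dimension multiply, folds over the
/// remaining dimensions, packs the results, and encodes the response bytes.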
pub fn process_query(
    params: &Params,
    public_params: &PublicParameters,
    query: &Query,
    db: &[u64],
) -> Vec<u8> {
    let dim0 = 1 << params.db_dim_1;
    let num_per = 1 << params.db_dim_2;
    let db_slice_sz = dim0 * num_per * params.poly_len;

    let v_packing = public_params.v_packing.as_ref();

    let mut v_reg_reoriented;
    let v_folding;
    if params.expand_queries {
        (v_reg_reoriented, v_folding) = expand_query(params, public_params, query);
    } else {
        v_reg_reoriented = AlignedMemory64::new(query.v_buf.as_ref().unwrap().len());
        v_reg_reoriented
            .as_mut_slice()
            .copy_from_slice(query.v_buf.as_ref().unwrap());

        v_folding = query
            .v_ct
            .as_ref()
            .unwrap()
            .clone()
            .iter()
            .map(|x| x.ntt())
            .collect();
    }
    let v_folding_neg = get_v_folding_neg(params, &v_folding);

    let mut intermediate = Vec::with_capacity(num_per);
    let mut intermediate_raw = Vec::with_capacity(num_per);
    for _ in 0..num_per {
        intermediate.push(PolyMatrixNTT::zero(params, 2, 1));
        intermediate_raw.push(PolyMatrixRaw::zero(params, 2, 1));
    }

    let mut v_packed_ct = Vec::new();
    for instance in 0..params.instances {
        let mut v_ct = Vec::new();

        for trial in 0..(params.n * params.n) {
            let idx = (instance * (params.n * params.n) + trial) * db_slice_sz;
            let cur_db = &db[idx..(idx + db_slice_sz)];

            multiply_reg_by_database(
                &mut intermediate,
                cur_db,
                v_reg_reoriented.as_slice(),
                params,
                dim0,
                num_per,
            );

            for i in 0..intermediate.len() {
                from_ntt(&mut intermediate_raw[i], &intermediate[i]);
            }

            fold_ciphertexts(params, &mut intermediate_raw, &v_folding, &v_folding_neg);

            v_ct.push(intermediate_raw[0].clone());
        }

        let packed_ct = pack(params, &v_ct, &v_packing);

        v_packed_ct.push(packed_ct.raw());
    }

    encode(params, &v_packed_ct)
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::client::*;
    use rand::{prelude::SmallRng, Rng};

    fn get_params() -> Params {
        let mut params = get_expansion_testing_params();
        params.db_dim_1 = 6;
        params.db_dim_2 = 2;
        params.t_exp_right = 8;
        params
    }
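
    // Test helpers: dec_reg decrypts a Regev ciphertext and rounds its constant
    // coefficient to a bit; dec_gsw probes a coefficient of a decrypted GSW
    // ciphertext that should be large exactly when the encrypted bit is 1.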
    fn dec_reg<'a>(
        params: &'a Params,
        ct: &PolyMatrixNTT<'a>,
        client: &mut Client<'a, SmallRng>,
        scale_k: u64,
    ) -> u64 {
        let dec = client.decrypt_matrix_reg(ct).raw();
        let mut val = dec.data[0] as i64;
        if val >= (params.modulus / 2) as i64 {
            val -= params.modulus as i64;
        }
        let val_rounded = f64::round(val as f64 / scale_k as f64) as i64;
        if val_rounded == 0 {
            0
        } else {
            1
        }
    }

    fn dec_gsw<'a>(
        params: &'a Params,
        ct: &PolyMatrixNTT<'a>,
        client: &mut Client<'a, SmallRng>,
    ) -> u64 {
        let dec = client.decrypt_matrix_reg(ct).raw();
        let idx = 2 * (params.t_gsw - 1) * params.poly_len + params.poly_len; // this offset should encode a large value
        let mut val = dec.data[idx] as i64;
        if val >= (params.modulus / 2) as i64 {
            val -= params.modulus as i64;
        }
        if i64::abs(val) < (1i64 << 10) {
            0
        } else {
            1
        }
    }
    #[test]
    fn coefficient_expansion_is_correct() {
        let params = get_params();
        let v_neg1 = params.get_v_neg1();
        let mut seeded_rng = get_seeded_rng();
        let mut client = Client::init(&params, &mut seeded_rng);
        let public_params = client.generate_keys();

        let mut v = Vec::new();
        for _ in 0..(1 << (params.db_dim_1 + 1)) {
            v.push(PolyMatrixNTT::zero(&params, 2, 1));
        }

        let target = 7;
        let scale_k = params.modulus / params.pt_modulus;
        let mut sigma = PolyMatrixRaw::zero(&params, 1, 1);
        sigma.data[target] = scale_k;
        v[0] = client.encrypt_matrix_reg(&sigma.ntt());
        let test_ct = client.encrypt_matrix_reg(&sigma.ntt());

        let v_w_left = public_params.v_expansion_left.unwrap();
        let v_w_right = public_params.v_expansion_right.unwrap();
        coefficient_expansion(
            &mut v,
            client.g,
            client.stop_round,
            &params,
            &v_w_left,
            &v_w_right,
            &v_neg1,
            params.t_gsw * params.db_dim_2,
        );

        assert_eq!(dec_reg(&params, &test_ct, &mut client, scale_k), 0);

        for i in 0..v.len() {
            if i == target {
                assert_eq!(dec_reg(&params, &v[i], &mut client, scale_k), 1);
            } else {
                assert_eq!(dec_reg(&params, &v[i], &mut client, scale_k), 0);
            }
        }
    }
    #[test]
    fn regev_to_gsw_is_correct() {
        let mut params = get_params();
        params.db_dim_2 = 1;
        let mut seeded_rng = get_seeded_rng();
        let mut client = Client::init(&params, &mut seeded_rng);
        let public_params = client.generate_keys();

        let mut enc_constant = |val| {
            let mut sigma = PolyMatrixRaw::zero(&params, 1, 1);
            sigma.data[0] = val;
            client.encrypt_matrix_reg(&sigma.ntt())
        };

        let v = &public_params.v_conversion.unwrap()[0];

        let bits_per = get_bits_per(&params, params.t_gsw);
        let mut v_inp_1 = Vec::new();
        let mut v_inp_0 = Vec::new();
        for i in 0..params.t_gsw {
            let val = 1u64 << (bits_per * i);
            v_inp_1.push(enc_constant(val));
            v_inp_0.push(enc_constant(0));
        }

        let mut v_gsw = Vec::new();
        v_gsw.push(PolyMatrixNTT::zero(&params, 2, 2 * params.t_gsw));

        regev_to_gsw(&mut v_gsw, &v_inp_1, v, &params, 1, 0);
        assert_eq!(dec_gsw(&params, &v_gsw[0], &mut client), 1);

        regev_to_gsw(&mut v_gsw, &v_inp_0, v, &params, 1, 0);
        assert_eq!(dec_gsw(&params, &v_gsw[0], &mut client), 0);
    }
    #[test]
    fn multiply_reg_by_database_is_correct() {
        let params = get_params();
        let mut seeded_rng = get_seeded_rng();

        let dim0 = 1 << params.db_dim_1;
        let num_per = 1 << params.db_dim_2;
        let scale_k = params.modulus / params.pt_modulus;

        let target_idx = seeded_rng.gen::<usize>() % (dim0 * num_per);
        let target_idx_dim0 = target_idx / num_per;
        let target_idx_num_per = target_idx % num_per;

        let mut client = Client::init(&params, &mut seeded_rng);
        _ = client.generate_keys();

        let (corr_item, db) = generate_random_db_and_get_item(&params, target_idx);

        let mut v_reg = Vec::new();
        for i in 0..dim0 {
            let val = if i == target_idx_dim0 { scale_k } else { 0 };
            let sigma = PolyMatrixRaw::single_value(&params, val).ntt();
            v_reg.push(client.encrypt_matrix_reg(&sigma));
        }

        let v_reg_sz = dim0 * 2 * params.poly_len;
        let mut v_reg_reoriented = AlignedMemory64::new(v_reg_sz);
        reorient_reg_ciphertexts(&params, v_reg_reoriented.as_mut_slice(), &v_reg);

        let mut out = Vec::with_capacity(num_per);
        for _ in 0..dim0 {
            out.push(PolyMatrixNTT::zero(&params, 2, 1));
        }
        multiply_reg_by_database(
            &mut out,
            db.as_slice(),
            v_reg_reoriented.as_slice(),
            &params,
            dim0,
            num_per,
        );

        // decrypt
        let dec = client.decrypt_matrix_reg(&out[target_idx_num_per]).raw();
        let mut dec_rescaled = PolyMatrixRaw::zero(&params, 1, 1);
        for z in 0..params.poly_len {
            dec_rescaled.data[z] = rescale(dec.data[z], params.modulus, params.pt_modulus);
        }

        for z in 0..params.poly_len {
            // println!("{:?} {:?}", dec_rescaled.data[z], corr_item.data[z]);
            assert_eq!(dec_rescaled.data[z], corr_item.data[z]);
        }
    }
    #[test]
    fn fold_ciphertexts_is_correct() {
        let params = get_params();
        let mut seeded_rng = get_seeded_rng();

        let dim0 = 1 << params.db_dim_1;
        let num_per = 1 << params.db_dim_2;
        let scale_k = params.modulus / params.pt_modulus;

        let target_idx = seeded_rng.gen::<usize>() % (dim0 * num_per);
        let target_idx_num_per = target_idx % num_per;

        let mut client = Client::init(&params, &mut seeded_rng);
        _ = client.generate_keys();

        let mut v_reg = Vec::new();
        for i in 0..num_per {
            let val = if i == target_idx_num_per { scale_k } else { 0 };
            let sigma = PolyMatrixRaw::single_value(&params, val).ntt();
            v_reg.push(client.encrypt_matrix_reg(&sigma));
        }

        let mut v_reg_raw = Vec::new();
        for i in 0..num_per {
            v_reg_raw.push(v_reg[i].raw());
        }

        let bits_per = get_bits_per(&params, params.t_gsw);
        let mut v_folding = Vec::new();
        for i in 0..params.db_dim_2 {
            let bit = ((target_idx_num_per as u64) & (1 << (i as u64))) >> (i as u64);
            let mut ct_gsw = PolyMatrixNTT::zero(&params, 2, 2 * params.t_gsw);

            for j in 0..params.t_gsw {
                let value = (1u64 << (bits_per * j)) * bit;
                let sigma = PolyMatrixRaw::single_value(&params, value);
                let sigma_ntt = to_ntt_alloc(&sigma);
                let ct = client.encrypt_matrix_reg(&sigma_ntt);
                ct_gsw.copy_into(&ct, 0, 2 * j + 1);
                let prod = &to_ntt_alloc(&client.sk_reg) * &sigma_ntt;
                let ct = &client.encrypt_matrix_reg(&prod);
                ct_gsw.copy_into(&ct, 0, 2 * j);
            }

            v_folding.push(ct_gsw);
        }

        let gadget_ntt = build_gadget(&params, 2, 2 * params.t_gsw).ntt();
        let mut v_folding_neg = Vec::new();
        let mut ct_gsw_inv = PolyMatrixRaw::zero(&params, 2, 2 * params.t_gsw);
        for i in 0..params.db_dim_2 {
            invert(&mut ct_gsw_inv, &v_folding[i].raw());
            let mut ct_gsw_neg = PolyMatrixNTT::zero(&params, 2, 2 * params.t_gsw);
            add(&mut ct_gsw_neg, &gadget_ntt, &ct_gsw_inv.ntt());
            v_folding_neg.push(ct_gsw_neg);
        }

        fold_ciphertexts(&params, &mut v_reg_raw, &v_folding, &v_folding_neg);

        // decrypt
        assert_eq!(dec_reg(&params, &v_reg_raw[0].ntt(), &mut client, scale_k), 1);
    }
    fn full_protocol_is_correct_for_params(params: &Params) {
        let mut seeded_rng = get_seeded_rng();

        let target_idx = seeded_rng.gen::<usize>() % (params.db_dim_1 + params.db_dim_2);

        let mut client = Client::init(params, &mut seeded_rng);
        let public_params = client.generate_keys();
        let query = client.generate_query(target_idx);

        let (corr_item, db) = generate_random_db_and_get_item(params, target_idx);

        let response = process_query(params, &public_params, &query, db.as_slice());

        let result = client.decode_response(response.as_slice());

        let p_bits = log2_ceil(params.pt_modulus) as usize;
        let corr_result = corr_item.to_vec(p_bits, params.poly_len);

        for z in 0..corr_result.len() {
            assert_eq!(result[z], corr_result[z]);
        }
    }

    #[test]
    fn full_protocol_is_correct() {
        full_protocol_is_correct_for_params(&get_params());
    }
    // #[test]
    // fn full_protocol_is_correct_20_256() {
    //     full_protocol_is_correct_for_params(&params_from_json(&CFG_20_256.replace("'", "\"")));
    // }

    // #[test]
    // fn full_protocol_is_correct_16_100000() {
    //     full_protocol_is_correct_for_params(&params_from_json(&CFG_16_100000.replace("'", "\"")));
    // }
}