// dpfgen.h
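// NOTE (assumption): this header expects the including translation unit to have
// already pulled in the project's DPF/PRG definitions (dpf::PRG, AES_KEY, leaf_t,
// node_t, socket_t, get_lsb/set_lsb/clear_lsb, xor_if) as well as <boost/asio.hpp>,
// the SSE intrinsics header, and the standard-library headers used below.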
// Transcript of one generated DPF: the root seed plus the per-level correction
// words and advice bits (up to 32 levels).
struct dpfP2
{
    __m128i root;
    __m128i CW[32];
    uint8_t cwt_L[32];
    uint8_t cwt_R[32];
};
// Fills each thread's 64-entry target-share buffer with uniformly random bits.
void generate_random_targets(uint8_t ** target_share_read, size_t n_threads, bool party, size_t expo)
{
    for (size_t i = 0; i < n_threads; i++)
    {
        target_share_read[i] = new uint8_t[64];
    }

    for (size_t j = 0; j < 64; ++j)
    {
        for (size_t i = 0; i < n_threads; ++i)
        {
            uint8_t random_value;
            arc4random_buf(&random_value, sizeof(uint8_t));
            target_share_read[i][j] = random_value % 2; //rand();
            //if(party) target_share_read[i][expo-2] = 1;
        }
    }
}
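// Message formats used when the two evaluators jointly derive a layer's
// correction word: cw_construction carries the correlated randomness
// (rand_b, gamma_b, bit_b) used for the oblivious swap, BlindsCW carries each
// party's blinded inputs, and reconstructioncw carries the exchanged CW shares
// and advice bits.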
struct cw_construction
{
    __m128i rand_b, gamma_b;
    uint8_t bit_b;
};

struct BlindsCW
{
    __m128i blinded_message;
    uint8_t blinded_bit;
};

struct reconstructioncw
{
    __m128i cw;
    uint8_t cwbit[2];
};
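// Jointly computes the correction word for one layer of the DPF. Each party
// holds XOR-shares L and R of the left/right child seeds and a share `bit` of
// this layer's target bit. Using the correlated randomness in computecw_array,
// the parties exchange blinded inputs and perform an oblivious swap so that the
// reconstructed CW equals the parties' combined L when the reconstructed target
// bit is 1 and their combined R otherwise (this is what the DEBUG block at the
// end asserts); the advice bits cwt_L and cwt_R are reconstructed alongside it.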
void compute_CW(cw_construction computecw_array, size_t ind, size_t layer, tcp::socket& sout, __m128i L, __m128i R, uint8_t bit, __m128i & CW, uint8_t &cwt_L, uint8_t &cwt_R)
{
    reconstructioncw cwsent, cwrecv;

    __m128i rand_b  = computecw_array.rand_b;  // computecw.rand_b;
    __m128i gamma_b = computecw_array.gamma_b; // computecw.gamma_b;
    uint8_t bit_b   = computecw_array.bit_b;   // computecw.bit_b;

    uint8_t blinded_bit, blinded_bit_read;
    blinded_bit = bit ^ bit_b;

    __m128i blinded_L = L ^ R ^ rand_b;
    __m128i blinded_L_read;

    BlindsCW blinds_sent, blinds_recv;
    blinds_sent.blinded_bit = blinded_bit;
    blinds_sent.blinded_message = blinded_L;

    // exchange blinded shares for OSWAP.
    boost::asio::write(sout, boost::asio::buffer(&blinds_sent, sizeof(BlindsCW)));
    boost::asio::read(sout, boost::asio::buffer(&blinds_recv, sizeof(BlindsCW)));

    blinded_bit_read = blinds_recv.blinded_bit;
    blinded_L_read = blinds_recv.blinded_message;

    cwsent.cw = R ^ gamma_b;
    if (bit)
    {
        cwsent.cw ^= (L ^ R ^ blinded_L_read);
    }
    if (blinded_bit_read)
    {
        cwsent.cw ^= rand_b;
    }

    cwsent.cwbit[0] = get_lsb(L) ^ bit; // advice[0];
    cwsent.cwbit[1] = get_lsb(R) ^ bit; // advice[1];

    // exchange CW shares and advice-bit shares, then reconstruct them locally
    boost::asio::write(sout, boost::asio::buffer(&cwsent, sizeof(cwsent)));
    boost::asio::read(sout, boost::asio::buffer(&cwrecv, sizeof(cwrecv)));

    cwrecv.cw ^= cwsent.cw;
    cwrecv.cwbit[0] ^= (cwsent.cwbit[0] ^ 1);
    cwrecv.cwbit[1] ^= (cwsent.cwbit[1]);

    cwt_L = cwrecv.cwbit[0];
    cwt_R = cwrecv.cwbit[1];
    CW = cwrecv.cw;

    // The following asserts the correctness of ComputeCW
#ifdef DEBUG
    uint8_t bit_reconstruction;
    boost::asio::write(sout, boost::asio::buffer(&bit, sizeof(bit)));
    boost::asio::read(sout, boost::asio::buffer(&bit_reconstruction, sizeof(bit_reconstruction)));
    bit_reconstruction = bit ^ bit_reconstruction;

    __m128i L_reconstruction;
    boost::asio::write(sout, boost::asio::buffer(&L, sizeof(L)));
    boost::asio::read(sout, boost::asio::buffer(&L_reconstruction, sizeof(L_reconstruction)));
    L_reconstruction = L ^ L_reconstruction;

    __m128i R_reconstruction;
    boost::asio::write(sout, boost::asio::buffer(&R, sizeof(R)));
    boost::asio::read(sout, boost::asio::buffer(&R_reconstruction, sizeof(R_reconstruction)));
    R_reconstruction = R ^ R_reconstruction;

    __m128i CW_debug;
    if (bit_reconstruction != 0)
    {
        CW_debug = L_reconstruction;
    }
    else
    {
        CW_debug = R_reconstruction;
    }

    assert(CW_debug[0] == CW[0]);
    assert(CW_debug[1] == CW[1]);
#endif
}
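// Expands one node seed into its two children with the PRG, clearing the
// seed's two low-order bits before expansion.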
template<typename node_t, typename prgkey_t>
static inline void traverse(const prgkey_t & prgkey, const node_t & seed, node_t s[2])
{
    dpf::PRG(prgkey, clear_lsb(seed, 0b11), s, 2);
} // dpf::expand
/**
 * @brief Generates one DPF key for this party: picks a random root seed,
 *        jointly computes each layer's correction word with the other
 *        evaluator via compute_CW, expands the tree into output, and derives
 *        the final correction word.
 *
 * @param reading whether the DPF is generated for a read access (unused here)
 * @param db_nitems number of items in the database
 * @param prgkey The PRG key
 * @param target_share this party's share of the target index, one bit per level
 * @param socketsPb Array of sockets to write to Pb
 * @param socketsP2 Array of sockets to write to P2
 * @param from start of the evaluation interval
 * @param to end of the evaluation interval
 * @param output buffer that receives the expanded leaves
 * @param _t buffer that receives the advice bits
 * @param final_correction_word the final correction word is written into this
 * @param computecw_array correlated randomness consumed by compute_CW
 * @param dpf_instance the root seed and correction words are recorded here
 * @param party Party
 * @param socket_no index of the socket in socketsPb to use
 * @param ind index of this DPF instance
 */
inline void create_dpfs (bool reading, size_t db_nitems, const AES_KEY& prgkey, uint8_t target_share[64], std::vector<socket_t>& socketsPb, std::vector<socket_t>& socketsP2,
                         const size_t from, const size_t to, __m128i * output, int8_t * _t, __m128i& final_correction_word,
                         cw_construction computecw_array, dpfP2 * dpf_instance,
                         bool party, size_t socket_no, size_t ind = 0)
{
    const size_t bits_per_leaf = std::is_same<leaf_t, bool>::value ? 1 : sizeof(leaf_t) * CHAR_BIT;
    const bool is_packed = (sizeof(leaf_t) < sizeof(node_t));
    const size_t nodes_per_leaf = is_packed ? 1 : std::ceil(static_cast<double>(bits_per_leaf) / (sizeof(node_t) * CHAR_BIT));
    const size_t depth = std::ceil(std::log2(db_nitems));
    const size_t nbits = std::ceil(std::log2(db_nitems));
    const size_t nodes_in_interval = db_nitems - 1;

    __m128i root;
    arc4random_buf(&root, sizeof(root));
    root = set_lsb(root, party);

    const size_t from_node = std::floor(static_cast<double>(from) / nodes_per_leaf);

    __m128i * s[2] = {
        reinterpret_cast<__m128i *>(output) + nodes_in_interval * (nodes_per_leaf - 1),
        s[0] + nodes_in_interval / 2
    };
    int8_t * t[2] = { _t, _t + nodes_in_interval / 2 };

    int curlayer = depth % 2;
    s[curlayer][0] = root;
    t[curlayer][0] = get_lsb(root, 0b01);

    __m128i * CW = (__m128i *) std::aligned_alloc(sizeof(__m256i), depth * sizeof(__m128i));

#ifdef VERBOSE
    if (ind == 0)
    {
        std::cout << "root = " << root[0] << " " << root[1] << std::endl;
        std::cout << "t[curlayer][0] " << (int) t[curlayer][0] << std::endl;
    }
#endif

    dpf_instance[ind].root = root;

    // cw_construction computecw_array[15];
    // boost::asio::read(socketsP2[0], boost::asio::buffer(&computecw_array, 15 * sizeof(computecw_array[0])));
    for (size_t layer = 0; layer < depth; ++layer)
    {
#ifdef VERBOSE
        printf("layer = %zu\n", layer);
#endif

        curlayer = 1 - curlayer;

        size_t i = 0, j = 0;
        auto nextbit = (from_node >> (nbits - layer - 1)) & 1;
        size_t nodes_in_prev_layer = std::ceil(static_cast<double>(nodes_in_interval) / (1ULL << (depth - layer)));
        size_t nodes_in_cur_layer  = std::ceil(static_cast<double>(nodes_in_interval) / (1ULL << (depth - layer - 1)));

        __m128i L = _mm_setzero_si128();
        __m128i R = _mm_setzero_si128();

        for (i = nextbit, j = nextbit; j < nodes_in_prev_layer - 1; ++j, i += 2)
        {
            traverse(prgkey, s[1-curlayer][j], &s[curlayer][i]);
            L ^= s[curlayer][i];
            R ^= s[curlayer][i+1];
        }
        if (nodes_in_prev_layer > j)
        {
            if (i < nodes_in_cur_layer - 1)
            {
                traverse(prgkey, s[1-curlayer][j], &s[curlayer][i]);
                L ^= s[curlayer][i];
                R ^= s[curlayer][i+1];
            }
        }

        uint8_t cwt_L, cwt_R;

        // Computes the correction word using OSWAP
        compute_CW(computecw_array, ind, layer, socketsPb[socket_no], L, R, target_share[layer], CW[layer], cwt_L, cwt_R);

#ifdef DEBUG
        if (ind == 0)
        {
            std::cout << "CW reconstruction = " << CW[layer][0] << " " << CW[layer][1] << std::endl;
            std::cout << " cwt_L = " << (int) cwt_L << std::endl;
            std::cout << " cwt_R = " << (int) cwt_R << std::endl;
        }
#endif

        dpf_instance[ind].CW[layer]    = CW[layer];
        dpf_instance[ind].cwt_L[layer] = cwt_L;
        dpf_instance[ind].cwt_R[layer] = cwt_R;

        // Apply the correction word and advice bits to every node in this layer.
        for (size_t j = 0; j < nodes_in_prev_layer; ++j)
        {
            t[curlayer][2*j]     = get_lsb(s[curlayer][2*j]) ^ (cwt_L & t[1-curlayer][j]);
            s[curlayer][2*j]     = clear_lsb(xor_if(s[curlayer][2*j], CW[layer], !t[1-curlayer][j]), 0b01);
            t[curlayer][(2*j)+1] = get_lsb(s[curlayer][(2*j)+1]) ^ (cwt_R & t[1-curlayer][j]);
            s[curlayer][(2*j)+1] = clear_lsb(xor_if(s[curlayer][(2*j)+1], CW[layer], !t[1-curlayer][j]), 0b01);
        }
    }
    // Sum the output leaves to form this party's share of the final correction word.
    __m128i Gamma = _mm_setzero_si128();
    for (size_t i = 0; i < to + 1; ++i)
    {
        Gamma[0] += output[i][0]; // the correction word for duoram update
        Gamma[1] += output[i][1]; // the correction word for share conversion
    }
    if (party)
    {
        Gamma[0] = -Gamma[0]; // the correction word for duoram update
        Gamma[1] = -Gamma[1]; // the correction word for share conversion
    }

#ifdef DEBUG
    boost::asio::write(socketsPb[socket_no + 3], boost::asio::buffer(&Gamma, sizeof(Gamma)));
    boost::asio::read(socketsPb[socket_no + 3], boost::asio::buffer(&final_correction_word, sizeof(final_correction_word)));
#endif

    final_correction_word = Gamma;
} // create_dpfs
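/**
 * @brief Locally re-expands a DPF from the transcript in dpfinstance (root
 *        seed, correction words, and advice bits); no communication is needed.
 *
 * @param db_nitems number of items in the database
 * @param dpfinstance the dpfP2 record produced during DPF generation
 * @param prgkey The PRG key
 * @param from start of the evaluation interval
 * @param to end of the evaluation interval (unused here)
 * @param output buffer that receives the expanded leaves
 * @param _t buffer that receives the advice bits
 * @param party Party
 * @param ind index of this DPF instance
 */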
inline void evaluate_dpfs(size_t db_nitems, dpfP2 dpfinstance, const AES_KEY& prgkey, const size_t from, const size_t to,
                          __m128i * output, int8_t * _t, bool party, size_t ind)
{
    const size_t bits_per_leaf = std::is_same<leaf_t, bool>::value ? 1 : sizeof(leaf_t) * CHAR_BIT;
    const bool is_packed = (sizeof(leaf_t) < sizeof(node_t));
    const size_t nodes_per_leaf = is_packed ? 1 : std::ceil(static_cast<double>(bits_per_leaf) / (sizeof(node_t) * CHAR_BIT));
    const size_t depth = std::ceil(std::log2(db_nitems));
    const size_t nbits = std::ceil(std::log2(db_nitems));
    const size_t nodes_in_interval = db_nitems - 1;

    __m128i root = dpfinstance.root;

    __m128i * CW    = (__m128i *) std::aligned_alloc(sizeof(__m256i), depth * sizeof(__m128i));
    uint8_t * cwt_L = (uint8_t *) std::aligned_alloc(sizeof(__m256i), depth * sizeof(uint8_t));
    uint8_t * cwt_R = (uint8_t *) std::aligned_alloc(sizeof(__m256i), depth * sizeof(uint8_t));

    for (size_t j = 0; j < depth; ++j)
    {
        CW[j]    = dpfinstance.CW[j];
        cwt_L[j] = dpfinstance.cwt_L[j];
        cwt_R[j] = dpfinstance.cwt_R[j];
    }

    root = set_lsb(root, party);

    const size_t from_node = std::floor(static_cast<double>(from) / nodes_per_leaf);

    __m128i * s[2] = {
        reinterpret_cast<__m128i *>(output) + nodes_in_interval * (nodes_per_leaf - 1),
        s[0] + nodes_in_interval / 2
    };
    int8_t * t[2] = { _t, _t + nodes_in_interval / 2 };

    int curlayer = depth % 2;
    s[curlayer][0] = root;
    t[curlayer][0] = get_lsb(root, 0b01);

#ifdef VERBOSE
    if (ind == 0)
    {
        std::cout << "root = " << root[0] << " " << root[1] << std::endl;
        std::cout << "t[curlayer][0] " << (int) t[curlayer][0] << std::endl;
    }
#endif
    for (size_t layer = 0; layer < depth; ++layer)
    {
#ifdef VERBOSE
        printf("layer = %zu\n", layer);
#endif

        curlayer = 1 - curlayer;

        size_t i = 0, j = 0;
        auto nextbit = (from_node >> (nbits - layer - 1)) & 1;
        size_t nodes_in_prev_layer = std::ceil(static_cast<double>(nodes_in_interval) / (1ULL << (depth - layer)));
        size_t nodes_in_cur_layer  = std::ceil(static_cast<double>(nodes_in_interval) / (1ULL << (depth - layer - 1)));

        for (i = nextbit, j = nextbit; j < nodes_in_prev_layer - 1; ++j, i += 2)
        {
            traverse(prgkey, s[1-curlayer][j], &s[curlayer][i]);
        }
        if (nodes_in_prev_layer > j)
        {
            if (i < nodes_in_cur_layer - 1)
            {
                traverse(prgkey, s[1-curlayer][j], &s[curlayer][i]);
            }
        }

#ifdef VERBOSE
        if (ind == 0)
        {
            std::cout << "CW reconstruction = " << CW[layer][0] << " " << CW[layer][1] << std::endl;
            std::cout << " cwt_L = " << (int) cwt_L[layer] << std::endl;
            std::cout << " cwt_R = " << (int) cwt_R[layer] << std::endl;
        }
#endif

        // Apply the stored correction word and advice bits to every node in this layer.
        for (size_t j = 0; j < nodes_in_prev_layer; ++j)
        {
            t[curlayer][2*j]     = get_lsb(s[curlayer][2*j]) ^ (cwt_L[layer] & t[1-curlayer][j]);
            s[curlayer][2*j]     = clear_lsb(xor_if(s[curlayer][2*j], CW[layer], !t[1-curlayer][j]), 0b11);
            t[curlayer][(2*j)+1] = get_lsb(s[curlayer][(2*j)+1]) ^ (cwt_R[layer] & t[1-curlayer][j]);
            s[curlayer][(2*j)+1] = clear_lsb(xor_if(s[curlayer][(2*j)+1], CW[layer], !t[1-curlayer][j]), 0b11);
        }
    }
} // evaluate_dpfs