rdpf.tcc

// Templated method implementations for rdpf.hpp

// Create a StreamEval object that will start its output at index start.
// It will wrap around to 0 when it hits 2^depth. If use_expansion
// is true, then if the DPF has been expanded, just output values
// from that. If use_expansion=false or if the DPF has not been
// expanded, compute the values on the fly.
template <typename T>
StreamEval<T>::StreamEval(const T &rdpf, address_t start,
    size_t &op_counter, bool use_expansion) : rdpf(rdpf),
    op_counter(op_counter), use_expansion(use_expansion)
{
    depth = rdpf.depth();
    // Prevent overflow of 1<<depth
    if (depth < ADDRESS_MAX_BITS) {
        indexmask = (address_t(1)<<depth)-1;
    } else {
        indexmask = ~0;
    }
    // Record that we haven't actually output the leaf for index start
    // itself yet
    nextindex = start;
    if (use_expansion && rdpf.has_expansion()) {
        // We just need to keep the counter, not compute anything
        return;
    }
    path.resize(depth);
    pathindex = start;
    path[0] = rdpf.get_seed();
    for (nbits_t i=1; i<depth; ++i) {
        bool dir = !!(pathindex & (address_t(1)<<(depth-i)));
        path[i] = rdpf.descend(path[i-1], i-1, dir, op_counter);
    }
}
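
// Illustrative sketch (the name example_scan_leaves and the callback
// parameter are assumptions for exposition only): apply a callback to
// every leaf of a DPF share, starting at index start and wrapping
// around once.  Assumes depth < ADDRESS_MAX_BITS so that 1<<depth
// below does not overflow.
template <typename T, typename CB>
void example_scan_leaves(const T &rdpf, address_t start,
    size_t &op_counter, CB callback)
{
    StreamEval<T> ev(rdpf, start, op_counter, true);
    address_t num_leaves = address_t(1) << rdpf.depth();
    for (address_t i = 0; i < num_leaves; ++i) {
        // The first call to next() yields the leaf at index start;
        // each later call yields the next index, wrapping at 2^depth
        typename T::node leaf = ev.next();
        callback((start + i) & (num_leaves - 1), leaf);
    }
}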

template <typename T>
typename T::node StreamEval<T>::next()
{
    if (use_expansion && rdpf.has_expansion()) {
        // Just use the precomputed values
        typename T::node leaf = rdpf.get_expansion(nextindex);
        nextindex = (nextindex + 1) & indexmask;
        return leaf;
    }
    // Invariant: in the first call to next(), nextindex = pathindex.
    // Otherwise, nextindex = pathindex+1.
    // Get the XOR of nextindex and pathindex, and strip the low bit.
    // If nextindex and pathindex are equal, or pathindex is even
    // and nextindex is the consecutive odd number, index_xor will be 0,
    // indicating that we don't have to update the path, but just
    // compute the appropriate leaf given by the low bit of nextindex.
    //
    // Otherwise, say for example pathindex is 010010111 and nextindex
    // is 010011000. Then their XOR is 000001111, and stripping the low
    // bit yields 000001110, so how_many_1_bits will be 3.
    // That indicates (typically) that path[depth-3] was a left child,
    // and now we need to change it to a right child by descending right
    // from path[depth-4], and then filling the path after that with
    // left children.
    //
    // When we wrap around, however, index_xor will be 111111110 (after
    // we strip the low bit), and how_many_1_bits will be depth-1, but
    // the new top child (of the root seed) we have to compute will be a
    // left, not a right, child.
    uint64_t index_xor = (nextindex ^ pathindex) & ~1;
    nbits_t how_many_1_bits = __builtin_popcountll(index_xor);
    if (how_many_1_bits > 0) {
        // This will almost always be 1, unless we've just wrapped
        // around from the right subtree back to the left, in which case
        // it will be 0.
        bool top_changed_bit =
            !!(nextindex & (address_t(1) << how_many_1_bits));
        path[depth-how_many_1_bits] =
            rdpf.descend(path[depth-how_many_1_bits-1],
                depth-how_many_1_bits-1, top_changed_bit, op_counter);
        for (nbits_t i = depth-how_many_1_bits; i < depth-1; ++i) {
            path[i+1] = rdpf.descend(path[i], i, 0, op_counter);
        }
    }
    typename T::node leaf = rdpf.descend(path[depth-1], depth-1,
        nextindex & 1, op_counter);
    pathindex = nextindex;
    nextindex = (nextindex + 1) & indexmask;
    return leaf;
}
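
// Note: for a sequential scan, index_xor is 0 on every other call, so
// the amortized work is a constant number of descend() calls per leaf
// (roughly two, counting the leaf step itself); a single call just
// after a wrap-around can take up to depth descends.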

// I/O for RDPFs
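//
// Serialized format (as read and written below):
//   - the seed node
//   - one byte encoding the depth: its high bit is whichhalf, and 64
//     is added to the depth field when an expansion follows
//   - depth correction words (cw)
//   - if the expansion flag was set, the 2^depth expanded leaf nodes
//   - BITBYTES(depth) bytes holding the cfbits (one bit per level)
//   - unit_sum_inverse, scaled_sum, and scaled_xor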

template <typename T>
T& operator>>(T &is, RDPF &rdpf)
{
    is.read((char *)&rdpf.seed, sizeof(rdpf.seed));
    uint8_t depth;
    // The whichhalf bit is the high bit of depth
    is.read((char *)&depth, sizeof(depth));
    rdpf.whichhalf = !!(depth & 0x80);
    depth &= 0x7f;
    bool read_expanded = false;
    if (depth > 64) {
        read_expanded = true;
        depth -= 64;
    }
    assert(depth <= ADDRESS_MAX_BITS);
    rdpf.cw.clear();
    for (uint8_t i=0; i<depth; ++i) {
        DPFnode cw;
        is.read((char *)&cw, sizeof(cw));
        rdpf.cw.push_back(cw);
    }
    if (read_expanded) {
        rdpf.expansion.resize(size_t(1)<<depth);
        is.read((char *)rdpf.expansion.data(),
            sizeof(rdpf.expansion[0])<<depth);
    }
    value_t cfbits = 0;
    is.read((char *)&cfbits, BITBYTES(depth));
    rdpf.cfbits = cfbits;
    is.read((char *)&rdpf.unit_sum_inverse, sizeof(rdpf.unit_sum_inverse));
    is.read((char *)&rdpf.scaled_sum, sizeof(rdpf.scaled_sum));
    is.read((char *)&rdpf.scaled_xor, sizeof(rdpf.scaled_xor));
    return is;
}

// Write the DPF to the output stream. If expanded=true, then include
// the expansion _if_ the DPF is itself already expanded. You can use
// this to write DPFs to files.
template <typename T>
T& write_maybe_expanded(T &os, const RDPF &rdpf,
    bool expanded = true)
{
    os.write((const char *)&rdpf.seed, sizeof(rdpf.seed));
    uint8_t depth = rdpf.cw.size();
    assert(depth <= ADDRESS_MAX_BITS);
    // The whichhalf bit is the high bit of depth
    // If we're writing an expansion, add 64 to depth as well
    uint8_t whichhalf_and_depth = depth |
        (uint8_t(rdpf.whichhalf)<<7);
    bool write_expansion = false;
    if (expanded && rdpf.expansion.size() == (size_t(1)<<depth)) {
        write_expansion = true;
        whichhalf_and_depth += 64;
    }
    os.write((const char *)&whichhalf_and_depth,
        sizeof(whichhalf_and_depth));
    for (uint8_t i=0; i<depth; ++i) {
        os.write((const char *)&rdpf.cw[i], sizeof(rdpf.cw[i]));
    }
    if (write_expansion) {
        os.write((const char *)rdpf.expansion.data(),
            sizeof(rdpf.expansion[0])<<depth);
    }
    os.write((const char *)&rdpf.cfbits, BITBYTES(depth));
    os.write((const char *)&rdpf.unit_sum_inverse, sizeof(rdpf.unit_sum_inverse));
    os.write((const char *)&rdpf.scaled_sum, sizeof(rdpf.scaled_sum));
    os.write((const char *)&rdpf.scaled_xor, sizeof(rdpf.scaled_xor));
    return os;
}

// The ordinary << version never writes the expansion, since this is
// what we use to send DPFs over the network.
template <typename T>
T& operator<<(T &os, const RDPF &rdpf)
{
    return write_maybe_expanded(os, rdpf, false);
}
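
// Illustrative example (the file name and fstream usage here are
// assumptions for exposition, not part of the original code): to save
// a DPF to disk together with its expansion and read it back later,
// one might write
//
//     std::ofstream out("dpf.sav", std::ios::binary);
//     write_maybe_expanded(out, rdpf, true);   // keep the expansion
//     out.close();
//
//     std::ifstream in("dpf.sav", std::ios::binary);
//     RDPF reread;
//     in >> reread;   // operator>> detects whether an expansion follows
//
// whereas sending the same DPF over the network uses the plain
// operator<< above, which always omits the expansion.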

// I/O for RDPF Triples
// We never write RDPFTriples over the network, so always write
// the DPF expansions if they're available.
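//
// A serialized RDPFTriple is the three component DPFs (each with its
// expansion when available), followed by BITBYTES(depth) bytes each of
// the additive share (as_target.ashare) and the XOR share
// (xs_target.xshare) of the target index.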

template <typename T>
T& operator<<(T &os, const RDPFTriple &rdpftrip)
{
    write_maybe_expanded(os, rdpftrip.dpf[0], true);
    write_maybe_expanded(os, rdpftrip.dpf[1], true);
    write_maybe_expanded(os, rdpftrip.dpf[2], true);
    nbits_t depth = rdpftrip.dpf[0].depth();
    os.write((const char *)&rdpftrip.as_target.ashare, BITBYTES(depth));
    os.write((const char *)&rdpftrip.xs_target.xshare, BITBYTES(depth));
    return os;
}

template <typename T>
T& operator>>(T &is, RDPFTriple &rdpftrip)
{
    is >> rdpftrip.dpf[0] >> rdpftrip.dpf[1] >> rdpftrip.dpf[2];
    nbits_t depth = rdpftrip.dpf[0].depth();
    rdpftrip.as_target.ashare = 0;
    is.read((char *)&rdpftrip.as_target.ashare, BITBYTES(depth));
    rdpftrip.xs_target.xshare = 0;
    is.read((char *)&rdpftrip.xs_target.xshare, BITBYTES(depth));
    return is;
}

// I/O for RDPF Pairs
// We never write RDPFPairs over the network, so always write
// the DPF expansions if they're available.
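//
// A serialized RDPFPair is just the two component DPFs back to back;
// unlike RDPFTriple, there are no target shares to append.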

template <typename T>
T& operator<<(T &os, const RDPFPair &rdpfpair)
{
    write_maybe_expanded(os, rdpfpair.dpf[0], true);
    write_maybe_expanded(os, rdpfpair.dpf[1], true);
    return os;
}

template <typename T>
T& operator>>(T &is, RDPFPair &rdpfpair)
{
    is >> rdpfpair.dpf[0] >> rdpfpair.dpf[1];
    return is;
}