// Templated method implementations for rdpf.hpp

// Create a StreamEval object that will start its output at index start.
// It will wrap around to 0 when it hits 2^depth.  If use_expansion
// is true, then if the DPF has been expanded, just output values
// from that.  If use_expansion=false or if the DPF has not been
// expanded, compute the values on the fly.  If xor_offset is non-zero,
// then the outputs are actually
// DPF(start XOR xor_offset)
// DPF((start+1) XOR xor_offset)
// DPF((start+2) XOR xor_offset)
// etc.
template <typename T>
StreamEval<T>::StreamEval(const T &rdpf, address_t start,
    address_t xor_offset, size_t &aes_ops,
    bool use_expansion) : rdpf(rdpf), aes_ops(aes_ops),
    use_expansion(use_expansion), counter_xor_offset(xor_offset)
{
    depth = rdpf.depth();
    // Prevent overflow of 1<<depth
    if (depth < ADDRESS_MAX_BITS) {
        indexmask = (address_t(1)<<depth)-1;
    } else {
        indexmask = ~0;
    }
    start &= indexmask;
    counter_xor_offset &= indexmask;
    // Record that we haven't actually output the leaf for index start
    // itself yet
    nextindex = start;
    if (use_expansion && rdpf.has_expansion()) {
        // We just need to keep the counter, not compute anything
        return;
    }
    path.resize(depth);
    pathindex = start;
    path[0] = rdpf.get_seed();
    for (nbits_t i=1; i<depth; ++i) {
        bool dir = !!(pathindex & (address_t(1)<<(depth-i)));
        bool xor_offset_bit =
            !!(counter_xor_offset & (address_t(1)<<(depth-i)));
        path[i] = rdpf.descend(path[i-1], i-1,
            dir ^ xor_offset_bit, aes_ops);
    }
}

template <typename T>
typename T::node StreamEval<T>::next()
{
    if (use_expansion && rdpf.has_expansion()) {
        // Just use the precomputed values
        typename T::node leaf =
            rdpf.get_expansion(nextindex ^ counter_xor_offset);
        nextindex = (nextindex + 1) & indexmask;
        return leaf;
    }
    // Invariant: in the first call to next(), nextindex = pathindex.
    // Otherwise, nextindex = pathindex+1.
    // Get the XOR of nextindex and pathindex, and strip the low bit.
    // If nextindex and pathindex are equal, or pathindex is even
    // and nextindex is the consecutive odd number, index_xor will be 0,
    // indicating that we don't have to update the path, but just
    // compute the appropriate leaf given by the low bit of nextindex.
    //
    // Otherwise, say for example pathindex is 010010111 and nextindex
    // is 010011000.  Then their XOR is 000001111, and stripping the low
    // bit yields 000001110, so how_many_1_bits will be 3.
    // That indicates (typically) that path[depth-3] was a left child,
    // and now we need to change it to a right child by descending right
    // from path[depth-4], and then filling the path after that with
    // left children.
    //
    // When we wrap around, however, index_xor will be 111111110 (after
    // we strip the low bit), and how_many_1_bits will be depth-1, but
    // the new top child (of the root seed) we have to compute will be a
    // left, not a right, child.
    uint64_t index_xor = (nextindex ^ pathindex) & ~1;
    nbits_t how_many_1_bits = __builtin_popcount(index_xor);
    if (how_many_1_bits > 0) {
        // This will almost always be 1, unless we've just wrapped
        // around from the right subtree back to the left, in which case
        // it will be 0.
        bool top_changed_bit =
            !!(nextindex & (address_t(1) << how_many_1_bits));
        bool xor_offset_bit =
            !!(counter_xor_offset & (address_t(1) << how_many_1_bits));
        path[depth-how_many_1_bits] =
            rdpf.descend(path[depth-how_many_1_bits-1],
                depth-how_many_1_bits-1,
                top_changed_bit ^ xor_offset_bit, aes_ops);
        for (nbits_t i = depth-how_many_1_bits; i < depth-1; ++i) {
            bool xor_offset_bit =
                !!(counter_xor_offset & (address_t(1) << (depth-i-1)));
            path[i+1] = rdpf.descend(path[i], i, xor_offset_bit, aes_ops);
        }
    }
    bool xor_offset_bit = counter_xor_offset & 1;
    typename T::node leaf = rdpf.descend(path[depth-1], depth-1,
        (nextindex & 1) ^ xor_offset_bit, aes_ops);
    pathindex = nextindex;
    nextindex = (nextindex + 1) & indexmask;
    return leaf;
}
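
// Illustrative sketch only (not part of the library): one way a caller
// might use the StreamEval evaluator above to visit every leaf share of
// a single DPF, starting at index start and wrapping around.  The
// helper name example_stream_all_leaves and the callback parameter are
// hypothetical; StreamEval, address_t, and the constructor arguments
// are as defined above.  It assumes depth < ADDRESS_MAX_BITS so the
// leaf count fits in an address_t.
template <typename T, typename CB>
inline void example_stream_all_leaves(const T &rdpf, address_t start,
    size_t &aes_ops, CB callback)
{
    // use_expansion=true: use the precomputed expansion if there is one
    StreamEval<T> ev(rdpf, start, 0, aes_ops, true);
    address_t num_leaves = address_t(1) << rdpf.depth();
    for (address_t i = 0; i < num_leaves; ++i) {
        // The i-th call to next() yields this party's share of
        // DPF((start + i) mod 2^depth)
        callback(ev.next());
    }
}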
// Additive share of the scaling value M_as such that the high words
// of the leaf values for P0 and P1 add to M_as * e_{target}
template <>
inline std::tuple<RegAS,RegAS,RegAS> RDPFTriple::scaled_value<RegAS>() const
{
    return std::make_tuple(dpf[0].scaled_sum, dpf[1].scaled_sum,
        dpf[2].scaled_sum);
}

// XOR share of the scaling value M_xs such that the high words
// of the leaf values for P0 and P1 XOR to M_xs * e_{target}
template <>
inline std::tuple<RegXS,RegXS,RegXS> RDPFTriple::scaled_value<RegXS>() const
{
    return std::make_tuple(dpf[0].scaled_xor, dpf[1].scaled_xor,
        dpf[2].scaled_xor);
}

// Get the bit-shared unit vector entry from the leaf node
template <>
inline std::tuple<RegBS,RegBS,RegBS> RDPFTriple::unit<RegBS>(node leaf) const
{
    return std::make_tuple(
        dpf[0].unit_bs(std::get<0>(leaf)),
        dpf[1].unit_bs(std::get<1>(leaf)),
        dpf[2].unit_bs(std::get<2>(leaf)));
}

// Get the additive-shared unit vector entry from the leaf node
template <>
inline std::tuple<RegAS,RegAS,RegAS> RDPFTriple::unit<RegAS>(node leaf) const
{
    return std::make_tuple(
        dpf[0].unit_as(std::get<0>(leaf)),
        dpf[1].unit_as(std::get<1>(leaf)),
        dpf[2].unit_as(std::get<2>(leaf)));
}

// Get the XOR-shared scaled vector entry from the leaf node
template <>
inline std::tuple<RegXS,RegXS,RegXS> RDPFTriple::scaled<RegXS>(node leaf) const
{
    return std::make_tuple(
        dpf[0].scaled_xs(std::get<0>(leaf)),
        dpf[1].scaled_xs(std::get<1>(leaf)),
        dpf[2].scaled_xs(std::get<2>(leaf)));
}

// Get the additive-shared scaled vector entry from the leaf node
template <>
inline std::tuple<RegAS,RegAS,RegAS> RDPFTriple::scaled<RegAS>(node leaf) const
{
    return std::make_tuple(
        dpf[0].scaled_as(std::get<0>(leaf)),
        dpf[1].scaled_as(std::get<1>(leaf)),
        dpf[2].scaled_as(std::get<2>(leaf)));
}

// Additive share of the scaling value M_as such that the high words
// of the leaf values for P0 and P1 add to M_as * e_{target}
template <>
inline std::tuple<RegAS,RegAS> RDPFPair::scaled_value<RegAS>() const
{
    return std::make_tuple(dpf[0].scaled_sum, dpf[1].scaled_sum);
}

// XOR share of the scaling value M_xs such that the high words
// of the leaf values for P0 and P1 XOR to M_xs * e_{target}
template <>
inline std::tuple<RegXS,RegXS> RDPFPair::scaled_value<RegXS>() const
{
    return std::make_tuple(dpf[0].scaled_xor, dpf[1].scaled_xor);
}

// Get the bit-shared unit vector entry from the leaf node
template <>
inline std::tuple<RegBS,RegBS> RDPFPair::unit<RegBS>(node leaf) const
{
    return std::make_tuple(
        dpf[0].unit_bs(std::get<0>(leaf)),
        dpf[1].unit_bs(std::get<1>(leaf)));
}

// Get the additive-shared unit vector entry from the leaf node
template <>
inline std::tuple<RegAS,RegAS> RDPFPair::unit<RegAS>(node leaf) const
{
    return std::make_tuple(
        dpf[0].unit_as(std::get<0>(leaf)),
        dpf[1].unit_as(std::get<1>(leaf)));
}

// Get the XOR-shared scaled vector entry from the leaf node
template <>
inline std::tuple<RegXS,RegXS> RDPFPair::scaled<RegXS>(node leaf) const
{
    return std::make_tuple(
        dpf[0].scaled_xs(std::get<0>(leaf)),
        dpf[1].scaled_xs(std::get<1>(leaf)));
}

// Get the additive-shared scaled vector entry from the leaf node
template <>
inline std::tuple<RegAS,RegAS> RDPFPair::scaled<RegAS>(node leaf) const
{
    return std::make_tuple(
        dpf[0].scaled_as(std::get<0>(leaf)),
        dpf[1].scaled_as(std::get<1>(leaf)));
}
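
// Illustrative sketch only (not part of the library): how the typed
// accessors above might be combined at a single evaluation point of an
// RDPFTriple.  The function name example_triple_leaf_shares is
// hypothetical, and it assumes the triple exposes its leaf type as
// RDPFTriple::node, as the signatures above suggest.
inline void example_triple_leaf_shares(const RDPFTriple &trip,
    RDPFTriple::node leaf)
{
    // One additive share of the unit vector entry per DPF in the triple
    auto [u0, u1, u2] = trip.unit<RegAS>(leaf);
    // One XOR share of the scaled vector entry per DPF in the triple
    auto [s0, s1, s2] = trip.scaled<RegXS>(leaf);
    // The scaling values themselves do not depend on the leaf
    auto [m0, m1, m2] = trip.scaled_value<RegAS>();
    // In real use these shares would feed a larger MPC computation; the
    // casts just silence unused-variable warnings in this sketch.
    (void)u0; (void)u1; (void)u2;
    (void)s0; (void)s1; (void)s2;
    (void)m0; (void)m1; (void)m2;
}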
// I/O for RDPFs

template <typename T>
T& operator>>(T &is, RDPF &rdpf)
{
    is.read((char *)&rdpf.seed, sizeof(rdpf.seed));
    rdpf.whichhalf = get_lsb(rdpf.seed);
    uint8_t depth;
    // Add 64 to depth to indicate an expanded RDPF
    is.read((char *)&depth, sizeof(depth));
    bool read_expanded = false;
    if (depth > 64) {
        read_expanded = true;
        depth -= 64;
    }
    assert(depth <= ADDRESS_MAX_BITS);
    rdpf.cw.clear();
    for (uint8_t i=0; i<depth; ++i) {
        DPFnode cw;
        is.read((char *)&cw, sizeof(cw));
        rdpf.cw.push_back(cw);
    }
    if (read_expanded) {
        rdpf.expansion.resize(size_t(1)<<depth);
        is.read((char *)rdpf.expansion.data(),
            sizeof(rdpf.expansion[0])<<depth);
    }
    value_t cfbits = 0;
    is.read((char *)&cfbits, BITBYTES(depth));
    rdpf.cfbits = cfbits;
    is.read((char *)&rdpf.unit_sum_inverse, sizeof(rdpf.unit_sum_inverse));
    is.read((char *)&rdpf.scaled_sum, sizeof(rdpf.scaled_sum));
    is.read((char *)&rdpf.scaled_xor, sizeof(rdpf.scaled_xor));

    return is;
}

// Write the DPF to the output stream.  If expanded=true, then include
// the expansion if the DPF itself is already expanded.
template <typename T>
T& write_maybe_expanded(T &os, const RDPF &rdpf,
    bool expanded = true)
{
    os.write((const char *)&rdpf.seed, sizeof(rdpf.seed));
    uint8_t depth = rdpf.cw.size();
    assert(depth <= ADDRESS_MAX_BITS);
    // If we're writing an expansion, add 64 to depth
    uint8_t expanded_depth = depth;
    bool write_expansion = false;
    if (expanded && rdpf.expansion.size() == (size_t(1)<<depth)) {
        write_expansion = true;
        expanded_depth += 64;
    }
    os.write((const char *)&expanded_depth, sizeof(expanded_depth));
    for (uint8_t i=0; i<depth; ++i) {
        os.write((const char *)&rdpf.cw[i], sizeof(rdpf.cw[i]));
    }
    if (write_expansion) {
        os.write((const char *)rdpf.expansion.data(),
            sizeof(rdpf.expansion[0])<<depth);
    }
    os.write((const char *)&rdpf.cfbits, BITBYTES(depth));
    os.write((const char *)&rdpf.unit_sum_inverse, sizeof(rdpf.unit_sum_inverse));
    os.write((const char *)&rdpf.scaled_sum, sizeof(rdpf.scaled_sum));
    os.write((const char *)&rdpf.scaled_xor, sizeof(rdpf.scaled_xor));

    return os;
}

// The ordinary << operator never writes the expansion, since this is
// what we use to send DPFs over the network.
template <typename T>
T& operator<<(T &os, const RDPF &rdpf)
{
    return write_maybe_expanded(os, rdpf, false);
}

// I/O for RDPF Triples

// We never write RDPFTriples over the network, so always write
// the DPF expansions if they're available.
template <typename T>
T& operator<<(T &os, const RDPFTriple &rdpftrip)
{
    write_maybe_expanded(os, rdpftrip.dpf[0], true);
    write_maybe_expanded(os, rdpftrip.dpf[1], true);
    write_maybe_expanded(os, rdpftrip.dpf[2], true);
    nbits_t depth = rdpftrip.dpf[0].depth();
    os.write((const char *)&rdpftrip.as_target.ashare, BITBYTES(depth));
    os.write((const char *)&rdpftrip.xs_target.xshare, BITBYTES(depth));
    return os;
}

template <typename T>
T& operator>>(T &is, RDPFTriple &rdpftrip)
{
    is >> rdpftrip.dpf[0] >> rdpftrip.dpf[1] >> rdpftrip.dpf[2];
    nbits_t depth = rdpftrip.dpf[0].depth();
    rdpftrip.as_target.ashare = 0;
    is.read((char *)&rdpftrip.as_target.ashare, BITBYTES(depth));
    rdpftrip.xs_target.xshare = 0;
    is.read((char *)&rdpftrip.xs_target.xshare, BITBYTES(depth));
    return is;
}

// I/O for RDPF Pairs

// We never write RDPFPairs over the network, so always write
// the DPF expansions if they're available.
template <typename T>
T& operator<<(T &os, const RDPFPair &rdpfpair)
{
    write_maybe_expanded(os, rdpfpair.dpf[0], true);
    write_maybe_expanded(os, rdpfpair.dpf[1], true);
    return os;
}

template <typename T>
T& operator>>(T &is, RDPFPair &rdpfpair)
{
    is >> rdpfpair.dpf[0] >> rdpfpair.dpf[1];
    return is;
}
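
// Illustrative sketch only (not part of the library): using the I/O
// routines above to save an RDPF to a file with its expansion included
// (write_maybe_expanded with expanded=true) and to read it back with
// the streamed operator>>, which detects the +64 depth marker.  The
// function name example_save_and_load and the use of std::fstream are
// hypothetical, and this assumes RDPF is default-constructible, as
// filling one in via operator>> suggests.
#include <fstream>

inline RDPF example_save_and_load(const RDPF &rdpf, const char *filename)
{
    {
        std::ofstream out(filename, std::ios::binary);
        // expanded=true: include the expansion if this DPF has one
        write_maybe_expanded(out, rdpf, true);
    }
    RDPF readback;
    std::ifstream in(filename, std::ios::binary);
    in >> readback;
    return readback;
}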