|
@@ -67,11 +67,6 @@ using namespace dpf;
|
|
|
#include "mpc.h"
|
|
|
void generate_random_targets(uint8_t **target_share_read, size_t n_threads, bool party, size_t expo)
|
|
|
{
|
|
|
- for (size_t i = 0; i < n_threads; i++)
|
|
|
- {
|
|
|
- target_share_read[i] = new uint8_t[64];
|
|
|
- }
|
|
|
-
|
|
|
for (size_t j = 0; j < 64; ++j)
|
|
|
{
|
|
|
for (size_t i = 0; i < n_threads; ++i)
|
|
@@ -105,9 +100,9 @@ void compute_CW(bool party, tcp::socket &sout, __m128i L, __m128i R, uint8_t bit
|
|
|
qfd = open(qfile.c_str(), O_RDWR);
|
|
|
Y = (__m128i *)mmap(NULL, 8 * sizeof(__m128i),
|
|
|
PROT_READ, MAP_PRIVATE, qfd, 0);
|
|
|
- close(qfd);
|
|
|
- munmap(X, 8 * sizeof(__m128i));
|
|
|
- munmap(Y, 8 * sizeof(__m128i));
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
}
|
|
|
|
|
|
if (!party)
|
|
@@ -121,9 +116,9 @@ void compute_CW(bool party, tcp::socket &sout, __m128i L, __m128i R, uint8_t bit
|
|
|
qfd = open(qfile.c_str(), O_RDWR);
|
|
|
Y = (__m128i *)mmap(NULL, 8 * sizeof(__m128i),
|
|
|
PROT_READ, MAP_PRIVATE, qfd, 0);
|
|
|
- close(qfd);
|
|
|
- munmap(X, 8 * sizeof(__m128i));
|
|
|
- munmap(Y, 8 * sizeof(__m128i));
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
}
|
|
|
|
|
|
|
|
@@ -537,13 +532,15 @@ int main(int argc, char *argv[])
|
|
|
|
|
|
|
|
|
|
|
|
-
|
|
|
+ uint8_t **target_share_read = new uint8_t *[thread_per_batch];
|
|
|
+ for (size_t i = 0; i < thread_per_batch; i++) target_share_read[i] = new uint8_t[64];
|
|
|
+
|
|
|
for(size_t iters = 0; iters < n_batches; ++iters)
|
|
|
{
|
|
|
if (n_batches > 1) {
|
|
|
printf("Starting evalfull_mpc batch %lu / %lu\n", iters+1, n_batches);
|
|
|
}
|
|
|
- uint8_t **target_share_read = new uint8_t *[thread_per_batch];
|
|
|
+
|
|
|
generate_random_targets(target_share_read, thread_per_batch, party, expo);
|
|
|
boost::asio::thread_pool pool(thread_per_batch);
|
|
|
for (size_t j = 0; j < thread_per_batch; ++j)
|
|
@@ -554,28 +551,26 @@ int main(int argc, char *argv[])
|
|
|
}
|
|
|
|
|
|
pool.join();
|
|
|
- for(size_t j = 0; j < thread_per_batch; ++j)
|
|
|
- {
|
|
|
- delete[] target_share_read[j];
|
|
|
- }
|
|
|
- delete[] target_share_read;
|
|
|
+
|
|
|
convert_shares(output, flags, thread_per_batch, db_nitems, final_correction_word, socketsPb[0], party);
|
|
|
}
|
|
|
|
|
|
- for(size_t j = 0; j < thread_per_batch; ++j)
|
|
|
+ for(size_t j = 0; j < thread_per_batch; ++j)
|
|
|
{
|
|
|
-
|
|
|
free(output[j]);
|
|
|
free(flags[j]);
|
|
|
+ delete[] target_share_read[j];
|
|
|
}
|
|
|
+
|
|
|
free(output);
|
|
|
free(flags);
|
|
|
free(final_correction_word);
|
|
|
-
|
|
|
+ delete[] target_share_read;
|
|
|
+
|
|
|
auto end = std::chrono::steady_clock::now();
|
|
|
std::chrono::duration<double> elapsed_seconds = end - start;
|
|
|
std::cout << "WallClockTime: " << elapsed_seconds.count() << " s" << std::endl;
|
|
|
std::cout << "CommunicationCost: " << communication_cost << " bytes" << std::endl;
|
|
|
-
|
|
|
+
|
|
|
return 0;
|
|
|
}
|