// sort.cpp
#include <pthread.h>

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <deque>
#include <functional>
#include <map>
#include <optional>
#include <utility>

#include "sort.hpp"
  5. // A set of precomputed WaksmanNetworks of a given size
  6. struct SizedWNs {
  7. pthread_mutex_t mutex;
  8. std::deque<WaksmanNetwork> wns;
  9. };
  10. // A (mutexed) map mapping sizes to SizedWNs
  11. struct PrecompWNs {
  12. pthread_mutex_t mutex;
  13. std::map<uint32_t,SizedWNs> sized_wns;
  14. };
  15. static PrecompWNs precomp_wns;
  16. // A (mutexed) map mapping (N, nthreads) pairs to WNEvalPlans
  17. struct EvalPlans {
  18. pthread_mutex_t mutex;
  19. std::map<std::pair<uint32_t,threadid_t>,WNEvalPlan> eval_plans;
  20. };
  21. static EvalPlans precomp_eps;
// Precompute a single WaksmanNetwork of size N, programmed with a fresh
// random permutation, and add it to the global precomp_wns pool for
// later consumption by sort_mtobliv.  Safe to call from multiple
// threads concurrently.
void sort_precompute(uint32_t N)
{
    uint32_t *random_permutation = NULL;
    try {
        random_permutation = new uint32_t[N];
    } catch (std::bad_alloc&) {
        printf("Allocating memory failed in sort_precompute\n");
        assert(false);
    }
    // Start from the identity permutation...
    for (uint32_t i=0;i<N;++i) {
        random_permutation[i] = i;
    }
    // ...and shuffle it to obtain a random permutation of [0, N).
    RecursiveShuffle_M2((unsigned char *) random_permutation, N, sizeof(uint32_t));
    WaksmanNetwork wnet(N);
    wnet.setPermutation(random_permutation);
    // NOTE(review): random_permutation is never delete[]d here.  Unless
    // setPermutation takes ownership of the buffer, this leaks 4*N bytes
    // per call -- confirm setPermutation's ownership semantics.
    // Note that sized_wns[N] creates a map entry for N if it doesn't yet exist
    pthread_mutex_lock(&precomp_wns.mutex);
    SizedWNs& szwn = precomp_wns.sized_wns[N];
    pthread_mutex_unlock(&precomp_wns.mutex);
    // std::map never invalidates references to existing elements, so it
    // is safe to release the map mutex before locking the bucket.
    pthread_mutex_lock(&szwn.mutex);
    szwn.wns.push_back(std::move(wnet));
    pthread_mutex_unlock(&szwn.mutex);
}
  45. void sort_precompute_evalplan(uint32_t N, threadid_t nthreads)
  46. {
  47. std::pair<uint32_t,threadid_t> idx = {N, nthreads};
  48. pthread_mutex_lock(&precomp_eps.mutex);
  49. if (!precomp_eps.eval_plans.count(idx)) {
  50. precomp_eps.eval_plans.try_emplace(idx, N, nthreads);
  51. }
  52. pthread_mutex_unlock(&precomp_eps.mutex);
  53. }
  54. // Perform the sort using up to nthreads threads. The items to sort are
  55. // byte arrays of size msg_size. The key is the first 4 bytes of each
  56. // item.
  57. void sort_mtobliv(threadid_t nthreads, uint8_t* items, uint16_t msg_size,
  58. uint32_t Nr, uint32_t Na,
  59. // the arguments to the callback are nthreads, items, the sorted
  60. // indices, and the number of non-padding items
  61. std::function<void(threadid_t, const uint8_t*, const uint64_t*,
  62. uint32_t Nr)> cb)
  63. {
  64. // Find the smallest Nw for which we have a precomputed
  65. // WaksmanNetwork with Nr <= Nw <= Na
  66. pthread_mutex_lock(&precomp_wns.mutex);
  67. std::optional<WaksmanNetwork> wn;
  68. uint32_t Nw;
  69. for (auto& N : precomp_wns.sized_wns) {
  70. if (N.first > Na) {
  71. printf("No precomputed WaksmanNetworks of size at most %u\n", Na);
  72. assert(false);
  73. }
  74. if (N.first < Nr) {
  75. continue;
  76. }
  77. // We're in the right range, but see if we have an actual
  78. // precomputed WaksmanNetwork
  79. pthread_mutex_lock(&N.second.mutex);
  80. if (N.second.wns.size() == 0) {
  81. pthread_mutex_unlock(&N.second.mutex);
  82. continue;
  83. }
  84. wn = std::move(N.second.wns.front());
  85. N.second.wns.pop_front();
  86. Nw = N.first;
  87. pthread_mutex_unlock(&N.second.mutex);
  88. break;
  89. }
  90. pthread_mutex_unlock(&precomp_wns.mutex);
  91. if (!wn) {
  92. printf("No precomputed WaksmanNetwork of size range [%u,%u] found.\n",
  93. Nr, Na);
  94. assert(wn);
  95. }
  96. std::pair<uint32_t,threadid_t> epidx = {Nw, nthreads};
  97. pthread_mutex_lock(&precomp_eps.mutex);
  98. if (!precomp_eps.eval_plans.count(epidx)) {
  99. printf("No precomputed WNEvalPlan with N=%u, nthreads=%hu\n",
  100. Nw, nthreads);
  101. assert(false);
  102. }
  103. const WNEvalPlan &eval_plan = precomp_eps.eval_plans.at(epidx);
  104. pthread_mutex_unlock(&precomp_eps.mutex);
  105. // Mark Nw-Nr items as padding (Nr, Na, and Nw are _not_ private)
  106. for (uint32_t i=Nr; i<Nw; ++i) {
  107. (*(uint32_t*)(items+msg_size*i)) = uint32_t(-1);
  108. }
  109. // Shuffle Nw items
  110. wn.value().applyInversePermutation<OSWAP_16X>(
  111. items, msg_size, eval_plan);
  112. // Create the indices
  113. uint64_t *idx = new uint64_t[Nr];
  114. uint64_t *nextidx = idx;
  115. for (uint32_t i=0; i<Nw; ++i) {
  116. uint64_t key = (*(uint32_t*)(items+msg_size*i));
  117. if (key != uint32_t(-1)) {
  118. *nextidx = (key<<32) + i;
  119. ++nextidx;
  120. }
  121. }
  122. if (nextidx != idx + Nr) {
  123. printf("Found %u non-padding items, expected %u\n",
  124. nextidx-idx, Nr);
  125. assert(nextidx == idx + Nr);
  126. }
  127. // Sort the keys and indices
  128. uint64_t *backingidx = new uint64_t[Nr];
  129. bool whichbuf = mtmergesort<uint64_t>(idx, Nr, backingidx, nthreads);
  130. uint64_t *sortedidx = whichbuf ? backingidx : idx;
  131. for (uint32_t i=0; i<Nr; ++i) {
  132. sortedidx[i] &= uint64_t(0xffffffff);
  133. }
  134. cb(nthreads, items, sortedidx, Nr);
  135. delete[] idx;
  136. delete[] backingidx;
  137. }