/* shim_brk.c */
  1. /* Copyright (C) 2014 Stony Brook University
  2. This file is part of Graphene Library OS.
  3. Graphene Library OS is free software: you can redistribute it and/or
  4. modify it under the terms of the GNU Lesser General Public License
  5. as published by the Free Software Foundation, either version 3 of the
  6. License, or (at your option) any later version.
  7. Graphene Library OS is distributed in the hope that it will be useful,
  8. but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. GNU Lesser General Public License for more details.
  11. You should have received a copy of the GNU Lesser General Public License
  12. along with this program. If not, see <http://www.gnu.org/licenses/>. */
  13. /*
  14. * shim_brk.c
  15. *
  16. * Implementation of system call "brk".
  17. */
  18. #include <sys/mman.h>
  19. #include <pal.h>
  20. #include <shim_checkpoint.h>
  21. #include <shim_internal.h>
  22. #include <shim_profile.h>
  23. #include <shim_table.h>
  24. #include <shim_utils.h>
  25. #include <shim_vma.h>
/* Granularity (in bytes) by which the committed part of the brk region grows;
 * also the size of the initially bookkept part of the region. */
#define BRK_SIZE 4096

/* Process-wide brk state. All accesses are serialized with the master lock
 * (see get_brk_region() / reset_brk() / shim_do_brk()). */
struct shim_brk_info {
    size_t data_segment_size; /* size of the executable's data segment; counted
                                 against RLIMIT_DATA together with the brk area */
    void* brk_start;          /* lowest address of the brk region */
    void* brk_end;            /* end of the currently bookkept ("committed") part */
    void* brk_current;        /* current program break as seen by the application */
};

static struct shim_brk_info region;

/* Profiling counters: total bytes given to brk, number of brk setups, and
 * number of brk regions re-established after checkpoint migration. */
DEFINE_PROFILE_OCCURENCE(brk, memory);
DEFINE_PROFILE_OCCURENCE(brk_count, memory);
DEFINE_PROFILE_OCCURENCE(brk_migrate_count, memory);
  37. void get_brk_region(void** start, void** end, void** current) {
  38. MASTER_LOCK();
  39. *start = region.brk_start;
  40. *end = region.brk_end;
  41. *current = region.brk_current;
  42. MASTER_UNLOCK();
  43. }
/*
 * init_brk_region - reserve and bookkeep the brk region for this process.
 *
 * brk_region:        preferred start address (typically the end of the
 *                    executable's data segment), or NULL to let the generic
 *                    heap allocator choose a location.
 * data_segment_size: size of the executable's data segment; counted against
 *                    RLIMIT_DATA together with the brk area.
 *
 * Returns 0 on success or a negative errno-style value. Idempotent: once
 * region.brk_start is set, subsequent calls are no-ops.
 */
int init_brk_region(void* brk_region, size_t data_segment_size) {
    /* Already initialized (e.g. restored from a checkpoint) -- nothing to do. */
    if (region.brk_start)
        return 0;

    data_segment_size = ALLOC_ALIGN_UP(data_segment_size);

    /* Maximum brk size: manifest option "sys.brk.size" if given, else default. */
    uint64_t brk_max_size = DEFAULT_BRK_MAX_SIZE;
    if (root_config) {
        char brk_cfg[CONFIG_MAX];
        if (get_config(root_config, "sys.brk.size", brk_cfg, sizeof(brk_cfg)) > 0)
            brk_max_size = parse_int(brk_cfg);
    }

    /* RLIMIT_DATA accounts for both the data segment and the brk area. */
    set_rlimit_cur(RLIMIT_DATA, brk_max_size + data_segment_size);

    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    bool brk_on_heap = true;  /* fall back to the generic heap allocator */
    const int TRIES = 10;     /* placement attempts near the executable */

    /*
     * Chia-Che 8/24/2017
     * Adding an argument to specify the initial starting address of brk region. The general
     * assumption of Linux is that the brk region should be within
     * [exec-data-end, exec-data-end + 0x2000000).
     */
    if (brk_region) {
        /* max_brk: largest brk area that can fit between the requested start
         * and the end of the user address space (shrunk by the address hole). */
        size_t max_brk = 0;
        if (PAL_CB(user_address.end) >= PAL_CB(executable_range.end))
            max_brk = PAL_CB(user_address.end) - PAL_CB(executable_range.end);

        if (PAL_CB(user_address_hole.end) - PAL_CB(user_address_hole.start) > 0) {
            /* XXX: This assumes that we always want brk to be after the hole. */
            brk_region = MAX(brk_region, PAL_CB(user_address_hole.end));
            max_brk =
                MIN(max_brk, (size_t)(PAL_CB(user_address.end) - PAL_CB(user_address_hole.end)));
        }

        /* Check whether the brk region can potentially be located after exec at all. */
        if (brk_max_size <= max_brk) {
            int try;
            for (try = TRIES; try > 0; try--) {
                uint32_t rand = 0;
#if ENABLE_ASLR == 1
                /* Randomize the brk offset within the available window. */
                int ret = DkRandomBitsRead(&rand, sizeof(rand));
                if (ret < 0)
                    return -convert_pal_errno(-ret);
                rand %= MIN((size_t)0x2000000,
                            (size_t)(PAL_CB(user_address.end) - brk_region - brk_max_size));
                rand = ALLOC_ALIGN_DOWN(rand);
                if (brk_region + rand + brk_max_size >= PAL_CB(user_address.end))
                    continue;
#else
                /* Without randomization there is no point to retry here */
                if (brk_region + rand + brk_max_size >= PAL_CB(user_address.end))
                    break;
#endif
                struct shim_vma_val vma;
                if (lookup_overlap_vma(brk_region + rand, brk_max_size, &vma) == -ENOENT) {
                    /* Found a place for brk */
                    brk_region += rand;
                    brk_on_heap = false;
                    break;
                }
#if !(ENABLE_ASLR == 1)
                /* Without randomization, try memory directly after the overlapping block */
                brk_region = vma.addr + vma.length;
#endif
            }
        }
    }

    if (brk_on_heap) {
        /* Could not (or was not asked to) place brk after the executable:
         * let the VMA bookkeeping pick a spot on the heap. */
        brk_region = bkeep_unmapped_heap(brk_max_size, PROT_READ | PROT_WRITE, flags | VMA_UNMAPPED,
                                         NULL, 0, "brk");
        if (!brk_region) {
            return -ENOMEM;
        }
    } else {
        /*
         * Create the bookkeeping before allocating the brk region. The bookkeeping should never
         * fail because we've already confirmed the availability.
         */
        if (bkeep_mmap(brk_region, brk_max_size, PROT_READ | PROT_WRITE, flags | VMA_UNMAPPED, NULL,
                       0, "brk") < 0)
            BUG();
    }

    void* end_brk_region = NULL;

    /* Allocate the whole brk region */
    void* ret =
        (void*)DkVirtualMemoryAlloc(brk_region, brk_max_size, 0, PAL_PROT_READ | PAL_PROT_WRITE);

    /* Checking if the PAL call succeeds. */
    if (!ret) {
        /* Undo the reservation made above before reporting failure. */
        bkeep_munmap(brk_region, brk_max_size, flags);
        return -ENOMEM;
    }

    ADD_PROFILE_OCCURENCE(brk, brk_max_size);
    INC_PROFILE_OCCURENCE(brk_count);

    /* Only the first BRK_SIZE bytes are considered in use; the remainder of
     * the reservation is re-marked unmapped by the bookkeeping below. */
    end_brk_region = brk_region + BRK_SIZE;

    region.data_segment_size = data_segment_size;
    region.brk_start         = brk_region;
    region.brk_end           = end_brk_region;
    region.brk_current       = brk_region;

    debug("brk area: %p - %p\n", brk_region, end_brk_region);
    debug("brk reserved area: %p - %p\n", end_brk_region, brk_region + brk_max_size);

    /*
     * Create another bookkeeping for the current brk region. The remaining space will be marked as
     * unmapped so that the library OS can reuse the space for other purpose.
     */
    if (bkeep_mmap(brk_region, BRK_SIZE, PROT_READ | PROT_WRITE, flags, NULL, 0, "brk") < 0)
        BUG();

    return 0;
}
  148. int reset_brk(void) {
  149. MASTER_LOCK();
  150. if (!region.brk_start) {
  151. MASTER_UNLOCK();
  152. return 0;
  153. }
  154. int ret = shim_do_munmap(region.brk_start, region.brk_end - region.brk_start);
  155. if (ret < 0) {
  156. MASTER_UNLOCK();
  157. return ret;
  158. }
  159. region.brk_start = region.brk_end = region.brk_current = NULL;
  160. MASTER_UNLOCK();
  161. return 0;
  162. }
  163. void* shim_do_brk(void* brk) {
  164. MASTER_LOCK();
  165. if (init_brk_region(NULL, 0) < 0) { // If brk is never initialized, assume no executable
  166. debug("Failed to initialize brk!\n");
  167. brk = NULL;
  168. goto out;
  169. }
  170. if (!brk) {
  171. unchanged:
  172. brk = region.brk_current;
  173. goto out;
  174. }
  175. if (brk < region.brk_start)
  176. goto unchanged;
  177. if (brk > region.brk_end) {
  178. uint64_t rlim_data = get_rlimit_cur(RLIMIT_DATA);
  179. // Check if there is enough space within the system limit
  180. if (rlim_data < region.data_segment_size) {
  181. brk = NULL;
  182. goto out;
  183. }
  184. uint64_t brk_max_size = rlim_data - region.data_segment_size;
  185. if (brk > region.brk_start + brk_max_size)
  186. goto unchanged;
  187. void* brk_end = region.brk_end;
  188. while (brk_end < brk)
  189. brk_end += BRK_SIZE;
  190. debug("brk area: %p - %p\n", region.brk_start, brk_end);
  191. debug("brk reserved area: %p - %p\n", brk_end, region.brk_start + brk_max_size);
  192. bkeep_mmap(region.brk_start, brk_end - region.brk_start, PROT_READ | PROT_WRITE,
  193. MAP_ANONYMOUS | MAP_PRIVATE, NULL, 0, "brk");
  194. region.brk_current = brk;
  195. region.brk_end = brk_end;
  196. goto out;
  197. }
  198. region.brk_current = brk;
  199. out:
  200. MASTER_UNLOCK();
  201. return brk;
  202. }
  203. BEGIN_CP_FUNC(brk) {
  204. __UNUSED(obj);
  205. __UNUSED(size);
  206. __UNUSED(objp);
  207. if (region.brk_start) {
  208. ADD_CP_FUNC_ENTRY((ptr_t)region.brk_start);
  209. ADD_CP_ENTRY(ADDR, region.brk_current);
  210. ADD_CP_ENTRY(SIZE, region.brk_end - region.brk_start);
  211. ADD_CP_ENTRY(SIZE, region.data_segment_size);
  212. }
  213. }
  214. END_CP_FUNC(bek)
/*
 * Restore function: rebuild the brk region in a migrated (checkpoint-restored)
 * process. The committed part of the region arrives with the checkpointed
 * memory; here we re-reserve the remaining (uncommitted) part up to the
 * RLIMIT_DATA-derived maximum, shrinking RLIMIT_DATA if another mapping
 * already occupies part of that space.
 */
BEGIN_RS_FUNC(brk) {
    __UNUSED(rebase);
    /* Entries in the same order as written by BEGIN_CP_FUNC(brk) above. */
    region.brk_start         = (void*)GET_CP_FUNC_ENTRY();
    region.brk_current       = (void*)GET_CP_ENTRY(ADDR);
    region.brk_end           = region.brk_start + GET_CP_ENTRY(SIZE);
    region.data_segment_size = GET_CP_ENTRY(SIZE);

    debug("brk area: %p - %p\n", region.brk_start, region.brk_end);

    size_t brk_size      = region.brk_end - region.brk_start;
    uint64_t rlim_data   = get_rlimit_cur(RLIMIT_DATA);
    assert(rlim_data > region.data_segment_size);
    uint64_t brk_max_size = rlim_data - region.data_segment_size;

    if (brk_size < brk_max_size) {
        /* Re-reserve the uncommitted tail of the brk region. */
        void* alloc_addr  = region.brk_end;
        size_t alloc_size = brk_max_size - brk_size;
        struct shim_vma_val vma;

        if (!lookup_overlap_vma(alloc_addr, alloc_size, &vma)) {
            /* if memory are already allocated here, adjust RLIMIT_DATA */
            alloc_size = vma.addr - alloc_addr;
            set_rlimit_cur(RLIMIT_DATA, (uint64_t)brk_size + alloc_size + region.data_segment_size);
        }

        int ret = bkeep_mmap(alloc_addr, alloc_size, PROT_READ | PROT_WRITE,
                             MAP_ANONYMOUS | MAP_PRIVATE | VMA_UNMAPPED, NULL, 0, "brk");
        if (ret < 0)
            return ret;

        void* ptr = DkVirtualMemoryAlloc(alloc_addr, alloc_size, 0, PAL_PROT_READ | PAL_PROT_WRITE);
        __UNUSED(ptr);
        /* The space was just confirmed free, so the PAL allocation at the
         * exact address is expected to succeed. */
        assert(ptr == alloc_addr);

        ADD_PROFILE_OCCURENCE(brk, alloc_size);
        INC_PROFILE_OCCURENCE(brk_migrate_count);

        debug("brk reserved area: %p - %p\n", alloc_addr, alloc_addr + alloc_size);
    }

    DEBUG_RS("current=%p,region=%p-%p", region.brk_current, region.brk_start, region.brk_end);
}
END_RS_FUNC(brk)