/* Copyright (C) 2014 Stony Brook University
   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */

/*
 * shim_brk.c
 *
 * Implementation of system call "brk".
 */

#include <shim_internal.h>
#include <shim_utils.h>
#include <shim_table.h>
#include <shim_vma.h>
#include <shim_checkpoint.h>
#include <shim_profile.h>

#include <pal.h>
#include <sys/mman.h>

#define BRK_SIZE 4096
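
/*
 * Note: BRK_SIZE is the granularity at which the brk area is bookkept and
 * grown. init_brk_region() registers only the first BRK_SIZE bytes as a
 * mapped VMA, and shim_do_brk() extends brk_end in BRK_SIZE increments.
 */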

unsigned long brk_max_size = 0;

struct shim_brk_info {
    void * brk_start;
    void * brk_end;
    void * brk_current;
};

static struct shim_brk_info region;

DEFINE_PROFILE_OCCURENCE(brk, memory);
DEFINE_PROFILE_OCCURENCE(brk_count, memory);
DEFINE_PROFILE_OCCURENCE(brk_migrate_count, memory);
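
/*
 * Profiling counters (only active when profiling is compiled in): "brk"
 * accumulates the bytes allocated for the brk area, "brk_count" counts
 * initial allocations, and "brk_migrate_count" counts allocations made
 * while restoring a checkpointed (migrated) process.
 */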

void get_brk_region (void ** start, void ** end, void ** current)
{
    MASTER_LOCK();
    *start   = region.brk_start;
    *end     = region.brk_end;
    *current = region.brk_current;
    MASTER_UNLOCK();
}
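
/*
 * Illustrative (hypothetical) caller: other library OS code can take a
 * consistent snapshot of the brk area under the master lock, e.g.
 *
 *     void * start, * end, * current;
 *     get_brk_region(&start, &end, &current);
 *
 * The returned values are only a snapshot; the area may change once the
 * lock is released.
 */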

int init_brk_region (void * brk_region)
{
    if (region.brk_start)
        return 0;

    if (!brk_max_size) {
        char brk_cfg[CONFIG_MAX];
        if (root_config &&
            get_config(root_config, "sys.brk.size", brk_cfg, CONFIG_MAX) > 0)
            brk_max_size = parse_int(brk_cfg);
        if (!brk_max_size)
            brk_max_size = DEFAULT_BRK_MAX_SIZE;
    }

    int flags = MAP_PRIVATE|MAP_ANONYMOUS;
    bool brk_on_heap = true;
    const int TRIES = 10;

    /*
     * Chia-Che 8/24/2017
     * Added an argument to specify the initial starting address of the
     * brk region. The general assumption of Linux is that the brk region
     * should be within [exec-data-end, exec-data-end + 0x2000000).
     */
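    /*
     * Placement strategy implemented below: make up to TRIES attempts to
     * place the brk region at brk_region plus a random, allocation-aligned
     * offset below 0x2000000 (offset 0 when ASLR is disabled, in which
     * case each retry instead moves brk_region past the overlapping VMA).
     * An attempt is rejected if the region would extend past
     * user_address.end or overlap an existing VMA; if every attempt fails,
     * fall back to placing brk on the library OS heap.
     */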
    if (brk_region) {
        size_t max_brk = 0;
        if (PAL_CB(user_address.end) >= PAL_CB(executable_range.end))
            max_brk = PAL_CB(user_address.end) - PAL_CB(executable_range.end);

        /* Check whether the brk region can potentially be located after exec at all. */
        if (brk_max_size <= max_brk) {
            int try;
            for (try = TRIES; try > 0; try--) {
                uint32_t rand = 0;
#if ENABLE_ASLR == 1
                int ret = DkRandomBitsRead(&rand, sizeof(rand));
                if (ret < 0)
                    return -convert_pal_errno(-ret);
                rand %= MIN((size_t)0x2000000,
                            (size_t)(PAL_CB(user_address.end) - brk_region - brk_max_size));
                rand = ALIGN_DOWN(rand);

                if (brk_region + rand + brk_max_size >= PAL_CB(user_address.end))
                    continue;
#else
                /* Without randomization there is no point to retry here */
                if (brk_region + rand + brk_max_size >= PAL_CB(user_address.end))
                    break;
#endif

                struct shim_vma_val vma;
                if (lookup_overlap_vma(brk_region + rand, brk_max_size, &vma) == -ENOENT) {
                    /* Found a place for brk */
                    brk_region += rand;
                    brk_on_heap = false;
                    break;
                }
#if !(ENABLE_ASLR == 1)
                /* Without randomization, try memory directly after the overlapping block */
                brk_region = vma.addr + vma.length;
#endif
            }
        }
    }

    if (brk_on_heap) {
        brk_region = bkeep_unmapped_heap(brk_max_size, PROT_READ|PROT_WRITE,
                                         flags|VMA_UNMAPPED, NULL, 0, "brk");
        if (!brk_region) {
            return -ENOMEM;
        }
    } else {
        /*
         * Create the bookkeeping before allocating the brk region.
         * The bookkeeping should never fail because we've already confirmed
         * the availability.
         */
        if (bkeep_mmap(brk_region, brk_max_size, PROT_READ|PROT_WRITE,
                       flags|VMA_UNMAPPED, NULL, 0, "brk") < 0)
            BUG();
    }

    void * end_brk_region = NULL;

    /* Allocate the whole brk region */
    void * ret = (void *) DkVirtualMemoryAlloc(brk_region, brk_max_size, 0,
                                               PAL_PROT_READ|PAL_PROT_WRITE);

    /* Check whether the PAL call succeeded. */
    if (!ret) {
        bkeep_munmap(brk_region, brk_max_size, flags);
        return -ENOMEM;
    }

    ADD_PROFILE_OCCURENCE(brk, brk_max_size);
    INC_PROFILE_OCCURENCE(brk_count);

    end_brk_region = brk_region + BRK_SIZE;

    region.brk_start   = brk_region;
    region.brk_end     = end_brk_region;
    region.brk_current = brk_region;

    debug("brk area: %p - %p\n", brk_region, end_brk_region);
    debug("brk reserved area: %p - %p\n", end_brk_region,
          brk_region + brk_max_size);

    /*
     * Create another bookkeeping for the current brk region. The remaining
     * space will be marked as unmapped so that the library OS can reuse the
     * space for other purposes.
     */
    if (bkeep_mmap(brk_region, BRK_SIZE, PROT_READ|PROT_WRITE, flags,
                   NULL, 0, "brk") < 0)
        BUG();

    return 0;
}
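
/*
 * After a successful init_brk_region(), the address space looks like:
 *
 *   [brk_start, brk_end)                  allocated and bookkept as a
 *                                         mapped VMA (initially BRK_SIZE
 *                                         bytes);
 *   [brk_end, brk_start + brk_max_size)   allocated but marked
 *                                         VMA_UNMAPPED, i.e. reserved for
 *                                         future brk growth.
 */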

int reset_brk (void)
{
    MASTER_LOCK();

    if (!region.brk_start) {
        MASTER_UNLOCK();
        return 0;
    }

    int ret = shim_do_munmap(region.brk_start,
                             region.brk_end - region.brk_start);

    if (ret < 0) {
        MASTER_UNLOCK();
        return ret;
    }

    region.brk_start = region.brk_end = region.brk_current = NULL;

    MASTER_UNLOCK();
    return 0;
}

void * shim_do_brk (void * brk)
{
    MASTER_LOCK();

    if (init_brk_region(NULL) < 0) {
        debug("Failed to initialize brk!\n");
        brk = NULL;
        goto out;
    }

    if (!brk) {
unchanged:
        brk = region.brk_current;
        goto out;
    }

    if (brk < region.brk_start)
        goto unchanged;

    if (brk > region.brk_end) {
        if (brk > region.brk_start + brk_max_size)
            goto unchanged;

        void * brk_end = region.brk_end;
        while (brk_end < brk)
            brk_end += BRK_SIZE;

        debug("brk area: %p - %p\n", region.brk_start, brk_end);
        debug("brk reserved area: %p - %p\n", brk_end,
              region.brk_start + brk_max_size);

        bkeep_mmap(region.brk_start, brk_end - region.brk_start,
                   PROT_READ|PROT_WRITE,
                   MAP_ANONYMOUS|MAP_PRIVATE, NULL, 0, "brk");

        region.brk_current = brk;
        region.brk_end = brk_end;
        goto out;
    }

    region.brk_current = brk;

out:
    MASTER_UNLOCK();
    return brk;
}
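
/*
 * Illustrative (hypothetical) use of the brk semantics implemented above,
 * mirroring what an sbrk()-style allocator in libc would do:
 *
 *     void * cur    = shim_do_brk(NULL);            // query current break
 *     void * newbrk = shim_do_brk(cur + 0x10000);   // try to grow by 64 KB
 *     if (newbrk == cur)
 *         ;  // request was out of range; break left unchanged
 *
 * Requests below brk_start or beyond brk_start + brk_max_size leave the
 * break unchanged and return the current break.
 */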

BEGIN_CP_FUNC(brk)
{
    __UNUSED(obj);
    __UNUSED(size);
    __UNUSED(objp);

    if (region.brk_start) {
        ADD_CP_FUNC_ENTRY((ptr_t)region.brk_start);
        ADD_CP_ENTRY(ADDR, region.brk_current);
        ADD_CP_ENTRY(SIZE, region.brk_end - region.brk_start);
        assert(brk_max_size);
        ADD_CP_ENTRY(SIZE, brk_max_size);
    }
}
END_CP_FUNC(brk)
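
/*
 * On restore, the entries written by the checkpoint function above are
 * consumed in the same order: the function entry carries brk_start,
 * followed by the current break, the committed size (brk_end - brk_start),
 * and brk_max_size.  The restore function below re-registers the area and
 * re-allocates the reserved tail; if part of that range is already occupied
 * in the new process, brk_max_size is shrunk so the tail stops at the
 * existing mapping.
 */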

BEGIN_RS_FUNC(brk)
{
    __UNUSED(rebase);
    region.brk_start   = (void *) GET_CP_FUNC_ENTRY();
    region.brk_current = (void *) GET_CP_ENTRY(ADDR);
    region.brk_end     = region.brk_start + GET_CP_ENTRY(SIZE);
    brk_max_size       = GET_CP_ENTRY(SIZE);

    debug("brk area: %p - %p\n", region.brk_start, region.brk_end);

    size_t brk_size = region.brk_end - region.brk_start;

    if (brk_size < brk_max_size) {
        void * alloc_addr = region.brk_end;
        size_t alloc_size = brk_max_size - brk_size;
        struct shim_vma_val vma;

        if (!lookup_overlap_vma(alloc_addr, alloc_size, &vma)) {
            /* if memory is already allocated here, adjust brk_max_size */
            alloc_size = vma.addr - alloc_addr;
            brk_max_size = brk_size + alloc_size;
        }

        int ret = bkeep_mmap(alloc_addr, alloc_size,
                             PROT_READ|PROT_WRITE,
                             MAP_ANONYMOUS|MAP_PRIVATE|VMA_UNMAPPED,
                             NULL, 0, "brk");
        if (ret < 0)
            return ret;

        void * ptr = DkVirtualMemoryAlloc(alloc_addr, alloc_size, 0,
                                          PAL_PROT_READ|PAL_PROT_WRITE);

        assert(ptr == alloc_addr);
        ADD_PROFILE_OCCURENCE(brk, alloc_size);
        INC_PROFILE_OCCURENCE(brk_migrate_count);

        debug("brk reserved area: %p - %p\n", alloc_addr,
              alloc_addr + alloc_size);
    }

    DEBUG_RS("current=%p,region=%p-%p", region.brk_current, region.brk_start,
             region.brk_end);
}
END_RS_FUNC(brk)