trts_add_trim.cpp 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461
  1. /*
  2. * Copyright (C) 2011-2018 Intel Corporation. All rights reserved.
  3. *
  4. * Redistribution and use in source and binary forms, with or without
  5. * modification, are permitted provided that the following conditions
  6. * are met:
  7. *
  8. * * Redistributions of source code must retain the above copyright
  9. * notice, this list of conditions and the following disclaimer.
  10. * * Redistributions in binary form must reproduce the above copyright
  11. * notice, this list of conditions and the following disclaimer in
  12. * the documentation and/or other materials provided with the
  13. * distribution.
  14. * * Neither the name of Intel Corporation nor the names of its
  15. * contributors may be used to endorse or promote products derived
  16. * from this software without specific prior written permission.
  17. *
  18. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  19. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  20. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  21. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  22. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  23. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  24. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  25. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  26. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  27. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  28. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  29. *
  30. */
  31. #include <string.h>
  32. #include "sgx_utils.h"
  33. #include "trts_inst.h"
  34. #include "util.h"
  35. #include "trts_trim.h"
  36. #include "trts_util.h"
  37. #include "global_data.h"
  38. #include "se_memcpy.h"
  39. #include "se_page_attr.h"
  40. #include "trts_internal.h"
  41. #ifndef SE_SIM
// Pairing of SECINFO flags and layout attributes describing one dynamic
// region; filled in by the check_*_range helpers below so callers can decide
// how to accept the pages.
struct dynamic_flags_attributes
{
    si_flags_t si_flags;   // SECINFO flags recorded in the layout entry (page type / permissions)
    uint16_t attributes;   // layout entry attributes (PAGE_ATTR_* bits)
};
  47. // Low level API to EACCEPT pages on grow-up region.
  48. static int sgx_accept_backward(si_flags_t sfl, size_t lo, size_t hi)
  49. {
  50. size_t addr = hi;
  51. SE_DECLSPEC_ALIGN(sizeof(sec_info_t)) sec_info_t si;
  52. si.flags = sfl;
  53. for (uint16_t i = 0; i < (sizeof(si.reserved)/sizeof(si.reserved[0])); i++)
  54. si.reserved[i] = 0;
  55. while (lo < addr)
  56. {
  57. int rc = do_eaccept(&si, addr -= SE_PAGE_SIZE);
  58. if (rc != 0)
  59. abort();
  60. }
  61. return 0;
  62. }
// Low level API to EACCEPT pages on grow-up region during exception handling.
// Accepts every page in [lo, hi), low address to high. Aborts on any EACCEPT
// failure; returns 0 on success.
static int sgx_accept_forward_within_exception(size_t lo, size_t hi)
{
    size_t addr = lo;
    SE_DECLSPEC_ALIGN(sizeof(sec_info_t)) sec_info_t si;
#ifdef DEBUG
    // Debug-only sanity check on the stack pointer's page offset against the
    // static stack size: if it fails, bail out instead of accepting pages.
    // NOTE(review): reads only ESP (low 32 bits of the stack pointer); the
    // subsequent mask uses just the in-page offset, so this appears intentional
    // — confirm for 64-bit builds.
    unsigned int sp_value = 0;
    asm("mov %%esp, %0;" : "=r" (sp_value) :);
    if ((sp_value & (SE_PAGE_SIZE -1)) <= (SE_PAGE_SIZE - (STATIC_STACK_SIZE % SE_PAGE_SIZE)))
        return SGX_ERROR_UNEXPECTED;
#endif
    // Pages dynamically EAUG-ed by the kernel are PENDING; accept them as RW.
    si.flags = SI_FLAGS_RW | SI_FLAG_PENDING;
    for (uint16_t i = 0; i < (sizeof(si.reserved)/sizeof(si.reserved[0])); i++)
        si.reserved[i] = 0;
    while (addr < hi)
    {
        int rc = do_eaccept(&si, addr);
        if (rc != 0)
            abort();
        addr += SE_PAGE_SIZE;
    }
    return 0;
}
  86. const volatile layout_t *get_dynamic_layout_by_id(uint16_t id)
  87. {
  88. for(uint32_t i = 0; i < g_global_data.layout_entry_num; i++)
  89. {
  90. if(g_global_data.layout_table[i].entry.id == id)
  91. {
  92. return &(g_global_data.layout_table[i]);
  93. }
  94. }
  95. return NULL;
  96. }
// EACCEPT trim requests when the enclave completes initialization.
// Walks the layout (sub)table [layout_start, layout_end); `offset` is the
// load-time displacement accumulated from enclosing layout groups.
// Returns 0 on success, or the first non-zero sgx_accept_forward result.
int accept_post_remove(const volatile layout_t *layout_start, const volatile layout_t *layout_end, size_t offset)
{
    int ret = -1;
    for (const volatile layout_t *layout = layout_start; layout < layout_end; layout++)
    {
        // Plain entry flagged POST_REMOVE: accept the TRIM for all its pages.
        if (!IS_GROUP_ID(layout->group.id) && (layout->entry.attributes & PAGE_ATTR_POST_REMOVE))
        {
            size_t start_addr = (size_t)layout->entry.rva + offset + (size_t)get_enclave_base();
            uint32_t page_count = layout->entry.page_count;
            if (0 != (ret = sgx_accept_forward(SI_FLAG_TRIM | SI_FLAG_MODIFIED, start_addr, start_addr + ((size_t)page_count << SE_PAGE_SHIFT))))
                return ret;
        }
        // Group entry: recursively replay the preceding entry_count entries,
        // once per load_times, advancing the offset by load_step each round.
        else if (IS_GROUP_ID(layout->group.id))
        {
            size_t step = 0;
            for(uint32_t j = 0; j < layout->group.load_times; j++)
            {
                step += (size_t)layout->group.load_step;
                if(0 != (ret = accept_post_remove(&layout[-layout->group.entry_count], layout, step)))
                    return ret;
            }
        }
    }
    return 0;
}
  123. static int check_heap_dyn_range(void *addr, size_t page_count, struct dynamic_flags_attributes *fa)
  124. {
  125. size_t heap_dyn_start, heap_dyn_size;
  126. heap_dyn_start = (size_t)get_heap_base() + get_heap_min_size();
  127. heap_dyn_size = get_heap_size() - get_heap_min_size();
  128. if ((size_t)addr >= heap_dyn_start
  129. && (size_t)addr + (page_count << SE_PAGE_SHIFT) <= heap_dyn_start + heap_dyn_size)
  130. {
  131. if (fa != NULL)
  132. {
  133. fa->si_flags = SI_FLAGS_RW;
  134. fa->attributes = PAGE_ATTR_POST_ADD;
  135. }
  136. return 0;
  137. }
  138. else
  139. {
  140. return -1;
  141. }
  142. }
  143. static int check_dynamic_entry_range(void *addr, size_t page_count, uint16_t entry_id, size_t entry_offset, struct dynamic_flags_attributes *fa)
  144. {
  145. const volatile layout_t *layout = NULL;
  146. size_t entry_start_addr;
  147. uint32_t entry_page_count;
  148. if (entry_id < LAYOUT_ID_HEAP_MIN
  149. || entry_id > LAYOUT_ID_STACK_DYN_MIN
  150. || (NULL == (layout = get_dynamic_layout_by_id(entry_id))))
  151. {
  152. return -1;
  153. }
  154. entry_start_addr = (size_t)get_enclave_base() + (size_t)layout->entry.rva + entry_offset;
  155. entry_page_count = layout->entry.page_count;
  156. if ((size_t)addr >= entry_start_addr
  157. && (size_t)addr + (page_count << SE_PAGE_SHIFT) <= entry_start_addr + ((size_t)entry_page_count << SE_PAGE_SHIFT))
  158. {
  159. if (fa != NULL)
  160. {
  161. fa->si_flags = layout->entry.si_flags;
  162. fa->attributes = layout->entry.attributes;
  163. }
  164. return 0;
  165. }
  166. else
  167. {
  168. return -1;
  169. }
  170. }
  171. static int check_utility_thread_dynamic_stack(void *addr, size_t page_count, struct dynamic_flags_attributes *fa)
  172. {
  173. return check_dynamic_entry_range(addr, page_count, LAYOUT_ID_STACK_MAX, 0, fa);
  174. }
  175. // Verify if the range specified belongs to a dynamic range recorded in metadata.
  176. static int check_dynamic_range(void *addr, size_t page_count, size_t *offset, struct dynamic_flags_attributes *fa)
  177. {
  178. const volatile layout_t *dt_layout = NULL;
  179. // check for integer overflow
  180. if ((size_t)addr > SIZE_MAX - (page_count << SE_PAGE_SHIFT))
  181. return -1;
  182. // check heap dynamic range
  183. if (0 == check_heap_dyn_range(addr, page_count, fa))
  184. return 0;
  185. // check dynamic stack within utility thread
  186. if (0 == check_utility_thread_dynamic_stack(addr, page_count, fa))
  187. return 0;
  188. // check dynamic thread entries range
  189. if (NULL != (dt_layout = get_dynamic_layout_by_id(LAYOUT_ID_THREAD_GROUP_DYN)))
  190. {
  191. for (uint16_t id = LAYOUT_ID_TCS_DYN; id <= LAYOUT_ID_STACK_DYN_MIN; id++)
  192. for (uint32_t i = 0; i < dt_layout->group.load_times + 1; i++)
  193. {
  194. if (0 == check_dynamic_entry_range(addr, page_count, id, i * ((size_t)dt_layout->group.load_step), fa))
  195. {
  196. if (offset != NULL) *offset = i * ((size_t)dt_layout->group.load_step);
  197. return 0;
  198. }
  199. }
  200. }
  201. else
  202. {
  203. // LAYOUT_ID_THREAD_GROUP_DYN does not exist, but possibly there is one single dynamic thead
  204. for (uint16_t id = LAYOUT_ID_TCS_DYN; id <= LAYOUT_ID_STACK_DYN_MIN; id++)
  205. if (0 == check_dynamic_entry_range(addr, page_count, id, 0, fa))
  206. {
  207. if (offset != NULL) *offset = 0;
  208. return 0;
  209. }
  210. }
  211. return -1;
  212. }
  213. int is_dynamic_thread(void *tcs)
  214. {
  215. struct dynamic_flags_attributes fa;
  216. if ((tcs != NULL) && (check_dynamic_range(tcs, 1, NULL, &fa) == 0) &&
  217. (fa.si_flags == SI_FLAGS_TCS))
  218. {
  219. return true;
  220. }
  221. return false;
  222. }
  223. int is_dynamic_thread_exist()
  224. {
  225. if(!EDMM_supported)
  226. return false;
  227. const volatile layout_t * layout = get_dynamic_layout_by_id(LAYOUT_ID_STACK_DYN_MIN);
  228. if (!layout)
  229. return false;
  230. else
  231. return true;
  232. }
  233. uint32_t get_dynamic_stack_max_page()
  234. {
  235. const volatile layout_t * layout = get_dynamic_layout_by_id(LAYOUT_ID_STACK_MAX);
  236. if (!layout)
  237. return 0;
  238. else
  239. return layout->entry.page_count;
  240. }
  241. #endif
  242. int sgx_accept_forward(si_flags_t sfl, size_t lo, size_t hi)
  243. {
  244. #ifdef SE_SIM
  245. (void)sfl;
  246. (void)lo;
  247. (void)hi;
  248. return 0;
  249. #else
  250. size_t addr = lo;
  251. SE_DECLSPEC_ALIGN(sizeof(sec_info_t)) sec_info_t si;
  252. si.flags = sfl;
  253. for (uint16_t i = 0; i < (sizeof(si.reserved)/sizeof(si.reserved[0])); i++)
  254. si.reserved[i] = 0;
  255. while (addr < hi)
  256. {
  257. int rc = do_eaccept(&si, addr);
  258. if (rc != 0)
  259. abort();
  260. addr += SE_PAGE_SIZE;
  261. }
  262. return 0;
  263. #endif
  264. }
  265. // High level API to EACCEPT pages, mainly used in exception handling
  266. // to deal with stack expansion.
  267. int apply_pages_within_exception(void *start_address, size_t page_count)
  268. {
  269. #ifdef SE_SIM
  270. (void)start_address;
  271. (void)page_count;
  272. return 0;
  273. #else
  274. int rc;
  275. if (start_address == NULL)
  276. return -1;
  277. if (check_dynamic_range(start_address, page_count, NULL, NULL) != 0)
  278. return -1;
  279. size_t start = (size_t)start_address;
  280. size_t end = start + (page_count << SE_PAGE_SHIFT);
  281. rc = sgx_accept_forward_within_exception(start, end);
  282. return rc;
  283. #endif
  284. }
  285. // High level API to EACCEPT pages
  286. int apply_EPC_pages(void *start_address, size_t page_count)
  287. {
  288. #ifdef SE_SIM
  289. (void)start_address;
  290. (void)page_count;
  291. return 0;
  292. #else
  293. int rc;
  294. struct dynamic_flags_attributes fa;
  295. if (start_address == NULL)
  296. return -1;
  297. if (check_dynamic_range(start_address, page_count, NULL, &fa) != 0)
  298. return -1;
  299. size_t start = (size_t)start_address;
  300. size_t end = start + (page_count << SE_PAGE_SHIFT);
  301. if (fa.attributes & PAGE_DIR_GROW_DOWN)
  302. {
  303. rc = sgx_accept_forward(SI_FLAGS_RW | SI_FLAG_PENDING, start, end);
  304. }
  305. else
  306. {
  307. rc = sgx_accept_backward(SI_FLAGS_RW | SI_FLAG_PENDING, start, end);
  308. }
  309. return rc;
  310. #endif
  311. }
  312. // High level API to trim previously EAUG-ed pages.
  313. int trim_EPC_pages(void *start_address, size_t page_count)
  314. {
  315. #ifdef SE_SIM
  316. (void)start_address;
  317. (void)page_count;
  318. return 0;
  319. #else
  320. int rc;
  321. if (start_address == NULL)
  322. return -1;
  323. // check range
  324. if (check_dynamic_range(start_address, page_count, NULL, NULL) != 0)
  325. return -1;
  326. size_t start = (size_t)start_address;
  327. size_t end = start + (page_count << SE_PAGE_SHIFT);
  328. // trim ocall
  329. rc = trim_range_ocall(start, end);
  330. assert(rc == 0);
  331. rc = sgx_accept_forward(SI_FLAG_TRIM | SI_FLAG_MODIFIED, start, end);
  332. assert(rc == 0);
  333. // trim commit ocall
  334. size_t i = start;
  335. while (i < end)
  336. {
  337. rc = trim_range_commit_ocall(i);
  338. assert(rc == 0);
  339. i += SE_PAGE_SIZE;
  340. }
  341. return rc;
  342. #endif
  343. }
// Create a thread dynamically.
// It will add necessary pages and transform one of them into type TCS.
// `ptcs` must be the exact TCS address of one dynamic-thread instance as laid
// out by the signing tool; returns SGX_SUCCESS on success, otherwise
// SGX_ERROR_UNEXPECTED.
sgx_status_t do_add_thread(void *ptcs)
{
#ifdef SE_SIM
    (void)ptcs;
    return SGX_SUCCESS;
#else
    int ret = SGX_ERROR_UNEXPECTED;
    tcs_t *tcs = (tcs_t *)ptcs;
    tcs_t *tcs_template = NULL;
    size_t offset = 0;
    size_t enclave_base = (size_t)get_enclave_base();
    // Resolve which dynamic-thread instance (load-step offset) the TCS is in.
    if ( 0 != check_dynamic_range((void *)tcs, 1, &offset, NULL))
        return SGX_ERROR_UNEXPECTED;
    // check if the tcs provided exactly matches the one in signtool
    const volatile layout_t *tcs_layout = get_dynamic_layout_by_id(LAYOUT_ID_TCS_DYN);
    if (!tcs_layout)
        return SGX_ERROR_UNEXPECTED;
    if ((size_t)(enclave_base + tcs_layout->entry.rva + offset) != (size_t)(tcs))
        return SGX_ERROR_UNEXPECTED;
    // adding page for all the dynamic entries
    for (uint16_t id = LAYOUT_ID_TCS_DYN; id <= LAYOUT_ID_STACK_DYN_MIN; id++)
    {
        const volatile layout_t *layout = get_dynamic_layout_by_id(id);
        if (layout && (layout->entry.attributes & PAGE_ATTR_DYN_THREAD))
        {
            ret = apply_EPC_pages((void *)(enclave_base + layout->entry.rva + offset), layout->entry.page_count);
            if (ret != 0)
                return SGX_ERROR_UNEXPECTED;
        }
    }
    //Copy and initialize TCS
    tcs_template = (tcs_t *)g_global_data.tcs_template;
    memcpy_s(tcs, TCS_SIZE, tcs_template, sizeof(g_global_data.tcs_template));
    //Adjust the tcs fields: the template stores these as displacements
    //relative to the TCS; rewrite them as offsets from the enclave base.
    tcs->ossa = (size_t)GET_PTR(size_t, (void *)tcs, tcs->ossa) - enclave_base;
    tcs->ofs_base = (size_t)GET_PTR(size_t, (void *)tcs, tcs->ofs_base) - enclave_base;
    tcs->ogs_base = (size_t)GET_PTR(size_t, (void *)tcs, tcs->ogs_base) - enclave_base;
    //OCALL for MKTCS: ask the untrusted runtime to convert the page to TCS type
    ret = sgx_ocall(0, tcs);
    if (ret != 0)
        return SGX_ERROR_UNEXPECTED;
    //EACCEPT for MKTCS: accept the single modified TCS page
    ret = sgx_accept_backward(SI_FLAG_TCS | SI_FLAG_MODIFIED, (size_t)tcs, (size_t)tcs + SE_PAGE_SIZE);
    if (ret != 0)
        return SGX_ERROR_UNEXPECTED;
    return SGX_SUCCESS;
#endif
}