// trts_veh.cpp
  1. /*
  2. * Copyright (C) 2011-2018 Intel Corporation. All rights reserved.
  3. *
  4. * Redistribution and use in source and binary forms, with or without
  5. * modification, are permitted provided that the following conditions
  6. * are met:
  7. *
  8. * * Redistributions of source code must retain the above copyright
  9. * notice, this list of conditions and the following disclaimer.
  10. * * Redistributions in binary form must reproduce the above copyright
  11. * notice, this list of conditions and the following disclaimer in
  12. * the documentation and/or other materials provided with the
  13. * distribution.
  14. * * Neither the name of Intel Corporation nor the names of its
  15. * contributors may be used to endorse or promote products derived
  16. * from this software without specific prior written permission.
  17. *
  18. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  19. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  20. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  21. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  22. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  23. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  24. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  25. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  26. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  27. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  28. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  29. *
  30. */
  31. /**
  32. * File: trts_veh.cpp
  33. * Description:
  34. * This file implements the support of custom exception handling.
  35. */
  36. #include "sgx_trts_exception.h"
  37. #include <stdlib.h>
  38. #include "sgx_trts.h"
  39. #include "xsave.h"
  40. #include "arch.h"
  41. #include "sgx_spinlock.h"
  42. #include "thread_data.h"
  43. #include "global_data.h"
  44. #include "trts_internal.h"
  45. #include "trts_inst.h"
  46. #include "util.h"
  47. #include "trts_util.h"
  48. typedef struct _handler_node_t
  49. {
  50. uintptr_t callback;
  51. struct _handler_node_t *next;
  52. } handler_node_t;
  53. static handler_node_t *g_first_node = NULL;
  54. static sgx_spinlock_t g_handler_lock = SGX_SPINLOCK_INITIALIZER;
  55. static uintptr_t g_veh_cookie = 0;
  56. #define ENC_VEH_POINTER(x) (uintptr_t)(x) ^ g_veh_cookie
  57. #define DEC_VEH_POINTER(x) (sgx_exception_handler_t)((x) ^ g_veh_cookie)
  58. // sgx_register_exception_handler()
  59. // register a custom exception handler
  60. // Parameter
  61. // is_first_handler - the order in which the handler should be called.
  62. // if the parameter is nonzero, the handler is the first handler to be called.
  63. // if the parameter is zero, the handler is the last handler to be called.
  64. // exception_handler - a pointer to the handler to be called.
  65. // Return Value
  66. // handler - success
  67. // NULL - fail
  68. void *sgx_register_exception_handler(int is_first_handler, sgx_exception_handler_t exception_handler)
  69. {
  70. // initialize g_veh_cookie for the first time sgx_register_exception_handler is called.
  71. if(unlikely(g_veh_cookie == 0))
  72. {
  73. uintptr_t rand = 0;
  74. do
  75. {
  76. if(SGX_SUCCESS != sgx_read_rand((unsigned char *)&rand, sizeof(rand)))
  77. {
  78. return NULL;
  79. }
  80. } while(rand == 0);
  81. sgx_spin_lock(&g_handler_lock);
  82. if(g_veh_cookie == 0)
  83. {
  84. g_veh_cookie = rand;
  85. }
  86. sgx_spin_unlock(&g_handler_lock);
  87. }
  88. if(!sgx_is_within_enclave((const void*)exception_handler, 0))
  89. {
  90. return NULL;
  91. }
  92. handler_node_t *node = (handler_node_t *)malloc(sizeof(handler_node_t));
  93. if(!node)
  94. {
  95. return NULL;
  96. }
  97. node->callback = ENC_VEH_POINTER(exception_handler);
  98. // write lock
  99. sgx_spin_lock(&g_handler_lock);
  100. if((g_first_node == NULL) || is_first_handler)
  101. {
  102. node->next = g_first_node;
  103. g_first_node = node;
  104. }
  105. else
  106. {
  107. handler_node_t *tmp = g_first_node;
  108. while(tmp->next != NULL)
  109. {
  110. tmp = tmp->next;
  111. }
  112. node->next = NULL;
  113. tmp->next = node;
  114. }
  115. // write unlock
  116. sgx_spin_unlock(&g_handler_lock);
  117. return node;
  118. }
  119. // sgx_unregister_exception_handler()
  120. // unregister a custom exception handler.
  121. // Parameter
  122. // handler - a handler to the custom exception handler previously
  123. // registered using the sgx_register_exception_handler function.
  124. // Return Value
  125. // none zero - success
  126. // 0 - fail
  127. int sgx_unregister_exception_handler(void *handler)
  128. {
  129. if(!handler)
  130. {
  131. return 0;
  132. }
  133. int status = 0;
  134. // write lock
  135. sgx_spin_lock(&g_handler_lock);
  136. if(g_first_node)
  137. {
  138. handler_node_t *node = g_first_node;
  139. if(node == handler)
  140. {
  141. g_first_node = node->next;
  142. status = 1;
  143. }
  144. else
  145. {
  146. while(node->next != NULL)
  147. {
  148. if(node->next == handler)
  149. {
  150. node->next = node->next->next;
  151. status = 1;
  152. break;
  153. }
  154. node = node->next;
  155. }
  156. }
  157. }
  158. // write unlock
  159. sgx_spin_unlock(&g_handler_lock);
  160. if(status) free(handler);
  161. return status;
  162. }
// continue_execution(sgx_exception_info_t *info):
// try to restore the thread context saved in info to current execution context.
// NOTE(review): appears to be implemented elsewhere (assembly); on success it
// does not return to the caller — confirm against the .S implementation.
extern "C" __attribute__((regparm(1))) void continue_execution(sgx_exception_info_t *info);

// internal_handle_exception(sgx_exception_info_t *info):
// the 2nd phase exception handling, which traverses the registered exception
// handlers. If the exception can be handled, execution continues at the saved
// context; otherwise we abort, go back to the 1st phase, and the default
// handler takes over.
extern "C" __attribute__((regparm(1))) void internal_handle_exception(sgx_exception_info_t *info)
{
    int status = EXCEPTION_CONTINUE_SEARCH;
    handler_node_t *node = NULL;
    thread_data_t *thread_data = get_thread_data();
    size_t size = 0;
    uintptr_t *nhead = NULL;   // heap snapshot of the (encoded) handler callbacks
    uintptr_t *ntmp = NULL;
    uintptr_t xsp = 0;

    // A negative flag means a previous exception could not be handled;
    // give up instead of recursing on a broken state.
    if (thread_data->exception_flag < 0)
        goto failed_end;
    thread_data->exception_flag++;   // count exception nesting depth

    // read lock
    sgx_spin_lock(&g_handler_lock);
    // First pass over the list: count handlers to size the snapshot buffer.
    node = g_first_node;
    while(node != NULL)
    {
        size += sizeof(uintptr_t);
        node = node->next;
    }
    // There's no exception handler registered
    if (size == 0)
    {
        sgx_spin_unlock(&g_handler_lock);
        //exception cannot be handled
        thread_data->exception_flag = -1;
        //instruction triggering the exception will be executed again.
        continue_execution(info);
    }
    if ((nhead = (uintptr_t *)malloc(size)) == NULL)
    {
        sgx_spin_unlock(&g_handler_lock);
        goto failed_end;
    }
    // Second pass: copy the still-encoded callbacks so the handlers can be
    // invoked after the lock is released (handlers may take arbitrary time).
    ntmp = nhead;
    node = g_first_node;
    while(node != NULL)
    {
        *ntmp = node->callback;
        ntmp++;
        node = node->next;
    }
    // read unlock
    sgx_spin_unlock(&g_handler_lock);

    // call exception handler until EXCEPTION_CONTINUE_EXECUTION is returned
    ntmp = nhead;
    while(size > 0)
    {
        sgx_exception_handler_t handler = DEC_VEH_POINTER(*ntmp);
        status = handler(info);
        if(EXCEPTION_CONTINUE_EXECUTION == status)
        {
            break;
        }
        ntmp++;
        size -= sizeof(sgx_exception_handler_t);
    }
    free(nhead);

    // call default handler
    // ignore invalid return value, treat to EXCEPTION_CONTINUE_SEARCH
    // check SP to be written on SSA is pointing to the trusted stack
    xsp = info->cpu_context.REG(sp);
    if (!is_valid_sp(xsp))
    {
        goto failed_end;
    }

    if(EXCEPTION_CONTINUE_EXECUTION == status)
    {
        //exception is handled, decrease the nested exception count
        thread_data->exception_flag--;
    }
    else
    {
        //exception cannot be handled
        thread_data->exception_flag = -1;
    }
    //instruction triggering the exception will be executed again.
    continue_execution(info);

failed_end:
    thread_data->exception_flag = -1; // mark the current exception cannot be handled
    abort(); // throw abortion
}
  252. static int expand_stack_by_pages(void *start_addr, size_t page_count)
  253. {
  254. int ret = -1;
  255. if ((start_addr == NULL) || (page_count == 0))
  256. return -1;
  257. ret = apply_pages_within_exception(start_addr, page_count);
  258. return ret;
  259. }
// trts_handle_exception(void *tcs)
// The entry point for the 1st phase of exception handling.
// Validates the exception state saved in SSA[0], builds an
// sgx_exception_info_t on the interrupted thread's trusted stack, and
// rewrites the saved context so that resuming the enclave runs
// internal_handle_exception (the 2nd phase).
// Parameter
//   tcs - pointer to the TCS of the faulting thread
// Return Value
//   SGX_SUCCESS on success; SGX_ERROR_* after marking the enclave crashed.
extern "C" sgx_status_t trts_handle_exception(void *tcs)
{
    thread_data_t *thread_data = get_thread_data();
    ssa_gpr_t *ssa_gpr = NULL;
    sgx_exception_info_t *info = NULL;
    uintptr_t sp, *new_sp = NULL;
    size_t size = 0;

    if (tcs == NULL) goto default_handler;
    if (check_static_stack_canary(tcs) != 0)
        goto default_handler;

    if(get_enclave_state() != ENCLAVE_INIT_DONE)
    {
        goto default_handler;
    }

    // check if the exception is raised from 2nd phase
    if(thread_data->exception_flag == -1) {
        goto default_handler;
    }

    // The TCS must belong to the current thread and must sit exactly one
    // page below the page containing the first SSA frame.
    if ((TD2TCS(thread_data) != tcs)
        || (((thread_data->first_ssa_gpr)&(~0xfff)) - SE_PAGE_SIZE) != (uintptr_t)tcs) {
        goto default_handler;
    }

    // no need to check the result of ssa_gpr because thread_data is always trusted
    ssa_gpr = reinterpret_cast<ssa_gpr_t *>(thread_data->first_ssa_gpr);

    sp = ssa_gpr->REG(sp);
    if(!is_stack_addr((void*)sp, 0)) // check stack overrun only, alignment will be checked after exception handled
    {
        g_enclave_state = ENCLAVE_CRASHED;
        return SGX_ERROR_STACK_OVERRUN;
    }

    size = 0;
#ifdef SE_GNU64
    size += 128; // x86_64 requires a 128-bytes red zone, which begins directly
                 // after the return addr and includes func's arguments
#endif
    // decrease the stack to give space for info
    size += sizeof(sgx_exception_info_t);
    sp -= size;
    sp = sp & ~0xF;   // keep the new stack pointer 16-byte aligned

    // check the decreased sp to make sure it is in the trusted stack range
    if(!is_stack_addr((void *)sp, size))
    {
        g_enclave_state = ENCLAVE_CRASHED;
        return SGX_ERROR_STACK_OVERRUN;
    }
    info = (sgx_exception_info_t *)sp;

    // decrease the stack to save the SSA[0]->ip
    size = sizeof(uintptr_t);
    sp -= size;
    if(!is_stack_addr((void *)sp, size))
    {
        g_enclave_state = ENCLAVE_CRASHED;
        return SGX_ERROR_STACK_OVERRUN;
    }

    // sp is within limit_addr and commit_addr; currently only SGX 2.0 under
    // hardware mode will enter this branch (dynamic stack expansion).
    if((size_t)sp < thread_data->stack_commit_addr)
    {
        int ret = -1;
        size_t page_aligned_delta = 0;
        /* try to allocate memory dynamically */
        page_aligned_delta = ROUND_TO(thread_data->stack_commit_addr - (size_t)sp, SE_PAGE_SIZE);
        if ((thread_data->stack_commit_addr > page_aligned_delta)
            && ((thread_data->stack_commit_addr - page_aligned_delta) >= thread_data->stack_limit_addr))
        {
            ret = expand_stack_by_pages((void *)(thread_data->stack_commit_addr - page_aligned_delta), (page_aligned_delta >> SE_PAGE_SHIFT));
        }
        if (ret == 0)
        {
            // Stack grown; the faulting instruction is retried with new pages.
            thread_data->stack_commit_addr -= page_aligned_delta;
            return SGX_SUCCESS;
        }
        else
        {
            g_enclave_state = ENCLAVE_CRASHED;
            return SGX_ERROR_STACK_OVERRUN;
        }
    }

    if(ssa_gpr->exit_info.valid != 1)
    {   // exception handlers are not allowed to call in a non-exception state
        goto default_handler;
    }

    // initialize the info with SSA[0]
    info->exception_vector = (sgx_exception_vector_t)ssa_gpr->exit_info.vector;
    info->exception_type = (sgx_exception_type_t)ssa_gpr->exit_info.exit_type;
    info->cpu_context.REG(ax) = ssa_gpr->REG(ax);
    info->cpu_context.REG(cx) = ssa_gpr->REG(cx);
    info->cpu_context.REG(dx) = ssa_gpr->REG(dx);
    info->cpu_context.REG(bx) = ssa_gpr->REG(bx);
    info->cpu_context.REG(sp) = ssa_gpr->REG(sp);
    info->cpu_context.REG(bp) = ssa_gpr->REG(bp);
    info->cpu_context.REG(si) = ssa_gpr->REG(si);
    info->cpu_context.REG(di) = ssa_gpr->REG(di);
    info->cpu_context.REG(flags) = ssa_gpr->REG(flags);
    info->cpu_context.REG(ip) = ssa_gpr->REG(ip);
#ifdef SE_64
    info->cpu_context.r8 = ssa_gpr->r8;
    info->cpu_context.r9 = ssa_gpr->r9;
    info->cpu_context.r10 = ssa_gpr->r10;
    info->cpu_context.r11 = ssa_gpr->r11;
    info->cpu_context.r12 = ssa_gpr->r12;
    info->cpu_context.r13 = ssa_gpr->r13;
    info->cpu_context.r14 = ssa_gpr->r14;
    info->cpu_context.r15 = ssa_gpr->r15;
#endif

    // Redirect the saved context: on resume, execution enters
    // internal_handle_exception on the freshly carved stack.
    new_sp = (uintptr_t *)sp;
    ssa_gpr->REG(ip) = (size_t)internal_handle_exception; // prepare the ip for 2nd phase handling
    ssa_gpr->REG(sp) = (size_t)new_sp; // new stack for internal_handle_exception
    ssa_gpr->REG(ax) = (size_t)info; // 1st parameter (info) for LINUX32
    ssa_gpr->REG(di) = (size_t)info; // 1st parameter (info) for LINUX64, LINUX32 also uses it while restoring the context
    *new_sp = info->cpu_context.REG(ip); // for debugger to get call trace

    //mark valid to 0 to prevent eenter again
    ssa_gpr->exit_info.valid = 0;

    return SGX_SUCCESS;

default_handler:
    g_enclave_state = ENCLAVE_CRASHED;
    return SGX_ERROR_ENCLAVE_CRASHED;
}