sgx_thread.c

#include "assert.h"
#include "pal_internal.h"
#include "sgx_internal.h"
#include "pal_security.h"

#include <linux/futex.h>
#include <linux/signal.h>
#include <asm/errno.h>
#include <asm/signal.h>
#include <asm/prctl.h>

#include "sgx_enclave.h"
#include "debugger/sgx_gdb.h"

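/* Mapping between enclave TCS pages and the untrusted threads currently bound to them;
 * a tid of 0 marks a free slot. map_tcs()/unmap_tcs() serialize updates via tcs_lock. */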
struct thread_map {
    unsigned int    tid;
    sgx_arch_tcs_t* tcs;
};

static sgx_arch_tcs_t* enclave_tcs;
static int enclave_thread_num;
static struct thread_map* enclave_thread_map;

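/* Minimal test-and-test-and-set spinlock used to protect the TCS map. */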
static void spin_lock(struct atomic_int* p) {
    while (atomic_cmpxchg(p, 0, 1)) {
        while (atomic_read(p) == 1)
            CPU_RELAX();
    }
}

static void spin_unlock(struct atomic_int* p) {
    atomic_set(p, 0);
}

static struct atomic_int tcs_lock = ATOMIC_INIT(0);

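/* Initialize the TCS mapper: one slot per enclave TCS page, all initially free (tid == 0). */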
void create_tcs_mapper(void* tcs_base, unsigned int thread_num) {
    size_t thread_map_size = ALIGN_UP_POW2(sizeof(struct thread_map) * thread_num,
                                           PRESET_PAGESIZE);

    enclave_tcs        = tcs_base;
    enclave_thread_num = thread_num;
    enclave_thread_map = (struct thread_map*)INLINE_SYSCALL(mmap, 6, NULL, thread_map_size,
                                                            PROT_READ | PROT_WRITE,
                                                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    for (uint32_t i = 0; i < thread_num; i++) {
        enclave_thread_map[i].tid = 0;
        enclave_thread_map[i].tcs = &enclave_tcs[i];
    }
}

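/* Bind a free TCS slot to the untrusted thread `tid`, store the chosen TCS page in the
 * current thread's TCB, and publish the tid to the debugger info page. If no slot is
 * free, tcb->tcs is left untouched (the caller checks for NULL and reports the error). */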
void map_tcs(unsigned int tid) {
    spin_lock(&tcs_lock);
    for (int i = 0; i < enclave_thread_num; i++)
        if (!enclave_thread_map[i].tid) {
            enclave_thread_map[i].tid = tid;
            get_tcb_linux()->tcs = enclave_thread_map[i].tcs;
            ((struct enclave_dbginfo*)DBGINFO_ADDR)->thread_tids[i] = tid;
            break;
        }
    spin_unlock(&tcs_lock);
}

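/* Release the TCS slot of the current thread so it can be reused, and clear the
 * corresponding debugger tid entry. */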
void unmap_tcs(void) {
    int index = get_tcb_linux()->tcs - enclave_tcs;
    struct thread_map* map = &enclave_thread_map[index];

    assert(index < enclave_thread_num);

    spin_lock(&tcs_lock);
    get_tcb_linux()->tcs = NULL;
    ((struct enclave_dbginfo*)DBGINFO_ADDR)->thread_tids[index] = 0;
    map->tid = 0;
    spin_unlock(&tcs_lock);
}

/*
 * pal_thread_init(): An initialization wrapper of a newly-created thread (including
 * the first thread). This function accepts a TCB pointer to be set to the GS register
 * of the thread. The rest of the TCB is used as the alternative stack for signal
 * handling. Notice that this sets up the untrusted thread -- an enclave thread is set
 * up by other means (e.g., the GS register is set by an SGX-enforced TCS.OGSBASGX).
 */
int pal_thread_init(void* tcbptr) {
    PAL_TCB_LINUX* tcb = tcbptr;
    int ret;

    /* set GS reg of this thread to thread's TCB; after this point, can use get_tcb_linux() */
    ret = INLINE_SYSCALL(arch_prctl, 2, ARCH_SET_GS, tcb);
    if (IS_ERR(ret)) {
        ret = -EPERM;
        goto out;
    }

    if (tcb->alt_stack) {
        /* align the alternative stack to 16 bytes */
        void* alt_stack = ALIGN_DOWN_PTR(tcb, 16);
        assert(alt_stack > tcb->alt_stack);

        stack_t ss;
        ss.ss_sp    = alt_stack;
        ss.ss_flags = 0;
        ss.ss_size  = alt_stack - tcb->alt_stack;

        ret = INLINE_SYSCALL(sigaltstack, 2, &ss, NULL);
        if (IS_ERR(ret)) {
            ret = -EPERM;
            goto out;
        }
    }

    int tid = INLINE_SYSCALL(gettid, 0);
    map_tcs(tid); /* updates tcb->tcs */

    if (!tcb->tcs) {
        SGX_DBG(DBG_E,
                "There are no available TCS pages left for a new thread!\n"
                "Please try to increase sgx.thread_num in the manifest.\n"
                "The current value is %d\n", enclave_thread_num);
        ret = -ENOMEM;
        goto out;
    }

    if (!tcb->stack) {
        /* only the first thread doesn't have a stack (it uses the one provided by Linux); the
         * first thread calls ecall_enclave_start() instead of ecall_thread_start(), so just
         * return */
        return 0;
    }

    /* not-first (child) thread, start it */
    ecall_thread_start();

    unmap_tcs();
    ret = 0;
out:
    INLINE_SYSCALL(munmap, 2, tcb->stack, THREAD_STACK_SIZE + ALT_STACK_SIZE);
    return ret;
}

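/* Terminate the current untrusted thread: block async signals, tear down the TCB and
 * alternative stack, then free the thread stack and exit via raw syscalls (the stack
 * being freed can no longer be used, hence the inline asm below). */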
noreturn void thread_exit(int status) {
    PAL_TCB_LINUX* tcb = get_tcb_linux();

    /* technically, async signals were already blocked before calling this function
     * (by sgx_ocall_exit()), but we keep this here to be future-proof */
    block_async_signals(true);

    if (tcb->alt_stack) {
        stack_t ss;
        ss.ss_sp    = NULL;
        ss.ss_flags = SS_DISABLE;
        ss.ss_size  = 0;

        /* take precautions to unset the TCB and the alternative stack first */
        INLINE_SYSCALL(arch_prctl, 2, ARCH_SET_GS, 0);
        INLINE_SYSCALL(sigaltstack, 2, &ss, NULL);
    }

    /* free the thread stack (via munmap) and exit; note that exit() needs a "status" arg
     * but it could be allocated on the stack being freed, so we must keep it in a register
     * and use inline asm */
    __asm__ volatile("syscall \n\t"           /* all args are already prepared, call munmap */
                     "movq %%rdx, %%rax \n\t" /* prepare for exit: rax = __NR_exit */
                     "movq %%rbx, %%rdi \n\t" /* prepare for exit: rdi = status */
                     "syscall \n\t"           /* all args are prepared, call exit */
                     : /* no output regs since we don't return from exit */
                     : "a"(__NR_munmap), "D"(tcb->stack), "S"(THREAD_STACK_SIZE + ALT_STACK_SIZE),
                       "d"(__NR_exit), "b"(status)
                     : "cc", "rcx", "r11", "memory" /* syscall instr clobbers cc, rcx, and r11 */
    );

    while (true) {
        /* nothing */
    }
}

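/* Create a new untrusted thread: allocate a combined region for the thread stack, the
 * alternative signal stack and the PAL TCB, then clone() into pal_thread_init(). Returns
 * 0 on success or a negative errno value on failure. */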
int clone_thread(void) {
    int ret = 0;

    void* stack = (void*)INLINE_SYSCALL(mmap, 6, NULL, THREAD_STACK_SIZE + ALT_STACK_SIZE,
                                        PROT_READ | PROT_WRITE,
                                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (IS_ERR_P(stack))
        return -ENOMEM;

    /* Stack layout for the new thread looks like this (recall that stacks grow towards lower
     * addresses on Linux on x86-64):
     *
     *       stack +--> +-------------------+
     *                  |    child stack    | THREAD_STACK_SIZE
     * child_stack +--> +-------------------+
     *                  |  alternate stack  | ALT_STACK_SIZE - sizeof(PAL_TCB_LINUX)
     *         tcb +--> +-------------------+
     *                  |      PAL TCB      | sizeof(PAL_TCB_LINUX)
     *                  +-------------------+
     *
     * Note that this whole memory region is zeroed out because we use mmap(). */

    void* child_stack_top = stack + THREAD_STACK_SIZE;

    /* initialize the TCB at the top of the alternative stack */
    PAL_TCB_LINUX* tcb = child_stack_top + ALT_STACK_SIZE - sizeof(PAL_TCB_LINUX);
    tcb->common.self = &tcb->common;
    tcb->alt_stack   = child_stack_top;
    tcb->stack       = stack;
    tcb->tcs         = NULL; /* initialized by the child thread */

    /* align child_stack to 16 bytes */
    child_stack_top = ALIGN_DOWN_PTR(child_stack_top, 16);

    int dummy_parent_tid_field = 0;
    ret = clone(pal_thread_init, child_stack_top,
                CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SYSVSEM |
                CLONE_THREAD | CLONE_SIGHAND | CLONE_PTRACE |
                CLONE_PARENT_SETTID,
                (void*)tcb, &dummy_parent_tid_field, NULL);

    if (IS_ERR(ret)) {
        INLINE_SYSCALL(munmap, 2, stack, THREAD_STACK_SIZE + ALT_STACK_SIZE);
        return -ERRNO(ret);
    }

    return 0;
}

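/* Interrupt the untrusted thread currently bound to the given TCS page by sending it
 * SIGCONT. Returns -EINVAL if the TCS index is out of range or no thread is bound to it. */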
int interrupt_thread(void* tcs) {
    int index = (sgx_arch_tcs_t*)tcs - enclave_tcs;
    struct thread_map* map = &enclave_thread_map[index];

    if (index >= enclave_thread_num)
        return -EINVAL;

    if (!map->tid)
        return -EINVAL;

    INLINE_SYSCALL(tgkill, 3, pal_enclave.pal_sec.pid, map->tid, SIGCONT);
    return 0;
}