/* sgx_main.c — untrusted (host) part of the Graphene-SGX PAL loader. */
  1. #include <pal_linux.h>
  2. #include <pal_linux_error.h>
  3. #include <pal_rtld.h>
  4. #include <hex.h>
  5. #include "sgx_internal.h"
  6. #include "sgx_tls.h"
  7. #include "sgx_enclave.h"
  8. #include "debugger/sgx_gdb.h"
  9. #include <asm/fcntl.h>
  10. #include <asm/socket.h>
  11. #include <linux/fs.h>
  12. #include <linux/in.h>
  13. #include <linux/in6.h>
  14. #include <asm/errno.h>
  15. #include <ctype.h>
  16. #include <sysdep.h>
  17. #include <sysdeps/generic/ldsodefs.h>
/* Host page size; initialized to the build-time preset (4096 on x86-64). */
size_t g_page_size = PRESET_PAGESIZE;

/* The single enclave managed by this loader process (one enclave per process). */
struct pal_enclave pal_enclave;
  20. static inline
  21. char * alloc_concat(const char * p, size_t plen,
  22. const char * s, size_t slen)
  23. {
  24. plen = (plen != (size_t)-1) ? plen : (p ? strlen(p) : 0);
  25. slen = (slen != (size_t)-1) ? slen : (s ? strlen(s) : 0);
  26. char * buf = malloc(plen + slen + 1);
  27. if (!buf)
  28. return NULL;
  29. if (plen)
  30. memcpy(buf, p, plen);
  31. if (slen)
  32. memcpy(buf + plen, s, slen);
  33. buf[plen + slen] = '\0';
  34. return buf;
  35. }
  36. static unsigned long parse_int (const char * str)
  37. {
  38. unsigned long num = 0;
  39. int radix = 10;
  40. char c;
  41. if (str[0] == '0') {
  42. str++;
  43. radix = 8;
  44. if (str[0] == 'x') {
  45. str++;
  46. radix = 16;
  47. }
  48. }
  49. while ((c = *(str++))) {
  50. int8_t val = hex2dec(c);
  51. if (val < 0)
  52. break;
  53. if ((uint8_t) val >= radix)
  54. break;
  55. num = num * radix + (uint8_t) val;
  56. }
  57. if (c == 'G' || c == 'g')
  58. num *= 1024 * 1024 * 1024;
  59. else if (c == 'M' || c == 'm')
  60. num *= 1024 * 1024;
  61. else if (c == 'K' || c == 'k')
  62. num *= 1024;
  63. return num;
  64. }
  65. static char * resolve_uri (const char * uri, const char ** errstring)
  66. {
  67. if (!strstartswith_static(uri, URI_PREFIX_FILE)) {
  68. *errstring = "Invalid URI";
  69. return NULL;
  70. }
  71. char path_buf[URI_MAX];
  72. size_t len = URI_MAX;
  73. int ret = get_norm_path(uri + 5, path_buf, &len);
  74. if (ret < 0) {
  75. *errstring = "Invalid URI";
  76. return NULL;
  77. }
  78. return alloc_concat(URI_PREFIX_FILE, URI_PREFIX_FILE_LEN, path_buf, len);
  79. }
  80. static
  81. int scan_enclave_binary (int fd, unsigned long * base, unsigned long * size,
  82. unsigned long * entry)
  83. {
  84. int ret = 0;
  85. if (IS_ERR(ret = INLINE_SYSCALL(lseek, 3, fd, 0, SEEK_SET)))
  86. return -ERRNO(ret);
  87. char filebuf[FILEBUF_SIZE];
  88. ret = INLINE_SYSCALL(read, 3, fd, filebuf, FILEBUF_SIZE);
  89. if (IS_ERR(ret))
  90. return -ERRNO(ret);
  91. if ((size_t)ret < sizeof(ElfW(Ehdr)))
  92. return -ENOEXEC;
  93. const ElfW(Ehdr) * header = (void *) filebuf;
  94. const ElfW(Phdr) * phdr = (void *) filebuf + header->e_phoff;
  95. const ElfW(Phdr) * ph;
  96. if (memcmp(header->e_ident, ELFMAG, SELFMAG) != 0)
  97. return -ENOEXEC;
  98. struct loadcmd {
  99. ElfW(Addr) mapstart, mapend;
  100. } loadcmds[16], *c;
  101. int nloadcmds = 0;
  102. for (ph = phdr ; ph < &phdr[header->e_phnum] ; ph++)
  103. if (ph->p_type == PT_LOAD) {
  104. if (nloadcmds == 16)
  105. return -EINVAL;
  106. c = &loadcmds[nloadcmds++];
  107. c->mapstart = ALLOC_ALIGN_DOWN(ph->p_vaddr);
  108. c->mapend = ALLOC_ALIGN_UP(ph->p_vaddr + ph->p_memsz);
  109. }
  110. *base = loadcmds[0].mapstart;
  111. *size = loadcmds[nloadcmds - 1].mapend - loadcmds[0].mapstart;
  112. if (entry)
  113. *entry = header->e_entry;
  114. return 0;
  115. }
  116. static
  117. int load_enclave_binary (sgx_arch_secs_t * secs, int fd,
  118. unsigned long base, unsigned long prot)
  119. {
  120. int ret = 0;
  121. if (IS_ERR(ret = INLINE_SYSCALL(lseek, 3, fd, 0, SEEK_SET)))
  122. return -ERRNO(ret);
  123. char filebuf[FILEBUF_SIZE];
  124. ret = INLINE_SYSCALL(read, 3, fd, filebuf, FILEBUF_SIZE);
  125. if (IS_ERR(ret))
  126. return -ERRNO(ret);
  127. const ElfW(Ehdr) * header = (void *) filebuf;
  128. const ElfW(Phdr) * phdr = (void *) filebuf + header->e_phoff;
  129. const ElfW(Phdr) * ph;
  130. struct loadcmd {
  131. ElfW(Addr) mapstart, mapend, datastart, dataend, allocend;
  132. unsigned int mapoff;
  133. int prot;
  134. } loadcmds[16], *c;
  135. int nloadcmds = 0;
  136. for (ph = phdr ; ph < &phdr[header->e_phnum] ; ph++)
  137. if (ph->p_type == PT_LOAD) {
  138. if (nloadcmds == 16)
  139. return -EINVAL;
  140. c = &loadcmds[nloadcmds++];
  141. c->mapstart = ALLOC_ALIGN_DOWN(ph->p_vaddr);
  142. c->mapend = ALLOC_ALIGN_UP(ph->p_vaddr + ph->p_filesz);
  143. c->datastart = ph->p_vaddr;
  144. c->dataend = ph->p_vaddr + ph->p_filesz;
  145. c->allocend = ph->p_vaddr + ph->p_memsz;
  146. c->mapoff = ALLOC_ALIGN_DOWN(ph->p_offset);
  147. c->prot = (ph->p_flags & PF_R ? PROT_READ : 0)|
  148. (ph->p_flags & PF_W ? PROT_WRITE : 0)|
  149. (ph->p_flags & PF_X ? PROT_EXEC : 0)|prot;
  150. }
  151. base -= loadcmds[0].mapstart;
  152. for (c = loadcmds; c < &loadcmds[nloadcmds] ; c++) {
  153. ElfW(Addr) zero = c->dataend;
  154. ElfW(Addr) zeroend = ALLOC_ALIGN_UP(c->allocend);
  155. ElfW(Addr) zeropage = ALLOC_ALIGN_UP(zero);
  156. if (zeroend < zeropage)
  157. zeropage = zeroend;
  158. if (c->mapend > c->mapstart) {
  159. void * addr = (void *) INLINE_SYSCALL(mmap, 6, NULL,
  160. c->mapend - c->mapstart,
  161. PROT_READ|PROT_WRITE,
  162. MAP_PRIVATE | MAP_FILE,
  163. fd, c->mapoff);
  164. if (IS_ERR_P(addr))
  165. return -ERRNO_P(addr);
  166. if (c->datastart > c->mapstart)
  167. memset(addr, 0, c->datastart - c->mapstart);
  168. if (zeropage > zero)
  169. memset(addr + zero - c->mapstart, 0, zeropage - zero);
  170. ret = add_pages_to_enclave(secs, (void *) base + c->mapstart, addr,
  171. c->mapend - c->mapstart,
  172. SGX_PAGE_REG, c->prot, 0,
  173. (c->prot & PROT_EXEC) ? "code" : "data");
  174. INLINE_SYSCALL(munmap, 2, addr, c->mapend - c->mapstart);
  175. if (ret < 0)
  176. return ret;
  177. }
  178. if (zeroend > zeropage) {
  179. ret = add_pages_to_enclave(secs, (void *) base + zeropage, NULL,
  180. zeroend - zeropage,
  181. SGX_PAGE_REG, c->prot, false, "bss");
  182. if (ret < 0)
  183. return ret;
  184. }
  185. }
  186. return 0;
  187. }
  188. int initialize_enclave (struct pal_enclave * enclave)
  189. {
  190. int ret = 0;
  191. int enclave_image = -1;
  192. char* enclave_uri = NULL;
  193. sgx_arch_token_t enclave_token;
  194. sgx_arch_enclave_css_t enclave_sigstruct;
  195. sgx_arch_secs_t enclave_secs;
  196. unsigned long enclave_entry_addr;
  197. unsigned long heap_min = DEFAULT_HEAP_MIN;
  198. /* this array may overflow the stack, so we allocate it in BSS */
  199. static void* tcs_addrs[MAX_DBG_THREADS];
  200. char cfgbuf[CONFIG_MAX];
  201. const char* errstring = "out of memory";
  202. /* Use sgx.enclave_pal_file from manifest if exists */
  203. if (get_config(enclave->config, "sgx.enclave_pal_file", cfgbuf, sizeof(cfgbuf)) > 0) {
  204. enclave_uri = resolve_uri(cfgbuf, &errstring);
  205. } else {
  206. enclave_uri = alloc_concat(URI_PREFIX_FILE, URI_PREFIX_FILE_LEN, ENCLAVE_PAL_FILENAME, -1);
  207. }
  208. if (!enclave_uri) {
  209. SGX_DBG(DBG_E,
  210. "Cannot open in-enclave PAL: %s (incorrect sgx.enclave_pal_file in manifest?)\n",
  211. errstring);
  212. ret = -EINVAL;
  213. goto out;
  214. }
  215. enclave_image = INLINE_SYSCALL(open, 3, enclave_uri + URI_PREFIX_FILE_LEN, O_RDONLY, 0);
  216. if (IS_ERR(enclave_image)) {
  217. SGX_DBG(DBG_E, "Cannot find enclave image: %s\n", enclave_uri);
  218. ret = -ERRNO(enclave_image);
  219. goto out;
  220. }
  221. /* Reading sgx.enclave_size from manifest */
  222. if (get_config(enclave->config, "sgx.enclave_size", cfgbuf, sizeof(cfgbuf)) <= 0) {
  223. SGX_DBG(DBG_E, "Enclave size is not specified\n");
  224. ret = -EINVAL;
  225. goto out;
  226. }
  227. enclave->size = parse_int(cfgbuf);
  228. if (!enclave->size || !IS_POWER_OF_2(enclave->size)) {
  229. SGX_DBG(DBG_E, "Enclave size not a power of two (an SGX-imposed requirement)\n");
  230. ret = -EINVAL;
  231. goto out;
  232. }
  233. /* Reading sgx.thread_num from manifest */
  234. if (get_config(enclave->config, "sgx.thread_num", cfgbuf, sizeof(cfgbuf)) > 0) {
  235. enclave->thread_num = parse_int(cfgbuf);
  236. if (enclave->thread_num > MAX_DBG_THREADS) {
  237. SGX_DBG(DBG_E, "Too many threads to debug\n");
  238. ret = -EINVAL;
  239. goto out;
  240. }
  241. } else {
  242. enclave->thread_num = 1;
  243. }
  244. if (get_config(enclave->config, "sgx.static_address", cfgbuf, sizeof(cfgbuf)) > 0 && cfgbuf[0] == '1') {
  245. enclave->baseaddr = ALIGN_DOWN_POW2(heap_min, enclave->size);
  246. } else {
  247. enclave->baseaddr = ENCLAVE_HIGH_ADDRESS;
  248. heap_min = 0;
  249. }
  250. ret = read_enclave_token(enclave->token, &enclave_token);
  251. if (ret < 0) {
  252. SGX_DBG(DBG_E, "Reading enclave token failed: %d\n", -ret);
  253. goto out;
  254. }
  255. ret = read_enclave_sigstruct(enclave->sigfile, &enclave_sigstruct);
  256. if (ret < 0) {
  257. SGX_DBG(DBG_E, "Reading enclave sigstruct failed: %d\n", -ret);
  258. goto out;
  259. }
  260. memset(&enclave_secs, 0, sizeof(enclave_secs));
  261. enclave_secs.base = enclave->baseaddr;
  262. enclave_secs.size = enclave->size;
  263. ret = create_enclave(&enclave_secs, &enclave_token);
  264. if (ret < 0) {
  265. SGX_DBG(DBG_E, "Creating enclave failed: %d\n", -ret);
  266. goto out;
  267. }
  268. enclave->ssaframesize = enclave_secs.ssa_frame_size * g_page_size;
  269. struct stat stat;
  270. ret = INLINE_SYSCALL(fstat, 2, enclave->manifest, &stat);
  271. if (IS_ERR(ret)) {
  272. SGX_DBG(DBG_E, "Reading manifest file's size failed: %d\n", -ret);
  273. ret = -ERRNO(ret);
  274. goto out;
  275. }
  276. int manifest_size = stat.st_size;
  277. /* Start populating enclave memory */
  278. struct mem_area {
  279. const char * desc;
  280. bool skip_eextend;
  281. int fd;
  282. bool is_binary; /* only meaningful if fd != -1 */
  283. unsigned long addr, size, prot;
  284. enum sgx_page_type type;
  285. };
  286. struct mem_area * areas =
  287. __alloca(sizeof(areas[0]) * (10 + enclave->thread_num));
  288. int area_num = 0;
  289. /* The manifest needs to be allocated at the upper end of the enclave
  290. * memory. That's used by pal_linux_main to find the manifest area. So add
  291. * it first to the list with memory areas. */
  292. areas[area_num] = (struct mem_area) {
  293. .desc = "manifest", .skip_eextend = false, .fd = enclave->manifest,
  294. .is_binary = false, .addr = 0, .size = ALLOC_ALIGN_UP(manifest_size),
  295. .prot = PROT_READ, .type = SGX_PAGE_REG
  296. };
  297. area_num++;
  298. areas[area_num] = (struct mem_area) {
  299. .desc = "ssa", .skip_eextend = false, .fd = -1,
  300. .is_binary = false, .addr = 0,
  301. .size = enclave->thread_num * enclave->ssaframesize * SSAFRAMENUM,
  302. .prot = PROT_READ | PROT_WRITE, .type = SGX_PAGE_REG
  303. };
  304. struct mem_area* ssa_area = &areas[area_num++];
  305. areas[area_num] = (struct mem_area) {
  306. .desc = "tcs", .skip_eextend = false, .fd = -1,
  307. .is_binary = false, .addr = 0, .size = enclave->thread_num * g_page_size,
  308. .prot = 0, .type = SGX_PAGE_TCS
  309. };
  310. struct mem_area* tcs_area = &areas[area_num++];
  311. areas[area_num] = (struct mem_area) {
  312. .desc = "tls", .skip_eextend = false, .fd = -1,
  313. .is_binary = false, .addr = 0, .size = enclave->thread_num * g_page_size,
  314. .prot = PROT_READ | PROT_WRITE, .type = SGX_PAGE_REG
  315. };
  316. struct mem_area* tls_area = &areas[area_num++];
  317. struct mem_area* stack_areas = &areas[area_num]; /* memorize for later use */
  318. for (uint32_t t = 0; t < enclave->thread_num; t++) {
  319. areas[area_num] = (struct mem_area) {
  320. .desc = "stack", .skip_eextend = false, .fd = -1,
  321. .is_binary = false, .addr = 0, .size = ENCLAVE_STACK_SIZE,
  322. .prot = PROT_READ | PROT_WRITE, .type = SGX_PAGE_REG
  323. };
  324. area_num++;
  325. }
  326. areas[area_num] = (struct mem_area) {
  327. .desc = "pal", .skip_eextend = false, .fd = enclave_image,
  328. .is_binary = true, .addr = 0, .size = 0 /* set below */,
  329. .prot = 0, .type = SGX_PAGE_REG
  330. };
  331. struct mem_area* pal_area = &areas[area_num++];
  332. ret = scan_enclave_binary(enclave_image, &pal_area->addr, &pal_area->size, &enclave_entry_addr);
  333. if (ret < 0) {
  334. SGX_DBG(DBG_E, "Scanning Pal binary (%s) failed: %d\n", enclave_uri, -ret);
  335. goto out;
  336. }
  337. struct mem_area* exec_area = NULL;
  338. if (enclave->exec != -1) {
  339. areas[area_num] = (struct mem_area) {
  340. .desc = "exec", .skip_eextend = false, .fd = enclave->exec,
  341. .is_binary = true, .addr = 0, .size = 0 /* set below */,
  342. .prot = PROT_WRITE, .type = SGX_PAGE_REG
  343. };
  344. exec_area = &areas[area_num++];
  345. ret = scan_enclave_binary(enclave->exec, &exec_area->addr, &exec_area->size, NULL);
  346. if (ret < 0) {
  347. SGX_DBG(DBG_E, "Scanning application binary failed: %d\n", -ret);
  348. goto out;
  349. }
  350. }
  351. unsigned long populating = enclave->size;
  352. for (int i = 0 ; i < area_num ; i++) {
  353. if (areas[i].addr)
  354. continue;
  355. areas[i].addr = populating - areas[i].size;
  356. populating = SATURATED_P_SUB(areas[i].addr, MEMORY_GAP, 0);
  357. }
  358. enclave_entry_addr += pal_area->addr;
  359. if (exec_area) {
  360. if (exec_area->addr + exec_area->size > pal_area->addr - MEMORY_GAP) {
  361. SGX_DBG(DBG_E, "Application binary overlaps with Pal binary\n");
  362. ret = -EINVAL;
  363. goto out;
  364. }
  365. if (exec_area->addr + exec_area->size + MEMORY_GAP < populating) {
  366. if (populating > heap_min) {
  367. unsigned long addr = exec_area->addr + exec_area->size + MEMORY_GAP;
  368. if (addr < heap_min)
  369. addr = heap_min;
  370. areas[area_num] = (struct mem_area) {
  371. .desc = "free", .skip_eextend = true, .fd = -1,
  372. .is_binary = false, .addr = addr, .size = populating - addr,
  373. .prot = PROT_READ | PROT_WRITE | PROT_EXEC, .type = SGX_PAGE_REG
  374. };
  375. area_num++;
  376. }
  377. populating = SATURATED_P_SUB(exec_area->addr, MEMORY_GAP, 0);
  378. }
  379. }
  380. if (populating > heap_min) {
  381. areas[area_num] = (struct mem_area) {
  382. .desc = "free", .skip_eextend = true, .fd = -1,
  383. .is_binary = false, .addr = heap_min, .size = populating - heap_min,
  384. .prot = PROT_READ | PROT_WRITE | PROT_EXEC, .type = SGX_PAGE_REG
  385. };
  386. area_num++;
  387. }
  388. for (int i = 0 ; i < area_num ; i++) {
  389. if (areas[i].fd != -1 && areas[i].is_binary) {
  390. ret = load_enclave_binary(&enclave_secs, areas[i].fd, areas[i].addr, areas[i].prot);
  391. if (ret < 0) {
  392. SGX_DBG(DBG_E, "Loading enclave binary failed: %d\n", -ret);
  393. goto out;
  394. }
  395. continue;
  396. }
  397. void * data = NULL;
  398. if (!strcmp_static(areas[i].desc, "tls")) {
  399. data = (void *) INLINE_SYSCALL(mmap, 6, NULL, areas[i].size,
  400. PROT_READ|PROT_WRITE,
  401. MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
  402. if (IS_ERR_P(data) || data == NULL) {
  403. /* Note that Graphene currently doesn't handle 0x0 addresses */
  404. SGX_DBG(DBG_E, "Allocating memory for tls pages failed\n");
  405. goto out;
  406. }
  407. for (uint32_t t = 0 ; t < enclave->thread_num ; t++) {
  408. struct enclave_tls * gs = data + g_page_size * t;
  409. memset(gs, 0, g_page_size);
  410. assert(sizeof(*gs) <= g_page_size);
  411. gs->common.self = (PAL_TCB *)(
  412. tls_area->addr + g_page_size * t + enclave_secs.base);
  413. gs->enclave_size = enclave->size;
  414. gs->tcs_offset = tcs_area->addr + g_page_size * t;
  415. gs->initial_stack_offset =
  416. stack_areas[t].addr + ENCLAVE_STACK_SIZE;
  417. gs->ssa = (void *) ssa_area->addr +
  418. enclave->ssaframesize * SSAFRAMENUM * t +
  419. enclave_secs.base;
  420. gs->gpr = gs->ssa +
  421. enclave->ssaframesize - sizeof(sgx_pal_gpr_t);
  422. gs->manifest_size = manifest_size;
  423. gs->heap_min = (void *) enclave_secs.base + heap_min;
  424. gs->heap_max = (void *) enclave_secs.base + pal_area->addr - MEMORY_GAP;
  425. if (exec_area) {
  426. gs->exec_addr = (void *) enclave_secs.base + exec_area->addr;
  427. gs->exec_size = exec_area->size;
  428. }
  429. gs->thread = NULL;
  430. }
  431. } else if (!strcmp_static(areas[i].desc, "tcs")) {
  432. data = (void *) INLINE_SYSCALL(mmap, 6, NULL, areas[i].size,
  433. PROT_READ|PROT_WRITE,
  434. MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
  435. if (IS_ERR_P(data) || data == NULL) {
  436. /* Note that Graphene currently doesn't handle 0x0 addresses */
  437. SGX_DBG(DBG_E, "Allocating memory for tcs pages failed\n");
  438. goto out;
  439. }
  440. for (uint32_t t = 0 ; t < enclave->thread_num ; t++) {
  441. sgx_arch_tcs_t * tcs = data + g_page_size * t;
  442. memset(tcs, 0, g_page_size);
  443. tcs->ossa = ssa_area->addr +
  444. enclave->ssaframesize * SSAFRAMENUM * t;
  445. tcs->nssa = SSAFRAMENUM;
  446. tcs->oentry = enclave_entry_addr;
  447. tcs->ofs_base = 0;
  448. tcs->ogs_base = tls_area->addr + t * g_page_size;
  449. tcs->ofs_limit = 0xfff;
  450. tcs->ogs_limit = 0xfff;
  451. tcs_addrs[t] = (void *) enclave_secs.base + tcs_area->addr + g_page_size * t;
  452. }
  453. } else if (areas[i].fd != -1) {
  454. data = (void *) INLINE_SYSCALL(mmap, 6, NULL, areas[i].size,
  455. PROT_READ,
  456. MAP_FILE|MAP_PRIVATE,
  457. areas[i].fd, 0);
  458. if (IS_ERR_P(data) || data == NULL) {
  459. /* Note that Graphene currently doesn't handle 0x0 addresses */
  460. SGX_DBG(DBG_E, "Allocating memory for file %s failed\n", areas[i].desc);
  461. goto out;
  462. }
  463. }
  464. ret = add_pages_to_enclave(&enclave_secs, (void *) areas[i].addr, data, areas[i].size,
  465. areas[i].type, areas[i].prot, areas[i].skip_eextend, areas[i].desc);
  466. if (data)
  467. INLINE_SYSCALL(munmap, 2, data, areas[i].size);
  468. if (ret < 0) {
  469. SGX_DBG(DBG_E, "Adding pages (%s) to enclave failed: %d\n", areas[i].desc, -ret);
  470. goto out;
  471. }
  472. }
  473. ret = init_enclave(&enclave_secs, &enclave_sigstruct, &enclave_token);
  474. if (ret < 0) {
  475. SGX_DBG(DBG_E, "Initializing enclave failed: %d\n", -ret);
  476. goto out;
  477. }
  478. create_tcs_mapper((void *) enclave_secs.base + tcs_area->addr, enclave->thread_num);
  479. struct enclave_dbginfo * dbg = (void *)
  480. INLINE_SYSCALL(mmap, 6, DBGINFO_ADDR,
  481. sizeof(struct enclave_dbginfo),
  482. PROT_READ|PROT_WRITE,
  483. MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
  484. -1, 0);
  485. if (IS_ERR_P(dbg)) {
  486. SGX_DBG(DBG_E, "Cannot allocate debug information (GDB will not work)\n");
  487. } else {
  488. dbg->pid = INLINE_SYSCALL(getpid, 0);
  489. dbg->base = enclave->baseaddr;
  490. dbg->size = enclave->size;
  491. dbg->ssaframesize = enclave->ssaframesize;
  492. dbg->aep = async_exit_pointer;
  493. dbg->thread_tids[0] = dbg->pid;
  494. for (int i = 0 ; i < MAX_DBG_THREADS ; i++)
  495. dbg->tcs_addrs[i] = tcs_addrs[i];
  496. }
  497. ret = 0;
  498. out:
  499. if (enclave_image >= 0)
  500. INLINE_SYSCALL(close, 1, enclave_image);
  501. free(enclave_uri);
  502. return ret;
  503. }
  504. static unsigned long randval = 0;
  505. void getrand (void * buffer, size_t size)
  506. {
  507. size_t bytes = 0;
  508. while (bytes + sizeof(uint64_t) <= size) {
  509. *(uint64_t*) (buffer + bytes) = randval;
  510. randval = hash64(randval);
  511. bytes += sizeof(uint64_t);
  512. }
  513. if (bytes < size) {
  514. memcpy(buffer + bytes, &randval, size - bytes);
  515. randval = hash64(randval);
  516. }
  517. }
  518. static void create_instance (struct pal_sec * pal_sec)
  519. {
  520. PAL_NUM id;
  521. getrand(&id, sizeof(id));
  522. snprintf(pal_sec->pipe_prefix, sizeof(pal_sec->pipe_prefix), "/graphene/%016lx/", id);
  523. pal_sec->instance_id = id;
  524. }
  525. static int load_manifest (int fd, struct config_store ** config_ptr)
  526. {
  527. int ret = 0;
  528. int nbytes = INLINE_SYSCALL(lseek, 3, fd, 0, SEEK_END);
  529. if (IS_ERR(nbytes)) {
  530. SGX_DBG(DBG_E, "Cannot detect size of manifest file\n");
  531. return -ERRNO(nbytes);
  532. }
  533. struct config_store * config = malloc(sizeof(struct config_store));
  534. if (!config) {
  535. SGX_DBG(DBG_E, "Not enough memory for config_store of manifest\n");
  536. return -ENOMEM;
  537. }
  538. void * config_raw = (void *)
  539. INLINE_SYSCALL(mmap, 6, NULL, nbytes, PROT_READ, MAP_PRIVATE, fd, 0);
  540. if (IS_ERR_P(config_raw)) {
  541. SGX_DBG(DBG_E, "Cannot mmap manifest file\n");
  542. ret = -ERRNO_P(config_raw);
  543. goto out;
  544. }
  545. config->raw_data = config_raw;
  546. config->raw_size = nbytes;
  547. config->malloc = malloc;
  548. config->free = NULL;
  549. const char * errstring = NULL;
  550. ret = read_config(config, NULL, &errstring);
  551. if (ret < 0) {
  552. SGX_DBG(DBG_E, "Cannot read manifest: %s\n", errstring);
  553. goto out;
  554. }
  555. *config_ptr = config;
  556. ret = 0;
  557. out:
  558. if (ret < 0) {
  559. free(config);
  560. if (!IS_ERR_P(config_raw))
  561. INLINE_SYSCALL(munmap, 2, config_raw, nbytes);
  562. }
  563. return ret;
  564. }
  565. /*
  566. * Returns the number of online CPUs read from /sys/devices/system/cpu/online, -errno on failure.
  567. * Understands complex formats like "1,3-5,6".
  568. */
  569. static int get_cpu_count(void) {
  570. int fd = INLINE_SYSCALL(open, 3, "/sys/devices/system/cpu/online", O_RDONLY|O_CLOEXEC, 0);
  571. if (fd < 0)
  572. return unix_to_pal_error(ERRNO(fd));
  573. char buf[64];
  574. int ret = INLINE_SYSCALL(read, 3, fd, buf, sizeof(buf) - 1);
  575. if (ret < 0) {
  576. INLINE_SYSCALL(close, 1, fd);
  577. return unix_to_pal_error(ERRNO(ret));
  578. }
  579. buf[ret] = '\0'; /* ensure null-terminated buf even in partial read */
  580. char* end;
  581. char* ptr = buf;
  582. int cpu_count = 0;
  583. while (*ptr) {
  584. while (*ptr == ' ' || *ptr == '\t' || *ptr == ',')
  585. ptr++;
  586. int firstint = (int)strtol(ptr, &end, 10);
  587. if (ptr == end)
  588. break;
  589. if (*end == '\0' || *end == ',') {
  590. /* single CPU index, count as one more CPU */
  591. cpu_count++;
  592. } else if (*end == '-') {
  593. /* CPU range, count how many CPUs in range */
  594. ptr = end + 1;
  595. int secondint = (int)strtol(ptr, &end, 10);
  596. if (secondint > firstint)
  597. cpu_count += secondint - firstint + 1; // inclusive (e.g., 0-7, or 8-16)
  598. }
  599. ptr = end;
  600. }
  601. INLINE_SYSCALL(close, 1, fd);
  602. if (cpu_count == 0)
  603. return -PAL_ERROR_STREAMNOTEXIST;
  604. return cpu_count;
  605. }
/* End-to-end enclave startup: gather host facts into pal_sec, load the
 * manifest, open the executable/sigstruct/token files, build the enclave via
 * initialize_enclave(), and transfer control into it with ecall_enclave_start().
 * On a successful run this function does not return (it exits the process);
 * a negative errno is returned only on setup failure. */
static int load_enclave (struct pal_enclave * enclave,
                         int manifest_fd,
                         char * manifest_uri,
                         char * exec_uri,
                         char * args, size_t args_size,
                         char * env, size_t env_size,
                         bool exec_uri_inferred)
{
    struct pal_sec * pal_sec = &enclave->pal_sec;
    int ret;
    struct timeval tv;

#if PRINT_ENCLAVE_STAT == 1
    /* record wall-clock start time for enclave statistics */
    INLINE_SYSCALL(gettimeofday, 2, &tv, NULL);
    pal_sec->start_time = tv.tv_sec * 1000000UL + tv.tv_usec;
#endif

    /* open the SGX driver and verify WRFSBASE support before anything else */
    ret = open_gsgx();
    if (ret < 0)
        return ret;

    if (!is_wrfsbase_supported())
        return -EPERM;

    /* seed the (non-cryptographic) PRNG used by getrand() with the current time */
    INLINE_SYSCALL(gettimeofday, 2, &tv, NULL);
    randval = tv.tv_sec * 1000000UL + tv.tv_usec;

    /* host identity passed into the enclave */
    pal_sec->pid = INLINE_SYSCALL(getpid, 0);
    pal_sec->uid = INLINE_SYSCALL(getuid, 0);
    pal_sec->gid = INLINE_SYSCALL(getgid, 0);

    int num_cpus = get_cpu_count();
    if (num_cpus < 0) {
        return num_cpus;
    }
    pal_sec->num_cpus = num_cpus;

#ifdef DEBUG
    /* scan the flattened (NUL-separated) environment: honor IN_GDB=1 and
     * strip LD_PRELOAD entries so they don't leak into the enclave */
    size_t env_i = 0;
    while (env_i < env_size) {
        if (!strcmp_static(&env[env_i], "IN_GDB=1")) {
            SGX_DBG(DBG_I, "[ Running under GDB ]\n");
            pal_sec->in_gdb = true;
        } else if (strstartswith_static(&env[env_i], "LD_PRELOAD=")) {
            /* remove this entry by shifting the rest of the block down */
            uint64_t env_i_size = strnlen(&env[env_i], env_size - env_i) + 1;
            memmove(&env[env_i], &env[env_i + env_i_size], env_size - env_i - env_i_size);
            env_size -= env_i_size;
            continue;
        }

        env_i += strnlen(&env[env_i], env_size - env_i) + 1;
    }
#endif

    enclave->manifest = manifest_fd;

    ret = load_manifest(enclave->manifest, &enclave->config);
    if (ret < 0) {
        SGX_DBG(DBG_E, "Invalid manifest: %s\n", manifest_uri);
        return -EINVAL;
    }

    char cfgbuf[CONFIG_MAX];
    const char * errstring;

    // A manifest can specify an executable with a different base name
    // than the manifest itself. Always give the exec field of the manifest
    // precedence if specified.
    if (get_config(enclave->config, "loader.exec", cfgbuf, sizeof(cfgbuf)) > 0) {
        /* NOTE(review): this overwrites the caller-supplied exec_uri with a
         * heap string from resolve_uri() that is never freed here — looks
         * like a leak (and possibly of the original exec_uri too, depending
         * on the caller's ownership); confirm against main() */
        exec_uri = resolve_uri(cfgbuf, &errstring);
        exec_uri_inferred = false;
        if (!exec_uri) {
            SGX_DBG(DBG_E, "%s: %s\n", errstring, cfgbuf);
            return -EINVAL;
        }
    }

    enclave->exec = INLINE_SYSCALL(open, 3, exec_uri + URI_PREFIX_FILE_LEN,
                                   O_RDONLY|O_CLOEXEC, 0);
    if (IS_ERR(enclave->exec)) {
        if (exec_uri_inferred) {
            // It is valid for an enclave not to have an executable.
            // We need to catch the case where we inferred the executable
            // from the manifest file name, but it doesn't exist, and let
            // the enclave go a bit further. Go ahead and warn the user,
            // though.
            SGX_DBG(DBG_I, "Inferred executable cannot be opened: %s. This may be ok, "
                    "or may represent a manifest misconfiguration. This typically "
                    "represents advanced usage, and if it is not what you intended, "
                    "try setting the loader.exec field in the manifest.\n", exec_uri);
            enclave->exec = -1;
        } else {
            SGX_DBG(DBG_E, "Cannot open executable %s\n", exec_uri);
            return -EINVAL;
        }
    }

    /* the sigstruct file (sgx.sigfile) is mandatory and must end in ".sig" */
    if (get_config(enclave->config, "sgx.sigfile", cfgbuf, sizeof(cfgbuf)) < 0) {
        SGX_DBG(DBG_E, "Sigstruct file not found ('sgx.sigfile' must be specified in manifest)\n");
        return -EINVAL;
    }

    char * sig_uri = resolve_uri(cfgbuf, &errstring);
    if (!sig_uri) {
        SGX_DBG(DBG_E, "%s: %s\n", errstring, cfgbuf);
        return -EINVAL;
    }

    if (!strendswith(sig_uri, ".sig")) {
        SGX_DBG(DBG_E, "Invalid sigstruct file URI as %s\n", cfgbuf);
        free(sig_uri);
        return -EINVAL;
    }

    enclave->sigfile = INLINE_SYSCALL(open, 3, sig_uri + URI_PREFIX_FILE_LEN,
                                      O_RDONLY|O_CLOEXEC, 0);
    if (IS_ERR(enclave->sigfile)) {
        SGX_DBG(DBG_E, "Cannot open sigstruct file %s\n", sig_uri);
        free(sig_uri);
        return -EINVAL;
    }

    /* the token file lives next to the sigstruct: "<name>.sig" -> "<name>.token" */
    char * token_uri = alloc_concat(sig_uri, strlen(sig_uri) - static_strlen(".sig"), ".token", -1);
    free(sig_uri);
    if (!token_uri) {
        INLINE_SYSCALL(close, 1, enclave->sigfile);
        return -ENOMEM;
    }

    enclave->token = INLINE_SYSCALL(open, 3, token_uri + URI_PREFIX_FILE_LEN,
                                    O_RDONLY|O_CLOEXEC, 0);
    if (IS_ERR(enclave->token)) {
        SGX_DBG(DBG_E, "Cannot open token \'%s\'. Use \'"
                PAL_FILE("pal-sgx-get-token")
                "\' on the runtime host or run \'make SGX=1 sgx-tokens\' "
                "in the Graphene source to create the token file.\n",
                token_uri);
        free(token_uri);
        return -EINVAL;
    }
    SGX_DBG(DBG_I, "Token file: %s\n", token_uri);
    free(token_uri);

    /* build and EINIT the enclave (see initialize_enclave above) */
    ret = initialize_enclave(enclave);
    if (ret < 0)
        return ret;

    if (!pal_sec->instance_id)
        create_instance(&enclave->pal_sec);

    memcpy(pal_sec->manifest_name, manifest_uri, strlen(manifest_uri) + 1);

    if (enclave->exec == -1) {
        memset(pal_sec->exec_name, 0, sizeof(PAL_SEC_STR));
    } else {
        memcpy(pal_sec->exec_name, exec_uri, strlen(exec_uri) + 1);
    }

    ret = sgx_signal_setup();
    if (ret < 0)
        return ret;

    if (get_config(enclave->config, "sgx.ra_client_key", cfgbuf, sizeof(cfgbuf)) > 0) {
        /* initialize communication with AESM enclave only if app requests remote attestation */
        ret = init_aesm_targetinfo(&pal_sec->aesm_targetinfo);
        if (ret < 0)
            return ret;
    }

    /* alternate signal stack for the main thread, with its TCB at the top */
    void* alt_stack = (void*)INLINE_SYSCALL(mmap, 6, NULL, ALT_STACK_SIZE,
                                            PROT_READ | PROT_WRITE,
                                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (IS_ERR_P(alt_stack))
        return -ENOMEM;

    /* initialize TCB at the top of the alternative stack */
    PAL_TCB_URTS* tcb = alt_stack + ALT_STACK_SIZE - sizeof(PAL_TCB_URTS);
    pal_tcb_urts_init(
        tcb, /*stack=*/NULL, alt_stack); /* main thread uses the stack provided by Linux */
    pal_thread_init(tcb);

    /* start running trusted PAL */
    ecall_enclave_start(args, args_size, env, env_size);

#if PRINT_ENCLAVE_STAT == 1
    PAL_NUM exit_time = 0;
    INLINE_SYSCALL(gettimeofday, 2, &tv, NULL);
    exit_time = tv.tv_sec * 1000000UL + tv.tv_usec;
#endif

    /* the enclave returned: tear down this thread and exit the process */
    unmap_tcs();
    INLINE_SYSCALL(munmap, 2, alt_stack, ALT_STACK_SIZE);
    INLINE_SYSCALL(exit, 0);
    return 0;
}
/* Grow stack of main thread to THREAD_STACK_SIZE by allocating a large dummy array and probing
 * each stack page (Linux dynamically grows the stack of the main thread but gets confused with
 * huge-jump stack accesses coming from within the enclave). Note that other, non-main threads
 * are created manually via clone(.., THREAD_STACK_SIZE, ..) and thus do not need this hack. */
static void __attribute__ ((noinline)) force_linux_to_grow_stack() {
    char dummy[THREAD_STACK_SIZE];
    for (uint64_t i = 0; i < sizeof(dummy); i += PRESET_PAGESIZE) {
        /* touch each page on the stack just to make sure it is not optimized away; the inline
         * asm performs a real load from &dummy[i], forcing the kernel to fault the page in and
         * grow the stack mapping one page at a time */
        __asm__ volatile("movq %0, %%rbx\r\n"
                         "movq (%%rbx), %%rbx\r\n"
                         : : "r"(&dummy[i]) : "%rbx");
    }
}
  784. int main (int argc, char ** argv, char ** envp)
  785. {
  786. char * manifest_uri = NULL;
  787. char * exec_uri = NULL;
  788. const char * pal_loader = argv[0];
  789. int fd = -1;
  790. int ret = 0;
  791. bool exec_uri_inferred = false; // Handle the case where the exec uri is
  792. // inferred from the manifest name somewhat
  793. // differently
  794. force_linux_to_grow_stack();
  795. argc--;
  796. argv++;
  797. int is_child = sgx_init_child_process(&pal_enclave.pal_sec);
  798. if (is_child < 0) {
  799. ret = is_child;
  800. goto out;
  801. }
  802. if (!is_child) {
  803. /* occupy PROC_INIT_FD so no one will use it */
  804. INLINE_SYSCALL(dup2, 2, 0, PROC_INIT_FD);
  805. if (!argc)
  806. goto usage;
  807. if (!strcmp_static(argv[0], URI_PREFIX_FILE)) {
  808. exec_uri = alloc_concat(argv[0], -1, NULL, -1);
  809. } else {
  810. exec_uri = alloc_concat(URI_PREFIX_FILE, -1, argv[0], -1);
  811. }
  812. } else {
  813. exec_uri = alloc_concat(pal_enclave.pal_sec.exec_name, -1, NULL, -1);
  814. }
  815. if (!exec_uri) {
  816. ret = -ENOMEM;
  817. goto out;
  818. }
  819. fd = INLINE_SYSCALL(open, 3, exec_uri + URI_PREFIX_FILE_LEN, O_RDONLY|O_CLOEXEC, 0);
  820. if (IS_ERR(fd)) {
  821. SGX_DBG(DBG_E, "Input file not found: %s\n", exec_uri);
  822. ret = fd;
  823. goto usage;
  824. }
  825. char file_first_four_bytes[4];
  826. ret = INLINE_SYSCALL(read, 3, fd, file_first_four_bytes, sizeof(file_first_four_bytes));
  827. if (IS_ERR(ret)) {
  828. goto out;
  829. }
  830. if (ret != sizeof(file_first_four_bytes)) {
  831. ret = -EINVAL;
  832. goto out;
  833. }
  834. char manifest_base_name[URI_MAX];
  835. size_t manifest_base_name_len = sizeof(manifest_base_name);
  836. ret = get_base_name(exec_uri + URI_PREFIX_FILE_LEN, manifest_base_name,
  837. &manifest_base_name_len);
  838. if (ret < 0) {
  839. goto out;
  840. }
  841. if (strendswith(manifest_base_name, ".manifest")) {
  842. if (!strcpy_static(manifest_base_name + manifest_base_name_len, ".sgx",
  843. sizeof(manifest_base_name) - manifest_base_name_len)) {
  844. ret = -E2BIG;
  845. goto out;
  846. }
  847. } else if (!strendswith(manifest_base_name, ".manifest.sgx")) {
  848. if (!strcpy_static(manifest_base_name + manifest_base_name_len, ".manifest.sgx",
  849. sizeof(manifest_base_name) - manifest_base_name_len)) {
  850. ret = -E2BIG;
  851. goto out;
  852. }
  853. }
  854. int manifest_fd = -1;
  855. if (memcmp(file_first_four_bytes, "\177ELF", sizeof(file_first_four_bytes))) {
  856. /* exec_uri doesn't refer to ELF executable, so it must refer to the
  857. * manifest. Verify this and update exec_uri with the manifest suffix
  858. * removed.
  859. */
  860. size_t exec_uri_len = strlen(exec_uri);
  861. if (strendswith(exec_uri, ".manifest")) {
  862. exec_uri[exec_uri_len - static_strlen(".manifest")] = '\0';
  863. } else if (strendswith(exec_uri, ".manifest.sgx")) {
  864. INLINE_SYSCALL(lseek, 3, fd, 0, SEEK_SET);
  865. manifest_fd = fd;
  866. exec_uri[exec_uri_len - static_strlen(".manifest.sgx")] = '\0';
  867. } else {
  868. SGX_DBG(DBG_E, "Invalid manifest file specified: %s\n", exec_uri);
  869. goto usage;
  870. }
  871. exec_uri_inferred = true;
  872. }
  873. if (manifest_fd == -1) {
  874. INLINE_SYSCALL(close, 1, fd);
  875. fd = manifest_fd = INLINE_SYSCALL(open, 3, manifest_base_name, O_RDONLY|O_CLOEXEC, 0);
  876. if (IS_ERR(fd)) {
  877. SGX_DBG(DBG_E, "Cannot open manifest file: %s\n", manifest_base_name);
  878. goto usage;
  879. }
  880. }
  881. manifest_uri = alloc_concat(URI_PREFIX_FILE, URI_PREFIX_FILE_LEN, manifest_base_name, -1);
  882. if (!manifest_uri) {
  883. ret = -ENOMEM;
  884. goto out;
  885. }
  886. SGX_DBG(DBG_I, "Manifest file: %s\n", manifest_uri);
  887. if (exec_uri_inferred)
  888. SGX_DBG(DBG_I, "Inferred executable file: %s\n", exec_uri);
  889. else
  890. SGX_DBG(DBG_I, "Executable file: %s\n", exec_uri);
  891. /*
  892. * While C does not guarantee that the argv[i] and envp[i] strings are
  893. * continuous we know that we are running on Linux, which does this. This
  894. * saves us creating a copy of all argv and envp strings.
  895. */
  896. char * args = argv[0];
  897. size_t args_size = argc > 0 ? (argv[argc - 1] - argv[0]) + strlen(argv[argc - 1]) + 1: 0;
  898. int envc = 0;
  899. while (envp[envc] != NULL) {
  900. envc++;
  901. }
  902. char * env = envp[0];
  903. size_t env_size = envc > 0 ? (envp[envc - 1] - envp[0]) + strlen(envp[envc - 1]) + 1: 0;
  904. ret = load_enclave(&pal_enclave, manifest_fd, manifest_uri, exec_uri, args, args_size, env, env_size,
  905. exec_uri_inferred);
  906. out:
  907. if (pal_enclave.exec >= 0)
  908. INLINE_SYSCALL(close, 1, pal_enclave.exec);
  909. if (pal_enclave.sigfile >= 0)
  910. INLINE_SYSCALL(close, 1, pal_enclave.sigfile);
  911. if (pal_enclave.token >= 0)
  912. INLINE_SYSCALL(close, 1, pal_enclave.token);
  913. if (!IS_ERR(fd))
  914. INLINE_SYSCALL(close, 1, fd);
  915. free(exec_uri);
  916. free(manifest_uri);
  917. return ret;
  918. usage:
  919. SGX_DBG(DBG_E, "USAGE: %s [executable|manifest] args ...\n", pal_loader);
  920. ret = -EINVAL;
  921. goto out;
  922. }