sgx_main.c 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147
  1. #include <pal_linux.h>
  2. #include <pal_linux_error.h>
  3. #include <pal_rtld.h>
  4. #include <hex.h>
  5. #include "sgx_internal.h"
  6. #include "sgx_tls.h"
  7. #include "sgx_enclave.h"
  8. #include "debugger/sgx_gdb.h"
  9. #include <asm/fcntl.h>
  10. #include <asm/socket.h>
  11. #include <linux/fs.h>
  12. #include <linux/in.h>
  13. #include <linux/in6.h>
  14. #include <asm/errno.h>
  15. #include <ctype.h>
  16. #include <sysdep.h>
  17. #include <sysdeps/generic/ldsodefs.h>
/* Host page geometry, fixed at build time via PRESET_PAGESIZE. */
unsigned long pagesize = PRESET_PAGESIZE;
/* Mask that keeps the page-aligned part of an address. */
unsigned long pagemask = ~(PRESET_PAGESIZE - 1);
/* NOTE(review): despite the name, this is the in-page offset mask
 * (PRESET_PAGESIZE - 1), not a shift count — confirm against users. */
unsigned long pageshift = PRESET_PAGESIZE - 1;
/* The single enclave managed by this untrusted-PAL loader process. */
struct pal_enclave pal_enclave;
  22. static inline
  23. char * alloc_concat(const char * p, size_t plen,
  24. const char * s, size_t slen)
  25. {
  26. plen = (plen != (size_t)-1) ? plen : (p ? strlen(p) : 0);
  27. slen = (slen != (size_t)-1) ? slen : (s ? strlen(s) : 0);
  28. char * buf = malloc(plen + slen + 1);
  29. if (!buf)
  30. return NULL;
  31. if (plen)
  32. memcpy(buf, p, plen);
  33. if (slen)
  34. memcpy(buf + plen, s, slen);
  35. buf[plen + slen] = '\0';
  36. return buf;
  37. }
  38. static unsigned long parse_int (const char * str)
  39. {
  40. unsigned long num = 0;
  41. int radix = 10;
  42. char c;
  43. if (str[0] == '0') {
  44. str++;
  45. radix = 8;
  46. if (str[0] == 'x') {
  47. str++;
  48. radix = 16;
  49. }
  50. }
  51. while ((c = *(str++))) {
  52. int8_t val = hex2dec(c);
  53. if (val < 0)
  54. break;
  55. if ((uint8_t) val >= radix)
  56. break;
  57. num = num * radix + (uint8_t) val;
  58. }
  59. if (c == 'G' || c == 'g')
  60. num *= 1024 * 1024 * 1024;
  61. else if (c == 'M' || c == 'm')
  62. num *= 1024 * 1024;
  63. else if (c == 'K' || c == 'k')
  64. num *= 1024;
  65. return num;
  66. }
  67. static char * resolve_uri (const char * uri, const char ** errstring)
  68. {
  69. if (!strstartswith_static(uri, "file:")) {
  70. *errstring = "Invalid URI";
  71. return NULL;
  72. }
  73. char path_buf[URI_MAX];
  74. size_t len = URI_MAX;
  75. int ret = get_norm_path(uri + 5, path_buf, &len);
  76. if (ret < 0) {
  77. *errstring = "Invalid URI";
  78. return NULL;
  79. }
  80. return alloc_concat("file:", static_strlen("file:"), path_buf, len);
  81. }
  82. static
  83. int scan_enclave_binary (int fd, unsigned long * base, unsigned long * size,
  84. unsigned long * entry)
  85. {
  86. int ret = 0;
  87. if (IS_ERR(ret = INLINE_SYSCALL(lseek, 3, fd, 0, SEEK_SET)))
  88. return -ERRNO(ret);
  89. char filebuf[FILEBUF_SIZE];
  90. ret = INLINE_SYSCALL(read, 3, fd, filebuf, FILEBUF_SIZE);
  91. if (IS_ERR(ret))
  92. return -ERRNO(ret);
  93. const ElfW(Ehdr) * header = (void *) filebuf;
  94. const ElfW(Phdr) * phdr = (void *) filebuf + header->e_phoff;
  95. const ElfW(Phdr) * ph;
  96. struct loadcmd {
  97. ElfW(Addr) mapstart, mapend;
  98. } loadcmds[16], *c;
  99. int nloadcmds = 0;
  100. for (ph = phdr ; ph < &phdr[header->e_phnum] ; ph++)
  101. if (ph->p_type == PT_LOAD) {
  102. if (nloadcmds == 16)
  103. return -EINVAL;
  104. c = &loadcmds[nloadcmds++];
  105. c->mapstart = ALLOC_ALIGNDOWN(ph->p_vaddr);
  106. c->mapend = ALLOC_ALIGNUP(ph->p_vaddr + ph->p_memsz);
  107. }
  108. *base = loadcmds[0].mapstart;
  109. *size = loadcmds[nloadcmds - 1].mapend - loadcmds[0].mapstart;
  110. if (entry)
  111. *entry = header->e_entry;
  112. return 0;
  113. }
  114. static
  115. int load_enclave_binary (sgx_arch_secs_t * secs, int fd,
  116. unsigned long base, unsigned long prot)
  117. {
  118. int ret = 0;
  119. if (IS_ERR(ret = INLINE_SYSCALL(lseek, 3, fd, 0, SEEK_SET)))
  120. return -ERRNO(ret);
  121. char filebuf[FILEBUF_SIZE];
  122. ret = INLINE_SYSCALL(read, 3, fd, filebuf, FILEBUF_SIZE);
  123. if (IS_ERR(ret))
  124. return -ERRNO(ret);
  125. const ElfW(Ehdr) * header = (void *) filebuf;
  126. const ElfW(Phdr) * phdr = (void *) filebuf + header->e_phoff;
  127. const ElfW(Phdr) * ph;
  128. struct loadcmd {
  129. ElfW(Addr) mapstart, mapend, datastart, dataend, allocend;
  130. unsigned int mapoff;
  131. int prot;
  132. } loadcmds[16], *c;
  133. int nloadcmds = 0;
  134. for (ph = phdr ; ph < &phdr[header->e_phnum] ; ph++)
  135. if (ph->p_type == PT_LOAD) {
  136. if (nloadcmds == 16)
  137. return -EINVAL;
  138. c = &loadcmds[nloadcmds++];
  139. c->mapstart = ALLOC_ALIGNDOWN(ph->p_vaddr);
  140. c->mapend = ALLOC_ALIGNUP(ph->p_vaddr + ph->p_filesz);
  141. c->datastart = ph->p_vaddr;
  142. c->dataend = ph->p_vaddr + ph->p_filesz;
  143. c->allocend = ph->p_vaddr + ph->p_memsz;
  144. c->mapoff = ALLOC_ALIGNDOWN(ph->p_offset);
  145. c->prot = (ph->p_flags & PF_R ? PROT_READ : 0)|
  146. (ph->p_flags & PF_W ? PROT_WRITE : 0)|
  147. (ph->p_flags & PF_X ? PROT_EXEC : 0)|prot;
  148. }
  149. base -= loadcmds[0].mapstart;
  150. for (c = loadcmds; c < &loadcmds[nloadcmds] ; c++) {
  151. ElfW(Addr) zero = c->dataend;
  152. ElfW(Addr) zeroend = ALLOC_ALIGNUP(c->allocend);
  153. ElfW(Addr) zeropage = ALLOC_ALIGNUP(zero);
  154. if (zeroend < zeropage)
  155. zeropage = zeroend;
  156. if (c->mapend > c->mapstart) {
  157. void * addr = (void *) INLINE_SYSCALL(mmap, 6, NULL,
  158. c->mapend - c->mapstart,
  159. PROT_READ|PROT_WRITE,
  160. MAP_PRIVATE | MAP_FILE,
  161. fd, c->mapoff);
  162. if (IS_ERR_P(addr))
  163. return -ERRNO_P(addr);
  164. if (c->datastart > c->mapstart)
  165. memset(addr, 0, c->datastart - c->mapstart);
  166. if (zeropage > zero)
  167. memset(addr + zero - c->mapstart, 0, zeropage - zero);
  168. ret = add_pages_to_enclave(secs, (void *) base + c->mapstart, addr,
  169. c->mapend - c->mapstart,
  170. SGX_PAGE_REG, c->prot, 0,
  171. (c->prot & PROT_EXEC) ? "code" : "data");
  172. INLINE_SYSCALL(munmap, 2, addr, c->mapend - c->mapstart);
  173. if (ret < 0)
  174. return ret;
  175. }
  176. if (zeroend > zeropage) {
  177. ret = add_pages_to_enclave(secs, (void *) base + zeropage, NULL,
  178. zeroend - zeropage,
  179. SGX_PAGE_REG, c->prot, false, "bss");
  180. if (ret < 0)
  181. return ret;
  182. }
  183. }
  184. return 0;
  185. }
  186. int initialize_enclave (struct pal_enclave * enclave)
  187. {
  188. int ret = 0;
  189. int enclave_image;
  190. sgx_arch_token_t enclave_token;
  191. sgx_arch_enclave_css_t enclave_sigstruct;
  192. sgx_arch_secs_t enclave_secs;
  193. unsigned long enclave_entry_addr;
  194. unsigned long heap_min = DEFAULT_HEAP_MIN;
  195. /* this array may overflow the stack, so we allocate it in BSS */
  196. static void* tcs_addrs[MAX_DBG_THREADS];
  197. enclave_image = INLINE_SYSCALL(open, 3, ENCLAVE_FILENAME, O_RDONLY, 0);
  198. if (IS_ERR(enclave_image)) {
  199. SGX_DBG(DBG_E, "Cannot find %s\n", ENCLAVE_FILENAME);
  200. ret = -ERRNO(enclave_image);
  201. goto out;
  202. }
  203. char cfgbuf[CONFIG_MAX];
  204. /* Reading sgx.enclave_size from manifest */
  205. if (get_config(enclave->config, "sgx.enclave_size", cfgbuf, sizeof(cfgbuf)) <= 0) {
  206. SGX_DBG(DBG_E, "Enclave size is not specified\n");
  207. ret = -EINVAL;
  208. goto out;
  209. }
  210. enclave->size = parse_int(cfgbuf);
  211. if (!enclave->size || !IS_POWER_OF_2(enclave->size)) {
  212. SGX_DBG(DBG_E, "Enclave size not a power of two (an SGX-imposed requirement)\n");
  213. ret = -EINVAL;
  214. goto out;
  215. }
  216. /* Reading sgx.thread_num from manifest */
  217. if (get_config(enclave->config, "sgx.thread_num", cfgbuf, sizeof(cfgbuf)) > 0) {
  218. enclave->thread_num = parse_int(cfgbuf);
  219. if (enclave->thread_num > MAX_DBG_THREADS) {
  220. SGX_DBG(DBG_E, "Too many threads to debug\n");
  221. ret = -EINVAL;
  222. goto out;
  223. }
  224. } else {
  225. enclave->thread_num = 1;
  226. }
  227. if (get_config(enclave->config, "sgx.static_address", cfgbuf, sizeof(cfgbuf)) > 0 && cfgbuf[0] == '1') {
  228. enclave->baseaddr = ALIGN_DOWN_POW2(heap_min, enclave->size);
  229. } else {
  230. enclave->baseaddr = ENCLAVE_HIGH_ADDRESS;
  231. heap_min = 0;
  232. }
  233. ret = read_enclave_token(enclave->token, &enclave_token);
  234. if (ret < 0) {
  235. SGX_DBG(DBG_E, "Reading enclave token failed: %d\n", -ret);
  236. goto out;
  237. }
  238. ret = read_enclave_sigstruct(enclave->sigfile, &enclave_sigstruct);
  239. if (ret < 0) {
  240. SGX_DBG(DBG_E, "Reading enclave sigstruct failed: %d\n", -ret);
  241. goto out;
  242. }
  243. memset(&enclave_secs, 0, sizeof(enclave_secs));
  244. enclave_secs.base = enclave->baseaddr;
  245. enclave_secs.size = enclave->size;
  246. ret = create_enclave(&enclave_secs, &enclave_token);
  247. if (ret < 0) {
  248. SGX_DBG(DBG_E, "Creating enclave failed: %d\n", -ret);
  249. goto out;
  250. }
  251. enclave->ssaframesize = enclave_secs.ssa_frame_size * pagesize;
  252. struct stat stat;
  253. ret = INLINE_SYSCALL(fstat, 2, enclave->manifest, &stat);
  254. if (IS_ERR(ret)) {
  255. SGX_DBG(DBG_E, "Reading manifest file's size failed: %d\n", -ret);
  256. ret = -ERRNO(ret);
  257. goto out;
  258. }
  259. int manifest_size = stat.st_size;
  260. /* Start populating enclave memory */
  261. struct mem_area {
  262. const char * desc;
  263. bool skip_eextend;
  264. int fd;
  265. bool is_binary; /* only meaningful if fd != -1 */
  266. unsigned long addr, size, prot;
  267. enum sgx_page_type type;
  268. };
  269. struct mem_area * areas =
  270. __alloca(sizeof(areas[0]) * (10 + enclave->thread_num));
  271. int area_num = 0;
  272. /* The manifest needs to be allocated at the upper end of the enclave
  273. * memory. That's used by pal_linux_main to find the manifest area. So add
  274. * it first to the list with memory areas. */
  275. areas[area_num] = (struct mem_area) {
  276. .desc = "manifest", .skip_eextend = false, .fd = enclave->manifest,
  277. .is_binary = false, .addr = 0, .size = ALLOC_ALIGNUP(manifest_size),
  278. .prot = PROT_READ, .type = SGX_PAGE_REG
  279. };
  280. area_num++;
  281. areas[area_num] = (struct mem_area) {
  282. .desc = "ssa", .skip_eextend = false, .fd = -1,
  283. .is_binary = false, .addr = 0,
  284. .size = enclave->thread_num * enclave->ssaframesize * SSAFRAMENUM,
  285. .prot = PROT_READ | PROT_WRITE, .type = SGX_PAGE_REG
  286. };
  287. struct mem_area* ssa_area = &areas[area_num++];
  288. areas[area_num] = (struct mem_area) {
  289. .desc = "tcs", .skip_eextend = false, .fd = -1,
  290. .is_binary = false, .addr = 0, .size = enclave->thread_num * pagesize,
  291. .prot = 0, .type = SGX_PAGE_TCS
  292. };
  293. struct mem_area* tcs_area = &areas[area_num++];
  294. areas[area_num] = (struct mem_area) {
  295. .desc = "tls", .skip_eextend = false, .fd = -1,
  296. .is_binary = false, .addr = 0, .size = enclave->thread_num * pagesize,
  297. .prot = PROT_READ | PROT_WRITE, .type = SGX_PAGE_REG
  298. };
  299. struct mem_area* tls_area = &areas[area_num++];
  300. struct mem_area* stack_areas = &areas[area_num]; /* memorize for later use */
  301. for (uint32_t t = 0; t < enclave->thread_num; t++) {
  302. areas[area_num] = (struct mem_area) {
  303. .desc = "stack", .skip_eextend = false, .fd = -1,
  304. .is_binary = false, .addr = 0, .size = ENCLAVE_STACK_SIZE,
  305. .prot = PROT_READ | PROT_WRITE, .type = SGX_PAGE_REG
  306. };
  307. area_num++;
  308. }
  309. areas[area_num] = (struct mem_area) {
  310. .desc = "pal", .skip_eextend = false, .fd = enclave_image,
  311. .is_binary = true, .addr = 0, .size = 0 /* set below */,
  312. .prot = 0, .type = SGX_PAGE_REG
  313. };
  314. struct mem_area* pal_area = &areas[area_num++];
  315. ret = scan_enclave_binary(enclave_image, &pal_area->addr, &pal_area->size, &enclave_entry_addr);
  316. if (ret < 0) {
  317. SGX_DBG(DBG_E, "Scanning Pal binary (%s) failed: %d\n", ENCLAVE_FILENAME, -ret);
  318. goto out;
  319. }
  320. struct mem_area* exec_area = NULL;
  321. if (enclave->exec != -1) {
  322. areas[area_num] = (struct mem_area) {
  323. .desc = "exec", .skip_eextend = false, .fd = enclave->exec,
  324. .is_binary = true, .addr = 0, .size = 0 /* set below */,
  325. .prot = PROT_WRITE, .type = SGX_PAGE_REG
  326. };
  327. exec_area = &areas[area_num++];
  328. ret = scan_enclave_binary(enclave->exec, &exec_area->addr, &exec_area->size, NULL);
  329. if (ret < 0) {
  330. SGX_DBG(DBG_E, "Scanning application binary failed: %d\n", -ret);
  331. goto out;
  332. }
  333. }
  334. unsigned long populating = enclave->size;
  335. for (int i = 0 ; i < area_num ; i++) {
  336. if (areas[i].addr)
  337. continue;
  338. areas[i].addr = populating - areas[i].size;
  339. populating = SATURATED_P_SUB(areas[i].addr, MEMORY_GAP, 0);
  340. }
  341. enclave_entry_addr += pal_area->addr;
  342. if (exec_area) {
  343. if (exec_area->addr + exec_area->size > pal_area->addr - MEMORY_GAP) {
  344. SGX_DBG(DBG_E, "Application binary overlaps with Pal binary\n");
  345. ret = -EINVAL;
  346. goto out;
  347. }
  348. if (exec_area->addr + exec_area->size + MEMORY_GAP < populating) {
  349. if (populating > heap_min) {
  350. unsigned long addr = exec_area->addr + exec_area->size + MEMORY_GAP;
  351. if (addr < heap_min)
  352. addr = heap_min;
  353. areas[area_num] = (struct mem_area) {
  354. .desc = "free", .skip_eextend = true, .fd = -1,
  355. .is_binary = false, .addr = addr, .size = populating - addr,
  356. .prot = PROT_READ | PROT_WRITE | PROT_EXEC, .type = SGX_PAGE_REG
  357. };
  358. area_num++;
  359. }
  360. populating = SATURATED_P_SUB(exec_area->addr, MEMORY_GAP, 0);
  361. }
  362. }
  363. if (populating > heap_min) {
  364. areas[area_num] = (struct mem_area) {
  365. .desc = "free", .skip_eextend = true, .fd = -1,
  366. .is_binary = false, .addr = heap_min, .size = populating - heap_min,
  367. .prot = PROT_READ | PROT_WRITE | PROT_EXEC, .type = SGX_PAGE_REG
  368. };
  369. area_num++;
  370. }
  371. for (int i = 0 ; i < area_num ; i++) {
  372. if (areas[i].fd != -1 && areas[i].is_binary) {
  373. ret = load_enclave_binary(&enclave_secs, areas[i].fd, areas[i].addr, areas[i].prot);
  374. if (ret < 0) {
  375. SGX_DBG(DBG_E, "Loading enclave binary failed: %d\n", -ret);
  376. goto out;
  377. }
  378. continue;
  379. }
  380. void * data = NULL;
  381. if (!strcmp_static(areas[i].desc, "tls")) {
  382. data = (void *) INLINE_SYSCALL(mmap, 6, NULL, areas[i].size,
  383. PROT_READ|PROT_WRITE,
  384. MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
  385. if (IS_ERR_P(data) || data == NULL) {
  386. /* Note that Graphene currently doesn't handle 0x0 addresses */
  387. SGX_DBG(DBG_E, "Allocating memory for tls pages failed\n");
  388. goto out;
  389. }
  390. for (uint32_t t = 0 ; t < enclave->thread_num ; t++) {
  391. struct enclave_tls * gs = data + pagesize * t;
  392. memset(gs, 0, pagesize);
  393. assert(sizeof(*gs) <= pagesize);
  394. gs->common.self = (PAL_TCB *)(
  395. tls_area->addr + pagesize * t + enclave_secs.base);
  396. gs->enclave_size = enclave->size;
  397. gs->tcs_offset = tcs_area->addr + pagesize * t;
  398. gs->initial_stack_offset =
  399. stack_areas[t].addr + ENCLAVE_STACK_SIZE;
  400. gs->ssa = (void *) ssa_area->addr +
  401. enclave->ssaframesize * SSAFRAMENUM * t +
  402. enclave_secs.base;
  403. gs->gpr = gs->ssa +
  404. enclave->ssaframesize - sizeof(sgx_pal_gpr_t);
  405. gs->manifest_size = manifest_size;
  406. gs->heap_min = (void *) enclave_secs.base + heap_min;
  407. gs->heap_max = (void *) enclave_secs.base + pal_area->addr - MEMORY_GAP;
  408. if (exec_area) {
  409. gs->exec_addr = (void *) enclave_secs.base + exec_area->addr;
  410. gs->exec_size = exec_area->size;
  411. }
  412. gs->thread = NULL;
  413. }
  414. } else if (!strcmp_static(areas[i].desc, "tcs")) {
  415. data = (void *) INLINE_SYSCALL(mmap, 6, NULL, areas[i].size,
  416. PROT_READ|PROT_WRITE,
  417. MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
  418. if (IS_ERR_P(data) || data == NULL) {
  419. /* Note that Graphene currently doesn't handle 0x0 addresses */
  420. SGX_DBG(DBG_E, "Allocating memory for tcs pages failed\n");
  421. goto out;
  422. }
  423. for (uint32_t t = 0 ; t < enclave->thread_num ; t++) {
  424. sgx_arch_tcs_t * tcs = data + pagesize * t;
  425. memset(tcs, 0, pagesize);
  426. tcs->ossa = ssa_area->addr +
  427. enclave->ssaframesize * SSAFRAMENUM * t;
  428. tcs->nssa = SSAFRAMENUM;
  429. tcs->oentry = enclave_entry_addr;
  430. tcs->ofs_base = 0;
  431. tcs->ogs_base = tls_area->addr + t * pagesize;
  432. tcs->ofs_limit = 0xfff;
  433. tcs->ogs_limit = 0xfff;
  434. tcs_addrs[t] = (void *) enclave_secs.base + tcs_area->addr + pagesize * t;
  435. }
  436. } else if (areas[i].fd != -1) {
  437. data = (void *) INLINE_SYSCALL(mmap, 6, NULL, areas[i].size,
  438. PROT_READ,
  439. MAP_FILE|MAP_PRIVATE,
  440. areas[i].fd, 0);
  441. if (IS_ERR_P(data) || data == NULL) {
  442. /* Note that Graphene currently doesn't handle 0x0 addresses */
  443. SGX_DBG(DBG_E, "Allocating memory for file %s failed\n", areas[i].desc);
  444. goto out;
  445. }
  446. }
  447. ret = add_pages_to_enclave(&enclave_secs, (void *) areas[i].addr, data, areas[i].size,
  448. areas[i].type, areas[i].prot, areas[i].skip_eextend, areas[i].desc);
  449. if (data)
  450. INLINE_SYSCALL(munmap, 2, data, areas[i].size);
  451. if (ret < 0) {
  452. SGX_DBG(DBG_E, "Adding pages (%s) to enclave failed: %d\n", areas[i].desc, -ret);
  453. goto out;
  454. }
  455. }
  456. ret = init_enclave(&enclave_secs, &enclave_sigstruct, &enclave_token);
  457. if (ret < 0) {
  458. SGX_DBG(DBG_E, "Initializing enclave failed: %d\n", -ret);
  459. goto out;
  460. }
  461. create_tcs_mapper((void *) enclave_secs.base + tcs_area->addr, enclave->thread_num);
  462. struct enclave_dbginfo * dbg = (void *)
  463. INLINE_SYSCALL(mmap, 6, DBGINFO_ADDR,
  464. sizeof(struct enclave_dbginfo),
  465. PROT_READ|PROT_WRITE,
  466. MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
  467. -1, 0);
  468. if (IS_ERR_P(dbg)) {
  469. SGX_DBG(DBG_E, "Cannot allocate debug information (GDB will not work)\n");
  470. } else {
  471. dbg->pid = INLINE_SYSCALL(getpid, 0);
  472. dbg->base = enclave->baseaddr;
  473. dbg->size = enclave->size;
  474. dbg->ssaframesize = enclave->ssaframesize;
  475. dbg->aep = async_exit_pointer;
  476. dbg->thread_tids[0] = dbg->pid;
  477. for (int i = 0 ; i < MAX_DBG_THREADS ; i++)
  478. dbg->tcs_addrs[i] = tcs_addrs[i];
  479. }
  480. ret = 0;
  481. out:
  482. if (enclave_image >= 0)
  483. INLINE_SYSCALL(close, 1, enclave_image);
  484. return ret;
  485. }
  486. static int mcast_s (int port)
  487. {
  488. struct sockaddr_in addr;
  489. int ret = 0;
  490. addr.sin_family = AF_INET;
  491. addr.sin_addr.s_addr = INADDR_ANY;
  492. addr.sin_port = htons(port);
  493. int fd = INLINE_SYSCALL(socket, 3, AF_INET, SOCK_DGRAM, 0);
  494. if (IS_ERR(fd))
  495. return -ERRNO(fd);
  496. ret = INLINE_SYSCALL(setsockopt, 5, fd, IPPROTO_IP, IP_MULTICAST_IF,
  497. &addr.sin_addr.s_addr, sizeof(addr.sin_addr.s_addr));
  498. if (IS_ERR(ret))
  499. return -ERRNO(ret);
  500. return fd;
  501. }
  502. static int mcast_c (int port)
  503. {
  504. int ret = 0, fd;
  505. struct sockaddr_in addr;
  506. addr.sin_family = AF_INET;
  507. addr.sin_addr.s_addr = INADDR_ANY;
  508. addr.sin_port = htons(port);
  509. fd = INLINE_SYSCALL(socket, 3, AF_INET, SOCK_DGRAM, 0);
  510. if (IS_ERR(fd))
  511. return -ERRNO(fd);
  512. int reuse = 1;
  513. INLINE_SYSCALL(setsockopt, 5, fd, SOL_SOCKET, SO_REUSEADDR,
  514. &reuse, sizeof(reuse));
  515. ret = INLINE_SYSCALL(bind, 3, fd, &addr, sizeof(addr));
  516. if (IS_ERR(ret))
  517. return -ERRNO(ret);
  518. ret = INLINE_SYSCALL(setsockopt, 5, fd, IPPROTO_IP, IP_MULTICAST_IF,
  519. &addr.sin_addr.s_addr, sizeof(addr.sin_addr.s_addr));
  520. if (IS_ERR(ret))
  521. return -ERRNO(ret);
  522. inet_pton4(MCAST_GROUP, sizeof(MCAST_GROUP) - 1,
  523. &addr.sin_addr.s_addr);
  524. struct ip_mreq group;
  525. group.imr_multiaddr.s_addr = addr.sin_addr.s_addr;
  526. group.imr_interface.s_addr = INADDR_ANY;
  527. ret = INLINE_SYSCALL(setsockopt, 5, fd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
  528. &group, sizeof(group));
  529. if (IS_ERR(ret))
  530. return -ERRNO(ret);
  531. return fd;
  532. }
  533. static unsigned long randval = 0;
  534. void getrand (void * buffer, size_t size)
  535. {
  536. size_t bytes = 0;
  537. while (bytes + sizeof(uint64_t) <= size) {
  538. *(uint64_t*) (buffer + bytes) = randval;
  539. randval = hash64(randval);
  540. bytes += sizeof(uint64_t);
  541. }
  542. if (bytes < size) {
  543. memcpy(buffer + bytes, &randval, size - bytes);
  544. randval = hash64(randval);
  545. }
  546. }
  547. static void create_instance (struct pal_sec * pal_sec)
  548. {
  549. PAL_NUM id;
  550. getrand(&id, sizeof(id));
  551. snprintf(pal_sec->pipe_prefix, sizeof(pal_sec->pipe_prefix), "/graphene/%016lx/", id);
  552. pal_sec->instance_id = id;
  553. }
  554. static int load_manifest (int fd, struct config_store ** config_ptr)
  555. {
  556. int ret = 0;
  557. int nbytes = INLINE_SYSCALL(lseek, 3, fd, 0, SEEK_END);
  558. if (IS_ERR(nbytes)) {
  559. SGX_DBG(DBG_E, "Cannot detect size of manifest file\n");
  560. return -ERRNO(nbytes);
  561. }
  562. struct config_store * config = malloc(sizeof(struct config_store));
  563. if (!config) {
  564. SGX_DBG(DBG_E, "Not enough memory for config_store of manifest\n");
  565. return -ENOMEM;
  566. }
  567. void * config_raw = (void *)
  568. INLINE_SYSCALL(mmap, 6, NULL, nbytes, PROT_READ, MAP_PRIVATE, fd, 0);
  569. if (IS_ERR_P(config_raw)) {
  570. SGX_DBG(DBG_E, "Cannot mmap manifest file\n");
  571. ret = -ERRNO_P(config_raw);
  572. goto out;
  573. }
  574. config->raw_data = config_raw;
  575. config->raw_size = nbytes;
  576. config->malloc = malloc;
  577. config->free = NULL;
  578. const char * errstring = NULL;
  579. ret = read_config(config, NULL, &errstring);
  580. if (ret < 0) {
  581. SGX_DBG(DBG_E, "Cannot read manifest: %s\n", errstring);
  582. goto out;
  583. }
  584. *config_ptr = config;
  585. ret = 0;
  586. out:
  587. if (ret < 0) {
  588. free(config);
  589. if (!IS_ERR_P(config_raw))
  590. INLINE_SYSCALL(munmap, 2, config_raw, nbytes);
  591. }
  592. return ret;
  593. }
  594. /*
  595. * Returns the number of online CPUs read from /sys/devices/system/cpu/online, -errno on failure.
  596. * Understands complex formats like "1,3-5,6".
  597. */
  598. static int get_cpu_count(void) {
  599. int fd = INLINE_SYSCALL(open, 3, "/sys/devices/system/cpu/online", O_RDONLY|O_CLOEXEC, 0);
  600. if (fd < 0)
  601. return unix_to_pal_error(ERRNO(fd));
  602. char buf[64];
  603. int ret = INLINE_SYSCALL(read, 3, fd, buf, sizeof(buf) - 1);
  604. if (ret < 0) {
  605. INLINE_SYSCALL(close, 1, fd);
  606. return unix_to_pal_error(ERRNO(ret));
  607. }
  608. buf[ret] = '\0'; /* ensure null-terminated buf even in partial read */
  609. char* end;
  610. char* ptr = buf;
  611. int cpu_count = 0;
  612. while (*ptr) {
  613. while (*ptr == ' ' || *ptr == '\t' || *ptr == ',')
  614. ptr++;
  615. int firstint = (int)strtol(ptr, &end, 10);
  616. if (ptr == end)
  617. break;
  618. if (*end == '\0' || *end == ',') {
  619. /* single CPU index, count as one more CPU */
  620. cpu_count++;
  621. } else if (*end == '-') {
  622. /* CPU range, count how many CPUs in range */
  623. ptr = end + 1;
  624. int secondint = (int)strtol(ptr, &end, 10);
  625. if (secondint > firstint)
  626. cpu_count += secondint - firstint + 1; // inclusive (e.g., 0-7, or 8-16)
  627. }
  628. ptr = end;
  629. }
  630. INLINE_SYSCALL(close, 1, fd);
  631. if (cpu_count == 0)
  632. return -PAL_ERROR_STREAMNOTEXIST;
  633. return cpu_count;
  634. }
  635. static int load_enclave (struct pal_enclave * enclave,
  636. int manifest_fd,
  637. char * manifest_uri,
  638. char * exec_uri,
  639. char * args, size_t args_size,
  640. char * env, size_t env_size,
  641. bool exec_uri_inferred)
  642. {
  643. struct pal_sec * pal_sec = &enclave->pal_sec;
  644. int ret;
  645. struct timeval tv;
  646. #if PRINT_ENCLAVE_STAT == 1
  647. INLINE_SYSCALL(gettimeofday, 2, &tv, NULL);
  648. pal_sec->start_time = tv.tv_sec * 1000000UL + tv.tv_usec;
  649. #endif
  650. ret = open_gsgx();
  651. if (ret < 0)
  652. return ret;
  653. if (!is_wrfsbase_supported())
  654. return -EPERM;
  655. INLINE_SYSCALL(gettimeofday, 2, &tv, NULL);
  656. randval = tv.tv_sec * 1000000UL + tv.tv_usec;
  657. pal_sec->pid = INLINE_SYSCALL(getpid, 0);
  658. pal_sec->uid = INLINE_SYSCALL(getuid, 0);
  659. pal_sec->gid = INLINE_SYSCALL(getgid, 0);
  660. int num_cpus = get_cpu_count();
  661. if (num_cpus < 0) {
  662. return num_cpus;
  663. }
  664. pal_sec->num_cpus = num_cpus;
  665. #ifdef DEBUG
  666. size_t env_i = 0;
  667. while (env_i < env_size) {
  668. if (!strcmp_static(&env[env_i], "IN_GDB=1")) {
  669. SGX_DBG(DBG_I, "[ Running under GDB ]\n");
  670. pal_sec->in_gdb = true;
  671. } else if (strstartswith_static(&env[env_i], "LD_PRELOAD=")) {
  672. uint64_t env_i_size = strnlen(&env[env_i], env_size - env_i) + 1;
  673. memmove(&env[env_i], &env[env_i + env_i_size], env_size - env_i - env_i_size);
  674. env_size -= env_i_size;
  675. continue;
  676. }
  677. env_i += strnlen(&env[env_i], env_size - env_i) + 1;
  678. }
  679. #endif
  680. enclave->manifest = manifest_fd;
  681. ret = load_manifest(enclave->manifest, &enclave->config);
  682. if (ret < 0) {
  683. SGX_DBG(DBG_E, "Invalid manifest: %s\n", manifest_uri);
  684. return -EINVAL;
  685. }
  686. char cfgbuf[CONFIG_MAX];
  687. const char * errstring;
  688. // A manifest can specify an executable with a different base name
  689. // than the manifest itself. Always give the exec field of the manifest
  690. // precedence if specified.
  691. if (get_config(enclave->config, "loader.exec", cfgbuf, sizeof(cfgbuf)) > 0) {
  692. exec_uri = resolve_uri(cfgbuf, &errstring);
  693. exec_uri_inferred = false;
  694. if (!exec_uri) {
  695. SGX_DBG(DBG_E, "%s: %s\n", errstring, cfgbuf);
  696. return -EINVAL;
  697. }
  698. }
  699. enclave->exec = INLINE_SYSCALL(open, 3, exec_uri + static_strlen("file:"),
  700. O_RDONLY|O_CLOEXEC, 0);
  701. if (IS_ERR(enclave->exec)) {
  702. if (exec_uri_inferred) {
  703. // It is valid for an enclave not to have an executable.
  704. // We need to catch the case where we inferred the executable
  705. // from the manifest file name, but it doesn't exist, and let
  706. // the enclave go a bit further. Go ahead and warn the user,
  707. // though.
  708. SGX_DBG(DBG_I, "Inferred executable cannot be opened: %s. This may be ok, "
  709. "or may represent a manifest misconfiguration. This typically "
  710. "represents advanced usage, and if it is not what you intended, "
  711. "try setting the loader.exec field in the manifest.\n", exec_uri);
  712. enclave->exec = -1;
  713. } else {
  714. SGX_DBG(DBG_E, "Cannot open executable %s\n", exec_uri);
  715. return -EINVAL;
  716. }
  717. }
  718. if (get_config(enclave->config, "sgx.sigfile", cfgbuf, sizeof(cfgbuf)) < 0) {
  719. SGX_DBG(DBG_E, "Sigstruct file not found ('sgx.sigfile' must be specified in manifest)\n");
  720. return -EINVAL;
  721. }
  722. char * sig_uri = resolve_uri(cfgbuf, &errstring);
  723. if (!sig_uri) {
  724. SGX_DBG(DBG_E, "%s: %s\n", errstring, cfgbuf);
  725. return -EINVAL;
  726. }
  727. if (!strendswith(sig_uri, ".sig")) {
  728. SGX_DBG(DBG_E, "Invalid sigstruct file URI as %s\n", cfgbuf);
  729. free(sig_uri);
  730. return -EINVAL;
  731. }
  732. enclave->sigfile = INLINE_SYSCALL(open, 3, sig_uri + static_strlen("file:"),
  733. O_RDONLY|O_CLOEXEC, 0);
  734. if (IS_ERR(enclave->sigfile)) {
  735. SGX_DBG(DBG_E, "Cannot open sigstruct file %s\n", sig_uri);
  736. free(sig_uri);
  737. return -EINVAL;
  738. }
  739. char * token_uri = alloc_concat(sig_uri, strlen(sig_uri) - static_strlen(".sig"), ".token", -1);
  740. free(sig_uri);
  741. if (!token_uri) {
  742. INLINE_SYSCALL(close, 1, enclave->sigfile);
  743. return -ENOMEM;
  744. }
  745. enclave->token = INLINE_SYSCALL(open, 3, token_uri + static_strlen("file:"),
  746. O_RDONLY|O_CLOEXEC, 0);
  747. if (IS_ERR(enclave->token)) {
  748. SGX_DBG(DBG_E, "Cannot open token \'%s\'. Use \'"
  749. PAL_FILE("pal-sgx-get-token")
  750. "\' on the runtime host or run \'make SGX=1 sgx-tokens\' "
  751. "in the Graphene source to create the token file.\n",
  752. token_uri);
  753. free(token_uri);
  754. return -EINVAL;
  755. }
  756. SGX_DBG(DBG_I, "Token file: %s\n", token_uri);
  757. free(token_uri);
  758. ret = initialize_enclave(enclave);
  759. if (ret < 0)
  760. return ret;
  761. if (!pal_sec->instance_id)
  762. create_instance(&enclave->pal_sec);
  763. memcpy(pal_sec->manifest_name, manifest_uri, strlen(manifest_uri) + 1);
  764. if (enclave->exec == -1) {
  765. memset(pal_sec->exec_name, 0, sizeof(PAL_SEC_STR));
  766. } else {
  767. memcpy(pal_sec->exec_name, exec_uri, strlen(exec_uri) + 1);
  768. }
  769. if (!pal_sec->mcast_port) {
  770. unsigned short mcast_port;
  771. getrand(&mcast_port, sizeof(unsigned short));
  772. pal_sec->mcast_port = mcast_port > 1024 ? mcast_port : mcast_port + 1024;
  773. }
  774. if ((ret = mcast_s(pal_sec->mcast_port)) >= 0) {
  775. pal_sec->mcast_srv = ret;
  776. if ((ret = mcast_c(pal_sec->mcast_port)) >= 0) {
  777. pal_sec->mcast_cli = ret;
  778. } else {
  779. INLINE_SYSCALL(close, 1, pal_sec->mcast_srv);
  780. pal_sec->mcast_srv = 0;
  781. }
  782. }
  783. ret = sgx_signal_setup();
  784. if (ret < 0)
  785. return ret;
  786. ret = init_aesm_targetinfo(&pal_sec->aesm_targetinfo);
  787. if (ret < 0)
  788. return ret;
  789. void* alt_stack = (void*)INLINE_SYSCALL(mmap, 6, NULL, ALT_STACK_SIZE,
  790. PROT_READ | PROT_WRITE,
  791. MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  792. if (IS_ERR_P(alt_stack))
  793. return -ENOMEM;
  794. /* initialize TCB at the top of the alternative stack */
  795. PAL_TCB_LINUX* tcb = alt_stack + ALT_STACK_SIZE - sizeof(PAL_TCB_LINUX);
  796. tcb->common.self = &tcb->common;
  797. tcb->alt_stack = alt_stack;
  798. tcb->stack = NULL; /* main thread uses the stack provided by Linux */
  799. tcb->tcs = NULL; /* initialized by child thread */
  800. pal_thread_init(tcb);
  801. /* start running trusted PAL */
  802. ecall_enclave_start(args, args_size, env, env_size);
  803. #if PRINT_ENCLAVE_STAT == 1
  804. PAL_NUM exit_time = 0;
  805. INLINE_SYSCALL(gettimeofday, 2, &tv, NULL);
  806. exit_time = tv.tv_sec * 1000000UL + tv.tv_usec;
  807. #endif
  808. unmap_tcs();
  809. INLINE_SYSCALL(munmap, 2, alt_stack, ALT_STACK_SIZE);
  810. INLINE_SYSCALL(exit, 0);
  811. return 0;
  812. }
  813. /* Grow stack of main thread to THREAD_STACK_SIZE by allocating a large dummy array and probing
  814. * each stack page (Linux dynamically grows the stack of the main thread but gets confused with
  815. * huge-jump stack accesses coming from within the enclave). Note that other, non-main threads
  816. * are created manually via clone(.., THREAD_STACK_SIZE, ..) and thus do not need this hack. */
  817. static void __attribute__ ((noinline)) force_linux_to_grow_stack() {
  818. char dummy[THREAD_STACK_SIZE];
  819. for (uint64_t i = 0; i < sizeof(dummy); i += PRESET_PAGESIZE) {
  820. /* touch each page on the stack just to make it is not optimized away */
  821. __asm__ volatile("movq %0, %%rbx\r\n"
  822. "movq (%%rbx), %%rbx\r\n"
  823. : : "r"(&dummy[i]) : "%rbx");
  824. }
  825. }
/* Entry point of the untrusted SGX PAL loader.
 *
 * Takes either an ELF executable or a manifest file as the first argument,
 * derives the matching "<base>.manifest.sgx" file name, opens both, and hands
 * control to load_enclave(), which creates and enters the enclave.
 * Returns 0 on success or a negative errno-style value on failure. */
int main (int argc, char ** argv, char ** envp)
{
    char * manifest_uri = NULL;
    char * exec_uri = NULL;
    const char * pal_loader = argv[0];
    int fd = -1;
    int ret = 0;
    bool exec_uri_inferred = false; // Handle the case where the exec uri is
                                    // inferred from the manifest name somewhat
                                    // differently

    force_linux_to_grow_stack();

    /* Shift past argv[0] (the loader itself); argv[0] is now the target. */
    argc--;
    argv++;

    int is_child = sgx_init_child_process(&pal_enclave.pal_sec);
    if (is_child < 0) {
        ret = is_child;
        goto out;
    }

    if (!is_child) {
        /* occupy PROC_INIT_FD so no one will use it */
        INLINE_SYSCALL(dup2, 2, 0, PROC_INIT_FD);

        if (!argc)
            goto usage;

        /* Normalize the target path into a "file:" URI. */
        if (!strcmp_static(argv[0], "file:")) {
            exec_uri = alloc_concat(argv[0], -1, NULL, -1);
        } else {
            exec_uri = alloc_concat("file:", -1, argv[0], -1);
        }
    } else {
        /* A child process inherits the executable name from its parent
         * through the pal_sec structure filled by sgx_init_child_process(). */
        exec_uri = alloc_concat(pal_enclave.pal_sec.exec_name, -1, NULL, -1);
    }

    if (!exec_uri) {
        ret = -ENOMEM;
        goto out;
    }

    fd = INLINE_SYSCALL(open, 3, exec_uri + static_strlen("file:"), O_RDONLY|O_CLOEXEC, 0);
    if (IS_ERR(fd)) {
        SGX_DBG(DBG_E, "Input file not found: %s\n", exec_uri);
        ret = fd;
        goto usage;
    }

    /* Peek at the magic bytes to distinguish an ELF executable from a
     * manifest file (anything that is not "\177ELF" is treated as a manifest
     * further below). */
    char file_first_four_bytes[4];
    ret = INLINE_SYSCALL(read, 3, fd, file_first_four_bytes, sizeof(file_first_four_bytes));
    if (IS_ERR(ret)) {
        goto out;
    }
    if (ret != sizeof(file_first_four_bytes)) {
        ret = -EINVAL;
        goto out;
    }

    /* Derive the SGX manifest file name from the target's base name. */
    char manifest_base_name[URI_MAX];
    size_t manifest_base_name_len = sizeof(manifest_base_name);
    ret = get_base_name(exec_uri + static_strlen("file:"), manifest_base_name,
                        &manifest_base_name_len);
    if (ret < 0) {
        goto out;
    }

    if (strendswith(manifest_base_name, ".manifest")) {
        /* "<x>.manifest" -> "<x>.manifest.sgx" */
        if (!strcpy_static(manifest_base_name + manifest_base_name_len, ".sgx",
                           sizeof(manifest_base_name) - manifest_base_name_len)) {
            ret = -E2BIG;
            goto out;
        }
    } else if (!strendswith(manifest_base_name, ".manifest.sgx")) {
        /* plain name -> "<x>.manifest.sgx" */
        if (!strcpy_static(manifest_base_name + manifest_base_name_len, ".manifest.sgx",
                           sizeof(manifest_base_name) - manifest_base_name_len)) {
            ret = -E2BIG;
            goto out;
        }
    }

    int manifest_fd = -1;

    if (memcmp(file_first_four_bytes, "\177ELF", sizeof(file_first_four_bytes))) {
        /* exec_uri doesn't refer to ELF executable, so it must refer to the
         * manifest. Verify this and update exec_uri with the manifest suffix
         * removed.
         */
        size_t exec_uri_len = strlen(exec_uri);
        if (strendswith(exec_uri, ".manifest")) {
            exec_uri[exec_uri_len - static_strlen(".manifest")] = '\0';
        } else if (strendswith(exec_uri, ".manifest.sgx")) {
            /* The target already is the SGX manifest: reuse the open fd,
             * rewound to the start (4 bytes were consumed by the peek). */
            INLINE_SYSCALL(lseek, 3, fd, 0, SEEK_SET);
            manifest_fd = fd;
            exec_uri[exec_uri_len - static_strlen(".manifest.sgx")] = '\0';
        } else {
            SGX_DBG(DBG_E, "Invalid manifest file specified: %s\n", exec_uri);
            goto usage;
        }
        exec_uri_inferred = true;
    }

    if (manifest_fd == -1) {
        /* Target was an ELF or a plain ".manifest": close it and open the
         * derived ".manifest.sgx" instead. fd aliases manifest_fd from here
         * on, so the single close at 'out' releases both. */
        INLINE_SYSCALL(close, 1, fd);
        fd = manifest_fd = INLINE_SYSCALL(open, 3, manifest_base_name, O_RDONLY|O_CLOEXEC, 0);
        if (IS_ERR(fd)) {
            SGX_DBG(DBG_E, "Cannot open manifest file: %s\n", manifest_base_name);
            goto usage;
        }
    }

    manifest_uri = alloc_concat("file:", static_strlen("file:"), manifest_base_name, -1);
    if (!manifest_uri) {
        ret = -ENOMEM;
        goto out;
    }

    SGX_DBG(DBG_I, "Manifest file: %s\n", manifest_uri);
    if (exec_uri_inferred)
        SGX_DBG(DBG_I, "Inferred executable file: %s\n", exec_uri);
    else
        SGX_DBG(DBG_I, "Executable file: %s\n", exec_uri);

    /*
     * While C does not guarantee that the argv[i] and envp[i] strings are
     * continuous we know that we are running on Linux, which does this. This
     * saves us creating a copy of all argv and envp strings.
     */
    char * args = argv[0];
    size_t args_size = argc > 0 ? (argv[argc - 1] - argv[0]) + strlen(argv[argc - 1]) + 1: 0;

    int envc = 0;
    while (envp[envc] != NULL) {
        envc++;
    }
    char * env = envp[0];
    size_t env_size = envc > 0 ? (envp[envc - 1] - envp[0]) + strlen(envp[envc - 1]) + 1: 0;

    ret = load_enclave(&pal_enclave, manifest_fd, manifest_uri, exec_uri, args, args_size, env, env_size,
                       exec_uri_inferred);

out:
    /* NOTE(review): these checks assume pal_enclave.exec/sigfile/token are
     * initialized to -1 before load_enclave() runs; if they are merely
     * zero-initialized, a failure before they are set would close fd 0 here —
     * confirm against pal_enclave's definition/initialization. */
    if (pal_enclave.exec >= 0)
        INLINE_SYSCALL(close, 1, pal_enclave.exec);
    if (pal_enclave.sigfile >= 0)
        INLINE_SYSCALL(close, 1, pal_enclave.sigfile);
    if (pal_enclave.token >= 0)
        INLINE_SYSCALL(close, 1, pal_enclave.token);
    /* fd holds either a valid descriptor or the raw error from open/read;
     * IS_ERR() also covers the initial -1. */
    if (!IS_ERR(fd))
        INLINE_SYSCALL(close, 1, fd);
    free(exec_uri);
    free(manifest_uri);
    return ret;

usage:
    /* Falls through to 'out' with ret forced to -EINVAL (this overrides any
     * earlier error code stored in ret on the way here). */
    SGX_DBG(DBG_E, "USAGE: %s [executable|manifest] args ...\n", pal_loader);
    ret = -EINVAL;
    goto out;
}