sgx_main.c 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153
  1. #include <pal_linux.h>
  2. #include <pal_linux_error.h>
  3. #include <pal_rtld.h>
  4. #include <hex.h>
  5. #include "sgx_internal.h"
  6. #include "sgx_tls.h"
  7. #include "sgx_enclave.h"
  8. #include "debugger/sgx_gdb.h"
  9. #include <asm/fcntl.h>
  10. #include <asm/socket.h>
  11. #include <linux/fs.h>
  12. #include <linux/in.h>
  13. #include <linux/in6.h>
  14. #include <asm/errno.h>
  15. #include <ctype.h>
  16. #include <sysdep.h>
  17. #include <sysdeps/generic/ldsodefs.h>
/* Host page geometry, fixed at build time via PRESET_PAGESIZE.
 * pagemask clears the in-page offset bits of an address.
 * NOTE(review): despite its name, "pageshift" holds the in-page offset
 * MASK (PRESET_PAGESIZE - 1), not a shift count -- confirm before using
 * it as a shift amount. */
unsigned long pagesize = PRESET_PAGESIZE;
unsigned long pagemask = ~(PRESET_PAGESIZE - 1);
unsigned long pageshift = PRESET_PAGESIZE - 1;

/* The enclave instance managed by this untrusted-PAL process. */
struct pal_enclave pal_enclave;
  22. static inline
  23. char * alloc_concat(const char * p, size_t plen,
  24. const char * s, size_t slen)
  25. {
  26. plen = (plen != (size_t)-1) ? plen : (p ? strlen(p) : 0);
  27. slen = (slen != (size_t)-1) ? slen : (s ? strlen(s) : 0);
  28. char * buf = malloc(plen + slen + 1);
  29. if (!buf)
  30. return NULL;
  31. if (plen)
  32. memcpy(buf, p, plen);
  33. if (slen)
  34. memcpy(buf + plen, s, slen);
  35. buf[plen + slen] = '\0';
  36. return buf;
  37. }
  38. static unsigned long parse_int (const char * str)
  39. {
  40. unsigned long num = 0;
  41. int radix = 10;
  42. char c;
  43. if (str[0] == '0') {
  44. str++;
  45. radix = 8;
  46. if (str[0] == 'x') {
  47. str++;
  48. radix = 16;
  49. }
  50. }
  51. while ((c = *(str++))) {
  52. int8_t val = hex2dec(c);
  53. if (val < 0)
  54. break;
  55. if ((uint8_t) val >= radix)
  56. break;
  57. num = num * radix + (uint8_t) val;
  58. }
  59. if (c == 'G' || c == 'g')
  60. num *= 1024 * 1024 * 1024;
  61. else if (c == 'M' || c == 'm')
  62. num *= 1024 * 1024;
  63. else if (c == 'K' || c == 'k')
  64. num *= 1024;
  65. return num;
  66. }
  67. static char * resolve_uri (const char * uri, const char ** errstring)
  68. {
  69. if (!strstartswith_static(uri, "file:")) {
  70. *errstring = "Invalid URI";
  71. return NULL;
  72. }
  73. char path_buf[URI_MAX];
  74. size_t len = URI_MAX;
  75. int ret = get_norm_path(uri + 5, path_buf, &len);
  76. if (ret < 0) {
  77. *errstring = "Invalid URI";
  78. return NULL;
  79. }
  80. return alloc_concat("file:", static_strlen("file:"), path_buf, len);
  81. }
  82. static
  83. int scan_enclave_binary (int fd, unsigned long * base, unsigned long * size,
  84. unsigned long * entry)
  85. {
  86. int ret = 0;
  87. if (IS_ERR(ret = INLINE_SYSCALL(lseek, 3, fd, 0, SEEK_SET)))
  88. return -ERRNO(ret);
  89. char filebuf[FILEBUF_SIZE];
  90. ret = INLINE_SYSCALL(read, 3, fd, filebuf, FILEBUF_SIZE);
  91. if (IS_ERR(ret))
  92. return -ERRNO(ret);
  93. if ((size_t)ret < sizeof(ElfW(Ehdr)))
  94. return -ENOEXEC;
  95. const ElfW(Ehdr) * header = (void *) filebuf;
  96. const ElfW(Phdr) * phdr = (void *) filebuf + header->e_phoff;
  97. const ElfW(Phdr) * ph;
  98. if (memcmp(header->e_ident, ELFMAG, SELFMAG) != 0)
  99. return -ENOEXEC;
  100. struct loadcmd {
  101. ElfW(Addr) mapstart, mapend;
  102. } loadcmds[16], *c;
  103. int nloadcmds = 0;
  104. for (ph = phdr ; ph < &phdr[header->e_phnum] ; ph++)
  105. if (ph->p_type == PT_LOAD) {
  106. if (nloadcmds == 16)
  107. return -EINVAL;
  108. c = &loadcmds[nloadcmds++];
  109. c->mapstart = ALLOC_ALIGNDOWN(ph->p_vaddr);
  110. c->mapend = ALLOC_ALIGNUP(ph->p_vaddr + ph->p_memsz);
  111. }
  112. *base = loadcmds[0].mapstart;
  113. *size = loadcmds[nloadcmds - 1].mapend - loadcmds[0].mapstart;
  114. if (entry)
  115. *entry = header->e_entry;
  116. return 0;
  117. }
  118. static
  119. int load_enclave_binary (sgx_arch_secs_t * secs, int fd,
  120. unsigned long base, unsigned long prot)
  121. {
  122. int ret = 0;
  123. if (IS_ERR(ret = INLINE_SYSCALL(lseek, 3, fd, 0, SEEK_SET)))
  124. return -ERRNO(ret);
  125. char filebuf[FILEBUF_SIZE];
  126. ret = INLINE_SYSCALL(read, 3, fd, filebuf, FILEBUF_SIZE);
  127. if (IS_ERR(ret))
  128. return -ERRNO(ret);
  129. const ElfW(Ehdr) * header = (void *) filebuf;
  130. const ElfW(Phdr) * phdr = (void *) filebuf + header->e_phoff;
  131. const ElfW(Phdr) * ph;
  132. struct loadcmd {
  133. ElfW(Addr) mapstart, mapend, datastart, dataend, allocend;
  134. unsigned int mapoff;
  135. int prot;
  136. } loadcmds[16], *c;
  137. int nloadcmds = 0;
  138. for (ph = phdr ; ph < &phdr[header->e_phnum] ; ph++)
  139. if (ph->p_type == PT_LOAD) {
  140. if (nloadcmds == 16)
  141. return -EINVAL;
  142. c = &loadcmds[nloadcmds++];
  143. c->mapstart = ALLOC_ALIGNDOWN(ph->p_vaddr);
  144. c->mapend = ALLOC_ALIGNUP(ph->p_vaddr + ph->p_filesz);
  145. c->datastart = ph->p_vaddr;
  146. c->dataend = ph->p_vaddr + ph->p_filesz;
  147. c->allocend = ph->p_vaddr + ph->p_memsz;
  148. c->mapoff = ALLOC_ALIGNDOWN(ph->p_offset);
  149. c->prot = (ph->p_flags & PF_R ? PROT_READ : 0)|
  150. (ph->p_flags & PF_W ? PROT_WRITE : 0)|
  151. (ph->p_flags & PF_X ? PROT_EXEC : 0)|prot;
  152. }
  153. base -= loadcmds[0].mapstart;
  154. for (c = loadcmds; c < &loadcmds[nloadcmds] ; c++) {
  155. ElfW(Addr) zero = c->dataend;
  156. ElfW(Addr) zeroend = ALLOC_ALIGNUP(c->allocend);
  157. ElfW(Addr) zeropage = ALLOC_ALIGNUP(zero);
  158. if (zeroend < zeropage)
  159. zeropage = zeroend;
  160. if (c->mapend > c->mapstart) {
  161. void * addr = (void *) INLINE_SYSCALL(mmap, 6, NULL,
  162. c->mapend - c->mapstart,
  163. PROT_READ|PROT_WRITE,
  164. MAP_PRIVATE | MAP_FILE,
  165. fd, c->mapoff);
  166. if (IS_ERR_P(addr))
  167. return -ERRNO_P(addr);
  168. if (c->datastart > c->mapstart)
  169. memset(addr, 0, c->datastart - c->mapstart);
  170. if (zeropage > zero)
  171. memset(addr + zero - c->mapstart, 0, zeropage - zero);
  172. ret = add_pages_to_enclave(secs, (void *) base + c->mapstart, addr,
  173. c->mapend - c->mapstart,
  174. SGX_PAGE_REG, c->prot, 0,
  175. (c->prot & PROT_EXEC) ? "code" : "data");
  176. INLINE_SYSCALL(munmap, 2, addr, c->mapend - c->mapstart);
  177. if (ret < 0)
  178. return ret;
  179. }
  180. if (zeroend > zeropage) {
  181. ret = add_pages_to_enclave(secs, (void *) base + zeropage, NULL,
  182. zeroend - zeropage,
  183. SGX_PAGE_REG, c->prot, false, "bss");
  184. if (ret < 0)
  185. return ret;
  186. }
  187. }
  188. return 0;
  189. }
  190. int initialize_enclave (struct pal_enclave * enclave)
  191. {
  192. int ret = 0;
  193. int enclave_image;
  194. sgx_arch_token_t enclave_token;
  195. sgx_arch_enclave_css_t enclave_sigstruct;
  196. sgx_arch_secs_t enclave_secs;
  197. unsigned long enclave_entry_addr;
  198. unsigned long heap_min = DEFAULT_HEAP_MIN;
  199. /* this array may overflow the stack, so we allocate it in BSS */
  200. static void* tcs_addrs[MAX_DBG_THREADS];
  201. enclave_image = INLINE_SYSCALL(open, 3, ENCLAVE_FILENAME, O_RDONLY, 0);
  202. if (IS_ERR(enclave_image)) {
  203. SGX_DBG(DBG_E, "Cannot find %s\n", ENCLAVE_FILENAME);
  204. ret = -ERRNO(enclave_image);
  205. goto out;
  206. }
  207. char cfgbuf[CONFIG_MAX];
  208. /* Reading sgx.enclave_size from manifest */
  209. if (get_config(enclave->config, "sgx.enclave_size", cfgbuf, sizeof(cfgbuf)) <= 0) {
  210. SGX_DBG(DBG_E, "Enclave size is not specified\n");
  211. ret = -EINVAL;
  212. goto out;
  213. }
  214. enclave->size = parse_int(cfgbuf);
  215. if (!enclave->size || !IS_POWER_OF_2(enclave->size)) {
  216. SGX_DBG(DBG_E, "Enclave size not a power of two (an SGX-imposed requirement)\n");
  217. ret = -EINVAL;
  218. goto out;
  219. }
  220. /* Reading sgx.thread_num from manifest */
  221. if (get_config(enclave->config, "sgx.thread_num", cfgbuf, sizeof(cfgbuf)) > 0) {
  222. enclave->thread_num = parse_int(cfgbuf);
  223. if (enclave->thread_num > MAX_DBG_THREADS) {
  224. SGX_DBG(DBG_E, "Too many threads to debug\n");
  225. ret = -EINVAL;
  226. goto out;
  227. }
  228. } else {
  229. enclave->thread_num = 1;
  230. }
  231. if (get_config(enclave->config, "sgx.static_address", cfgbuf, sizeof(cfgbuf)) > 0 && cfgbuf[0] == '1') {
  232. enclave->baseaddr = ALIGN_DOWN_POW2(heap_min, enclave->size);
  233. } else {
  234. enclave->baseaddr = ENCLAVE_HIGH_ADDRESS;
  235. heap_min = 0;
  236. }
  237. ret = read_enclave_token(enclave->token, &enclave_token);
  238. if (ret < 0) {
  239. SGX_DBG(DBG_E, "Reading enclave token failed: %d\n", -ret);
  240. goto out;
  241. }
  242. ret = read_enclave_sigstruct(enclave->sigfile, &enclave_sigstruct);
  243. if (ret < 0) {
  244. SGX_DBG(DBG_E, "Reading enclave sigstruct failed: %d\n", -ret);
  245. goto out;
  246. }
  247. memset(&enclave_secs, 0, sizeof(enclave_secs));
  248. enclave_secs.base = enclave->baseaddr;
  249. enclave_secs.size = enclave->size;
  250. ret = create_enclave(&enclave_secs, &enclave_token);
  251. if (ret < 0) {
  252. SGX_DBG(DBG_E, "Creating enclave failed: %d\n", -ret);
  253. goto out;
  254. }
  255. enclave->ssaframesize = enclave_secs.ssa_frame_size * pagesize;
  256. struct stat stat;
  257. ret = INLINE_SYSCALL(fstat, 2, enclave->manifest, &stat);
  258. if (IS_ERR(ret)) {
  259. SGX_DBG(DBG_E, "Reading manifest file's size failed: %d\n", -ret);
  260. ret = -ERRNO(ret);
  261. goto out;
  262. }
  263. int manifest_size = stat.st_size;
  264. /* Start populating enclave memory */
  265. struct mem_area {
  266. const char * desc;
  267. bool skip_eextend;
  268. int fd;
  269. bool is_binary; /* only meaningful if fd != -1 */
  270. unsigned long addr, size, prot;
  271. enum sgx_page_type type;
  272. };
  273. struct mem_area * areas =
  274. __alloca(sizeof(areas[0]) * (10 + enclave->thread_num));
  275. int area_num = 0;
  276. /* The manifest needs to be allocated at the upper end of the enclave
  277. * memory. That's used by pal_linux_main to find the manifest area. So add
  278. * it first to the list with memory areas. */
  279. areas[area_num] = (struct mem_area) {
  280. .desc = "manifest", .skip_eextend = false, .fd = enclave->manifest,
  281. .is_binary = false, .addr = 0, .size = ALLOC_ALIGNUP(manifest_size),
  282. .prot = PROT_READ, .type = SGX_PAGE_REG
  283. };
  284. area_num++;
  285. areas[area_num] = (struct mem_area) {
  286. .desc = "ssa", .skip_eextend = false, .fd = -1,
  287. .is_binary = false, .addr = 0,
  288. .size = enclave->thread_num * enclave->ssaframesize * SSAFRAMENUM,
  289. .prot = PROT_READ | PROT_WRITE, .type = SGX_PAGE_REG
  290. };
  291. struct mem_area* ssa_area = &areas[area_num++];
  292. areas[area_num] = (struct mem_area) {
  293. .desc = "tcs", .skip_eextend = false, .fd = -1,
  294. .is_binary = false, .addr = 0, .size = enclave->thread_num * pagesize,
  295. .prot = 0, .type = SGX_PAGE_TCS
  296. };
  297. struct mem_area* tcs_area = &areas[area_num++];
  298. areas[area_num] = (struct mem_area) {
  299. .desc = "tls", .skip_eextend = false, .fd = -1,
  300. .is_binary = false, .addr = 0, .size = enclave->thread_num * pagesize,
  301. .prot = PROT_READ | PROT_WRITE, .type = SGX_PAGE_REG
  302. };
  303. struct mem_area* tls_area = &areas[area_num++];
  304. struct mem_area* stack_areas = &areas[area_num]; /* memorize for later use */
  305. for (uint32_t t = 0; t < enclave->thread_num; t++) {
  306. areas[area_num] = (struct mem_area) {
  307. .desc = "stack", .skip_eextend = false, .fd = -1,
  308. .is_binary = false, .addr = 0, .size = ENCLAVE_STACK_SIZE,
  309. .prot = PROT_READ | PROT_WRITE, .type = SGX_PAGE_REG
  310. };
  311. area_num++;
  312. }
  313. areas[area_num] = (struct mem_area) {
  314. .desc = "pal", .skip_eextend = false, .fd = enclave_image,
  315. .is_binary = true, .addr = 0, .size = 0 /* set below */,
  316. .prot = 0, .type = SGX_PAGE_REG
  317. };
  318. struct mem_area* pal_area = &areas[area_num++];
  319. ret = scan_enclave_binary(enclave_image, &pal_area->addr, &pal_area->size, &enclave_entry_addr);
  320. if (ret < 0) {
  321. SGX_DBG(DBG_E, "Scanning Pal binary (%s) failed: %d\n", ENCLAVE_FILENAME, -ret);
  322. goto out;
  323. }
  324. struct mem_area* exec_area = NULL;
  325. if (enclave->exec != -1) {
  326. areas[area_num] = (struct mem_area) {
  327. .desc = "exec", .skip_eextend = false, .fd = enclave->exec,
  328. .is_binary = true, .addr = 0, .size = 0 /* set below */,
  329. .prot = PROT_WRITE, .type = SGX_PAGE_REG
  330. };
  331. exec_area = &areas[area_num++];
  332. ret = scan_enclave_binary(enclave->exec, &exec_area->addr, &exec_area->size, NULL);
  333. if (ret < 0) {
  334. SGX_DBG(DBG_E, "Scanning application binary failed: %d\n", -ret);
  335. goto out;
  336. }
  337. }
  338. unsigned long populating = enclave->size;
  339. for (int i = 0 ; i < area_num ; i++) {
  340. if (areas[i].addr)
  341. continue;
  342. areas[i].addr = populating - areas[i].size;
  343. populating = SATURATED_P_SUB(areas[i].addr, MEMORY_GAP, 0);
  344. }
  345. enclave_entry_addr += pal_area->addr;
  346. if (exec_area) {
  347. if (exec_area->addr + exec_area->size > pal_area->addr - MEMORY_GAP) {
  348. SGX_DBG(DBG_E, "Application binary overlaps with Pal binary\n");
  349. ret = -EINVAL;
  350. goto out;
  351. }
  352. if (exec_area->addr + exec_area->size + MEMORY_GAP < populating) {
  353. if (populating > heap_min) {
  354. unsigned long addr = exec_area->addr + exec_area->size + MEMORY_GAP;
  355. if (addr < heap_min)
  356. addr = heap_min;
  357. areas[area_num] = (struct mem_area) {
  358. .desc = "free", .skip_eextend = true, .fd = -1,
  359. .is_binary = false, .addr = addr, .size = populating - addr,
  360. .prot = PROT_READ | PROT_WRITE | PROT_EXEC, .type = SGX_PAGE_REG
  361. };
  362. area_num++;
  363. }
  364. populating = SATURATED_P_SUB(exec_area->addr, MEMORY_GAP, 0);
  365. }
  366. }
  367. if (populating > heap_min) {
  368. areas[area_num] = (struct mem_area) {
  369. .desc = "free", .skip_eextend = true, .fd = -1,
  370. .is_binary = false, .addr = heap_min, .size = populating - heap_min,
  371. .prot = PROT_READ | PROT_WRITE | PROT_EXEC, .type = SGX_PAGE_REG
  372. };
  373. area_num++;
  374. }
  375. for (int i = 0 ; i < area_num ; i++) {
  376. if (areas[i].fd != -1 && areas[i].is_binary) {
  377. ret = load_enclave_binary(&enclave_secs, areas[i].fd, areas[i].addr, areas[i].prot);
  378. if (ret < 0) {
  379. SGX_DBG(DBG_E, "Loading enclave binary failed: %d\n", -ret);
  380. goto out;
  381. }
  382. continue;
  383. }
  384. void * data = NULL;
  385. if (!strcmp_static(areas[i].desc, "tls")) {
  386. data = (void *) INLINE_SYSCALL(mmap, 6, NULL, areas[i].size,
  387. PROT_READ|PROT_WRITE,
  388. MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
  389. if (IS_ERR_P(data) || data == NULL) {
  390. /* Note that Graphene currently doesn't handle 0x0 addresses */
  391. SGX_DBG(DBG_E, "Allocating memory for tls pages failed\n");
  392. goto out;
  393. }
  394. for (uint32_t t = 0 ; t < enclave->thread_num ; t++) {
  395. struct enclave_tls * gs = data + pagesize * t;
  396. memset(gs, 0, pagesize);
  397. assert(sizeof(*gs) <= pagesize);
  398. gs->common.self = (PAL_TCB *)(
  399. tls_area->addr + pagesize * t + enclave_secs.base);
  400. gs->enclave_size = enclave->size;
  401. gs->tcs_offset = tcs_area->addr + pagesize * t;
  402. gs->initial_stack_offset =
  403. stack_areas[t].addr + ENCLAVE_STACK_SIZE;
  404. gs->ssa = (void *) ssa_area->addr +
  405. enclave->ssaframesize * SSAFRAMENUM * t +
  406. enclave_secs.base;
  407. gs->gpr = gs->ssa +
  408. enclave->ssaframesize - sizeof(sgx_pal_gpr_t);
  409. gs->manifest_size = manifest_size;
  410. gs->heap_min = (void *) enclave_secs.base + heap_min;
  411. gs->heap_max = (void *) enclave_secs.base + pal_area->addr - MEMORY_GAP;
  412. if (exec_area) {
  413. gs->exec_addr = (void *) enclave_secs.base + exec_area->addr;
  414. gs->exec_size = exec_area->size;
  415. }
  416. gs->thread = NULL;
  417. }
  418. } else if (!strcmp_static(areas[i].desc, "tcs")) {
  419. data = (void *) INLINE_SYSCALL(mmap, 6, NULL, areas[i].size,
  420. PROT_READ|PROT_WRITE,
  421. MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
  422. if (IS_ERR_P(data) || data == NULL) {
  423. /* Note that Graphene currently doesn't handle 0x0 addresses */
  424. SGX_DBG(DBG_E, "Allocating memory for tcs pages failed\n");
  425. goto out;
  426. }
  427. for (uint32_t t = 0 ; t < enclave->thread_num ; t++) {
  428. sgx_arch_tcs_t * tcs = data + pagesize * t;
  429. memset(tcs, 0, pagesize);
  430. tcs->ossa = ssa_area->addr +
  431. enclave->ssaframesize * SSAFRAMENUM * t;
  432. tcs->nssa = SSAFRAMENUM;
  433. tcs->oentry = enclave_entry_addr;
  434. tcs->ofs_base = 0;
  435. tcs->ogs_base = tls_area->addr + t * pagesize;
  436. tcs->ofs_limit = 0xfff;
  437. tcs->ogs_limit = 0xfff;
  438. tcs_addrs[t] = (void *) enclave_secs.base + tcs_area->addr + pagesize * t;
  439. }
  440. } else if (areas[i].fd != -1) {
  441. data = (void *) INLINE_SYSCALL(mmap, 6, NULL, areas[i].size,
  442. PROT_READ,
  443. MAP_FILE|MAP_PRIVATE,
  444. areas[i].fd, 0);
  445. if (IS_ERR_P(data) || data == NULL) {
  446. /* Note that Graphene currently doesn't handle 0x0 addresses */
  447. SGX_DBG(DBG_E, "Allocating memory for file %s failed\n", areas[i].desc);
  448. goto out;
  449. }
  450. }
  451. ret = add_pages_to_enclave(&enclave_secs, (void *) areas[i].addr, data, areas[i].size,
  452. areas[i].type, areas[i].prot, areas[i].skip_eextend, areas[i].desc);
  453. if (data)
  454. INLINE_SYSCALL(munmap, 2, data, areas[i].size);
  455. if (ret < 0) {
  456. SGX_DBG(DBG_E, "Adding pages (%s) to enclave failed: %d\n", areas[i].desc, -ret);
  457. goto out;
  458. }
  459. }
  460. ret = init_enclave(&enclave_secs, &enclave_sigstruct, &enclave_token);
  461. if (ret < 0) {
  462. SGX_DBG(DBG_E, "Initializing enclave failed: %d\n", -ret);
  463. goto out;
  464. }
  465. create_tcs_mapper((void *) enclave_secs.base + tcs_area->addr, enclave->thread_num);
  466. struct enclave_dbginfo * dbg = (void *)
  467. INLINE_SYSCALL(mmap, 6, DBGINFO_ADDR,
  468. sizeof(struct enclave_dbginfo),
  469. PROT_READ|PROT_WRITE,
  470. MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
  471. -1, 0);
  472. if (IS_ERR_P(dbg)) {
  473. SGX_DBG(DBG_E, "Cannot allocate debug information (GDB will not work)\n");
  474. } else {
  475. dbg->pid = INLINE_SYSCALL(getpid, 0);
  476. dbg->base = enclave->baseaddr;
  477. dbg->size = enclave->size;
  478. dbg->ssaframesize = enclave->ssaframesize;
  479. dbg->aep = async_exit_pointer;
  480. dbg->thread_tids[0] = dbg->pid;
  481. for (int i = 0 ; i < MAX_DBG_THREADS ; i++)
  482. dbg->tcs_addrs[i] = tcs_addrs[i];
  483. }
  484. ret = 0;
  485. out:
  486. if (enclave_image >= 0)
  487. INLINE_SYSCALL(close, 1, enclave_image);
  488. return ret;
  489. }
  490. static int mcast_s (int port)
  491. {
  492. struct sockaddr_in addr;
  493. int ret = 0;
  494. addr.sin_family = AF_INET;
  495. addr.sin_addr.s_addr = INADDR_ANY;
  496. addr.sin_port = htons(port);
  497. int fd = INLINE_SYSCALL(socket, 3, AF_INET, SOCK_DGRAM, 0);
  498. if (IS_ERR(fd))
  499. return -ERRNO(fd);
  500. ret = INLINE_SYSCALL(setsockopt, 5, fd, IPPROTO_IP, IP_MULTICAST_IF,
  501. &addr.sin_addr.s_addr, sizeof(addr.sin_addr.s_addr));
  502. if (IS_ERR(ret))
  503. return -ERRNO(ret);
  504. return fd;
  505. }
  506. static int mcast_c (int port)
  507. {
  508. int ret = 0, fd;
  509. struct sockaddr_in addr;
  510. addr.sin_family = AF_INET;
  511. addr.sin_addr.s_addr = INADDR_ANY;
  512. addr.sin_port = htons(port);
  513. fd = INLINE_SYSCALL(socket, 3, AF_INET, SOCK_DGRAM, 0);
  514. if (IS_ERR(fd))
  515. return -ERRNO(fd);
  516. int reuse = 1;
  517. INLINE_SYSCALL(setsockopt, 5, fd, SOL_SOCKET, SO_REUSEADDR,
  518. &reuse, sizeof(reuse));
  519. ret = INLINE_SYSCALL(bind, 3, fd, &addr, sizeof(addr));
  520. if (IS_ERR(ret))
  521. return -ERRNO(ret);
  522. ret = INLINE_SYSCALL(setsockopt, 5, fd, IPPROTO_IP, IP_MULTICAST_IF,
  523. &addr.sin_addr.s_addr, sizeof(addr.sin_addr.s_addr));
  524. if (IS_ERR(ret))
  525. return -ERRNO(ret);
  526. inet_pton4(MCAST_GROUP, sizeof(MCAST_GROUP) - 1,
  527. &addr.sin_addr.s_addr);
  528. struct ip_mreq group;
  529. group.imr_multiaddr.s_addr = addr.sin_addr.s_addr;
  530. group.imr_interface.s_addr = INADDR_ANY;
  531. ret = INLINE_SYSCALL(setsockopt, 5, fd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
  532. &group, sizeof(group));
  533. if (IS_ERR(ret))
  534. return -ERRNO(ret);
  535. return fd;
  536. }
  537. static unsigned long randval = 0;
  538. void getrand (void * buffer, size_t size)
  539. {
  540. size_t bytes = 0;
  541. while (bytes + sizeof(uint64_t) <= size) {
  542. *(uint64_t*) (buffer + bytes) = randval;
  543. randval = hash64(randval);
  544. bytes += sizeof(uint64_t);
  545. }
  546. if (bytes < size) {
  547. memcpy(buffer + bytes, &randval, size - bytes);
  548. randval = hash64(randval);
  549. }
  550. }
  551. static void create_instance (struct pal_sec * pal_sec)
  552. {
  553. PAL_NUM id;
  554. getrand(&id, sizeof(id));
  555. snprintf(pal_sec->pipe_prefix, sizeof(pal_sec->pipe_prefix), "/graphene/%016lx/", id);
  556. pal_sec->instance_id = id;
  557. }
  558. static int load_manifest (int fd, struct config_store ** config_ptr)
  559. {
  560. int ret = 0;
  561. int nbytes = INLINE_SYSCALL(lseek, 3, fd, 0, SEEK_END);
  562. if (IS_ERR(nbytes)) {
  563. SGX_DBG(DBG_E, "Cannot detect size of manifest file\n");
  564. return -ERRNO(nbytes);
  565. }
  566. struct config_store * config = malloc(sizeof(struct config_store));
  567. if (!config) {
  568. SGX_DBG(DBG_E, "Not enough memory for config_store of manifest\n");
  569. return -ENOMEM;
  570. }
  571. void * config_raw = (void *)
  572. INLINE_SYSCALL(mmap, 6, NULL, nbytes, PROT_READ, MAP_PRIVATE, fd, 0);
  573. if (IS_ERR_P(config_raw)) {
  574. SGX_DBG(DBG_E, "Cannot mmap manifest file\n");
  575. ret = -ERRNO_P(config_raw);
  576. goto out;
  577. }
  578. config->raw_data = config_raw;
  579. config->raw_size = nbytes;
  580. config->malloc = malloc;
  581. config->free = NULL;
  582. const char * errstring = NULL;
  583. ret = read_config(config, NULL, &errstring);
  584. if (ret < 0) {
  585. SGX_DBG(DBG_E, "Cannot read manifest: %s\n", errstring);
  586. goto out;
  587. }
  588. *config_ptr = config;
  589. ret = 0;
  590. out:
  591. if (ret < 0) {
  592. free(config);
  593. if (!IS_ERR_P(config_raw))
  594. INLINE_SYSCALL(munmap, 2, config_raw, nbytes);
  595. }
  596. return ret;
  597. }
  598. /*
  599. * Returns the number of online CPUs read from /sys/devices/system/cpu/online, -errno on failure.
  600. * Understands complex formats like "1,3-5,6".
  601. */
  602. static int get_cpu_count(void) {
  603. int fd = INLINE_SYSCALL(open, 3, "/sys/devices/system/cpu/online", O_RDONLY|O_CLOEXEC, 0);
  604. if (fd < 0)
  605. return unix_to_pal_error(ERRNO(fd));
  606. char buf[64];
  607. int ret = INLINE_SYSCALL(read, 3, fd, buf, sizeof(buf) - 1);
  608. if (ret < 0) {
  609. INLINE_SYSCALL(close, 1, fd);
  610. return unix_to_pal_error(ERRNO(ret));
  611. }
  612. buf[ret] = '\0'; /* ensure null-terminated buf even in partial read */
  613. char* end;
  614. char* ptr = buf;
  615. int cpu_count = 0;
  616. while (*ptr) {
  617. while (*ptr == ' ' || *ptr == '\t' || *ptr == ',')
  618. ptr++;
  619. int firstint = (int)strtol(ptr, &end, 10);
  620. if (ptr == end)
  621. break;
  622. if (*end == '\0' || *end == ',') {
  623. /* single CPU index, count as one more CPU */
  624. cpu_count++;
  625. } else if (*end == '-') {
  626. /* CPU range, count how many CPUs in range */
  627. ptr = end + 1;
  628. int secondint = (int)strtol(ptr, &end, 10);
  629. if (secondint > firstint)
  630. cpu_count += secondint - firstint + 1; // inclusive (e.g., 0-7, or 8-16)
  631. }
  632. ptr = end;
  633. }
  634. INLINE_SYSCALL(close, 1, fd);
  635. if (cpu_count == 0)
  636. return -PAL_ERROR_STREAMNOTEXIST;
  637. return cpu_count;
  638. }
  639. static int load_enclave (struct pal_enclave * enclave,
  640. int manifest_fd,
  641. char * manifest_uri,
  642. char * exec_uri,
  643. char * args, size_t args_size,
  644. char * env, size_t env_size,
  645. bool exec_uri_inferred)
  646. {
  647. struct pal_sec * pal_sec = &enclave->pal_sec;
  648. int ret;
  649. struct timeval tv;
  650. #if PRINT_ENCLAVE_STAT == 1
  651. INLINE_SYSCALL(gettimeofday, 2, &tv, NULL);
  652. pal_sec->start_time = tv.tv_sec * 1000000UL + tv.tv_usec;
  653. #endif
  654. ret = open_gsgx();
  655. if (ret < 0)
  656. return ret;
  657. if (!is_wrfsbase_supported())
  658. return -EPERM;
  659. INLINE_SYSCALL(gettimeofday, 2, &tv, NULL);
  660. randval = tv.tv_sec * 1000000UL + tv.tv_usec;
  661. pal_sec->pid = INLINE_SYSCALL(getpid, 0);
  662. pal_sec->uid = INLINE_SYSCALL(getuid, 0);
  663. pal_sec->gid = INLINE_SYSCALL(getgid, 0);
  664. int num_cpus = get_cpu_count();
  665. if (num_cpus < 0) {
  666. return num_cpus;
  667. }
  668. pal_sec->num_cpus = num_cpus;
  669. #ifdef DEBUG
  670. size_t env_i = 0;
  671. while (env_i < env_size) {
  672. if (!strcmp_static(&env[env_i], "IN_GDB=1")) {
  673. SGX_DBG(DBG_I, "[ Running under GDB ]\n");
  674. pal_sec->in_gdb = true;
  675. } else if (strstartswith_static(&env[env_i], "LD_PRELOAD=")) {
  676. uint64_t env_i_size = strnlen(&env[env_i], env_size - env_i) + 1;
  677. memmove(&env[env_i], &env[env_i + env_i_size], env_size - env_i - env_i_size);
  678. env_size -= env_i_size;
  679. continue;
  680. }
  681. env_i += strnlen(&env[env_i], env_size - env_i) + 1;
  682. }
  683. #endif
  684. enclave->manifest = manifest_fd;
  685. ret = load_manifest(enclave->manifest, &enclave->config);
  686. if (ret < 0) {
  687. SGX_DBG(DBG_E, "Invalid manifest: %s\n", manifest_uri);
  688. return -EINVAL;
  689. }
  690. char cfgbuf[CONFIG_MAX];
  691. const char * errstring;
  692. // A manifest can specify an executable with a different base name
  693. // than the manifest itself. Always give the exec field of the manifest
  694. // precedence if specified.
  695. if (get_config(enclave->config, "loader.exec", cfgbuf, sizeof(cfgbuf)) > 0) {
  696. exec_uri = resolve_uri(cfgbuf, &errstring);
  697. exec_uri_inferred = false;
  698. if (!exec_uri) {
  699. SGX_DBG(DBG_E, "%s: %s\n", errstring, cfgbuf);
  700. return -EINVAL;
  701. }
  702. }
  703. enclave->exec = INLINE_SYSCALL(open, 3, exec_uri + static_strlen("file:"),
  704. O_RDONLY|O_CLOEXEC, 0);
  705. if (IS_ERR(enclave->exec)) {
  706. if (exec_uri_inferred) {
  707. // It is valid for an enclave not to have an executable.
  708. // We need to catch the case where we inferred the executable
  709. // from the manifest file name, but it doesn't exist, and let
  710. // the enclave go a bit further. Go ahead and warn the user,
  711. // though.
  712. SGX_DBG(DBG_I, "Inferred executable cannot be opened: %s. This may be ok, "
  713. "or may represent a manifest misconfiguration. This typically "
  714. "represents advanced usage, and if it is not what you intended, "
  715. "try setting the loader.exec field in the manifest.\n", exec_uri);
  716. enclave->exec = -1;
  717. } else {
  718. SGX_DBG(DBG_E, "Cannot open executable %s\n", exec_uri);
  719. return -EINVAL;
  720. }
  721. }
  722. if (get_config(enclave->config, "sgx.sigfile", cfgbuf, sizeof(cfgbuf)) < 0) {
  723. SGX_DBG(DBG_E, "Sigstruct file not found ('sgx.sigfile' must be specified in manifest)\n");
  724. return -EINVAL;
  725. }
  726. char * sig_uri = resolve_uri(cfgbuf, &errstring);
  727. if (!sig_uri) {
  728. SGX_DBG(DBG_E, "%s: %s\n", errstring, cfgbuf);
  729. return -EINVAL;
  730. }
  731. if (!strendswith(sig_uri, ".sig")) {
  732. SGX_DBG(DBG_E, "Invalid sigstruct file URI as %s\n", cfgbuf);
  733. free(sig_uri);
  734. return -EINVAL;
  735. }
  736. enclave->sigfile = INLINE_SYSCALL(open, 3, sig_uri + static_strlen("file:"),
  737. O_RDONLY|O_CLOEXEC, 0);
  738. if (IS_ERR(enclave->sigfile)) {
  739. SGX_DBG(DBG_E, "Cannot open sigstruct file %s\n", sig_uri);
  740. free(sig_uri);
  741. return -EINVAL;
  742. }
  743. char * token_uri = alloc_concat(sig_uri, strlen(sig_uri) - static_strlen(".sig"), ".token", -1);
  744. free(sig_uri);
  745. if (!token_uri) {
  746. INLINE_SYSCALL(close, 1, enclave->sigfile);
  747. return -ENOMEM;
  748. }
  749. enclave->token = INLINE_SYSCALL(open, 3, token_uri + static_strlen("file:"),
  750. O_RDONLY|O_CLOEXEC, 0);
  751. if (IS_ERR(enclave->token)) {
  752. SGX_DBG(DBG_E, "Cannot open token \'%s\'. Use \'"
  753. PAL_FILE("pal-sgx-get-token")
  754. "\' on the runtime host or run \'make SGX=1 sgx-tokens\' "
  755. "in the Graphene source to create the token file.\n",
  756. token_uri);
  757. free(token_uri);
  758. return -EINVAL;
  759. }
  760. SGX_DBG(DBG_I, "Token file: %s\n", token_uri);
  761. free(token_uri);
  762. ret = initialize_enclave(enclave);
  763. if (ret < 0)
  764. return ret;
  765. if (!pal_sec->instance_id)
  766. create_instance(&enclave->pal_sec);
  767. memcpy(pal_sec->manifest_name, manifest_uri, strlen(manifest_uri) + 1);
  768. if (enclave->exec == -1) {
  769. memset(pal_sec->exec_name, 0, sizeof(PAL_SEC_STR));
  770. } else {
  771. memcpy(pal_sec->exec_name, exec_uri, strlen(exec_uri) + 1);
  772. }
  773. if (!pal_sec->mcast_port) {
  774. unsigned short mcast_port;
  775. getrand(&mcast_port, sizeof(unsigned short));
  776. pal_sec->mcast_port = mcast_port > 1024 ? mcast_port : mcast_port + 1024;
  777. }
  778. if ((ret = mcast_s(pal_sec->mcast_port)) >= 0) {
  779. pal_sec->mcast_srv = ret;
  780. if ((ret = mcast_c(pal_sec->mcast_port)) >= 0) {
  781. pal_sec->mcast_cli = ret;
  782. } else {
  783. INLINE_SYSCALL(close, 1, pal_sec->mcast_srv);
  784. pal_sec->mcast_srv = 0;
  785. }
  786. }
  787. ret = sgx_signal_setup();
  788. if (ret < 0)
  789. return ret;
  790. ret = init_aesm_targetinfo(&pal_sec->aesm_targetinfo);
  791. if (ret < 0)
  792. return ret;
  793. void* alt_stack = (void*)INLINE_SYSCALL(mmap, 6, NULL, ALT_STACK_SIZE,
  794. PROT_READ | PROT_WRITE,
  795. MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  796. if (IS_ERR_P(alt_stack))
  797. return -ENOMEM;
  798. /* initialize TCB at the top of the alternative stack */
  799. PAL_TCB_LINUX* tcb = alt_stack + ALT_STACK_SIZE - sizeof(PAL_TCB_LINUX);
  800. tcb->common.self = &tcb->common;
  801. tcb->alt_stack = alt_stack;
  802. tcb->stack = NULL; /* main thread uses the stack provided by Linux */
  803. tcb->tcs = NULL; /* initialized by child thread */
  804. pal_thread_init(tcb);
  805. /* start running trusted PAL */
  806. ecall_enclave_start(args, args_size, env, env_size);
  807. #if PRINT_ENCLAVE_STAT == 1
  808. PAL_NUM exit_time = 0;
  809. INLINE_SYSCALL(gettimeofday, 2, &tv, NULL);
  810. exit_time = tv.tv_sec * 1000000UL + tv.tv_usec;
  811. #endif
  812. unmap_tcs();
  813. INLINE_SYSCALL(munmap, 2, alt_stack, ALT_STACK_SIZE);
  814. INLINE_SYSCALL(exit, 0);
  815. return 0;
  816. }
  817. /* Grow stack of main thread to THREAD_STACK_SIZE by allocating a large dummy array and probing
  818. * each stack page (Linux dynamically grows the stack of the main thread but gets confused with
  819. * huge-jump stack accesses coming from within the enclave). Note that other, non-main threads
  820. * are created manually via clone(.., THREAD_STACK_SIZE, ..) and thus do not need this hack. */
  821. static void __attribute__ ((noinline)) force_linux_to_grow_stack() {
  822. char dummy[THREAD_STACK_SIZE];
  823. for (uint64_t i = 0; i < sizeof(dummy); i += PRESET_PAGESIZE) {
  824. /* touch each page on the stack just to make it is not optimized away */
  825. __asm__ volatile("movq %0, %%rbx\r\n"
  826. "movq (%%rbx), %%rbx\r\n"
  827. : : "r"(&dummy[i]) : "%rbx");
  828. }
  829. }
/*
 * Entry point of the untrusted SGX PAL loader.
 *
 * Usage: pal-sgx [executable|manifest] args ...
 *
 * Determines whether the first argument is an ELF executable or a manifest,
 * derives the "<base>.manifest.sgx" file name, opens the manifest, and hands
 * everything to load_enclave(). On success load_enclave() terminates the
 * process itself (it ends with INLINE_SYSCALL(exit, 0)), so reaching the
 * cleanup at `out:` after it means an error occurred. Returns 0 or a
 * negative errno-style code.
 */
int main (int argc, char ** argv, char ** envp)
{
    char * manifest_uri = NULL;
    char * exec_uri = NULL;
    const char * pal_loader = argv[0];
    int fd = -1;
    int ret = 0;
    bool exec_uri_inferred = false; // Handle the case where the exec uri is
                                    // inferred from the manifest name somewhat
                                    // differently

    /* pre-fault the main thread's stack before any enclave code runs */
    force_linux_to_grow_stack();

    /* skip argv[0] (the loader binary itself) */
    argc--;
    argv++;

    /* positive if this process was spawned by another Graphene instance (the
     * parent then supplies pal_sec, including exec_name), 0 for a fresh
     * process, negative on error */
    int is_child = sgx_init_child_process(&pal_enclave.pal_sec);
    if (is_child < 0) {
        ret = is_child;
        goto out;
    }

    if (!is_child) {
        /* occupy PROC_INIT_FD so no one will use it */
        INLINE_SYSCALL(dup2, 2, 0, PROC_INIT_FD);

        if (!argc)
            goto usage;

        /* normalize the first argument into a "file:" URI
         * (NOTE(review): this assumes !strcmp_static(s, "file:") means s
         * already starts with the "file:" prefix — confirm against
         * strcmp_static's definition) */
        if (!strcmp_static(argv[0], "file:")) {
            exec_uri = alloc_concat(argv[0], -1, NULL, -1);
        } else {
            exec_uri = alloc_concat("file:", -1, argv[0], -1);
        }
    } else {
        /* child: executable name was inherited from the parent via pal_sec */
        exec_uri = alloc_concat(pal_enclave.pal_sec.exec_name, -1, NULL, -1);
    }

    if (!exec_uri) {
        ret = -ENOMEM;
        goto out;
    }

    fd = INLINE_SYSCALL(open, 3, exec_uri + static_strlen("file:"), O_RDONLY|O_CLOEXEC, 0);
    if (IS_ERR(fd)) {
        SGX_DBG(DBG_E, "Input file not found: %s\n", exec_uri);
        ret = fd;            /* note: `usage:` below overwrites this with -EINVAL */
        goto usage;
    }

    /* peek at the magic bytes to tell an ELF executable from a manifest */
    char file_first_four_bytes[4];
    ret = INLINE_SYSCALL(read, 3, fd, file_first_four_bytes, sizeof(file_first_four_bytes));
    if (IS_ERR(ret)) {
        goto out;
    }
    if (ret != sizeof(file_first_four_bytes)) {
        /* short read: file is smaller than 4 bytes, cannot be either kind */
        ret = -EINVAL;
        goto out;
    }

    /* build the manifest file name from the argument's base name:
     * "<base>.manifest" -> "<base>.manifest.sgx", anything else (that is not
     * already "*.manifest.sgx") -> "<arg>.manifest.sgx" */
    char manifest_base_name[URI_MAX];
    size_t manifest_base_name_len = sizeof(manifest_base_name);
    ret = get_base_name(exec_uri + static_strlen("file:"), manifest_base_name,
                        &manifest_base_name_len);
    if (ret < 0) {
        goto out;
    }

    if (strendswith(manifest_base_name, ".manifest")) {
        if (!strcpy_static(manifest_base_name + manifest_base_name_len, ".sgx",
                           sizeof(manifest_base_name) - manifest_base_name_len)) {
            ret = -E2BIG;
            goto out;
        }
    } else if (!strendswith(manifest_base_name, ".manifest.sgx")) {
        if (!strcpy_static(manifest_base_name + manifest_base_name_len, ".manifest.sgx",
                           sizeof(manifest_base_name) - manifest_base_name_len)) {
            ret = -E2BIG;
            goto out;
        }
    }

    int manifest_fd = -1;

    if (memcmp(file_first_four_bytes, "\177ELF", sizeof(file_first_four_bytes))) {
        /* exec_uri doesn't refer to ELF executable, so it must refer to the
         * manifest. Verify this and update exec_uri with the manifest suffix
         * removed.
         */
        size_t exec_uri_len = strlen(exec_uri);
        if (strendswith(exec_uri, ".manifest")) {
            exec_uri[exec_uri_len - static_strlen(".manifest")] = '\0';
        } else if (strendswith(exec_uri, ".manifest.sgx")) {
            /* the argument itself is the SGX manifest: reuse its fd, rewound
             * past the 4 magic bytes read above */
            INLINE_SYSCALL(lseek, 3, fd, 0, SEEK_SET);
            manifest_fd = fd;
            exec_uri[exec_uri_len - static_strlen(".manifest.sgx")] = '\0';
        } else {
            SGX_DBG(DBG_E, "Invalid manifest file specified: %s\n", exec_uri);
            goto usage;
        }

        exec_uri_inferred = true;
    }

    if (manifest_fd == -1) {
        /* the argument was an ELF executable or a plain ".manifest": close it
         * and open the derived "<base>.manifest.sgx" instead (fd and
         * manifest_fd alias the same descriptor from here on) */
        INLINE_SYSCALL(close, 1, fd);
        fd = manifest_fd = INLINE_SYSCALL(open, 3, manifest_base_name, O_RDONLY|O_CLOEXEC, 0);
        if (IS_ERR(fd)) {
            SGX_DBG(DBG_E, "Cannot open manifest file: %s\n", manifest_base_name);
            goto usage;
        }
    }

    manifest_uri = alloc_concat("file:", static_strlen("file:"), manifest_base_name, -1);
    if (!manifest_uri) {
        ret = -ENOMEM;
        goto out;
    }

    SGX_DBG(DBG_I, "Manifest file: %s\n", manifest_uri);
    if (exec_uri_inferred)
        SGX_DBG(DBG_I, "Inferred executable file: %s\n", exec_uri);
    else
        SGX_DBG(DBG_I, "Executable file: %s\n", exec_uri);

    /*
     * While C does not guarantee that the argv[i] and envp[i] strings are
     * continuous we know that we are running on Linux, which does this. This
     * saves us creating a copy of all argv and envp strings.
     */
    char * args = argv[0];
    size_t args_size = argc > 0 ? (argv[argc - 1] - argv[0]) + strlen(argv[argc - 1]) + 1: 0;

    int envc = 0;
    while (envp[envc] != NULL) {
        envc++;
    }
    char * env = envp[0];
    size_t env_size = envc > 0 ? (envp[envc - 1] - envp[0]) + strlen(envp[envc - 1]) + 1: 0;

    /* create and enter the enclave; on success this exits the process and
     * never returns, so a return value here always indicates failure */
    ret = load_enclave(&pal_enclave, manifest_fd, manifest_uri, exec_uri, args, args_size, env, env_size,
                       exec_uri_inferred);

out:
    /* close everything load_enclave() may have opened, then the manifest fd */
    if (pal_enclave.exec >= 0)
        INLINE_SYSCALL(close, 1, pal_enclave.exec);
    if (pal_enclave.sigfile >= 0)
        INLINE_SYSCALL(close, 1, pal_enclave.sigfile);
    if (pal_enclave.token >= 0)
        INLINE_SYSCALL(close, 1, pal_enclave.token);
    if (!IS_ERR(fd))
        INLINE_SYSCALL(close, 1, fd);
    free(exec_uri);
    free(manifest_uri);
    return ret;

usage:
    SGX_DBG(DBG_E, "USAGE: %s [executable|manifest] args ...\n", pal_loader);
    ret = -EINVAL;
    goto out;
}