sgx_main.c 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151
  1. #include <pal_linux.h>
  2. #include <pal_linux_error.h>
  3. #include <pal_rtld.h>
  4. #include <hex.h>
  5. #include "sgx_internal.h"
  6. #include "sgx_tls.h"
  7. #include "sgx_enclave.h"
  8. #include "debugger/sgx_gdb.h"
  9. #include <asm/fcntl.h>
  10. #include <asm/socket.h>
  11. #include <linux/fs.h>
  12. #include <linux/in.h>
  13. #include <linux/in6.h>
  14. #include <asm/errno.h>
  15. #include <ctype.h>
  16. #include <sysdep.h>
  17. #include <sysdeps/generic/ldsodefs.h>
/* Host page size used throughout this file; fixed from PRESET_PAGESIZE at
 * build time. */
size_t g_page_size = PRESET_PAGESIZE;

/* The single enclave instance managed by this untrusted PAL process. */
struct pal_enclave pal_enclave;
  20. static inline
  21. char * alloc_concat(const char * p, size_t plen,
  22. const char * s, size_t slen)
  23. {
  24. plen = (plen != (size_t)-1) ? plen : (p ? strlen(p) : 0);
  25. slen = (slen != (size_t)-1) ? slen : (s ? strlen(s) : 0);
  26. char * buf = malloc(plen + slen + 1);
  27. if (!buf)
  28. return NULL;
  29. if (plen)
  30. memcpy(buf, p, plen);
  31. if (slen)
  32. memcpy(buf + plen, s, slen);
  33. buf[plen + slen] = '\0';
  34. return buf;
  35. }
  36. static unsigned long parse_int (const char * str)
  37. {
  38. unsigned long num = 0;
  39. int radix = 10;
  40. char c;
  41. if (str[0] == '0') {
  42. str++;
  43. radix = 8;
  44. if (str[0] == 'x') {
  45. str++;
  46. radix = 16;
  47. }
  48. }
  49. while ((c = *(str++))) {
  50. int8_t val = hex2dec(c);
  51. if (val < 0)
  52. break;
  53. if ((uint8_t) val >= radix)
  54. break;
  55. num = num * radix + (uint8_t) val;
  56. }
  57. if (c == 'G' || c == 'g')
  58. num *= 1024 * 1024 * 1024;
  59. else if (c == 'M' || c == 'm')
  60. num *= 1024 * 1024;
  61. else if (c == 'K' || c == 'k')
  62. num *= 1024;
  63. return num;
  64. }
  65. static char * resolve_uri (const char * uri, const char ** errstring)
  66. {
  67. if (!strstartswith_static(uri, "file:")) {
  68. *errstring = "Invalid URI";
  69. return NULL;
  70. }
  71. char path_buf[URI_MAX];
  72. size_t len = URI_MAX;
  73. int ret = get_norm_path(uri + 5, path_buf, &len);
  74. if (ret < 0) {
  75. *errstring = "Invalid URI";
  76. return NULL;
  77. }
  78. return alloc_concat("file:", static_strlen("file:"), path_buf, len);
  79. }
  80. static
  81. int scan_enclave_binary (int fd, unsigned long * base, unsigned long * size,
  82. unsigned long * entry)
  83. {
  84. int ret = 0;
  85. if (IS_ERR(ret = INLINE_SYSCALL(lseek, 3, fd, 0, SEEK_SET)))
  86. return -ERRNO(ret);
  87. char filebuf[FILEBUF_SIZE];
  88. ret = INLINE_SYSCALL(read, 3, fd, filebuf, FILEBUF_SIZE);
  89. if (IS_ERR(ret))
  90. return -ERRNO(ret);
  91. if ((size_t)ret < sizeof(ElfW(Ehdr)))
  92. return -ENOEXEC;
  93. const ElfW(Ehdr) * header = (void *) filebuf;
  94. const ElfW(Phdr) * phdr = (void *) filebuf + header->e_phoff;
  95. const ElfW(Phdr) * ph;
  96. if (memcmp(header->e_ident, ELFMAG, SELFMAG) != 0)
  97. return -ENOEXEC;
  98. struct loadcmd {
  99. ElfW(Addr) mapstart, mapend;
  100. } loadcmds[16], *c;
  101. int nloadcmds = 0;
  102. for (ph = phdr ; ph < &phdr[header->e_phnum] ; ph++)
  103. if (ph->p_type == PT_LOAD) {
  104. if (nloadcmds == 16)
  105. return -EINVAL;
  106. c = &loadcmds[nloadcmds++];
  107. c->mapstart = ALLOC_ALIGN_DOWN(ph->p_vaddr);
  108. c->mapend = ALLOC_ALIGN_UP(ph->p_vaddr + ph->p_memsz);
  109. }
  110. *base = loadcmds[0].mapstart;
  111. *size = loadcmds[nloadcmds - 1].mapend - loadcmds[0].mapstart;
  112. if (entry)
  113. *entry = header->e_entry;
  114. return 0;
  115. }
  116. static
  117. int load_enclave_binary (sgx_arch_secs_t * secs, int fd,
  118. unsigned long base, unsigned long prot)
  119. {
  120. int ret = 0;
  121. if (IS_ERR(ret = INLINE_SYSCALL(lseek, 3, fd, 0, SEEK_SET)))
  122. return -ERRNO(ret);
  123. char filebuf[FILEBUF_SIZE];
  124. ret = INLINE_SYSCALL(read, 3, fd, filebuf, FILEBUF_SIZE);
  125. if (IS_ERR(ret))
  126. return -ERRNO(ret);
  127. const ElfW(Ehdr) * header = (void *) filebuf;
  128. const ElfW(Phdr) * phdr = (void *) filebuf + header->e_phoff;
  129. const ElfW(Phdr) * ph;
  130. struct loadcmd {
  131. ElfW(Addr) mapstart, mapend, datastart, dataend, allocend;
  132. unsigned int mapoff;
  133. int prot;
  134. } loadcmds[16], *c;
  135. int nloadcmds = 0;
  136. for (ph = phdr ; ph < &phdr[header->e_phnum] ; ph++)
  137. if (ph->p_type == PT_LOAD) {
  138. if (nloadcmds == 16)
  139. return -EINVAL;
  140. c = &loadcmds[nloadcmds++];
  141. c->mapstart = ALLOC_ALIGN_DOWN(ph->p_vaddr);
  142. c->mapend = ALLOC_ALIGN_UP(ph->p_vaddr + ph->p_filesz);
  143. c->datastart = ph->p_vaddr;
  144. c->dataend = ph->p_vaddr + ph->p_filesz;
  145. c->allocend = ph->p_vaddr + ph->p_memsz;
  146. c->mapoff = ALLOC_ALIGN_DOWN(ph->p_offset);
  147. c->prot = (ph->p_flags & PF_R ? PROT_READ : 0)|
  148. (ph->p_flags & PF_W ? PROT_WRITE : 0)|
  149. (ph->p_flags & PF_X ? PROT_EXEC : 0)|prot;
  150. }
  151. base -= loadcmds[0].mapstart;
  152. for (c = loadcmds; c < &loadcmds[nloadcmds] ; c++) {
  153. ElfW(Addr) zero = c->dataend;
  154. ElfW(Addr) zeroend = ALLOC_ALIGN_UP(c->allocend);
  155. ElfW(Addr) zeropage = ALLOC_ALIGN_UP(zero);
  156. if (zeroend < zeropage)
  157. zeropage = zeroend;
  158. if (c->mapend > c->mapstart) {
  159. void * addr = (void *) INLINE_SYSCALL(mmap, 6, NULL,
  160. c->mapend - c->mapstart,
  161. PROT_READ|PROT_WRITE,
  162. MAP_PRIVATE | MAP_FILE,
  163. fd, c->mapoff);
  164. if (IS_ERR_P(addr))
  165. return -ERRNO_P(addr);
  166. if (c->datastart > c->mapstart)
  167. memset(addr, 0, c->datastart - c->mapstart);
  168. if (zeropage > zero)
  169. memset(addr + zero - c->mapstart, 0, zeropage - zero);
  170. ret = add_pages_to_enclave(secs, (void *) base + c->mapstart, addr,
  171. c->mapend - c->mapstart,
  172. SGX_PAGE_REG, c->prot, 0,
  173. (c->prot & PROT_EXEC) ? "code" : "data");
  174. INLINE_SYSCALL(munmap, 2, addr, c->mapend - c->mapstart);
  175. if (ret < 0)
  176. return ret;
  177. }
  178. if (zeroend > zeropage) {
  179. ret = add_pages_to_enclave(secs, (void *) base + zeropage, NULL,
  180. zeroend - zeropage,
  181. SGX_PAGE_REG, c->prot, false, "bss");
  182. if (ret < 0)
  183. return ret;
  184. }
  185. }
  186. return 0;
  187. }
/*
 * Create, populate and initialize the SGX enclave described by `enclave`.
 *
 * Steps (driven by the manifest in enclave->config):
 *   1. read sgx.enclave_size, sgx.thread_num and sgx.static_address;
 *   2. ECREATE the enclave via create_enclave() using the launch token
 *      and verify the sigstruct is readable;
 *   3. lay out the enclave memory areas top-down (manifest at the very top,
 *      then SSA, TCS, TLS, per-thread stacks, the PAL binary, optionally
 *      the application binary, and "free" heap pages) and EADD them all;
 *   4. EINIT the enclave via init_enclave() and export debug information
 *      for GDB at the well-known DBGINFO_ADDR.
 *
 * Returns 0 on success, a negative errno value on failure.
 */
int initialize_enclave (struct pal_enclave * enclave)
{
    int ret = 0;
    int enclave_image;                        /* fd of the PAL enclave binary */
    sgx_arch_token_t enclave_token;           /* launch token (EINITTOKEN) */
    sgx_arch_enclave_css_t enclave_sigstruct; /* SIGSTRUCT from the .sig file */
    sgx_arch_secs_t enclave_secs;
    unsigned long enclave_entry_addr;         /* PAL entry point (in-enclave) */
    unsigned long heap_min = DEFAULT_HEAP_MIN;

    /* this array may overflow the stack, so we allocate it in BSS */
    static void* tcs_addrs[MAX_DBG_THREADS];

    enclave_image = INLINE_SYSCALL(open, 3, ENCLAVE_FILENAME, O_RDONLY, 0);
    if (IS_ERR(enclave_image)) {
        SGX_DBG(DBG_E, "Cannot find %s\n", ENCLAVE_FILENAME);
        ret = -ERRNO(enclave_image);
        goto out;
    }

    char cfgbuf[CONFIG_MAX];

    /* Reading sgx.enclave_size from manifest */
    if (get_config(enclave->config, "sgx.enclave_size", cfgbuf, sizeof(cfgbuf)) <= 0) {
        SGX_DBG(DBG_E, "Enclave size is not specified\n");
        ret = -EINVAL;
        goto out;
    }

    enclave->size = parse_int(cfgbuf);
    if (!enclave->size || !IS_POWER_OF_2(enclave->size)) {
        SGX_DBG(DBG_E, "Enclave size not a power of two (an SGX-imposed requirement)\n");
        ret = -EINVAL;
        goto out;
    }

    /* Reading sgx.thread_num from manifest; defaults to 1 thread */
    if (get_config(enclave->config, "sgx.thread_num", cfgbuf, sizeof(cfgbuf)) > 0) {
        enclave->thread_num = parse_int(cfgbuf);

        if (enclave->thread_num > MAX_DBG_THREADS) {
            SGX_DBG(DBG_E, "Too many threads to debug\n");
            ret = -EINVAL;
            goto out;
        }
    } else {
        enclave->thread_num = 1;
    }

    /* sgx.static_address = 1 places the enclave low (base aligned down to
     * its size); otherwise use the default high address and let the heap
     * start at enclave offset 0 */
    if (get_config(enclave->config, "sgx.static_address", cfgbuf, sizeof(cfgbuf)) > 0 && cfgbuf[0] == '1') {
        enclave->baseaddr = ALIGN_DOWN_POW2(heap_min, enclave->size);
    } else {
        enclave->baseaddr = ENCLAVE_HIGH_ADDRESS;
        heap_min = 0;
    }

    ret = read_enclave_token(enclave->token, &enclave_token);
    if (ret < 0) {
        SGX_DBG(DBG_E, "Reading enclave token failed: %d\n", -ret);
        goto out;
    }

    ret = read_enclave_sigstruct(enclave->sigfile, &enclave_sigstruct);
    if (ret < 0) {
        SGX_DBG(DBG_E, "Reading enclave sigstruct failed: %d\n", -ret);
        goto out;
    }

    memset(&enclave_secs, 0, sizeof(enclave_secs));
    enclave_secs.base = enclave->baseaddr;
    enclave_secs.size = enclave->size;
    ret = create_enclave(&enclave_secs, &enclave_token);
    if (ret < 0) {
        SGX_DBG(DBG_E, "Creating enclave failed: %d\n", -ret);
        goto out;
    }

    /* create_enclave() filled in ssa_frame_size (in pages); convert to bytes */
    enclave->ssaframesize = enclave_secs.ssa_frame_size * g_page_size;

    struct stat stat;
    ret = INLINE_SYSCALL(fstat, 2, enclave->manifest, &stat);
    if (IS_ERR(ret)) {
        SGX_DBG(DBG_E, "Reading manifest file's size failed: %d\n", -ret);
        ret = -ERRNO(ret);
        goto out;
    }
    int manifest_size = stat.st_size;

    /* Start populating enclave memory */
    struct mem_area {
        const char * desc;   /* label used in debug output and measurement */
        bool skip_eextend;   /* true: EADD pages without measuring contents */
        int fd;              /* backing file, or -1 for anonymous areas */
        bool is_binary; /* only meaningful if fd != -1 */
        unsigned long addr, size, prot;
        enum sgx_page_type type;
    };
    /* 10 fixed areas plus one stack area per thread is an upper bound on
     * how many entries are appended below */
    struct mem_area * areas =
        __alloca(sizeof(areas[0]) * (10 + enclave->thread_num));
    int area_num = 0;

    /* The manifest needs to be allocated at the upper end of the enclave
     * memory. That's used by pal_linux_main to find the manifest area. So add
     * it first to the list with memory areas. */
    areas[area_num] = (struct mem_area) {
        .desc = "manifest", .skip_eextend = false, .fd = enclave->manifest,
        .is_binary = false, .addr = 0, .size = ALLOC_ALIGN_UP(manifest_size),
        .prot = PROT_READ, .type = SGX_PAGE_REG
    };
    area_num++;

    /* SSAFRAMENUM save-state frames per thread */
    areas[area_num] = (struct mem_area) {
        .desc = "ssa", .skip_eextend = false, .fd = -1,
        .is_binary = false, .addr = 0,
        .size = enclave->thread_num * enclave->ssaframesize * SSAFRAMENUM,
        .prot = PROT_READ | PROT_WRITE, .type = SGX_PAGE_REG
    };
    struct mem_area* ssa_area = &areas[area_num++];

    /* one TCS page per thread */
    areas[area_num] = (struct mem_area) {
        .desc = "tcs", .skip_eextend = false, .fd = -1,
        .is_binary = false, .addr = 0, .size = enclave->thread_num * g_page_size,
        .prot = 0, .type = SGX_PAGE_TCS
    };
    struct mem_area* tcs_area = &areas[area_num++];

    /* one TLS page (struct enclave_tls) per thread */
    areas[area_num] = (struct mem_area) {
        .desc = "tls", .skip_eextend = false, .fd = -1,
        .is_binary = false, .addr = 0, .size = enclave->thread_num * g_page_size,
        .prot = PROT_READ | PROT_WRITE, .type = SGX_PAGE_REG
    };
    struct mem_area* tls_area = &areas[area_num++];

    struct mem_area* stack_areas = &areas[area_num]; /* memorize for later use */
    for (uint32_t t = 0; t < enclave->thread_num; t++) {
        areas[area_num] = (struct mem_area) {
            .desc = "stack", .skip_eextend = false, .fd = -1,
            .is_binary = false, .addr = 0, .size = ENCLAVE_STACK_SIZE,
            .prot = PROT_READ | PROT_WRITE, .type = SGX_PAGE_REG
        };
        area_num++;
    }

    areas[area_num] = (struct mem_area) {
        .desc = "pal", .skip_eextend = false, .fd = enclave_image,
        .is_binary = true, .addr = 0, .size = 0 /* set below */,
        .prot = 0, .type = SGX_PAGE_REG
    };
    struct mem_area* pal_area = &areas[area_num++];

    ret = scan_enclave_binary(enclave_image, &pal_area->addr, &pal_area->size, &enclave_entry_addr);
    if (ret < 0) {
        SGX_DBG(DBG_E, "Scanning Pal binary (%s) failed: %d\n", ENCLAVE_FILENAME, -ret);
        goto out;
    }

    struct mem_area* exec_area = NULL;
    if (enclave->exec != -1) {
        areas[area_num] = (struct mem_area) {
            .desc = "exec", .skip_eextend = false, .fd = enclave->exec,
            .is_binary = true, .addr = 0, .size = 0 /* set below */,
            .prot = PROT_WRITE, .type = SGX_PAGE_REG
        };
        exec_area = &areas[area_num++];

        ret = scan_enclave_binary(enclave->exec, &exec_area->addr, &exec_area->size, NULL);
        if (ret < 0) {
            SGX_DBG(DBG_E, "Scanning application binary failed: %d\n", -ret);
            goto out;
        }
    }

    /* assign addresses top-down to every area that does not yet have one,
     * leaving a MEMORY_GAP guard gap between consecutive areas */
    unsigned long populating = enclave->size;
    for (int i = 0 ; i < area_num ; i++) {
        if (areas[i].addr)
            continue;
        areas[i].addr = populating - areas[i].size;
        populating = SATURATED_P_SUB(areas[i].addr, MEMORY_GAP, 0);
    }

    /* ELF entry is relative to the PAL binary's load address */
    enclave_entry_addr += pal_area->addr;

    if (exec_area) {
        if (exec_area->addr + exec_area->size > pal_area->addr - MEMORY_GAP) {
            SGX_DBG(DBG_E, "Application binary overlaps with Pal binary\n");
            ret = -EINVAL;
            goto out;
        }

        /* carve out "free" (heap) pages between the app binary and the
         * areas laid out above it */
        if (exec_area->addr + exec_area->size + MEMORY_GAP < populating) {
            if (populating > heap_min) {
                unsigned long addr = exec_area->addr + exec_area->size + MEMORY_GAP;
                if (addr < heap_min)
                    addr = heap_min;

                areas[area_num] = (struct mem_area) {
                    .desc = "free", .skip_eextend = true, .fd = -1,
                    .is_binary = false, .addr = addr, .size = populating - addr,
                    .prot = PROT_READ | PROT_WRITE | PROT_EXEC, .type = SGX_PAGE_REG
                };
                area_num++;
            }

            populating = SATURATED_P_SUB(exec_area->addr, MEMORY_GAP, 0);
        }
    }

    /* remaining free pages below everything else, down to heap_min */
    if (populating > heap_min) {
        areas[area_num] = (struct mem_area) {
            .desc = "free", .skip_eextend = true, .fd = -1,
            .is_binary = false, .addr = heap_min, .size = populating - heap_min,
            .prot = PROT_READ | PROT_WRITE | PROT_EXEC, .type = SGX_PAGE_REG
        };
        area_num++;
    }

    /* now actually add (EADD) every area into the enclave */
    for (int i = 0 ; i < area_num ; i++) {
        if (areas[i].fd != -1 && areas[i].is_binary) {
            /* ELF binaries (pal/exec) are loaded segment by segment */
            ret = load_enclave_binary(&enclave_secs, areas[i].fd, areas[i].addr, areas[i].prot);
            if (ret < 0) {
                SGX_DBG(DBG_E, "Loading enclave binary failed: %d\n", -ret);
                goto out;
            }
            continue;
        }

        void * data = NULL;

        if (!strcmp_static(areas[i].desc, "tls")) {
            /* stage TLS pages in untrusted memory and pre-fill one
             * struct enclave_tls per thread before copying them in */
            data = (void *) INLINE_SYSCALL(mmap, 6, NULL, areas[i].size,
                                           PROT_READ|PROT_WRITE,
                                           MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
            if (IS_ERR_P(data) || data == NULL) {
                /* Note that Graphene currently doesn't handle 0x0 addresses */
                SGX_DBG(DBG_E, "Allocating memory for tls pages failed\n");
                goto out;
            }

            for (uint32_t t = 0 ; t < enclave->thread_num ; t++) {
                struct enclave_tls * gs = data + g_page_size * t;
                memset(gs, 0, g_page_size);
                assert(sizeof(*gs) <= g_page_size);
                /* self pointer and ssa/gpr/heap fields are absolute
                 * in-enclave addresses (offset + enclave base);
                 * tcs_offset/initial_stack_offset are enclave offsets */
                gs->common.self = (PAL_TCB *)(
                    tls_area->addr + g_page_size * t + enclave_secs.base);
                gs->enclave_size = enclave->size;
                gs->tcs_offset = tcs_area->addr + g_page_size * t;
                gs->initial_stack_offset =
                    stack_areas[t].addr + ENCLAVE_STACK_SIZE;
                gs->ssa = (void *) ssa_area->addr +
                    enclave->ssaframesize * SSAFRAMENUM * t +
                    enclave_secs.base;
                gs->gpr = gs->ssa +
                    enclave->ssaframesize - sizeof(sgx_pal_gpr_t);
                gs->manifest_size = manifest_size;
                gs->heap_min = (void *) enclave_secs.base + heap_min;
                gs->heap_max = (void *) enclave_secs.base + pal_area->addr - MEMORY_GAP;
                if (exec_area) {
                    gs->exec_addr = (void *) enclave_secs.base + exec_area->addr;
                    gs->exec_size = exec_area->size;
                }
                gs->thread = NULL;
            }
        } else if (!strcmp_static(areas[i].desc, "tcs")) {
            /* stage TCS pages and initialize one TCS per thread */
            data = (void *) INLINE_SYSCALL(mmap, 6, NULL, areas[i].size,
                                           PROT_READ|PROT_WRITE,
                                           MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
            if (IS_ERR_P(data) || data == NULL) {
                /* Note that Graphene currently doesn't handle 0x0 addresses */
                SGX_DBG(DBG_E, "Allocating memory for tcs pages failed\n");
                goto out;
            }

            for (uint32_t t = 0 ; t < enclave->thread_num ; t++) {
                sgx_arch_tcs_t * tcs = data + g_page_size * t;
                memset(tcs, 0, g_page_size);
                /* TCS fields hold enclave offsets, not absolute addresses */
                tcs->ossa = ssa_area->addr +
                    enclave->ssaframesize * SSAFRAMENUM * t;
                tcs->nssa = SSAFRAMENUM;
                tcs->oentry = enclave_entry_addr;
                tcs->ofs_base = 0;
                tcs->ogs_base = tls_area->addr + t * g_page_size;
                tcs->ofs_limit = 0xfff;
                tcs->ogs_limit = 0xfff;
                /* remember absolute TCS addresses for the GDB debug info */
                tcs_addrs[t] = (void *) enclave_secs.base + tcs_area->addr + g_page_size * t;
            }
        } else if (areas[i].fd != -1) {
            /* plain file-backed area (currently only the manifest) */
            data = (void *) INLINE_SYSCALL(mmap, 6, NULL, areas[i].size,
                                           PROT_READ,
                                           MAP_FILE|MAP_PRIVATE,
                                           areas[i].fd, 0);
            if (IS_ERR_P(data) || data == NULL) {
                /* Note that Graphene currently doesn't handle 0x0 addresses */
                SGX_DBG(DBG_E, "Allocating memory for file %s failed\n", areas[i].desc);
                goto out;
            }
        }

        /* data == NULL here means "add zero pages" (ssa/stack/free areas) */
        ret = add_pages_to_enclave(&enclave_secs, (void *) areas[i].addr, data, areas[i].size,
                                   areas[i].type, areas[i].prot, areas[i].skip_eextend, areas[i].desc);

        if (data)
            INLINE_SYSCALL(munmap, 2, data, areas[i].size);

        if (ret < 0) {
            SGX_DBG(DBG_E, "Adding pages (%s) to enclave failed: %d\n", areas[i].desc, -ret);
            goto out;
        }
    }

    ret = init_enclave(&enclave_secs, &enclave_sigstruct, &enclave_token);
    if (ret < 0) {
        SGX_DBG(DBG_E, "Initializing enclave failed: %d\n", -ret);
        goto out;
    }

    create_tcs_mapper((void *) enclave_secs.base + tcs_area->addr, enclave->thread_num);

    /* export debug info for GDB at the well-known fixed address; failure
     * here is non-fatal (the enclave runs, only debugging is degraded) */
    struct enclave_dbginfo * dbg = (void *)
        INLINE_SYSCALL(mmap, 6, DBGINFO_ADDR,
                       sizeof(struct enclave_dbginfo),
                       PROT_READ|PROT_WRITE,
                       MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
                       -1, 0);
    if (IS_ERR_P(dbg)) {
        SGX_DBG(DBG_E, "Cannot allocate debug information (GDB will not work)\n");
    } else {
        dbg->pid = INLINE_SYSCALL(getpid, 0);
        dbg->base = enclave->baseaddr;
        dbg->size = enclave->size;
        dbg->ssaframesize = enclave->ssaframesize;
        dbg->aep = async_exit_pointer;
        dbg->thread_tids[0] = dbg->pid;
        for (int i = 0 ; i < MAX_DBG_THREADS ; i++)
            dbg->tcs_addrs[i] = tcs_addrs[i];
    }

    ret = 0;

out:
    if (enclave_image >= 0)
        INLINE_SYSCALL(close, 1, enclave_image);
    return ret;
}
  488. static int mcast_s (int port)
  489. {
  490. struct sockaddr_in addr;
  491. int ret = 0;
  492. addr.sin_family = AF_INET;
  493. addr.sin_addr.s_addr = INADDR_ANY;
  494. addr.sin_port = htons(port);
  495. int fd = INLINE_SYSCALL(socket, 3, AF_INET, SOCK_DGRAM, 0);
  496. if (IS_ERR(fd))
  497. return -ERRNO(fd);
  498. ret = INLINE_SYSCALL(setsockopt, 5, fd, IPPROTO_IP, IP_MULTICAST_IF,
  499. &addr.sin_addr.s_addr, sizeof(addr.sin_addr.s_addr));
  500. if (IS_ERR(ret))
  501. return -ERRNO(ret);
  502. return fd;
  503. }
  504. static int mcast_c (int port)
  505. {
  506. int ret = 0, fd;
  507. struct sockaddr_in addr;
  508. addr.sin_family = AF_INET;
  509. addr.sin_addr.s_addr = INADDR_ANY;
  510. addr.sin_port = htons(port);
  511. fd = INLINE_SYSCALL(socket, 3, AF_INET, SOCK_DGRAM, 0);
  512. if (IS_ERR(fd))
  513. return -ERRNO(fd);
  514. int reuse = 1;
  515. INLINE_SYSCALL(setsockopt, 5, fd, SOL_SOCKET, SO_REUSEADDR,
  516. &reuse, sizeof(reuse));
  517. ret = INLINE_SYSCALL(bind, 3, fd, &addr, sizeof(addr));
  518. if (IS_ERR(ret))
  519. return -ERRNO(ret);
  520. ret = INLINE_SYSCALL(setsockopt, 5, fd, IPPROTO_IP, IP_MULTICAST_IF,
  521. &addr.sin_addr.s_addr, sizeof(addr.sin_addr.s_addr));
  522. if (IS_ERR(ret))
  523. return -ERRNO(ret);
  524. inet_pton4(MCAST_GROUP, sizeof(MCAST_GROUP) - 1,
  525. &addr.sin_addr.s_addr);
  526. struct ip_mreq group;
  527. group.imr_multiaddr.s_addr = addr.sin_addr.s_addr;
  528. group.imr_interface.s_addr = INADDR_ANY;
  529. ret = INLINE_SYSCALL(setsockopt, 5, fd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
  530. &group, sizeof(group));
  531. if (IS_ERR(ret))
  532. return -ERRNO(ret);
  533. return fd;
  534. }
  535. static unsigned long randval = 0;
  536. void getrand (void * buffer, size_t size)
  537. {
  538. size_t bytes = 0;
  539. while (bytes + sizeof(uint64_t) <= size) {
  540. *(uint64_t*) (buffer + bytes) = randval;
  541. randval = hash64(randval);
  542. bytes += sizeof(uint64_t);
  543. }
  544. if (bytes < size) {
  545. memcpy(buffer + bytes, &randval, size - bytes);
  546. randval = hash64(randval);
  547. }
  548. }
  549. static void create_instance (struct pal_sec * pal_sec)
  550. {
  551. PAL_NUM id;
  552. getrand(&id, sizeof(id));
  553. snprintf(pal_sec->pipe_prefix, sizeof(pal_sec->pipe_prefix), "/graphene/%016lx/", id);
  554. pal_sec->instance_id = id;
  555. }
  556. static int load_manifest (int fd, struct config_store ** config_ptr)
  557. {
  558. int ret = 0;
  559. int nbytes = INLINE_SYSCALL(lseek, 3, fd, 0, SEEK_END);
  560. if (IS_ERR(nbytes)) {
  561. SGX_DBG(DBG_E, "Cannot detect size of manifest file\n");
  562. return -ERRNO(nbytes);
  563. }
  564. struct config_store * config = malloc(sizeof(struct config_store));
  565. if (!config) {
  566. SGX_DBG(DBG_E, "Not enough memory for config_store of manifest\n");
  567. return -ENOMEM;
  568. }
  569. void * config_raw = (void *)
  570. INLINE_SYSCALL(mmap, 6, NULL, nbytes, PROT_READ, MAP_PRIVATE, fd, 0);
  571. if (IS_ERR_P(config_raw)) {
  572. SGX_DBG(DBG_E, "Cannot mmap manifest file\n");
  573. ret = -ERRNO_P(config_raw);
  574. goto out;
  575. }
  576. config->raw_data = config_raw;
  577. config->raw_size = nbytes;
  578. config->malloc = malloc;
  579. config->free = NULL;
  580. const char * errstring = NULL;
  581. ret = read_config(config, NULL, &errstring);
  582. if (ret < 0) {
  583. SGX_DBG(DBG_E, "Cannot read manifest: %s\n", errstring);
  584. goto out;
  585. }
  586. *config_ptr = config;
  587. ret = 0;
  588. out:
  589. if (ret < 0) {
  590. free(config);
  591. if (!IS_ERR_P(config_raw))
  592. INLINE_SYSCALL(munmap, 2, config_raw, nbytes);
  593. }
  594. return ret;
  595. }
  596. /*
  597. * Returns the number of online CPUs read from /sys/devices/system/cpu/online, -errno on failure.
  598. * Understands complex formats like "1,3-5,6".
  599. */
  600. static int get_cpu_count(void) {
  601. int fd = INLINE_SYSCALL(open, 3, "/sys/devices/system/cpu/online", O_RDONLY|O_CLOEXEC, 0);
  602. if (fd < 0)
  603. return unix_to_pal_error(ERRNO(fd));
  604. char buf[64];
  605. int ret = INLINE_SYSCALL(read, 3, fd, buf, sizeof(buf) - 1);
  606. if (ret < 0) {
  607. INLINE_SYSCALL(close, 1, fd);
  608. return unix_to_pal_error(ERRNO(ret));
  609. }
  610. buf[ret] = '\0'; /* ensure null-terminated buf even in partial read */
  611. char* end;
  612. char* ptr = buf;
  613. int cpu_count = 0;
  614. while (*ptr) {
  615. while (*ptr == ' ' || *ptr == '\t' || *ptr == ',')
  616. ptr++;
  617. int firstint = (int)strtol(ptr, &end, 10);
  618. if (ptr == end)
  619. break;
  620. if (*end == '\0' || *end == ',') {
  621. /* single CPU index, count as one more CPU */
  622. cpu_count++;
  623. } else if (*end == '-') {
  624. /* CPU range, count how many CPUs in range */
  625. ptr = end + 1;
  626. int secondint = (int)strtol(ptr, &end, 10);
  627. if (secondint > firstint)
  628. cpu_count += secondint - firstint + 1; // inclusive (e.g., 0-7, or 8-16)
  629. }
  630. ptr = end;
  631. }
  632. INLINE_SYSCALL(close, 1, fd);
  633. if (cpu_count == 0)
  634. return -PAL_ERROR_STREAMNOTEXIST;
  635. return cpu_count;
  636. }
  637. static int load_enclave (struct pal_enclave * enclave,
  638. int manifest_fd,
  639. char * manifest_uri,
  640. char * exec_uri,
  641. char * args, size_t args_size,
  642. char * env, size_t env_size,
  643. bool exec_uri_inferred)
  644. {
  645. struct pal_sec * pal_sec = &enclave->pal_sec;
  646. int ret;
  647. struct timeval tv;
  648. #if PRINT_ENCLAVE_STAT == 1
  649. INLINE_SYSCALL(gettimeofday, 2, &tv, NULL);
  650. pal_sec->start_time = tv.tv_sec * 1000000UL + tv.tv_usec;
  651. #endif
  652. ret = open_gsgx();
  653. if (ret < 0)
  654. return ret;
  655. if (!is_wrfsbase_supported())
  656. return -EPERM;
  657. INLINE_SYSCALL(gettimeofday, 2, &tv, NULL);
  658. randval = tv.tv_sec * 1000000UL + tv.tv_usec;
  659. pal_sec->pid = INLINE_SYSCALL(getpid, 0);
  660. pal_sec->uid = INLINE_SYSCALL(getuid, 0);
  661. pal_sec->gid = INLINE_SYSCALL(getgid, 0);
  662. int num_cpus = get_cpu_count();
  663. if (num_cpus < 0) {
  664. return num_cpus;
  665. }
  666. pal_sec->num_cpus = num_cpus;
  667. #ifdef DEBUG
  668. size_t env_i = 0;
  669. while (env_i < env_size) {
  670. if (!strcmp_static(&env[env_i], "IN_GDB=1")) {
  671. SGX_DBG(DBG_I, "[ Running under GDB ]\n");
  672. pal_sec->in_gdb = true;
  673. } else if (strstartswith_static(&env[env_i], "LD_PRELOAD=")) {
  674. uint64_t env_i_size = strnlen(&env[env_i], env_size - env_i) + 1;
  675. memmove(&env[env_i], &env[env_i + env_i_size], env_size - env_i - env_i_size);
  676. env_size -= env_i_size;
  677. continue;
  678. }
  679. env_i += strnlen(&env[env_i], env_size - env_i) + 1;
  680. }
  681. #endif
  682. enclave->manifest = manifest_fd;
  683. ret = load_manifest(enclave->manifest, &enclave->config);
  684. if (ret < 0) {
  685. SGX_DBG(DBG_E, "Invalid manifest: %s\n", manifest_uri);
  686. return -EINVAL;
  687. }
  688. char cfgbuf[CONFIG_MAX];
  689. const char * errstring;
  690. // A manifest can specify an executable with a different base name
  691. // than the manifest itself. Always give the exec field of the manifest
  692. // precedence if specified.
  693. if (get_config(enclave->config, "loader.exec", cfgbuf, sizeof(cfgbuf)) > 0) {
  694. exec_uri = resolve_uri(cfgbuf, &errstring);
  695. exec_uri_inferred = false;
  696. if (!exec_uri) {
  697. SGX_DBG(DBG_E, "%s: %s\n", errstring, cfgbuf);
  698. return -EINVAL;
  699. }
  700. }
  701. enclave->exec = INLINE_SYSCALL(open, 3, exec_uri + static_strlen("file:"),
  702. O_RDONLY|O_CLOEXEC, 0);
  703. if (IS_ERR(enclave->exec)) {
  704. if (exec_uri_inferred) {
  705. // It is valid for an enclave not to have an executable.
  706. // We need to catch the case where we inferred the executable
  707. // from the manifest file name, but it doesn't exist, and let
  708. // the enclave go a bit further. Go ahead and warn the user,
  709. // though.
  710. SGX_DBG(DBG_I, "Inferred executable cannot be opened: %s. This may be ok, "
  711. "or may represent a manifest misconfiguration. This typically "
  712. "represents advanced usage, and if it is not what you intended, "
  713. "try setting the loader.exec field in the manifest.\n", exec_uri);
  714. enclave->exec = -1;
  715. } else {
  716. SGX_DBG(DBG_E, "Cannot open executable %s\n", exec_uri);
  717. return -EINVAL;
  718. }
  719. }
  720. if (get_config(enclave->config, "sgx.sigfile", cfgbuf, sizeof(cfgbuf)) < 0) {
  721. SGX_DBG(DBG_E, "Sigstruct file not found ('sgx.sigfile' must be specified in manifest)\n");
  722. return -EINVAL;
  723. }
  724. char * sig_uri = resolve_uri(cfgbuf, &errstring);
  725. if (!sig_uri) {
  726. SGX_DBG(DBG_E, "%s: %s\n", errstring, cfgbuf);
  727. return -EINVAL;
  728. }
  729. if (!strendswith(sig_uri, ".sig")) {
  730. SGX_DBG(DBG_E, "Invalid sigstruct file URI as %s\n", cfgbuf);
  731. free(sig_uri);
  732. return -EINVAL;
  733. }
  734. enclave->sigfile = INLINE_SYSCALL(open, 3, sig_uri + static_strlen("file:"),
  735. O_RDONLY|O_CLOEXEC, 0);
  736. if (IS_ERR(enclave->sigfile)) {
  737. SGX_DBG(DBG_E, "Cannot open sigstruct file %s\n", sig_uri);
  738. free(sig_uri);
  739. return -EINVAL;
  740. }
  741. char * token_uri = alloc_concat(sig_uri, strlen(sig_uri) - static_strlen(".sig"), ".token", -1);
  742. free(sig_uri);
  743. if (!token_uri) {
  744. INLINE_SYSCALL(close, 1, enclave->sigfile);
  745. return -ENOMEM;
  746. }
  747. enclave->token = INLINE_SYSCALL(open, 3, token_uri + static_strlen("file:"),
  748. O_RDONLY|O_CLOEXEC, 0);
  749. if (IS_ERR(enclave->token)) {
  750. SGX_DBG(DBG_E, "Cannot open token \'%s\'. Use \'"
  751. PAL_FILE("pal-sgx-get-token")
  752. "\' on the runtime host or run \'make SGX=1 sgx-tokens\' "
  753. "in the Graphene source to create the token file.\n",
  754. token_uri);
  755. free(token_uri);
  756. return -EINVAL;
  757. }
  758. SGX_DBG(DBG_I, "Token file: %s\n", token_uri);
  759. free(token_uri);
  760. ret = initialize_enclave(enclave);
  761. if (ret < 0)
  762. return ret;
  763. if (!pal_sec->instance_id)
  764. create_instance(&enclave->pal_sec);
  765. memcpy(pal_sec->manifest_name, manifest_uri, strlen(manifest_uri) + 1);
  766. if (enclave->exec == -1) {
  767. memset(pal_sec->exec_name, 0, sizeof(PAL_SEC_STR));
  768. } else {
  769. memcpy(pal_sec->exec_name, exec_uri, strlen(exec_uri) + 1);
  770. }
  771. if (!pal_sec->mcast_port) {
  772. unsigned short mcast_port;
  773. getrand(&mcast_port, sizeof(unsigned short));
  774. pal_sec->mcast_port = mcast_port > 1024 ? mcast_port : mcast_port + 1024;
  775. }
  776. if ((ret = mcast_s(pal_sec->mcast_port)) >= 0) {
  777. pal_sec->mcast_srv = ret;
  778. if ((ret = mcast_c(pal_sec->mcast_port)) >= 0) {
  779. pal_sec->mcast_cli = ret;
  780. } else {
  781. INLINE_SYSCALL(close, 1, pal_sec->mcast_srv);
  782. pal_sec->mcast_srv = 0;
  783. }
  784. }
  785. ret = sgx_signal_setup();
  786. if (ret < 0)
  787. return ret;
  788. ret = init_aesm_targetinfo(&pal_sec->aesm_targetinfo);
  789. if (ret < 0)
  790. return ret;
  791. void* alt_stack = (void*)INLINE_SYSCALL(mmap, 6, NULL, ALT_STACK_SIZE,
  792. PROT_READ | PROT_WRITE,
  793. MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  794. if (IS_ERR_P(alt_stack))
  795. return -ENOMEM;
  796. /* initialize TCB at the top of the alternative stack */
  797. PAL_TCB_LINUX* tcb = alt_stack + ALT_STACK_SIZE - sizeof(PAL_TCB_LINUX);
  798. tcb->common.self = &tcb->common;
  799. tcb->alt_stack = alt_stack;
  800. tcb->stack = NULL; /* main thread uses the stack provided by Linux */
  801. tcb->tcs = NULL; /* initialized by child thread */
  802. pal_thread_init(tcb);
  803. /* start running trusted PAL */
  804. ecall_enclave_start(args, args_size, env, env_size);
  805. #if PRINT_ENCLAVE_STAT == 1
  806. PAL_NUM exit_time = 0;
  807. INLINE_SYSCALL(gettimeofday, 2, &tv, NULL);
  808. exit_time = tv.tv_sec * 1000000UL + tv.tv_usec;
  809. #endif
  810. unmap_tcs();
  811. INLINE_SYSCALL(munmap, 2, alt_stack, ALT_STACK_SIZE);
  812. INLINE_SYSCALL(exit, 0);
  813. return 0;
  814. }
  815. /* Grow stack of main thread to THREAD_STACK_SIZE by allocating a large dummy array and probing
  816. * each stack page (Linux dynamically grows the stack of the main thread but gets confused with
  817. * huge-jump stack accesses coming from within the enclave). Note that other, non-main threads
  818. * are created manually via clone(.., THREAD_STACK_SIZE, ..) and thus do not need this hack. */
  819. static void __attribute__ ((noinline)) force_linux_to_grow_stack() {
  820. char dummy[THREAD_STACK_SIZE];
  821. for (uint64_t i = 0; i < sizeof(dummy); i += PRESET_PAGESIZE) {
  822. /* touch each page on the stack just to make it is not optimized away */
  823. __asm__ volatile("movq %0, %%rbx\r\n"
  824. "movq (%%rbx), %%rbx\r\n"
  825. : : "r"(&dummy[i]) : "%rbx");
  826. }
  827. }
/* Untrusted loader entry point: parse the executable/manifest argument, open
 * the manifest (and executable, if distinct), then hand off to load_enclave().
 * Returns 0 on success or a negative errno-style code on failure. */
int main (int argc, char ** argv, char ** envp)
{
    char * manifest_uri = NULL;
    char * exec_uri = NULL;
    const char * pal_loader = argv[0];
    int fd = -1;
    int ret = 0;
    bool exec_uri_inferred = false; // Handle the case where the exec uri is
                                    // inferred from the manifest name somewhat
                                    // differently

    /* fault in the whole main-thread stack up front; see the comment on
     * force_linux_to_grow_stack() */
    force_linux_to_grow_stack();

    /* skip argv[0] (the loader path, already saved in pal_loader) */
    argc--;
    argv++;

    /* nonzero when this process was forked from another Graphene process and
     * inherits its PAL security section */
    int is_child = sgx_init_child_process(&pal_enclave.pal_sec);
    if (is_child < 0) {
        ret = is_child;
        goto out;
    }

    if (!is_child) {
        /* occupy PROC_INIT_FD so no one will use it */
        INLINE_SYSCALL(dup2, 2, 0, PROC_INIT_FD);

        if (!argc)
            goto usage;

        /* normalize argv[0] into a "file:" URI; presumably strcmp_static()
         * matches the literal prefix — TODO confirm its exact semantics */
        if (!strcmp_static(argv[0], "file:")) {
            exec_uri = alloc_concat(argv[0], -1, NULL, -1);
        } else {
            exec_uri = alloc_concat("file:", -1, argv[0], -1);
        }
    } else {
        /* child: the executable name was handed down via the security section */
        exec_uri = alloc_concat(pal_enclave.pal_sec.exec_name, -1, NULL, -1);
    }

    if (!exec_uri) {
        ret = -ENOMEM;
        goto out;
    }

    fd = INLINE_SYSCALL(open, 3, exec_uri + static_strlen("file:"), O_RDONLY|O_CLOEXEC, 0);
    if (IS_ERR(fd)) {
        SGX_DBG(DBG_E, "Input file not found: %s\n", exec_uri);
        ret = fd;
        /* NOTE(review): 'usage' re-assigns ret = -EINVAL, so the 'ret = fd'
         * above is dead — confirm whether the open() error code was meant to
         * be propagated instead */
        goto usage;
    }

    /* sniff the magic to decide whether argv[0] is an ELF executable or a
     * manifest file */
    char file_first_four_bytes[4];
    ret = INLINE_SYSCALL(read, 3, fd, file_first_four_bytes, sizeof(file_first_four_bytes));
    if (IS_ERR(ret)) {
        goto out;
    }
    if (ret != sizeof(file_first_four_bytes)) {
        ret = -EINVAL;
        goto out;
    }

    /* derive the SGX manifest name from the input path: "<base>.manifest" ->
     * "<base>.manifest.sgx", anything without a manifest suffix ->
     * "<input>.manifest.sgx" */
    char manifest_base_name[URI_MAX];
    size_t manifest_base_name_len = sizeof(manifest_base_name);
    ret = get_base_name(exec_uri + static_strlen("file:"), manifest_base_name,
                        &manifest_base_name_len);
    if (ret < 0) {
        goto out;
    }

    if (strendswith(manifest_base_name, ".manifest")) {
        if (!strcpy_static(manifest_base_name + manifest_base_name_len, ".sgx",
                           sizeof(manifest_base_name) - manifest_base_name_len)) {
            ret = -E2BIG;
            goto out;
        }
    } else if (!strendswith(manifest_base_name, ".manifest.sgx")) {
        if (!strcpy_static(manifest_base_name + manifest_base_name_len, ".manifest.sgx",
                           sizeof(manifest_base_name) - manifest_base_name_len)) {
            ret = -E2BIG;
            goto out;
        }
    }

    int manifest_fd = -1;

    if (memcmp(file_first_four_bytes, "\177ELF", sizeof(file_first_four_bytes))) {
        /* exec_uri doesn't refer to ELF executable, so it must refer to the
         * manifest. Verify this and update exec_uri with the manifest suffix
         * removed.
         */

        size_t exec_uri_len = strlen(exec_uri);
        if (strendswith(exec_uri, ".manifest")) {
            /* plain manifest: strip the suffix; the .sgx manifest is opened
             * separately below */
            exec_uri[exec_uri_len - static_strlen(".manifest")] = '\0';
        } else if (strendswith(exec_uri, ".manifest.sgx")) {
            /* already the SGX manifest: rewind and reuse the open fd */
            INLINE_SYSCALL(lseek, 3, fd, 0, SEEK_SET);
            manifest_fd = fd;
            exec_uri[exec_uri_len - static_strlen(".manifest.sgx")] = '\0';
        } else {
            SGX_DBG(DBG_E, "Invalid manifest file specified: %s\n", exec_uri);
            goto usage;
        }

        exec_uri_inferred = true;
    }

    if (manifest_fd == -1) {
        /* the input was an ELF (or a ".manifest" file): open the derived
         * ".manifest.sgx" file; fd now aliases manifest_fd and is closed
         * once at 'out' */
        INLINE_SYSCALL(close, 1, fd);

        fd = manifest_fd = INLINE_SYSCALL(open, 3, manifest_base_name, O_RDONLY|O_CLOEXEC, 0);
        if (IS_ERR(fd)) {
            SGX_DBG(DBG_E, "Cannot open manifest file: %s\n", manifest_base_name);
            goto usage;
        }
    }

    manifest_uri = alloc_concat("file:", static_strlen("file:"), manifest_base_name, -1);
    if (!manifest_uri) {
        ret = -ENOMEM;
        goto out;
    }

    SGX_DBG(DBG_I, "Manifest file: %s\n", manifest_uri);
    if (exec_uri_inferred)
        SGX_DBG(DBG_I, "Inferred executable file: %s\n", exec_uri);
    else
        SGX_DBG(DBG_I, "Executable file: %s\n", exec_uri);

    /*
     * While C does not guarantee that the argv[i] and envp[i] strings are
     * continuous we know that we are running on Linux, which does this. This
     * saves us creating a copy of all argv and envp strings.
     */
    char * args = argv[0];
    size_t args_size = argc > 0 ? (argv[argc - 1] - argv[0]) + strlen(argv[argc - 1]) + 1: 0;

    int envc = 0;
    while (envp[envc] != NULL) {
        envc++;
    }
    char * env = envp[0];
    size_t env_size = envc > 0 ? (envp[envc - 1] - envp[0]) + strlen(envp[envc - 1]) + 1: 0;

    ret = load_enclave(&pal_enclave, manifest_fd, manifest_uri, exec_uri, args, args_size, env, env_size,
                       exec_uri_inferred);

out:
    /* NOTE(review): these '>= 0' checks assume pal_enclave.exec/sigfile/token
     * are initialized to a negative value before load_enclave() runs; if the
     * global is merely zero-initialized, an early failure would close fd 0
     * here — verify pal_enclave's initialization */
    if (pal_enclave.exec >= 0)
        INLINE_SYSCALL(close, 1, pal_enclave.exec);

    if (pal_enclave.sigfile >= 0)
        INLINE_SYSCALL(close, 1, pal_enclave.sigfile);

    if (pal_enclave.token >= 0)
        INLINE_SYSCALL(close, 1, pal_enclave.token);

    if (!IS_ERR(fd))
        INLINE_SYSCALL(close, 1, fd);

    free(exec_uri);
    free(manifest_uri);

    return ret;

usage:
    SGX_DBG(DBG_E, "USAGE: %s [executable|manifest] args ...\n", pal_loader);
    ret = -EINVAL;
    goto out;
}