/* sgx_main.c */

#include <pal_linux.h>
#include <pal_rtld.h>
#include "sgx_internal.h"
#include "sgx_tls.h"
#include "sgx_enclave.h"
#include "debugger/sgx_gdb.h"

#include <asm/fcntl.h>
#include <asm/socket.h>
#include <linux/fs.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <asm/errno.h>

#include <sysdep.h>
#include <sysdeps/generic/ldsodefs.h>

unsigned long pagesize  = PRESET_PAGESIZE;
unsigned long pagemask  = ~(PRESET_PAGESIZE - 1);
unsigned long pageshift = PRESET_PAGESIZE - 1;
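
/* Concatenate two strings into a freshly allocated, NUL-terminated buffer.
 * A length of (size_t)-1 means "use strlen()"; a NULL pointer contributes
 * nothing. */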
static inline
char * alloc_concat(const char * p, size_t plen,
                    const char * s, size_t slen)
{
    plen = (plen != (size_t)-1) ? plen : (p ? strlen(p) : 0);
    slen = (slen != (size_t)-1) ? slen : (s ? strlen(s) : 0);

    char * buf = malloc(plen + slen + 1);
    if (!buf)
        return NULL;

    if (plen)
        memcpy(buf, p, plen);
    if (slen)
        memcpy(buf + plen, s, slen);

    buf[plen + slen] = '\0';
    return buf;
}
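
/* Parse a decimal, octal (leading '0') or hexadecimal (leading '0x') integer,
 * honoring an optional K/M/G suffix. */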
static unsigned long parse_int (const char * str)
{
    unsigned long num = 0;
    int radix = 10;
    char c;

    if (str[0] == '0') {
        str++;
        radix = 8;
        if (str[0] == 'x') {
            str++;
            radix = 16;
        }
    }

    while ((c = *(str++))) {
        int val;
        if (c >= 'A' && c <= 'F')
            val = c - 'A' + 10;
        else if (c >= 'a' && c <= 'f')
            val = c - 'a' + 10;
        else if (c >= '0' && c <= '9')
            val = c - '0';
        else
            break;
        if (val >= radix)
            break;
        num = num * radix + val;
    }

    if (c == 'G' || c == 'g')
        num *= 1024 * 1024 * 1024;
    else if (c == 'M' || c == 'm')
        num *= 1024 * 1024;
    else if (c == 'K' || c == 'k')
        num *= 1024;

    return num;
}
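
/* Normalize a "file:" URI into its canonical path form. Returns a newly
 * allocated URI, or NULL with *errstring set on failure. */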
static char * resolve_uri (const char * uri, const char ** errstring)
{
    if (!strpartcmp_static(uri, "file:")) {
        *errstring = "Invalid URI";
        return NULL;
    }

    char path_buf[URI_MAX];
    int len = get_norm_path(uri + 5, path_buf, 0, URI_MAX);
    if (len < 0) {
        *errstring = "Invalid URI";
        return NULL;
    }

    return alloc_concat("file:", static_strlen("file:"), path_buf, len);
}
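
/* Scan the ELF program headers of an enclave binary to determine the address
 * range its PT_LOAD segments cover and, optionally, its entry point. */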
static
int scan_enclave_binary (int fd, unsigned long * base, unsigned long * size,
                         unsigned long * entry)
{
    int ret = 0;

    if (IS_ERR(ret = INLINE_SYSCALL(lseek, 3, fd, 0, SEEK_SET)))
        return -ERRNO(ret);

    char filebuf[FILEBUF_SIZE];
    ret = INLINE_SYSCALL(read, 3, fd, filebuf, FILEBUF_SIZE);
    if (IS_ERR(ret))
        return -ERRNO(ret);

    const ElfW(Ehdr) * header = (void *) filebuf;
    const ElfW(Phdr) * phdr = (void *) filebuf + header->e_phoff;
    const ElfW(Phdr) * ph;

    struct loadcmd {
        ElfW(Addr) mapstart, mapend;
    } loadcmds[16], *c;
    int nloadcmds = 0;

    for (ph = phdr ; ph < &phdr[header->e_phnum] ; ph++)
        if (ph->p_type == PT_LOAD) {
            if (nloadcmds == 16)
                return -EINVAL;

            c = &loadcmds[nloadcmds++];
            c->mapstart = ALLOC_ALIGNDOWN(ph->p_vaddr);
            c->mapend = ALLOC_ALIGNUP(ph->p_vaddr + ph->p_memsz);
        }

    *base = loadcmds[0].mapstart;
    *size = loadcmds[nloadcmds - 1].mapend - loadcmds[0].mapstart;
    if (entry)
        *entry = header->e_entry;
    return 0;
}
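
/* Map each PT_LOAD segment of an ELF binary, zero any trailing partial page
 * and the BSS, and add the resulting pages to the enclave. */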
static
int load_enclave_binary (sgx_arch_secs_t * secs, int fd,
                         unsigned long base, unsigned long prot)
{
    int ret = 0;

    if (IS_ERR(ret = INLINE_SYSCALL(lseek, 3, fd, 0, SEEK_SET)))
        return -ERRNO(ret);

    char filebuf[FILEBUF_SIZE];
    ret = INLINE_SYSCALL(read, 3, fd, filebuf, FILEBUF_SIZE);
    if (IS_ERR(ret))
        return -ERRNO(ret);

    const ElfW(Ehdr) * header = (void *) filebuf;
    const ElfW(Phdr) * phdr = (void *) filebuf + header->e_phoff;
    const ElfW(Phdr) * ph;

    struct loadcmd {
        ElfW(Addr) mapstart, mapend, datastart, dataend, allocend;
        unsigned int mapoff;
        int prot;
    } loadcmds[16], *c;
    int nloadcmds = 0;

    for (ph = phdr ; ph < &phdr[header->e_phnum] ; ph++)
        if (ph->p_type == PT_LOAD) {
            if (nloadcmds == 16)
                return -EINVAL;

            c = &loadcmds[nloadcmds++];
            c->mapstart = ALLOC_ALIGNDOWN(ph->p_vaddr);
            c->mapend = ALLOC_ALIGNUP(ph->p_vaddr + ph->p_filesz);
            c->datastart = ph->p_vaddr;
            c->dataend = ph->p_vaddr + ph->p_filesz;
            c->allocend = ph->p_vaddr + ph->p_memsz;
            c->mapoff = ALLOC_ALIGNDOWN(ph->p_offset);
            c->prot = (ph->p_flags & PF_R ? PROT_READ : 0)|
                      (ph->p_flags & PF_W ? PROT_WRITE : 0)|
                      (ph->p_flags & PF_X ? PROT_EXEC : 0)|prot;
        }

    base -= loadcmds[0].mapstart;

    for (c = loadcmds; c < &loadcmds[nloadcmds] ; c++) {
        ElfW(Addr) zero = c->dataend;
        ElfW(Addr) zeroend = ALLOC_ALIGNUP(c->allocend);
        ElfW(Addr) zeropage = ALLOC_ALIGNUP(zero);

        if (zeroend < zeropage)
            zeropage = zeroend;

        if (c->mapend > c->mapstart) {
            void * addr = (void *) INLINE_SYSCALL(mmap, 6, NULL,
                                                  c->mapend - c->mapstart,
                                                  PROT_READ|PROT_WRITE,
                                                  MAP_PRIVATE | MAP_FILE,
                                                  fd, c->mapoff);
            if (IS_ERR_P(addr))
                return -ERRNO_P(addr);

            if (c->datastart > c->mapstart)
                memset(addr, 0, c->datastart - c->mapstart);

            if (zeropage > zero)
                memset(addr + zero - c->mapstart, 0, zeropage - zero);

            ret = add_pages_to_enclave(secs, (void *) base + c->mapstart, addr,
                                       c->mapend - c->mapstart,
                                       SGX_PAGE_REG, c->prot, 0,
                                       (c->prot & PROT_EXEC) ? "code" : "data");

            INLINE_SYSCALL(munmap, 2, addr, c->mapend - c->mapstart);

            if (ret < 0)
                return ret;
        }

        if (zeroend > zeropage) {
            ret = add_pages_to_enclave(secs, (void *) base + zeropage, NULL,
                                       zeroend - zeropage,
                                       SGX_PAGE_REG, c->prot, false, "bss");
            if (ret < 0)
                return ret;
        }
    }

    return 0;
}
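
/* Create the enclave, lay out its memory areas (manifest, SSA, TCS, TLS,
 * per-thread stacks, PAL and application binaries, free heap), add them all
 * to enclave memory, and finally initialize the enclave. */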
int initialize_enclave (struct pal_enclave * enclave)
{
    int ret = 0;
    int enclave_image = -1;
    int enclave_thread_num = 1;
    sgx_arch_token_t enclave_token;
    sgx_arch_sigstruct_t enclave_sigstruct;
    sgx_arch_secs_t enclave_secs;
    unsigned long enclave_entry_addr;
    void * tcs_addrs[MAX_DBG_THREADS];
    unsigned long heap_min = DEFAULT_HEAP_MIN;

    enclave_image = INLINE_SYSCALL(open, 3, ENCLAVE_FILENAME, O_RDONLY, 0);
    if (IS_ERR(enclave_image)) {
        SGX_DBG(DBG_E, "Cannot find %s\n", ENCLAVE_FILENAME);
        ret = -ERRNO(enclave_image);
        goto out;
    }

    char cfgbuf[CONFIG_MAX];

    /* Reading sgx.enclave_size from manifest */
    if (get_config(enclave->config, "sgx.enclave_size", cfgbuf, CONFIG_MAX) <= 0) {
        SGX_DBG(DBG_E, "Enclave size is not specified\n");
        ret = -EINVAL;
        goto out;
    }

    enclave->size = parse_int(cfgbuf);
    if (enclave->size & (enclave->size - 1)) {
        SGX_DBG(DBG_E, "Enclave size not a power of two (an SGX-imposed requirement)\n");
        ret = -EINVAL;
        goto out;
    }

    /* Reading sgx.thread_num from manifest (default: 1 thread) */
    if (get_config(enclave->config, "sgx.thread_num", cfgbuf, CONFIG_MAX) > 0)
        enclave_thread_num = parse_int(cfgbuf);

    if (enclave_thread_num > MAX_DBG_THREADS) {
        SGX_DBG(DBG_E, "Too many threads to debug\n");
        ret = -EINVAL;
        goto out;
    }

    enclave->thread_num = enclave_thread_num;

    /* Reading sgx.static_address from manifest */
    if (get_config(enclave->config, "sgx.static_address", cfgbuf, CONFIG_MAX) > 0 && cfgbuf[0] == '1')
        enclave->baseaddr = heap_min;
    else
        enclave->baseaddr = heap_min = 0;

    ret = read_enclave_token(enclave->token, &enclave_token);
    if (ret < 0) {
        SGX_DBG(DBG_E, "Reading enclave token failed: %d\n", -ret);
        goto out;
    }

    ret = read_enclave_sigstruct(enclave->sigfile, &enclave_sigstruct);
    if (ret < 0) {
        SGX_DBG(DBG_E, "Reading enclave sigstruct failed: %d\n", -ret);
        goto out;
    }

    ret = create_enclave(&enclave_secs, enclave->baseaddr, enclave->size, &enclave_token);
    if (ret < 0) {
        SGX_DBG(DBG_E, "Creating enclave failed: %d\n", -ret);
        goto out;
    }

    enclave->baseaddr = enclave_secs.baseaddr;
    enclave->size = enclave_secs.size;
    enclave->ssaframesize = enclave_secs.ssaframesize * pagesize;

    struct stat stat;
    ret = INLINE_SYSCALL(fstat, 2, enclave->manifest, &stat);
    if (IS_ERR(ret)) {
        SGX_DBG(DBG_E, "Reading manifest file's size failed: %d\n", -ret);
        ret = -ERRNO(ret);
        goto out;
    }
    int manifest_size = stat.st_size;

    /* Start populating enclave memory */
    struct mem_area {
        const char * desc;
        bool skip_eextend;
        bool is_binary;
        int fd;
        unsigned long addr, size, prot;
        enum sgx_page_type type;
    };

    struct mem_area * areas =
        __alloca(sizeof(areas[0]) * (10 + enclave->thread_num));
    int area_num = 0;

    /* The manifest needs to be allocated at the upper end of the enclave
     * memory. That's used by pal_linux_main to find the manifest area. So add
     * it first to the list with memory areas. */
    areas[area_num] = (struct mem_area) {
        .desc = "manifest", .skip_eextend = false, .is_binary = false,
        .fd = enclave->manifest, .addr = 0, .size = ALLOC_ALIGNUP(manifest_size),
        .prot = PROT_READ, .type = SGX_PAGE_REG
    };
    area_num++;

    areas[area_num] = (struct mem_area) {
        .desc = "ssa", .skip_eextend = false, .is_binary = false,
        .fd = -1, .addr = 0, .size = enclave->thread_num * enclave->ssaframesize * SSAFRAMENUM,
        .prot = PROT_READ | PROT_WRITE, .type = SGX_PAGE_REG
    };
    struct mem_area* ssa_area = &areas[area_num++];

    areas[area_num] = (struct mem_area) {
        .desc = "tcs", .skip_eextend = false, .is_binary = false,
        .fd = -1, .addr = 0, .size = enclave->thread_num * pagesize,
        .prot = 0, .type = SGX_PAGE_TCS
    };
    struct mem_area* tcs_area = &areas[area_num++];

    areas[area_num] = (struct mem_area) {
        .desc = "tls", .skip_eextend = false, .is_binary = false,
        .fd = -1, .addr = 0, .size = enclave->thread_num * pagesize,
        .prot = PROT_READ | PROT_WRITE, .type = SGX_PAGE_REG
    };
    struct mem_area* tls_area = &areas[area_num++];

    struct mem_area* stack_areas = &areas[area_num]; /* memorize for later use */
    for (uint32_t t = 0; t < enclave->thread_num; t++) {
        areas[area_num] = (struct mem_area) {
            .desc = "stack", .skip_eextend = false, .is_binary = false,
            .fd = -1, .addr = 0, .size = ENCLAVE_STACK_SIZE,
            .prot = PROT_READ | PROT_WRITE, .type = SGX_PAGE_REG
        };
        area_num++;
    }

    areas[area_num] = (struct mem_area) {
        .desc = "pal", .skip_eextend = false, .is_binary = true,
        .fd = enclave_image, .addr = 0, .size = 0 /* set below */,
        .prot = 0, .type = SGX_PAGE_REG
    };
    struct mem_area* pal_area = &areas[area_num++];

    ret = scan_enclave_binary(enclave_image, &pal_area->addr, &pal_area->size, &enclave_entry_addr);
    if (ret < 0) {
        SGX_DBG(DBG_E, "Scanning Pal binary (%s) failed: %d\n", ENCLAVE_FILENAME, -ret);
        goto out;
    }

    struct mem_area* exec_area = NULL;
    if (enclave->exec != -1) {
        areas[area_num] = (struct mem_area) {
            .desc = "exec", .skip_eextend = false, .is_binary = true,
            .fd = enclave->exec, .addr = 0, .size = 0 /* set below */,
            .prot = PROT_WRITE, .type = SGX_PAGE_REG
        };
        exec_area = &areas[area_num++];

        ret = scan_enclave_binary(enclave->exec, &exec_area->addr, &exec_area->size, NULL);
        if (ret < 0) {
            SGX_DBG(DBG_E, "Scanning application binary failed: %d\n", -ret);
            goto out;
        }
    }

    unsigned long populating = enclave->size;
    for (int i = 0 ; i < area_num ; i++) {
        if (areas[i].addr)
            continue;
        areas[i].addr = populating - areas[i].size;
        if (&areas[i] == exec_area)
            populating = areas[i].addr;
        else
            populating = areas[i].addr - MEMORY_GAP;
    }

    enclave_entry_addr += pal_area->addr;

    if (exec_area) {
        if (exec_area->addr + exec_area->size > pal_area->addr) {
            SGX_DBG(DBG_E, "Application binary overlaps with Pal binary\n");
            ret = -EINVAL;
            goto out;
        }

        if (exec_area->addr + exec_area->size < populating) {
            if (populating > heap_min) {
                unsigned long addr = exec_area->addr + exec_area->size;
                if (addr < heap_min)
                    addr = heap_min;

                areas[area_num] = (struct mem_area) {
                    .desc = "free", .skip_eextend = true, .is_binary = false,
                    .fd = -1, .addr = addr, .size = populating - addr,
                    .prot = PROT_READ | PROT_WRITE | PROT_EXEC, .type = SGX_PAGE_REG
                };
                area_num++;
            }

            populating = exec_area->addr;
        }
    }

    if (populating > heap_min) {
        areas[area_num] = (struct mem_area) {
            .desc = "free", .skip_eextend = true, .is_binary = false,
            .fd = -1, .addr = heap_min, .size = populating - heap_min,
            .prot = PROT_READ | PROT_WRITE | PROT_EXEC, .type = SGX_PAGE_REG
        };
        area_num++;
    }

    for (int i = 0 ; i < area_num ; i++) {
        if (areas[i].fd != -1 && areas[i].is_binary) {
            ret = load_enclave_binary(&enclave_secs, areas[i].fd, areas[i].addr, areas[i].prot);
            if (ret < 0) {
                SGX_DBG(DBG_E, "Loading enclave binary failed: %d\n", -ret);
                goto out;
            }
            continue;
        }

        void * data = NULL;

        if (strcmp_static(areas[i].desc, "tls")) {
            data = (void *) INLINE_SYSCALL(mmap, 6, NULL, areas[i].size,
                                           PROT_READ|PROT_WRITE,
                                           MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
            if (data == (void *)-1 || data == NULL) {
                /* Note that Graphene currently doesn't handle 0x0 addresses */
                SGX_DBG(DBG_E, "Allocating memory for tls pages failed\n");
                goto out;
            }

            for (uint32_t t = 0 ; t < enclave->thread_num ; t++) {
                struct enclave_tls * gs = data + pagesize * t;
                memset(gs, 0, pagesize);
                assert(sizeof(*gs) <= pagesize);
                gs->common.self = (PAL_TCB *)(
                    tls_area->addr + pagesize * t + enclave_secs.baseaddr);
                gs->enclave_size = enclave->size;
                gs->tcs_offset = tcs_area->addr + pagesize * t;
                gs->initial_stack_offset =
                    stack_areas[t].addr + ENCLAVE_STACK_SIZE;
                gs->ssa = (void *) ssa_area->addr +
                    enclave->ssaframesize * SSAFRAMENUM * t +
                    enclave_secs.baseaddr;
                gs->gpr = gs->ssa +
                    enclave->ssaframesize - sizeof(sgx_arch_gpr_t);
                gs->manifest_size = manifest_size;
                gs->heap_min = (void *) enclave_secs.baseaddr + heap_min;
                gs->heap_max = (void *) enclave_secs.baseaddr + pal_area->addr - MEMORY_GAP;
                if (exec_area) {
                    gs->exec_addr = (void *) enclave_secs.baseaddr + exec_area->addr;
                    gs->exec_size = exec_area->size;
                }
            }
        } else if (strcmp_static(areas[i].desc, "tcs")) {
            data = (void *) INLINE_SYSCALL(mmap, 6, NULL, areas[i].size,
                                           PROT_READ|PROT_WRITE,
                                           MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
            if (data == (void *)-1 || data == NULL) {
                /* Note that Graphene currently doesn't handle 0x0 addresses */
                SGX_DBG(DBG_E, "Allocating memory for tcs pages failed\n");
                goto out;
            }

            for (uint32_t t = 0 ; t < enclave->thread_num ; t++) {
                sgx_arch_tcs_t * tcs = data + pagesize * t;
                memset(tcs, 0, pagesize);
                tcs->ossa = ssa_area->addr +
                    enclave->ssaframesize * SSAFRAMENUM * t;
                tcs->nssa = SSAFRAMENUM;
                tcs->oentry = enclave_entry_addr;
                tcs->ofsbasgx = 0;
                tcs->ogsbasgx = tls_area->addr + t * pagesize;
                tcs->fslimit = 0xfff;
                tcs->gslimit = 0xfff;
                tcs_addrs[t] = (void *) enclave_secs.baseaddr + tcs_area->addr
                               + pagesize * t;
            }
        } else if (areas[i].fd != -1) {
            data = (void *) INLINE_SYSCALL(mmap, 6, NULL, areas[i].size,
                                           PROT_READ,
                                           MAP_FILE|MAP_PRIVATE,
                                           areas[i].fd, 0);
            if (data == (void *)-1 || data == NULL) {
                /* Note that Graphene currently doesn't handle 0x0 addresses */
                SGX_DBG(DBG_E, "Allocating memory for file %s failed\n", areas[i].desc);
                goto out;
            }
        }

        ret = add_pages_to_enclave(&enclave_secs, (void *) areas[i].addr, data, areas[i].size,
                                   areas[i].type, areas[i].prot, areas[i].skip_eextend, areas[i].desc);

        if (data)
            INLINE_SYSCALL(munmap, 2, data, areas[i].size);

        if (ret < 0) {
            SGX_DBG(DBG_E, "Adding pages (%s) to enclave failed: %d\n", areas[i].desc, -ret);
            goto out;
        }
    }

    ret = init_enclave(&enclave_secs, &enclave_sigstruct, &enclave_token);
    if (ret < 0) {
        SGX_DBG(DBG_E, "Initializing enclave failed: %d\n", -ret);
        goto out;
    }

    create_tcs_mapper((void *) enclave_secs.baseaddr + tcs_area->addr,
                      enclave->thread_num);

    struct enclave_dbginfo * dbg = (void *)
        INLINE_SYSCALL(mmap, 6, DBGINFO_ADDR,
                       sizeof(struct enclave_dbginfo),
                       PROT_READ|PROT_WRITE,
                       MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
                       -1, 0);
    if (IS_ERR_P(dbg)) {
        SGX_DBG(DBG_E, "Cannot allocate debug information (GDB will not work)\n");
    } else {
        dbg->pid = INLINE_SYSCALL(getpid, 0);
        dbg->base = enclave->baseaddr;
        dbg->size = enclave->size;
        dbg->ssaframesize = enclave->ssaframesize;
        dbg->aep = async_exit_pointer;
        dbg->thread_tids[0] = dbg->pid;
        for (int i = 0 ; i < MAX_DBG_THREADS ; i++)
            dbg->tcs_addrs[i] = tcs_addrs[i];
    }

    ret = 0;

out:
    if (enclave_image >= 0)
        INLINE_SYSCALL(close, 1, enclave_image);
    return ret;
}
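
/* Create a UDP socket configured for sending to the multicast group
 * (IP_MULTICAST_IF). Returns the socket fd or a negative errno. */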
static int mcast_s (int port)
{
    struct sockaddr_in addr;
    int ret = 0;

    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = INADDR_ANY;
    addr.sin_port = htons(port);

    int fd = INLINE_SYSCALL(socket, 3, AF_INET, SOCK_DGRAM, 0);
    if (IS_ERR(fd))
        return -ERRNO(fd);

    ret = INLINE_SYSCALL(setsockopt, 5, fd, IPPROTO_IP, IP_MULTICAST_IF,
                         &addr.sin_addr.s_addr, sizeof(addr.sin_addr.s_addr));
    if (IS_ERR(ret))
        return -ERRNO(ret);

    return fd;
}
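
/* Create a UDP socket bound to the given port and joined to MCAST_GROUP via
 * IP_ADD_MEMBERSHIP. Returns the socket fd or a negative errno. */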
static int mcast_c (int port)
{
    int ret = 0, fd;

    struct sockaddr_in addr;
    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = INADDR_ANY;
    addr.sin_port = htons(port);

    fd = INLINE_SYSCALL(socket, 3, AF_INET, SOCK_DGRAM, 0);
    if (IS_ERR(fd))
        return -ERRNO(fd);

    int reuse = 1;
    INLINE_SYSCALL(setsockopt, 5, fd, SOL_SOCKET, SO_REUSEADDR,
                   &reuse, sizeof(reuse));

    ret = INLINE_SYSCALL(bind, 3, fd, &addr, sizeof(addr));
    if (IS_ERR(ret))
        return -ERRNO(ret);

    ret = INLINE_SYSCALL(setsockopt, 5, fd, IPPROTO_IP, IP_MULTICAST_IF,
                         &addr.sin_addr.s_addr, sizeof(addr.sin_addr.s_addr));
    if (IS_ERR(ret))
        return -ERRNO(ret);

    inet_pton4(MCAST_GROUP, sizeof(MCAST_GROUP) - 1,
               &addr.sin_addr.s_addr);

    struct ip_mreq group;
    group.imr_multiaddr.s_addr = addr.sin_addr.s_addr;
    group.imr_interface.s_addr = INADDR_ANY;

    ret = INLINE_SYSCALL(setsockopt, 5, fd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
                         &group, sizeof(group));
    if (IS_ERR(ret))
        return -ERRNO(ret);

    return fd;
}
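
/* Weak pseudo-random generator: repeatedly hashes a time-seeded state with
 * hash64(). Used for instance IDs and multicast ports; not cryptographically
 * strong. */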
static unsigned long randval = 0;

void getrand (void * buffer, size_t size)
{
    size_t bytes = 0;

    while (bytes + sizeof(uint64_t) <= size) {
        *(uint64_t*) (buffer + bytes) = randval;
        randval = hash64(randval);
        bytes += sizeof(uint64_t);
    }

    if (bytes < size) {
        memcpy(buffer + bytes, &randval, size - bytes);
        randval = hash64(randval);
    }
}
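
/* Pick a random instance ID and derive the pipe name prefix for this
 * Graphene instance. */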
static void create_instance (struct pal_sec * pal_sec)
{
    unsigned int id;
    getrand(&id, sizeof(id));
    snprintf(pal_sec->pipe_prefix, sizeof(pal_sec->pipe_prefix),
             "/graphene/%x/", id);
    pal_sec->instance_id = id;
}
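
/* mmap the manifest file and parse it into a newly allocated config_store. */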
int load_manifest (int fd, struct config_store ** config_ptr)
{
    int ret = 0;

    int nbytes = INLINE_SYSCALL(lseek, 3, fd, 0, SEEK_END);
    if (IS_ERR(nbytes)) {
        SGX_DBG(DBG_E, "Cannot detect size of manifest file\n");
        return -ERRNO(nbytes);
    }

    struct config_store * config = malloc(sizeof(struct config_store));
    if (!config) {
        SGX_DBG(DBG_E, "Not enough memory for config_store of manifest\n");
        return -ENOMEM;
    }

    void * config_raw = (void *)
        INLINE_SYSCALL(mmap, 6, NULL, nbytes, PROT_READ, MAP_PRIVATE, fd, 0);
    if (IS_ERR_P(config_raw)) {
        SGX_DBG(DBG_E, "Cannot mmap manifest file\n");
        ret = -ERRNO_P(config_raw);
        goto out;
    }

    config->raw_data = config_raw;
    config->raw_size = nbytes;
    config->malloc = malloc;
    config->free = NULL;

    const char * errstring = NULL;
    ret = read_config(config, NULL, &errstring);
    if (ret < 0) {
        SGX_DBG(DBG_E, "Cannot read manifest: %s\n", errstring);
        goto out;
    }

    *config_ptr = config;
    ret = 0;

out:
    if (ret < 0) {
        if (config)
            free(config);
        if (!IS_ERR_P(config_raw))
            INLINE_SYSCALL(munmap, 2, config_raw, nbytes);
    }
    return ret;
}
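
/* Top-level loading path: open and parse the manifest, locate the executable,
 * sigstruct and token files, build the enclave, set up the multicast channel
 * and signal handling, then enter the enclave via ecall_enclave_start(). */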
static int load_enclave (struct pal_enclave * enclave,
                         char * manifest_uri,
                         char * exec_uri,
                         char * args, size_t args_size,
                         char * env, size_t env_size,
                         bool exec_uri_inferred)
{
    struct pal_sec * pal_sec = &enclave->pal_sec;
    int ret;
    const char * errstring;
    struct timeval tv;

#if PRINT_ENCLAVE_STAT == 1
    INLINE_SYSCALL(gettimeofday, 2, &tv, NULL);
    pal_sec->start_time = tv.tv_sec * 1000000UL + tv.tv_usec;
#endif

    ret = open_gsgx();
    if (ret < 0)
        return ret;

    if (!is_wrfsbase_supported())
        return -EPERM;

    INLINE_SYSCALL(gettimeofday, 2, &tv, NULL);
    randval = tv.tv_sec * 1000000UL + tv.tv_usec;

    pal_sec->pid = INLINE_SYSCALL(getpid, 0);
    pal_sec->uid = INLINE_SYSCALL(getuid, 0);
    pal_sec->gid = INLINE_SYSCALL(getgid, 0);

#ifdef DEBUG
    size_t env_i = 0;
    while (env_i < env_size) {
        if (strcmp_static(&env[env_i], "IN_GDB=1")) {
            SGX_DBG(DBG_I, "[ Running under GDB ]\n");
            pal_sec->in_gdb = true;
        }

        if (strcmp_static(&env[env_i], "LD_PRELOAD=")) {
            uint64_t env_i_size = strnlen(&env[env_i], env_size - env_i) + 1;
            memmove(&env[env_i], &env[env_i + env_i_size], env_size - env_i - env_i_size);
            env_size -= env_i_size;
            continue;
        }

        env_i += strnlen(&env[env_i], env_size - env_i) + 1;
    }
#endif

    char cfgbuf[CONFIG_MAX];

    enclave->manifest = INLINE_SYSCALL(open, 3, manifest_uri + 5,
                                       O_RDONLY|O_CLOEXEC, 0);
    if (IS_ERR(enclave->manifest)) {
        SGX_DBG(DBG_E, "Cannot open manifest %s\n", manifest_uri);
        return -EINVAL;
    }

    ret = load_manifest(enclave->manifest, &enclave->config);
    if (ret < 0) {
        SGX_DBG(DBG_E, "Invalid manifest: %s\n", manifest_uri);
        return -EINVAL;
    }

    // A manifest can specify an executable with a different base name
    // than the manifest itself. Always give the exec field of the manifest
    // precedence if specified.
    if (get_config(enclave->config, "loader.exec", cfgbuf, CONFIG_MAX) > 0) {
        exec_uri = resolve_uri(cfgbuf, &errstring);
        exec_uri_inferred = false;
        if (!exec_uri) {
            SGX_DBG(DBG_E, "%s: %s\n", errstring, cfgbuf);
            return -EINVAL;
        }
    }

    if (exec_uri) {
        enclave->exec = INLINE_SYSCALL(open, 3,
                                       exec_uri + static_strlen("file:"),
                                       O_RDONLY|O_CLOEXEC, 0);
        if (IS_ERR(enclave->exec)) {
            if (exec_uri_inferred) {
                // It is valid for an enclave not to have an executable.
                // We need to catch the case where we inferred the executable
                // from the manifest file name, but it doesn't exist, and let
                // the enclave go a bit further. Go ahead and warn the user,
                // though.
                SGX_DBG(DBG_I, "Inferred executable cannot be opened: %s. This may be ok, "
                        "or may represent a manifest misconfiguration. This typically "
                        "represents advanced usage, and if it is not what you intended, "
                        "try setting the loader.exec field in the manifest.\n", exec_uri);
                enclave->exec = -1;
            } else {
                SGX_DBG(DBG_E, "Cannot open executable %s\n", exec_uri);
                return -EINVAL;
            }
        }
    } else {
        enclave->exec = -1;
    }

    if (get_config(enclave->config, "sgx.sigfile", cfgbuf, CONFIG_MAX) < 0) {
        SGX_DBG(DBG_E, "Sigstruct file not found ('sgx.sigfile' must be specified in manifest)\n");
        return -EINVAL;
    }

    char * sig_uri = resolve_uri(cfgbuf, &errstring);
    if (!sig_uri) {
        SGX_DBG(DBG_E, "%s: %s\n", errstring, cfgbuf);
        return -EINVAL;
    }

    if (!strcmp_static(sig_uri + strlen(sig_uri) - 4, ".sig")) {
        SGX_DBG(DBG_E, "Invalid sigstruct file URI as %s\n", cfgbuf);
        free(sig_uri);
        return -EINVAL;
    }

    enclave->sigfile = INLINE_SYSCALL(open, 3, sig_uri + 5, O_RDONLY|O_CLOEXEC, 0);
    if (IS_ERR(enclave->sigfile)) {
        SGX_DBG(DBG_E, "Cannot open sigstruct file %s\n", sig_uri);
        free(sig_uri);
        return -EINVAL;
    }

    char * token_uri = alloc_concat(sig_uri, strlen(sig_uri) - 4, ".token", -1);
    free(sig_uri);

    enclave->token = INLINE_SYSCALL(open, 3, token_uri + 5, O_RDONLY|O_CLOEXEC, 0);
    if (IS_ERR(enclave->token)) {
        SGX_DBG(DBG_E, "Cannot open token \'%s\'. Use \'"
                PAL_FILE("pal-sgx-get-token")
                "\' on the runtime host or run \'make SGX_RUN=1\' "
                "in the Graphene source to create the token file.\n",
                token_uri);
        free(token_uri);
        return -EINVAL;
    }
    SGX_DBG(DBG_I, "Token file: %s\n", token_uri);
    free(token_uri);

    ret = initialize_enclave(enclave);
    if (ret < 0)
        return ret;

    if (!pal_sec->instance_id)
        create_instance(&enclave->pal_sec);

    memcpy(pal_sec->manifest_name, manifest_uri, strlen(manifest_uri) + 1);

    if (enclave->exec == -1) {
        memset(pal_sec->exec_name, 0, sizeof(PAL_SEC_STR));
    } else {
        memcpy(pal_sec->exec_name, exec_uri, strlen(exec_uri) + 1);
    }

    if (!pal_sec->mcast_port) {
        unsigned short mcast_port;
        getrand(&mcast_port, sizeof(unsigned short));
        pal_sec->mcast_port = mcast_port > 1024 ? mcast_port : mcast_port + 1024;
    }

    if ((ret = mcast_s(pal_sec->mcast_port)) >= 0) {
        pal_sec->mcast_srv = ret;
        if ((ret = mcast_c(pal_sec->mcast_port)) >= 0) {
            pal_sec->mcast_cli = ret;
        } else {
            INLINE_SYSCALL(close, 1, pal_sec->mcast_srv);
            pal_sec->mcast_srv = 0;
        }
    }

    ret = sgx_signal_setup();
    if (ret < 0)
        return ret;

    current_enclave = enclave;
    map_tcs(INLINE_SYSCALL(gettid, 0));

    /* start running trusted PAL */
    ecall_enclave_start(args, args_size, env, env_size);

#if PRINT_ENCLAVE_STAT == 1
    PAL_NUM exit_time = 0;
    INLINE_SYSCALL(gettimeofday, 2, &tv, NULL);
    exit_time = tv.tv_sec * 1000000UL + tv.tv_usec;
#endif

    unmap_tcs();
    INLINE_SYSCALL(exit, 0);
    return 0;
}
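
/* Untrusted loader entry point: determine the executable and manifest URIs
 * from the command line (or from the parent, for a child process), then hand
 * off to load_enclave(). */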
int main (int argc, char ** argv, char ** envp)
{
    char * manifest_uri = NULL;
    char * exec_uri = NULL;
    const char * pal_loader = argv[0];
    int ret = 0;
    bool exec_uri_inferred = false; // Handle the case where the exec uri is
                                    // inferred from the manifest name somewhat
                                    // differently

    argc--;
    argv++;

    struct pal_enclave * enclave = malloc(sizeof(struct pal_enclave));
    if (!enclave)
        return -ENOMEM;

    memset(enclave, 0, sizeof(struct pal_enclave));

    int is_child = sgx_init_child_process(&enclave->pal_sec);
    if (is_child < 0) {
        ret = is_child;
        goto out;
    }

    if (!is_child) {
        /* occupy PROC_INIT_FD so no one will use it */
        INLINE_SYSCALL(dup2, 2, 0, PROC_INIT_FD);

        if (!argc)
            goto usage;

        if (strcmp_static(argv[0], "file:")) {
            exec_uri = alloc_concat(argv[0], -1, NULL, -1);
        } else {
            exec_uri = alloc_concat("file:", -1, argv[0], -1);
        }
    } else {
        exec_uri = alloc_concat(enclave->pal_sec.exec_name, -1, NULL, -1);
    }

    int fd = INLINE_SYSCALL(open, 3, exec_uri + 5, O_RDONLY|O_CLOEXEC, 0);
    if (IS_ERR(fd)) {
        SGX_DBG(DBG_E, "Executable not found\n");
        goto usage;
    }

    char filebuf[4];
    /* Check if the first argument is an executable. If it is, try finding
       all the possible manifest files. */
    INLINE_SYSCALL(read, 3, fd, filebuf, 4);
    INLINE_SYSCALL(close, 1, fd);

    char sgx_manifest[URI_MAX];
    int len = get_base_name(exec_uri + static_strlen("file:"), sgx_manifest,
                            URI_MAX);
    if (len < 0) {
        ret = len;
        goto out;
    }

    if (strcmp_static(sgx_manifest + len - strlen(".manifest"), ".manifest")) {
        strcpy_static(sgx_manifest + len, ".sgx", URI_MAX - (size_t)len);
    } else if (!strcmp_static(sgx_manifest + len - strlen(".manifest.sgx"),
                              ".manifest.sgx")) {
        strcpy_static(sgx_manifest + len, ".manifest.sgx", URI_MAX - (size_t)len);
    }

    if (memcmp(filebuf, "\177ELF", 4)) {
        // In this case the manifest is given as the executable. Set
        // manifest_uri to sgx_manifest (should be the same), and drop the
        // .manifest* from exec_uri, so that the program loads properly.
        manifest_uri = sgx_manifest;
        size_t exec_len = strlen(exec_uri);
        if (strcmp_static(exec_uri + exec_len - strlen(".manifest"), ".manifest")) {
            exec_uri[exec_len - strlen(".manifest")] = '\0';
            exec_uri_inferred = true;
        } else if (strcmp_static(exec_uri + exec_len - strlen(".manifest.sgx"), ".manifest.sgx")) {
            exec_uri[exec_len - strlen(".manifest.sgx")] = '\0';
            exec_uri_inferred = true;
        }
    }

    fd = INLINE_SYSCALL(open, 3, sgx_manifest, O_RDONLY|O_CLOEXEC, 0);
    if (!IS_ERR(fd)) {
        manifest_uri = alloc_concat("file:", static_strlen("file:"),
                                    sgx_manifest, -1);
        INLINE_SYSCALL(close, 1, fd);
    } else if (!manifest_uri) {
        SGX_DBG(DBG_E, "Cannot open manifest file: %s\n", sgx_manifest);
        goto usage;
    }

    SGX_DBG(DBG_I, "Manifest file: %s\n", manifest_uri);
    if (exec_uri)
        SGX_DBG(DBG_I, "Executable file: %s\n", exec_uri);
    else
        SGX_DBG(DBG_I, "Executable file not found\n");

    /*
     * While C does not guarantee that the argv[i] and envp[i] strings are
     * contiguous, we know that we are running on Linux, which does this. This
     * saves us creating a copy of all argv and envp strings.
     */
    char * args = argv[0];
    size_t args_size = argc > 0 ? (argv[argc - 1] - argv[0]) + strlen(argv[argc - 1]) + 1: 0;

    int envc = 0;
    while (envp[envc] != NULL) {
        envc++;
    }
    char * env = envp[0];
    size_t env_size = envc > 0 ? (envp[envc - 1] - envp[0]) + strlen(envp[envc - 1]) + 1: 0;

    ret = load_enclave(enclave, manifest_uri, exec_uri, args, args_size,
                       env, env_size, exec_uri_inferred);

out:
    if (enclave->manifest >= 0)
        INLINE_SYSCALL(close, 1, enclave->manifest);
    if (enclave->exec >= 0)
        INLINE_SYSCALL(close, 1, enclave->exec);
    if (enclave->sigfile >= 0)
        INLINE_SYSCALL(close, 1, enclave->sigfile);
    if (enclave->token >= 0)
        INLINE_SYSCALL(close, 1, enclave->token);
    if (enclave)
        free(enclave);
    if (exec_uri)
        free(exec_uri);
    if (manifest_uri && manifest_uri != sgx_manifest)
        free(manifest_uri);

    return ret;

usage:
    SGX_DBG(DBG_E, "USAGE: %s [executable|manifest] args ...\n", pal_loader);
    ret = -EINVAL;
    goto out;
}