/* sgx_main.c */

/* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */
/* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */

#include <pal_linux.h>
#include <pal_rtld.h>

#include "sgx_internal.h"
#include "sgx_tls.h"
#include "sgx_enclave.h"
#include "debugger/sgx_gdb.h"

#include <asm/fcntl.h>
#include <asm/socket.h>
#include <linux/fs.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <asm/errno.h>

#include <sysdep.h>
#include <sysdeps/generic/ldsodefs.h>

#define ENCLAVE_FILENAME RUNTIME_FILE("libpal-Linux-SGX.so")

unsigned long pagesize  = PRESET_PAGESIZE;
unsigned long pagemask  = ~(PRESET_PAGESIZE - 1);
unsigned long pageshift = PRESET_PAGESIZE - 1;
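
/* alloc_concat(): allocate a new NUL-terminated string that is the
 * concatenation of p (plen bytes) and s (slen bytes). Passing -1 for a
 * length means "use strlen()"; either pointer may be NULL. The caller owns
 * the returned buffer. */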
static inline
const char * alloc_concat(const char * p, int plen,
                          const char * s, int slen)
{
    plen = (plen != -1) ? plen : (p ? strlen(p) : 0);
    slen = (slen != -1) ? slen : (s ? strlen(s) : 0);

    char * buf = malloc(plen + slen + 1);
    if (!buf)
        return NULL;

    if (plen)
        memcpy(buf, p, plen);
    if (slen)
        memcpy(buf + plen, s, slen);

    buf[plen + slen] = '\0';
    return buf;
}
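
/* parse_int(): parse an unsigned integer in decimal, octal ("0" prefix) or
 * hexadecimal ("0x" prefix) notation, with an optional K/M/G suffix that
 * scales the value by 2^10, 2^20 or 2^30. Used for manifest values such as
 * sgx.enclave_size. */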
static unsigned long parse_int (const char * str)
{
    unsigned long num = 0;
    int radix = 10;
    char c;

    if (str[0] == '0') {
        str++;
        radix = 8;
        if (str[0] == 'x') {
            str++;
            radix = 16;
        }
    }

    while ((c = *(str++))) {
        int val;
        if (c >= 'A' && c <= 'F')
            val = c - 'A' + 10;
        else if (c >= 'a' && c <= 'f')
            val = c - 'a' + 10;
        else if (c >= '0' && c <= '9')
            val = c - '0';
        else
            break;
        if (val >= radix)
            break;
        num = num * radix + val;
    }

    if (c == 'G' || c == 'g')
        num *= 1024 * 1024 * 1024;
    else if (c == 'M' || c == 'm')
        num *= 1024 * 1024;
    else if (c == 'K' || c == 'k')
        num *= 1024;

    return num;
}
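
/* resolve_uri(): normalize a "file:" URI from the manifest into a freshly
 * allocated "file:<normalized path>" string, or return NULL and set
 * *errstring if the URI is not a file URI or the path cannot be
 * normalized. */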
static const char * resolve_uri (const char * uri, const char ** errstring)
{
    if (!strpartcmp_static(uri, "file:")) {
        *errstring = "Invalid URI";
        return NULL;
    }

    char path_buf[URI_MAX];
    int len = get_norm_path(uri + 5, path_buf, 0, URI_MAX);
    if (len < 0) {
        *errstring = "Invalid URI";
        return NULL;
    }

    return alloc_concat("file:", static_strlen("file:"), path_buf, len);
}
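
/* scan_enclave_binary(): read the ELF header and program headers of the
 * binary referred to by fd and report the lowest page-aligned load address,
 * the total size spanned by the PT_LOAD segments, and (optionally) the ELF
 * entry point. Nothing is mapped here; this is only used to lay out the
 * enclave address space. */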
static
int scan_enclave_binary (int fd, unsigned long * base, unsigned long * size,
                         unsigned long * entry)
{
    int ret = 0;

    if (IS_ERR(ret = INLINE_SYSCALL(lseek, 3, fd, 0, SEEK_SET)))
        return -ERRNO(ret);

    char filebuf[FILEBUF_SIZE];
    ret = INLINE_SYSCALL(read, 3, fd, filebuf, FILEBUF_SIZE);
    if (IS_ERR(ret))
        return -ERRNO(ret);

    const ElfW(Ehdr) * header = (void *) filebuf;
    const ElfW(Phdr) * phdr = (void *) filebuf + header->e_phoff;
    const ElfW(Phdr) * ph;

    struct loadcmd {
        ElfW(Addr) mapstart, mapend;
    } loadcmds[16], *c;
    int nloadcmds = 0;

    for (ph = phdr ; ph < &phdr[header->e_phnum] ; ph++)
        if (ph->p_type == PT_LOAD) {
            if (nloadcmds == 16)
                return -EINVAL;

            c = &loadcmds[nloadcmds++];
            c->mapstart = ALLOC_ALIGNDOWN(ph->p_vaddr);
            c->mapend = ALLOC_ALIGNUP(ph->p_vaddr + ph->p_memsz);
        }

    /* reject binaries without any PT_LOAD segment */
    if (!nloadcmds)
        return -EINVAL;

    *base = loadcmds[0].mapstart;
    *size = loadcmds[nloadcmds - 1].mapend - loadcmds[0].mapstart;
    if (entry)
        *entry = header->e_entry;
    return 0;
}
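
/* load_enclave_binary(): map each PT_LOAD segment of the ELF binary into
 * untrusted memory, zero any trailing partial page and the BSS range, and
 * add the pages to the enclave via add_pages_to_enclave() at the given
 * base, with the segment's R/W/X flags plus any extra prot bits requested
 * by the caller. */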
static
int load_enclave_binary (sgx_arch_secs_t * secs, int fd,
                         unsigned long base, unsigned long prot)
{
    int ret = 0;

    if (IS_ERR(ret = INLINE_SYSCALL(lseek, 3, fd, 0, SEEK_SET)))
        return -ERRNO(ret);

    char filebuf[FILEBUF_SIZE];
    ret = INLINE_SYSCALL(read, 3, fd, filebuf, FILEBUF_SIZE);
    if (IS_ERR(ret))
        return -ERRNO(ret);

    const ElfW(Ehdr) * header = (void *) filebuf;
    const ElfW(Phdr) * phdr = (void *) filebuf + header->e_phoff;
    const ElfW(Phdr) * ph;

    struct loadcmd {
        ElfW(Addr) mapstart, mapend, datastart, dataend, allocend;
        unsigned int mapoff;
        int prot;
    } loadcmds[16], *c;
    int nloadcmds = 0;

    for (ph = phdr ; ph < &phdr[header->e_phnum] ; ph++)
        if (ph->p_type == PT_LOAD) {
            if (nloadcmds == 16)
                return -EINVAL;

            c = &loadcmds[nloadcmds++];
            c->mapstart = ALLOC_ALIGNDOWN(ph->p_vaddr);
            c->mapend = ALLOC_ALIGNUP(ph->p_vaddr + ph->p_filesz);
            c->datastart = ph->p_vaddr;
            c->dataend = ph->p_vaddr + ph->p_filesz;
            c->allocend = ph->p_vaddr + ph->p_memsz;
            c->mapoff = ALLOC_ALIGNDOWN(ph->p_offset);
            c->prot = (ph->p_flags & PF_R ? PROT_READ  : 0)|
                      (ph->p_flags & PF_W ? PROT_WRITE : 0)|
                      (ph->p_flags & PF_X ? PROT_EXEC  : 0)|prot;
        }

    base -= loadcmds[0].mapstart;

    for (c = loadcmds; c < &loadcmds[nloadcmds] ; c++) {
        ElfW(Addr) zero = c->dataend;
        ElfW(Addr) zeroend = ALLOC_ALIGNUP(c->allocend);
        ElfW(Addr) zeropage = ALLOC_ALIGNUP(zero);

        if (zeroend < zeropage)
            zeropage = zeroend;

        if (c->mapend > c->mapstart) {
            void * addr = (void *) INLINE_SYSCALL(mmap, 6, NULL,
                                                  c->mapend - c->mapstart,
                                                  PROT_READ|PROT_WRITE,
                                                  MAP_PRIVATE | MAP_FILE,
                                                  fd, c->mapoff);
            if (IS_ERR_P(addr))
                return -ERRNO_P(addr);

            if (c->datastart > c->mapstart)
                memset(addr, 0, c->datastart - c->mapstart);
            if (zeropage > zero)
                memset(addr + zero - c->mapstart, 0, zeropage - zero);

            ret = add_pages_to_enclave(secs, (void *) base + c->mapstart, addr,
                                       c->mapend - c->mapstart,
                                       SGX_PAGE_REG, c->prot, 0,
                                       (c->prot & PROT_EXEC) ? "code" : "data");

            INLINE_SYSCALL(munmap, 2, addr, c->mapend - c->mapstart);

            if (ret < 0)
                return ret;
        }

        if (zeroend > zeropage) {
            ret = add_pages_to_enclave(secs, (void *) base + zeropage, NULL,
                                       zeroend - zeropage,
                                       SGX_PAGE_REG, c->prot, 1, "bss");
            if (ret < 0)
                return ret;
        }
    }

    return 0;
}
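
/* initialize_enclave(): create and populate the SGX enclave. The layout is
 * driven by the manifest: the enclave size and thread count are read from
 * it; the PAL binary (ENCLAVE_FILENAME), the optional executable, the
 * manifest itself, per-thread SSA/TCS/TLS pages and stacks are added as
 * enclave pages; the remaining space is added as free ("heap") pages. For
 * reference, the manifest keys consumed below look roughly like this (the
 * values are illustrative, not defaults taken from this file):
 *
 *     sgx.enclave_size   = "256M"   # must be a power of two
 *     sgx.thread_num     = "4"
 *     sgx.static_address = "1"      # optional: pin at ENCLAVE_MIN_ADDR
 *     sgx.sigfile        = "file:app.sig"
 */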
int initialize_enclave (struct pal_enclave * enclave)
{
    int ret = 0;
    int enclave_image;
    int enclave_thread_num = 1;
    sgx_arch_token_t enclave_token;
    sgx_arch_sigstruct_t enclave_sigstruct;
    sgx_arch_secs_t enclave_secs;
    unsigned long enclave_entry_addr;
    unsigned long enclave_thread_gprs[MAX_DBG_THREADS] = { 0 };

#define TRY(func, ...)                                                      \
    ({                                                                      \
        ret = func(__VA_ARGS__);                                            \
        if (ret < 0) {                                                      \
            SGX_DBG(DBG_E, "initializing enclave failed: " #func ": %d\n",  \
                    -ret);                                                  \
            goto err;                                                       \
        } ret;                                                              \
    })

    enclave_image = INLINE_SYSCALL(open, 3, ENCLAVE_FILENAME, O_RDONLY, 0);
    if (IS_ERR(enclave_image)) {
        SGX_DBG(DBG_E, "cannot find %s\n", ENCLAVE_FILENAME);
        ret = -ERRNO(enclave_image);
        goto err;
    }

    char cfgbuf[CONFIG_MAX];

    /* Reading sgx.enclave_size from manifest */
    if (get_config(enclave->config, "sgx.enclave_size", cfgbuf, CONFIG_MAX) <= 0) {
        SGX_DBG(DBG_E, "enclave_size is not specified\n");
        ret = -EINVAL;
        goto err;
    }

    enclave->size = parse_int(cfgbuf);

    /* DEP 1/21/17: SGX currently only supports power-of-two enclaves.
     * Give users a better warning about this. */
    if (enclave->size & (enclave->size - 1)) {
        SGX_DBG(DBG_E, "Enclave size not a power of two. SGX requires power-of-two enclaves.\n");
        ret = -EINVAL;
        goto err;
    }

    /* Reading sgx.thread_num from manifest; default to a single thread */
    if (get_config(enclave->config, "sgx.thread_num", cfgbuf, CONFIG_MAX) > 0)
        enclave_thread_num = parse_int(cfgbuf);

    if (enclave_thread_num > MAX_DBG_THREADS) {
        SGX_DBG(DBG_E, "Too many threads to debug\n");
        ret = -EINVAL;
        goto err;
    }

    enclave->thread_num = enclave_thread_num;

    /* Reading sgx.static_address from manifest */
    if (get_config(enclave->config, "sgx.static_address", cfgbuf, CONFIG_MAX) > 0 &&
        cfgbuf[0] == '1')
        enclave->baseaddr = ENCLAVE_MIN_ADDR;
    else
        enclave->baseaddr = 0;

    TRY(read_enclave_token, enclave->token, &enclave_token);
    TRY(read_enclave_sigstruct, enclave->sigfile, &enclave_sigstruct);

    TRY(create_enclave,
        &enclave_secs, enclave->baseaddr, enclave->size, &enclave_token);

    enclave->baseaddr = enclave_secs.baseaddr;
    enclave->size = enclave_secs.size;
    enclave->ssaframesize = enclave_secs.ssaframesize * pagesize;

    struct stat stat;
    ret = INLINE_SYSCALL(fstat, 2, enclave->manifest, &stat);
    if (IS_ERR(ret))
        return -ERRNO(ret);
    int manifest_size = stat.st_size;

    /* Start populating enclave memory */
    struct mem_area {
        const char * desc;
        bool is_binary;
        int fd;
        unsigned long addr, size, prot;
        enum sgx_page_type type;
    };

    struct mem_area * areas =
        __alloca(sizeof(areas[0]) * (10 + enclave->thread_num));
    int area_num = 0;

#define set_area(_desc, _is_binary, _fd, _addr, _size, _prot, _type)    \
    ({                                                                  \
        struct mem_area * _a = &areas[area_num++];                      \
        _a->desc = _desc; _a->is_binary = _is_binary;                   \
        _a->fd = _fd; _a->addr = _addr; _a->size = _size;               \
        _a->prot = _prot; _a->type = _type; _a;                         \
    })

    struct mem_area * manifest_area =
        set_area("manifest", false, enclave->manifest,
                 0, ALLOC_ALIGNUP(manifest_size),
                 PROT_READ, SGX_PAGE_REG);
    struct mem_area * ssa_area =
        set_area("ssa", false, -1, 0,
                 enclave->thread_num * enclave->ssaframesize * SSAFRAMENUM,
                 PROT_READ|PROT_WRITE, SGX_PAGE_REG);
    struct mem_area * tcs_area =
        set_area("tcs", false, -1, 0, enclave->thread_num * pagesize,
                 0, SGX_PAGE_TCS);
    struct mem_area * tls_area =
        set_area("tls", false, -1, 0, enclave->thread_num * pagesize,
                 PROT_READ|PROT_WRITE, SGX_PAGE_REG);

    struct mem_area * stack_areas = &areas[area_num];
    for (int t = 0 ; t < enclave->thread_num ; t++)
        set_area("stack", false, -1, 0, ENCLAVE_STACK_SIZE,
                 PROT_READ|PROT_WRITE, SGX_PAGE_REG);

    struct mem_area * pal_area =
        set_area("pal", true, enclave_image, 0, 0, 0, SGX_PAGE_REG);
    TRY(scan_enclave_binary,
        enclave_image, &pal_area->addr, &pal_area->size, &enclave_entry_addr);

    struct mem_area * exec_area = NULL;
    if (enclave->exec != -1) {
        exec_area = set_area("exec", true, enclave->exec, 0, 0,
                             PROT_WRITE, SGX_PAGE_REG);
        TRY(scan_enclave_binary,
            enclave->exec, &exec_area->addr, &exec_area->size, NULL);
    }

    unsigned long populating = enclave->size;
    for (int i = 0 ; i < area_num ; i++) {
        if (areas[i].addr)
            continue;
        areas[i].addr = populating - areas[i].size;
        populating = areas[i].addr - MEMORY_GAP;
    }

    enclave_entry_addr += pal_area->addr;

    if (exec_area) {
        if (exec_area->addr + exec_area->size > pal_area->addr)
            return -EINVAL;

        if (exec_area->addr + exec_area->size < populating) {
            unsigned long addr = exec_area->addr + exec_area->size;
            set_area("free", false, -1, addr, populating - addr,
                     PROT_READ|PROT_WRITE|PROT_EXEC, SGX_PAGE_REG);
        }

        populating = exec_area->addr;
    }

    if (populating > ENCLAVE_MIN_ADDR) {
        unsigned long addr = ENCLAVE_MIN_ADDR;
        set_area("free", false, -1, addr, populating - addr,
                 PROT_READ|PROT_WRITE|PROT_EXEC, SGX_PAGE_REG);
    }

    for (int i = 0 ; i < area_num ; i++) {
        if (areas[i].fd != -1 && areas[i].is_binary) {
            TRY(load_enclave_binary,
                &enclave_secs, areas[i].fd, areas[i].addr, areas[i].prot);
            continue;
        }

        void * data = NULL;

        if (strcmp_static(areas[i].desc, "tls")) {
            data = (void *) INLINE_SYSCALL(mmap, 6, NULL, areas[i].size,
                                           PROT_READ|PROT_WRITE,
                                           MAP_ANON|MAP_PRIVATE, -1, 0);

            for (int t = 0 ; t < enclave->thread_num ; t++) {
                struct enclave_tls * gs = data + pagesize * t;
                gs->self = (void *) tls_area->addr + pagesize * t +
                    enclave_secs.baseaddr;
                gs->initial_stack = (void *)
                    stack_areas[t].addr + ENCLAVE_STACK_SIZE +
                    enclave_secs.baseaddr;
                gs->ssaframesize = enclave->ssaframesize;
                gs->ssa = (void *) ssa_area->addr +
                    enclave->ssaframesize * SSAFRAMENUM * t +
                    enclave_secs.baseaddr;
                gs->gpr = gs->ssa +
                    enclave->ssaframesize - sizeof(sgx_arch_gpr_t);
                enclave_thread_gprs[t] = (unsigned long) gs->gpr;
            }

            goto add_pages;
        }

        if (strcmp_static(areas[i].desc, "tcs")) {
            data = (void *) INLINE_SYSCALL(mmap, 6, NULL, areas[i].size,
                                           PROT_READ|PROT_WRITE,
                                           MAP_ANON|MAP_PRIVATE, -1, 0);

            for (int t = 0 ; t < enclave->thread_num ; t++) {
                sgx_arch_tcs_t * tcs = data + pagesize * t;
                memset(tcs, 0, pagesize);
                tcs->ossa = ssa_area->addr +
                    enclave->ssaframesize * SSAFRAMENUM * t;
                tcs->nssa = 2;
                tcs->oentry = enclave_entry_addr;
                tcs->ofsbasgx = 0;
                tcs->ogsbasgx = tls_area->addr + t * pagesize;
                tcs->fslimit = 0xfff;
                tcs->gslimit = 0xfff;
            }

            goto add_pages;
        }

        if (areas[i].fd != -1)
            data = (void *) INLINE_SYSCALL(mmap, 6, NULL, areas[i].size,
                                           PROT_READ,
                                           MAP_FILE|MAP_PRIVATE,
                                           areas[i].fd, 0);

add_pages:
        TRY(add_pages_to_enclave,
            &enclave_secs, (void *) areas[i].addr, data, areas[i].size,
            areas[i].type, areas[i].prot, (areas[i].fd == -1),
            areas[i].desc);

        if (data)
            INLINE_SYSCALL(munmap, 2, data, areas[i].size);
    }

    TRY(init_enclave, &enclave_secs, &enclave_sigstruct, &enclave_token);

    create_tcs_mapper((void *) enclave_secs.baseaddr + tcs_area->addr,
                      enclave->thread_num);

    struct pal_sec * pal_sec = &enclave->pal_sec;

    pal_sec->enclave_addr = (PAL_PTR) (enclave_secs.baseaddr + pal_area->addr);

    pal_sec->heap_min = (void *) enclave_secs.baseaddr + ENCLAVE_MIN_ADDR;
    pal_sec->heap_max = (void *) enclave_secs.baseaddr + pal_area->addr - MEMORY_GAP;

    if (exec_area) {
        pal_sec->exec_addr = (void *) enclave_secs.baseaddr + exec_area->addr;
        pal_sec->exec_size = exec_area->size;
    }

    pal_sec->manifest_addr = (void *) enclave_secs.baseaddr + manifest_area->addr;
    pal_sec->manifest_size = manifest_size;

    memcpy(pal_sec->mrenclave, enclave_secs.mrenclave,
           sizeof(sgx_arch_hash_t));
    memcpy(pal_sec->mrsigner, enclave_secs.mrsigner,
           sizeof(sgx_arch_hash_t));
    memcpy(&pal_sec->enclave_attributes, &enclave_secs.attributes,
           sizeof(sgx_arch_attributes_t));

    struct enclave_dbginfo * dbg = (void *)
        INLINE_SYSCALL(mmap, 6, DBGINFO_ADDR,
                       sizeof(struct enclave_dbginfo),
                       PROT_READ|PROT_WRITE,
                       MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
                       -1, 0);
    if (IS_ERR_P(dbg)) {
        /* debug info is best-effort; failing to map it is not fatal */
        SGX_DBG(DBG_E, "Cannot allocate debug info\n");
        return 0;
    }

    dbg->pid = INLINE_SYSCALL(getpid, 0);
    dbg->base = enclave->baseaddr;
    dbg->size = enclave->size;
    dbg->aep  = (unsigned long) async_exit_pointer;
    dbg->thread_tids[0] = dbg->pid;
    for (int i = 0 ; i < MAX_DBG_THREADS ; i++)
        dbg->thread_gprs[i] = enclave_thread_gprs[i];

    return 0;
err:
    return ret;
}
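
/* mcast_s()/mcast_c(): open the sending and receiving ends of a UDP
 * multicast socket on the given port (group MCAST_GROUP). The resulting
 * descriptors are handed to the enclave via pal_sec->mcast_srv and
 * pal_sec->mcast_cli; both functions return a file descriptor on success
 * or -PAL_ERROR_DENIED on failure. */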
static int mcast_s (int port)
{
    struct sockaddr_in addr;
    int ret = 0;

    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = INADDR_ANY;
    addr.sin_port = htons(port);

    int fd = INLINE_SYSCALL(socket, 3, AF_INET, SOCK_DGRAM, 0);
    if (IS_ERR(fd))
        return -PAL_ERROR_DENIED;

    ret = INLINE_SYSCALL(setsockopt, 5, fd, IPPROTO_IP, IP_MULTICAST_IF,
                         &addr.sin_addr.s_addr, sizeof(addr.sin_addr.s_addr));
    if (IS_ERR(ret))
        return -PAL_ERROR_DENIED;

    return fd;
}

static int mcast_c (int port)
{
    int ret = 0, fd;

    struct sockaddr_in addr;
    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = INADDR_ANY;
    addr.sin_port = htons(port);

    fd = INLINE_SYSCALL(socket, 3, AF_INET, SOCK_DGRAM, 0);
    if (IS_ERR(fd))
        return -PAL_ERROR_DENIED;

    int reuse = 1;
    INLINE_SYSCALL(setsockopt, 5, fd, SOL_SOCKET, SO_REUSEADDR,
                   &reuse, sizeof(reuse));

    ret = INLINE_SYSCALL(bind, 3, fd, &addr, sizeof(addr));
    if (IS_ERR(ret))
        return -PAL_ERROR_DENIED;

    ret = INLINE_SYSCALL(setsockopt, 5, fd, IPPROTO_IP, IP_MULTICAST_IF,
                         &addr.sin_addr.s_addr, sizeof(addr.sin_addr.s_addr));
    if (IS_ERR(ret))
        return -PAL_ERROR_DENIED;

    inet_pton4(MCAST_GROUP, sizeof(MCAST_GROUP) - 1,
               &addr.sin_addr.s_addr);

    struct ip_mreq group;
    group.imr_multiaddr.s_addr = addr.sin_addr.s_addr;
    group.imr_interface.s_addr = INADDR_ANY;

    ret = INLINE_SYSCALL(setsockopt, 5, fd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
                         &group, sizeof(group));
    if (IS_ERR(ret))
        return -PAL_ERROR_DENIED;

    return fd;
}
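
/* getrand(): fill the buffer with pseudo-random bytes derived by repeatedly
 * applying hash64() to a seed (randval, initialized from gettimeofday() in
 * load_enclave()). This is NOT a cryptographically secure source of
 * randomness; it is only used for values such as the instance ID and the
 * multicast port. Returns the number of bytes written. */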
static unsigned long randval = 0;

int getrand (void * buffer, int size)
{
    unsigned long val;
    int bytes = 0;

    val = randval;
    randval++;

    while (bytes + sizeof(unsigned long) <= size) {
        *(unsigned long *) (buffer + bytes) = val;
        val = hash64(val);
        bytes += sizeof(unsigned long);
    }

    if (bytes < size) {
        switch (size - bytes) {
            case 4:
                *(unsigned int *) (buffer + bytes) = randval & 0xffffffff;
                bytes += 4;
                break;
            case 2:
                *(unsigned short *) (buffer + bytes) = randval & 0xffff;
                bytes += 2;
                break;
            case 1:
                *(unsigned char *) (buffer + bytes) = randval & 0xff;
                bytes++;
                break;
            default:
                break;
        }
        randval = hash64(randval);
    }

    randval = val;
    return bytes;
}
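
/* create_instance(): make sure the Graphene pipe directory exists
 * (GRAPHENE_PIPEDIR, creating GRAPHENE_TEMPDIR first if needed), then pick
 * a random instance ID and create a per-instance subdirectory for pipes.
 * The ID is recorded in pal_sec->instance_id. */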
static int create_instance (struct pal_sec * pal_sec)
{
    int ret = 0;
    const char * path;

    ret = INLINE_SYSCALL(mkdir, 2, (path = GRAPHENE_PIPEDIR), 0777);

    if (IS_ERR(ret) && ERRNO(ret) != EEXIST) {
        if (ERRNO(ret) == ENOENT) {
            ret = INLINE_SYSCALL(mkdir, 2, (path = GRAPHENE_TEMPDIR), 0777);
            if (!IS_ERR(ret)) {
                INLINE_SYSCALL(chmod, 2, GRAPHENE_TEMPDIR, 0777);
                ret = INLINE_SYSCALL(mkdir, 2, (path = GRAPHENE_PIPEDIR), 0777);
            }
        }

        if (IS_ERR(ret)) {
            SGX_DBG(DBG_E, "Cannot create directory %s (%s), "
                    "please check permission\n", path, PAL_STRERROR(-ERRNO(ret)));
            return -PAL_ERROR_DENIED;
        }
    }

    if (!IS_ERR(ret))
        INLINE_SYSCALL(chmod, 2, GRAPHENE_PIPEDIR, 0777);

    unsigned int id;
    do {
        if (!getrand(&id, sizeof(unsigned int))) {
            SGX_DBG(DBG_E, "Unable to generate random numbers\n");
            return -PAL_ERROR_DENIED;
        }

        snprintf(pal_sec->pipe_prefix, sizeof(pal_sec->pipe_prefix),
                 GRAPHENE_PIPEDIR "/%08x/", id);

        ret = INLINE_SYSCALL(mkdir, 2, pal_sec->pipe_prefix, 0700);

        if (IS_ERR(ret) && ERRNO(ret) != EEXIST) {
            SGX_DBG(DBG_E, "Cannot create directory %s (%s), "
                    "please fix permission\n",
                    pal_sec->pipe_prefix, PAL_STRERROR(-ERRNO(ret)));
            return -PAL_ERROR_DENIED;
        }
    } while (IS_ERR(ret));

    pal_sec->instance_id = id;
    return 0;
}
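
/* load_manifest(): mmap the manifest file referred to by fd and parse it
 * into a newly allocated config_store, returned through config_ptr.
 * Returns 0 on success or a negative errno value. */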
int load_manifest (int fd, struct config_store ** config_ptr)
{
    int nbytes = INLINE_SYSCALL(lseek, 3, fd, 0, SEEK_END);
    if (IS_ERR(nbytes))
        return -ERRNO(nbytes);

    struct config_store * config = malloc(sizeof(struct config_store));
    if (!config)
        return -ENOMEM;

    void * config_raw = (void *)
        INLINE_SYSCALL(mmap, 6, NULL, nbytes,
                       PROT_READ|PROT_WRITE, MAP_PRIVATE,
                       fd, 0);
    if (IS_ERR_P(config_raw)) {
        free(config);
        return -ERRNO_P(config_raw);
    }

    config->raw_data = config_raw;
    config->raw_size = nbytes;
    config->malloc   = malloc;
    config->free     = NULL;

    const char * errstring = NULL;
    int ret = read_config(config, NULL, &errstring);
    if (ret < 0) {
        SGX_DBG(DBG_E, "can't read manifest: %s\n", errstring);
        INLINE_SYSCALL(munmap, 2, config_raw, nbytes);
        free(config);
        return ret;
    }

    *config_ptr = config;
    return 0;
}
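
/* load_enclave(): top-level driver for starting an enclave. It opens
 * /dev/gsgx, checks WRFSBASE support, seeds the PRNG, loads the manifest,
 * resolves the executable and the sigstruct/token files, calls
 * initialize_enclave(), fills in the remaining pal_sec fields (instance ID,
 * multicast sockets, manifest/executable names), installs signal handlers,
 * maps a TCS for the current thread and finally enters the enclave through
 * ecall_pal_main(). It does not return to the caller on success. */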
static int load_enclave (struct pal_enclave * enclave,
                         const char * manifest_uri,
                         const char * exec_uri,
                         const char ** arguments, const char ** environments)
{
    struct pal_sec * pal_sec = &enclave->pal_sec;
    int ret;
    const char * errstring;

    ret = open_gsgx();
    if (ret < 0) {
        SGX_DBG(DBG_E, "cannot open device /dev/gsgx, possibly the kernel "
                "module is not loaded.\n");
        return ret;
    }

    ret = check_wrfsbase_support();
    if (ret < 0)
        return ret;
    if (!ret)
        return -EPERM;

    struct timeval tv;
    ret = INLINE_SYSCALL(gettimeofday, 2, &tv, NULL);
    if (IS_ERR(ret))
        return -ERRNO(ret);
    randval = tv.tv_sec * 1000000UL + tv.tv_usec;

    pal_sec->pid = INLINE_SYSCALL(getpid, 0);
    pal_sec->uid = INLINE_SYSCALL(getuid, 0);
    pal_sec->gid = INLINE_SYSCALL(getgid, 0);

#ifdef DEBUG
    for (const char ** e = environments ; *e ; e++) {
        if (strcmp_static(*e, "IN_GDB=1")) {
            SGX_DBG(DBG_I, "being GDB'ed!!!\n");
            pal_sec->in_gdb = true;
        }

        if (strcmp_static(*e, "LD_PRELOAD="))
            *e = "\0";
    }
#endif

    char cfgbuf[CONFIG_MAX];

    enclave->manifest = INLINE_SYSCALL(open, 3, manifest_uri + 5,
                                       O_RDONLY|O_CLOEXEC, 0);
    if (IS_ERR(enclave->manifest)) {
        SGX_DBG(DBG_E, "cannot open manifest %s\n", manifest_uri);
        return -EINVAL;
    }

    ret = load_manifest(enclave->manifest, &enclave->config);
    if (ret < 0) {
        SGX_DBG(DBG_E, "invalid manifest: %s\n", manifest_uri);
        return -EINVAL;
    }

    if (exec_uri == NULL) {
        if (get_config(enclave->config, "loader.exec", cfgbuf, CONFIG_MAX) > 0) {
            exec_uri = resolve_uri(cfgbuf, &errstring);
            if (!exec_uri) {
                SGX_DBG(DBG_E, "%s: %s\n", errstring, cfgbuf);
                return -EINVAL;
            }
        }
    }

    if (exec_uri) {
        enclave->exec = INLINE_SYSCALL(open, 3,
                                       exec_uri + static_strlen("file:"),
                                       O_RDONLY|O_CLOEXEC, 0);
        if (IS_ERR(enclave->exec)) {
            SGX_DBG(DBG_E, "cannot open executable %s\n", exec_uri);
            return -EINVAL;
        }
    } else {
        enclave->exec = -1;
    }

    if (get_config(enclave->config, "sgx.sigfile", cfgbuf, CONFIG_MAX) < 0) {
        SGX_DBG(DBG_E, "sigstruct file not found. Must have \'sgx.sigfile\' in the manifest\n");
        return -EINVAL;
    }

    const char * uri = resolve_uri(cfgbuf, &errstring);
    if (!uri) {
        SGX_DBG(DBG_E, "%s: %s\n", errstring, cfgbuf);
        return -EINVAL;
    }

    if (!strcmp_static(uri + strlen(uri) - 4, ".sig")) {
        SGX_DBG(DBG_E, "Invalid sigstruct file URI as %s\n", cfgbuf);
        return -EINVAL;
    }

    enclave->sigfile = INLINE_SYSCALL(open, 3, uri + 5, O_RDONLY|O_CLOEXEC, 0);
    if (IS_ERR(enclave->sigfile)) {
        SGX_DBG(DBG_E, "cannot open sigstruct file %s\n", uri);
        return -EINVAL;
    }

    uri = alloc_concat(uri, strlen(uri) - 4, ".token", -1);

    enclave->token = INLINE_SYSCALL(open, 3, uri + 5, O_RDONLY|O_CLOEXEC, 0);
    if (IS_ERR(enclave->token)) {
        SGX_DBG(DBG_E, "cannot open token \'%s\'. Use \'"
                PAL_FILE("pal-sgx-get-token")
                "\' on the runtime host, or run \'make SGX_RUN=1\' "
                "in the Graphene source, to create the token file.\n",
                uri);
        return -EINVAL;
    }

    /* Initialize the enclave */
    ret = initialize_enclave(enclave);
    if (ret < 0)
        return ret;

    snprintf(pal_sec->enclave_image, sizeof(PAL_SEC_STR), "%s",
             ENCLAVE_FILENAME);

    if (!pal_sec->instance_id)
        create_instance(&enclave->pal_sec);

    pal_sec->manifest_fd = enclave->manifest;
    memcpy(pal_sec->manifest_name, manifest_uri, strlen(manifest_uri) + 1);

    if (enclave->exec == -1) {
        pal_sec->exec_fd = PAL_IDX_POISON;
        memset(pal_sec->exec_name, 0, sizeof(PAL_SEC_STR));
    } else {
        pal_sec->exec_fd = enclave->exec;
        memcpy(pal_sec->exec_name, exec_uri, strlen(exec_uri) + 1);
    }

    if (!pal_sec->mcast_port) {
        unsigned short mcast_port;
        getrand(&mcast_port, sizeof(unsigned short));
        pal_sec->mcast_port = mcast_port > 1024 ? mcast_port : mcast_port + 1024;
    }

    if ((ret = mcast_s(pal_sec->mcast_port)) >= 0) {
        pal_sec->mcast_srv = ret;
        if ((ret = mcast_c(pal_sec->mcast_port)) >= 0) {
            pal_sec->mcast_cli = ret;
        } else {
            INLINE_SYSCALL(close, 1, pal_sec->mcast_srv);
            pal_sec->mcast_srv = 0;
        }
    }

    /* setup signal handling */
    ret = sgx_signal_setup();
    if (ret < 0)
        return ret;

    current_enclave = enclave;
    map_tcs(INLINE_SYSCALL(gettid, 0));

    /* start running trusted PAL */
    ecall_pal_main(arguments, environments);

    unmap_tcs();
    exit_process(0);
    return 0;
}
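
/* main(): entry point of the untrusted loader. The first argument is either
 * an executable (in which case the matching "<name>.manifest.sgx" is looked
 * up next to it) or a manifest file itself; a child process instead
 * inherits its setup from the parent via sgx_init_child_process(). The
 * remaining arguments and the environment are passed into the enclave. */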
int main (int argc, const char ** argv, const char ** envp)
{
    const char * manifest_uri = NULL, * exec_uri = NULL;
    const char * pal_loader = argv[0];
    argc--;
    argv++;

    struct pal_enclave * enclave = malloc(sizeof(struct pal_enclave));
    if (!enclave)
        return -ENOMEM;

    memset(enclave, 0, sizeof(struct pal_enclave));

    int is_child = sgx_init_child_process(&enclave->pal_sec);
    if (is_child < 0)
        return is_child;

    if (!is_child) {
        /* occupy PROC_INIT_FD so no one will use it */
        INLINE_SYSCALL(dup2, 2, 0, PROC_INIT_FD);

        if (!argc)
            goto usage;

        if (strcmp_static(argv[0], "file:")) {
            exec_uri = alloc_concat(argv[0], -1, NULL, -1);
        } else {
            exec_uri = alloc_concat("file:", -1, argv[0], -1);
        }
    } else {
        exec_uri = alloc_concat(enclave->pal_sec.exec_name, -1, NULL, -1);
    }

    int fd = INLINE_SYSCALL(open, 3, exec_uri + 5, O_RDONLY|O_CLOEXEC, 0);
    if (IS_ERR(fd))
        return -ERRNO(fd);

    char filebuf[4];
    /* check if the first argument is an executable. If it is, try finding
       all the possible manifest files */
    INLINE_SYSCALL(read, 3, fd, filebuf, 4);
    INLINE_SYSCALL(close, 1, fd);

    if (memcmp(filebuf, "\177ELF", 4)) {
        manifest_uri = exec_uri;
        exec_uri = NULL;
        SGX_DBG(DBG_I, "manifest file: %s\n", manifest_uri);
    } else {
        char path[URI_MAX];
        int len = get_base_name(exec_uri + static_strlen("file:"),
                                path, URI_MAX);
        if (len < 0)
            return len;

        strcpy_static(path + len, ".manifest.sgx", URI_MAX - len);

        fd = INLINE_SYSCALL(open, 3, path, O_RDONLY|O_CLOEXEC, 0);
        if (IS_ERR(fd)) {
            SGX_DBG(DBG_E, "cannot open manifest file: %s\n", path);
            goto usage;
        }

        manifest_uri = alloc_concat("file:", static_strlen("file:"), path, -1);
        INLINE_SYSCALL(close, 1, fd);

        SGX_DBG(DBG_I, "manifest file: %s\n", manifest_uri);
    }

    return load_enclave(enclave, manifest_uri, exec_uri, argv, envp);

usage:
    SGX_DBG(DBG_E, "USAGE: %s [executable|manifest] args ...\n", pal_loader);
    return -EINVAL;
}
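
/* pal_init_enclave(): alternative entry point that starts an enclave from
 * an explicit manifest URI (and optional executable URI) instead of parsing
 * the command line. */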
int pal_init_enclave (const char * manifest_uri,
                      const char * exec_uri,
                      const char ** arguments, const char ** environments)
{
    if (!manifest_uri)
        return -PAL_ERROR_INVAL;

    struct pal_enclave * enclave = malloc(sizeof(struct pal_enclave));
    if (!enclave)
        return -PAL_ERROR_NOMEM;

    memset(enclave, 0, sizeof(struct pal_enclave));

    return load_enclave(enclave, manifest_uri, exec_uri,
                        arguments, environments);
}
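
/* exit_process(): tear down the current enclave (destroy_enclave), release
 * its bookkeeping structures and terminate the loader process with the
 * given status. */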
void exit_process (int status)
{
    struct pal_enclave * enclave = current_enclave;

    destroy_enclave((void *) enclave->baseaddr, enclave->size);

    free(enclave->config);
    free(enclave);

    INLINE_SYSCALL(exit, 1, status);
}