/* sgx_main.c — untrusted (host-side) PAL loader for Graphene-SGX:
 * parses the manifest, builds the enclave memory layout, and starts the
 * in-enclave PAL. */
  1. #include <pal_linux.h>
  2. #include <pal_linux_error.h>
  3. #include <pal_rtld.h>
  4. #include <hex.h>
  5. #include "sgx_internal.h"
  6. #include "sgx_tls.h"
  7. #include "sgx_enclave.h"
  8. #include "debugger/sgx_gdb.h"
  9. #include <asm/fcntl.h>
  10. #include <asm/socket.h>
  11. #include <linux/fs.h>
  12. #include <linux/in.h>
  13. #include <linux/in6.h>
  14. #include <asm/errno.h>
  15. #include <ctype.h>
  16. #include <sysdep.h>
  17. #include <sysdeps/generic/ldsodefs.h>
/* Host/enclave page size; on SGX this is the architectural 4KiB page. */
size_t g_page_size = PRESET_PAGESIZE;

/* Global descriptor of the single enclave managed by this loader process. */
struct pal_enclave pal_enclave;
  20. static inline
  21. char * alloc_concat(const char * p, size_t plen,
  22. const char * s, size_t slen)
  23. {
  24. plen = (plen != (size_t)-1) ? plen : (p ? strlen(p) : 0);
  25. slen = (slen != (size_t)-1) ? slen : (s ? strlen(s) : 0);
  26. char * buf = malloc(plen + slen + 1);
  27. if (!buf)
  28. return NULL;
  29. if (plen)
  30. memcpy(buf, p, plen);
  31. if (slen)
  32. memcpy(buf + plen, s, slen);
  33. buf[plen + slen] = '\0';
  34. return buf;
  35. }
  36. static unsigned long parse_int (const char * str)
  37. {
  38. unsigned long num = 0;
  39. int radix = 10;
  40. char c;
  41. if (str[0] == '0') {
  42. str++;
  43. radix = 8;
  44. if (str[0] == 'x') {
  45. str++;
  46. radix = 16;
  47. }
  48. }
  49. while ((c = *(str++))) {
  50. int8_t val = hex2dec(c);
  51. if (val < 0)
  52. break;
  53. if ((uint8_t) val >= radix)
  54. break;
  55. num = num * radix + (uint8_t) val;
  56. }
  57. if (c == 'G' || c == 'g')
  58. num *= 1024 * 1024 * 1024;
  59. else if (c == 'M' || c == 'm')
  60. num *= 1024 * 1024;
  61. else if (c == 'K' || c == 'k')
  62. num *= 1024;
  63. return num;
  64. }
  65. static char * resolve_uri (const char * uri, const char ** errstring)
  66. {
  67. if (!strstartswith_static(uri, URI_PREFIX_FILE)) {
  68. *errstring = "Invalid URI";
  69. return NULL;
  70. }
  71. char path_buf[URI_MAX];
  72. size_t len = URI_MAX;
  73. int ret = get_norm_path(uri + 5, path_buf, &len);
  74. if (ret < 0) {
  75. *errstring = "Invalid URI";
  76. return NULL;
  77. }
  78. return alloc_concat(URI_PREFIX_FILE, URI_PREFIX_FILE_LEN, path_buf, len);
  79. }
  80. static
  81. int scan_enclave_binary (int fd, unsigned long * base, unsigned long * size,
  82. unsigned long * entry)
  83. {
  84. int ret = 0;
  85. if (IS_ERR(ret = INLINE_SYSCALL(lseek, 3, fd, 0, SEEK_SET)))
  86. return -ERRNO(ret);
  87. char filebuf[FILEBUF_SIZE];
  88. ret = INLINE_SYSCALL(read, 3, fd, filebuf, FILEBUF_SIZE);
  89. if (IS_ERR(ret))
  90. return -ERRNO(ret);
  91. if ((size_t)ret < sizeof(ElfW(Ehdr)))
  92. return -ENOEXEC;
  93. const ElfW(Ehdr) * header = (void *) filebuf;
  94. const ElfW(Phdr) * phdr = (void *) filebuf + header->e_phoff;
  95. const ElfW(Phdr) * ph;
  96. if (memcmp(header->e_ident, ELFMAG, SELFMAG) != 0)
  97. return -ENOEXEC;
  98. struct loadcmd {
  99. ElfW(Addr) mapstart, mapend;
  100. } loadcmds[16], *c;
  101. int nloadcmds = 0;
  102. for (ph = phdr ; ph < &phdr[header->e_phnum] ; ph++)
  103. if (ph->p_type == PT_LOAD) {
  104. if (nloadcmds == 16)
  105. return -EINVAL;
  106. c = &loadcmds[nloadcmds++];
  107. c->mapstart = ALLOC_ALIGN_DOWN(ph->p_vaddr);
  108. c->mapend = ALLOC_ALIGN_UP(ph->p_vaddr + ph->p_memsz);
  109. }
  110. *base = loadcmds[0].mapstart;
  111. *size = loadcmds[nloadcmds - 1].mapend - loadcmds[0].mapstart;
  112. if (entry)
  113. *entry = header->e_entry;
  114. return 0;
  115. }
  116. static
  117. int load_enclave_binary (sgx_arch_secs_t * secs, int fd,
  118. unsigned long base, unsigned long prot)
  119. {
  120. int ret = 0;
  121. if (IS_ERR(ret = INLINE_SYSCALL(lseek, 3, fd, 0, SEEK_SET)))
  122. return -ERRNO(ret);
  123. char filebuf[FILEBUF_SIZE];
  124. ret = INLINE_SYSCALL(read, 3, fd, filebuf, FILEBUF_SIZE);
  125. if (IS_ERR(ret))
  126. return -ERRNO(ret);
  127. const ElfW(Ehdr) * header = (void *) filebuf;
  128. const ElfW(Phdr) * phdr = (void *) filebuf + header->e_phoff;
  129. const ElfW(Phdr) * ph;
  130. struct loadcmd {
  131. ElfW(Addr) mapstart, mapend, datastart, dataend, allocend;
  132. unsigned int mapoff;
  133. int prot;
  134. } loadcmds[16], *c;
  135. int nloadcmds = 0;
  136. for (ph = phdr ; ph < &phdr[header->e_phnum] ; ph++)
  137. if (ph->p_type == PT_LOAD) {
  138. if (nloadcmds == 16)
  139. return -EINVAL;
  140. c = &loadcmds[nloadcmds++];
  141. c->mapstart = ALLOC_ALIGN_DOWN(ph->p_vaddr);
  142. c->mapend = ALLOC_ALIGN_UP(ph->p_vaddr + ph->p_filesz);
  143. c->datastart = ph->p_vaddr;
  144. c->dataend = ph->p_vaddr + ph->p_filesz;
  145. c->allocend = ph->p_vaddr + ph->p_memsz;
  146. c->mapoff = ALLOC_ALIGN_DOWN(ph->p_offset);
  147. c->prot = (ph->p_flags & PF_R ? PROT_READ : 0)|
  148. (ph->p_flags & PF_W ? PROT_WRITE : 0)|
  149. (ph->p_flags & PF_X ? PROT_EXEC : 0)|prot;
  150. }
  151. base -= loadcmds[0].mapstart;
  152. for (c = loadcmds; c < &loadcmds[nloadcmds] ; c++) {
  153. ElfW(Addr) zero = c->dataend;
  154. ElfW(Addr) zeroend = ALLOC_ALIGN_UP(c->allocend);
  155. ElfW(Addr) zeropage = ALLOC_ALIGN_UP(zero);
  156. if (zeroend < zeropage)
  157. zeropage = zeroend;
  158. if (c->mapend > c->mapstart) {
  159. void * addr = (void *) INLINE_SYSCALL(mmap, 6, NULL,
  160. c->mapend - c->mapstart,
  161. PROT_READ|PROT_WRITE,
  162. MAP_PRIVATE | MAP_FILE,
  163. fd, c->mapoff);
  164. if (IS_ERR_P(addr))
  165. return -ERRNO_P(addr);
  166. if (c->datastart > c->mapstart)
  167. memset(addr, 0, c->datastart - c->mapstart);
  168. if (zeropage > zero)
  169. memset(addr + zero - c->mapstart, 0, zeropage - zero);
  170. ret = add_pages_to_enclave(secs, (void *) base + c->mapstart, addr,
  171. c->mapend - c->mapstart,
  172. SGX_PAGE_REG, c->prot, /*skip_eextend=*/false,
  173. (c->prot & PROT_EXEC) ? "code" : "data");
  174. INLINE_SYSCALL(munmap, 2, addr, c->mapend - c->mapstart);
  175. if (ret < 0)
  176. return ret;
  177. }
  178. if (zeroend > zeropage) {
  179. ret = add_pages_to_enclave(secs, (void *) base + zeropage, NULL,
  180. zeroend - zeropage,
  181. SGX_PAGE_REG, c->prot, false, "bss");
  182. if (ret < 0)
  183. return ret;
  184. }
  185. }
  186. return 0;
  187. }
  188. int initialize_enclave (struct pal_enclave * enclave)
  189. {
  190. int ret = 0;
  191. int enclave_image = -1;
  192. char* enclave_uri = NULL;
  193. sgx_arch_token_t enclave_token;
  194. sgx_arch_enclave_css_t enclave_sigstruct;
  195. sgx_arch_secs_t enclave_secs;
  196. unsigned long enclave_entry_addr;
  197. unsigned long heap_min = DEFAULT_HEAP_MIN;
  198. /* this array may overflow the stack, so we allocate it in BSS */
  199. static void* tcs_addrs[MAX_DBG_THREADS];
  200. char cfgbuf[CONFIG_MAX];
  201. const char* errstring = "out of memory";
  202. /* Use sgx.enclave_pal_file from manifest if exists */
  203. if (get_config(enclave->config, "sgx.enclave_pal_file", cfgbuf, sizeof(cfgbuf)) > 0) {
  204. enclave_uri = resolve_uri(cfgbuf, &errstring);
  205. } else {
  206. enclave_uri = alloc_concat(URI_PREFIX_FILE, URI_PREFIX_FILE_LEN, ENCLAVE_PAL_FILENAME, -1);
  207. }
  208. if (!enclave_uri) {
  209. SGX_DBG(DBG_E,
  210. "Cannot open in-enclave PAL: %s (incorrect sgx.enclave_pal_file in manifest?)\n",
  211. errstring);
  212. ret = -EINVAL;
  213. goto out;
  214. }
  215. enclave_image = INLINE_SYSCALL(open, 3, enclave_uri + URI_PREFIX_FILE_LEN, O_RDONLY, 0);
  216. if (IS_ERR(enclave_image)) {
  217. SGX_DBG(DBG_E, "Cannot find enclave image: %s\n", enclave_uri);
  218. ret = -ERRNO(enclave_image);
  219. goto out;
  220. }
  221. /* Reading sgx.enclave_size from manifest */
  222. if (get_config(enclave->config, "sgx.enclave_size", cfgbuf, sizeof(cfgbuf)) <= 0) {
  223. SGX_DBG(DBG_E, "Enclave size is not specified\n");
  224. ret = -EINVAL;
  225. goto out;
  226. }
  227. enclave->size = parse_int(cfgbuf);
  228. if (!enclave->size || !IS_POWER_OF_2(enclave->size)) {
  229. SGX_DBG(DBG_E, "Enclave size not a power of two (an SGX-imposed requirement)\n");
  230. ret = -EINVAL;
  231. goto out;
  232. }
  233. /* Reading sgx.thread_num from manifest */
  234. if (get_config(enclave->config, "sgx.thread_num", cfgbuf, sizeof(cfgbuf)) > 0) {
  235. enclave->thread_num = parse_int(cfgbuf);
  236. if (enclave->thread_num > MAX_DBG_THREADS) {
  237. SGX_DBG(DBG_E, "Too many threads to debug\n");
  238. ret = -EINVAL;
  239. goto out;
  240. }
  241. } else {
  242. enclave->thread_num = 1;
  243. }
  244. if (get_config(enclave->config, "sgx.static_address", cfgbuf, sizeof(cfgbuf)) > 0 && cfgbuf[0] == '1') {
  245. enclave->baseaddr = ALIGN_DOWN_POW2(heap_min, enclave->size);
  246. } else {
  247. enclave->baseaddr = ENCLAVE_HIGH_ADDRESS;
  248. heap_min = 0;
  249. }
  250. ret = read_enclave_token(enclave->token, &enclave_token);
  251. if (ret < 0) {
  252. SGX_DBG(DBG_E, "Reading enclave token failed: %d\n", -ret);
  253. goto out;
  254. }
  255. enclave->pal_sec.enclave_attributes = enclave_token.body.attributes;
  256. ret = read_enclave_sigstruct(enclave->sigfile, &enclave_sigstruct);
  257. if (ret < 0) {
  258. SGX_DBG(DBG_E, "Reading enclave sigstruct failed: %d\n", -ret);
  259. goto out;
  260. }
  261. memset(&enclave_secs, 0, sizeof(enclave_secs));
  262. enclave_secs.base = enclave->baseaddr;
  263. enclave_secs.size = enclave->size;
  264. ret = create_enclave(&enclave_secs, &enclave_token);
  265. if (ret < 0) {
  266. SGX_DBG(DBG_E, "Creating enclave failed: %d\n", -ret);
  267. goto out;
  268. }
  269. enclave->ssaframesize = enclave_secs.ssa_frame_size * g_page_size;
  270. struct stat stat;
  271. ret = INLINE_SYSCALL(fstat, 2, enclave->manifest, &stat);
  272. if (IS_ERR(ret)) {
  273. SGX_DBG(DBG_E, "Reading manifest file's size failed: %d\n", -ret);
  274. ret = -ERRNO(ret);
  275. goto out;
  276. }
  277. int manifest_size = stat.st_size;
  278. /* Start populating enclave memory */
  279. struct mem_area {
  280. const char * desc;
  281. bool skip_eextend;
  282. int fd;
  283. bool is_binary; /* only meaningful if fd != -1 */
  284. unsigned long addr, size, prot;
  285. enum sgx_page_type type;
  286. };
  287. /*
  288. * 10 for manifest, SSA, TCS, etc
  289. * + enclave->thread_num for normal stack
  290. * + enclave->thread_num for signal stack
  291. */
  292. int area_num_max = 10 + enclave->thread_num * 2;
  293. struct mem_area * areas = __alloca(sizeof(areas[0]) * area_num_max);
  294. int area_num = 0;
  295. /* The manifest needs to be allocated at the upper end of the enclave
  296. * memory. That's used by pal_linux_main to find the manifest area. So add
  297. * it first to the list with memory areas. */
  298. areas[area_num] = (struct mem_area) {
  299. .desc = "manifest", .skip_eextend = false, .fd = enclave->manifest,
  300. .is_binary = false, .addr = 0, .size = ALLOC_ALIGN_UP(manifest_size),
  301. .prot = PROT_READ, .type = SGX_PAGE_REG
  302. };
  303. area_num++;
  304. areas[area_num] = (struct mem_area) {
  305. .desc = "ssa", .skip_eextend = false, .fd = -1,
  306. .is_binary = false, .addr = 0,
  307. .size = enclave->thread_num * enclave->ssaframesize * SSAFRAMENUM,
  308. .prot = PROT_READ | PROT_WRITE, .type = SGX_PAGE_REG
  309. };
  310. struct mem_area* ssa_area = &areas[area_num++];
  311. areas[area_num] = (struct mem_area) {
  312. .desc = "tcs", .skip_eextend = false, .fd = -1,
  313. .is_binary = false, .addr = 0, .size = enclave->thread_num * g_page_size,
  314. .prot = 0, .type = SGX_PAGE_TCS
  315. };
  316. struct mem_area* tcs_area = &areas[area_num++];
  317. areas[area_num] = (struct mem_area) {
  318. .desc = "tls", .skip_eextend = false, .fd = -1,
  319. .is_binary = false, .addr = 0, .size = enclave->thread_num * g_page_size,
  320. .prot = PROT_READ | PROT_WRITE, .type = SGX_PAGE_REG
  321. };
  322. struct mem_area* tls_area = &areas[area_num++];
  323. struct mem_area* stack_areas = &areas[area_num]; /* memorize for later use */
  324. for (uint32_t t = 0; t < enclave->thread_num; t++) {
  325. areas[area_num] = (struct mem_area) {
  326. .desc = "stack", .skip_eextend = false, .fd = -1,
  327. .is_binary = false, .addr = 0, .size = ENCLAVE_STACK_SIZE,
  328. .prot = PROT_READ | PROT_WRITE, .type = SGX_PAGE_REG
  329. };
  330. area_num++;
  331. }
  332. struct mem_area* sig_stack_areas = &areas[area_num]; /* memorize for later use */
  333. for (uint32_t t = 0; t < enclave->thread_num; t++) {
  334. areas[area_num] = (struct mem_area) {
  335. .desc = "sig_stack", .skip_eextend = false, .fd = -1,
  336. .is_binary = false, .addr = 0, .size = ENCLAVE_SIG_STACK_SIZE,
  337. .prot = PROT_READ | PROT_WRITE, .type = SGX_PAGE_REG
  338. };
  339. area_num++;
  340. }
  341. areas[area_num] = (struct mem_area) {
  342. .desc = "pal", .skip_eextend = false, .fd = enclave_image,
  343. .is_binary = true, .addr = 0, .size = 0 /* set below */,
  344. .prot = 0, .type = SGX_PAGE_REG
  345. };
  346. struct mem_area* pal_area = &areas[area_num++];
  347. ret = scan_enclave_binary(enclave_image, &pal_area->addr, &pal_area->size, &enclave_entry_addr);
  348. if (ret < 0) {
  349. SGX_DBG(DBG_E, "Scanning Pal binary (%s) failed: %d\n", enclave_uri, -ret);
  350. goto out;
  351. }
  352. struct mem_area* exec_area = NULL;
  353. if (enclave->exec != -1) {
  354. areas[area_num] = (struct mem_area) {
  355. .desc = "exec", .skip_eextend = false, .fd = enclave->exec,
  356. .is_binary = true, .addr = 0, .size = 0 /* set below */,
  357. .prot = PROT_WRITE, .type = SGX_PAGE_REG
  358. };
  359. exec_area = &areas[area_num++];
  360. ret = scan_enclave_binary(enclave->exec, &exec_area->addr, &exec_area->size, NULL);
  361. if (ret < 0) {
  362. SGX_DBG(DBG_E, "Scanning application binary failed: %d\n", -ret);
  363. goto out;
  364. }
  365. }
  366. unsigned long populating = enclave->size;
  367. for (int i = 0 ; i < area_num ; i++) {
  368. if (areas[i].addr)
  369. continue;
  370. areas[i].addr = populating - areas[i].size;
  371. populating = SATURATED_P_SUB(areas[i].addr, MEMORY_GAP, 0);
  372. }
  373. enclave_entry_addr += pal_area->addr;
  374. if (exec_area) {
  375. if (exec_area->addr + exec_area->size > pal_area->addr - MEMORY_GAP) {
  376. SGX_DBG(DBG_E, "Application binary overlaps with Pal binary\n");
  377. ret = -EINVAL;
  378. goto out;
  379. }
  380. if (exec_area->addr + exec_area->size + MEMORY_GAP < populating) {
  381. if (populating > heap_min) {
  382. unsigned long addr = exec_area->addr + exec_area->size + MEMORY_GAP;
  383. if (addr < heap_min)
  384. addr = heap_min;
  385. areas[area_num] = (struct mem_area) {
  386. .desc = "free", .skip_eextend = true, .fd = -1,
  387. .is_binary = false, .addr = addr, .size = populating - addr,
  388. .prot = PROT_READ | PROT_WRITE | PROT_EXEC, .type = SGX_PAGE_REG
  389. };
  390. area_num++;
  391. }
  392. populating = SATURATED_P_SUB(exec_area->addr, MEMORY_GAP, 0);
  393. }
  394. }
  395. if (populating > heap_min) {
  396. areas[area_num] = (struct mem_area) {
  397. .desc = "free", .skip_eextend = true, .fd = -1,
  398. .is_binary = false, .addr = heap_min, .size = populating - heap_min,
  399. .prot = PROT_READ | PROT_WRITE | PROT_EXEC, .type = SGX_PAGE_REG
  400. };
  401. area_num++;
  402. }
  403. for (int i = 0 ; i < area_num ; i++) {
  404. if (areas[i].fd != -1 && areas[i].is_binary) {
  405. ret = load_enclave_binary(&enclave_secs, areas[i].fd, areas[i].addr, areas[i].prot);
  406. if (ret < 0) {
  407. SGX_DBG(DBG_E, "Loading enclave binary failed: %d\n", -ret);
  408. goto out;
  409. }
  410. continue;
  411. }
  412. void * data = NULL;
  413. if (!strcmp_static(areas[i].desc, "tls")) {
  414. data = (void *) INLINE_SYSCALL(mmap, 6, NULL, areas[i].size,
  415. PROT_READ|PROT_WRITE,
  416. MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
  417. if (IS_ERR_P(data) || data == NULL) {
  418. /* Note that Graphene currently doesn't handle 0x0 addresses */
  419. SGX_DBG(DBG_E, "Allocating memory for tls pages failed\n");
  420. goto out;
  421. }
  422. for (uint32_t t = 0 ; t < enclave->thread_num ; t++) {
  423. struct enclave_tls * gs = data + g_page_size * t;
  424. memset(gs, 0, g_page_size);
  425. assert(sizeof(*gs) <= g_page_size);
  426. gs->common.self = (PAL_TCB *)(
  427. tls_area->addr + g_page_size * t + enclave_secs.base);
  428. gs->enclave_size = enclave->size;
  429. gs->tcs_offset = tcs_area->addr + g_page_size * t;
  430. gs->initial_stack_offset =
  431. stack_areas[t].addr + ENCLAVE_STACK_SIZE;
  432. gs->sig_stack_low =
  433. sig_stack_areas[t].addr + enclave_secs.base;
  434. gs->sig_stack_high =
  435. sig_stack_areas[t].addr + ENCLAVE_SIG_STACK_SIZE +
  436. enclave_secs.base;
  437. gs->ssa = (void *) ssa_area->addr +
  438. enclave->ssaframesize * SSAFRAMENUM * t +
  439. enclave_secs.base;
  440. gs->gpr = gs->ssa +
  441. enclave->ssaframesize - sizeof(sgx_pal_gpr_t);
  442. gs->manifest_size = manifest_size;
  443. gs->heap_min = (void *) enclave_secs.base + heap_min;
  444. gs->heap_max = (void *) enclave_secs.base + pal_area->addr - MEMORY_GAP;
  445. if (exec_area) {
  446. gs->exec_addr = (void *) enclave_secs.base + exec_area->addr;
  447. gs->exec_size = exec_area->size;
  448. }
  449. gs->thread = NULL;
  450. }
  451. } else if (!strcmp_static(areas[i].desc, "tcs")) {
  452. data = (void *) INLINE_SYSCALL(mmap, 6, NULL, areas[i].size,
  453. PROT_READ|PROT_WRITE,
  454. MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
  455. if (IS_ERR_P(data) || data == NULL) {
  456. /* Note that Graphene currently doesn't handle 0x0 addresses */
  457. SGX_DBG(DBG_E, "Allocating memory for tcs pages failed\n");
  458. goto out;
  459. }
  460. for (uint32_t t = 0 ; t < enclave->thread_num ; t++) {
  461. sgx_arch_tcs_t * tcs = data + g_page_size * t;
  462. memset(tcs, 0, g_page_size);
  463. tcs->ossa = ssa_area->addr +
  464. enclave->ssaframesize * SSAFRAMENUM * t;
  465. tcs->nssa = SSAFRAMENUM;
  466. tcs->oentry = enclave_entry_addr;
  467. tcs->ofs_base = 0;
  468. tcs->ogs_base = tls_area->addr + t * g_page_size;
  469. tcs->ofs_limit = 0xfff;
  470. tcs->ogs_limit = 0xfff;
  471. tcs_addrs[t] = (void *) enclave_secs.base + tcs_area->addr + g_page_size * t;
  472. }
  473. } else if (areas[i].fd != -1) {
  474. data = (void *) INLINE_SYSCALL(mmap, 6, NULL, areas[i].size,
  475. PROT_READ,
  476. MAP_FILE|MAP_PRIVATE,
  477. areas[i].fd, 0);
  478. if (IS_ERR_P(data) || data == NULL) {
  479. /* Note that Graphene currently doesn't handle 0x0 addresses */
  480. SGX_DBG(DBG_E, "Allocating memory for file %s failed\n", areas[i].desc);
  481. goto out;
  482. }
  483. }
  484. ret = add_pages_to_enclave(&enclave_secs, (void *) areas[i].addr, data, areas[i].size,
  485. areas[i].type, areas[i].prot, areas[i].skip_eextend, areas[i].desc);
  486. if (data)
  487. INLINE_SYSCALL(munmap, 2, data, areas[i].size);
  488. if (ret < 0) {
  489. SGX_DBG(DBG_E, "Adding pages (%s) to enclave failed: %d\n", areas[i].desc, -ret);
  490. goto out;
  491. }
  492. }
  493. ret = init_enclave(&enclave_secs, &enclave_sigstruct, &enclave_token);
  494. if (ret < 0) {
  495. SGX_DBG(DBG_E, "Initializing enclave failed: %d\n", -ret);
  496. goto out;
  497. }
  498. create_tcs_mapper((void *) enclave_secs.base + tcs_area->addr, enclave->thread_num);
  499. struct enclave_dbginfo * dbg = (void *)
  500. INLINE_SYSCALL(mmap, 6, DBGINFO_ADDR,
  501. sizeof(struct enclave_dbginfo),
  502. PROT_READ|PROT_WRITE,
  503. MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
  504. -1, 0);
  505. if (IS_ERR_P(dbg)) {
  506. SGX_DBG(DBG_E, "Cannot allocate debug information (GDB will not work)\n");
  507. } else {
  508. dbg->pid = INLINE_SYSCALL(getpid, 0);
  509. dbg->base = enclave->baseaddr;
  510. dbg->size = enclave->size;
  511. dbg->ssaframesize = enclave->ssaframesize;
  512. dbg->aep = async_exit_pointer;
  513. dbg->thread_tids[0] = dbg->pid;
  514. for (int i = 0 ; i < MAX_DBG_THREADS ; i++)
  515. dbg->tcs_addrs[i] = tcs_addrs[i];
  516. }
  517. ret = 0;
  518. out:
  519. if (enclave_image >= 0)
  520. INLINE_SYSCALL(close, 1, enclave_image);
  521. free(enclave_uri);
  522. return ret;
  523. }
/* State of the simple PRNG below; (re)seeded from gettimeofday() in
 * load_enclave(). */
static unsigned long randval = 0;

/* Fill `buffer` with `size` pseudo-random bytes by repeatedly emitting and
 * re-hashing randval with hash64().
 * NOTE(review): this is NOT cryptographically secure randomness (clock-seeded,
 * deterministic hash chain) -- confirm no security-sensitive caller relies
 * on it. */
void getrand (void * buffer, size_t size)
{
    size_t bytes = 0;

    /* emit full 8-byte words while they fit */
    while (bytes + sizeof(uint64_t) <= size) {
        *(uint64_t*) (buffer + bytes) = randval;
        randval = hash64(randval);
        bytes += sizeof(uint64_t);
    }

    /* copy the leading bytes of the next word for the remainder */
    if (bytes < size) {
        memcpy(buffer + bytes, &randval, size - bytes);
        randval = hash64(randval);
    }
}
  538. static void create_instance (struct pal_sec * pal_sec)
  539. {
  540. PAL_NUM id;
  541. getrand(&id, sizeof(id));
  542. snprintf(pal_sec->pipe_prefix, sizeof(pal_sec->pipe_prefix), "/graphene/%016lx/", id);
  543. pal_sec->instance_id = id;
  544. }
  545. static int load_manifest (int fd, struct config_store ** config_ptr)
  546. {
  547. int ret = 0;
  548. int nbytes = INLINE_SYSCALL(lseek, 3, fd, 0, SEEK_END);
  549. if (IS_ERR(nbytes)) {
  550. SGX_DBG(DBG_E, "Cannot detect size of manifest file\n");
  551. return -ERRNO(nbytes);
  552. }
  553. struct config_store * config = malloc(sizeof(struct config_store));
  554. if (!config) {
  555. SGX_DBG(DBG_E, "Not enough memory for config_store of manifest\n");
  556. return -ENOMEM;
  557. }
  558. void * config_raw = (void *)
  559. INLINE_SYSCALL(mmap, 6, NULL, nbytes, PROT_READ, MAP_PRIVATE, fd, 0);
  560. if (IS_ERR_P(config_raw)) {
  561. SGX_DBG(DBG_E, "Cannot mmap manifest file\n");
  562. ret = -ERRNO_P(config_raw);
  563. goto out;
  564. }
  565. config->raw_data = config_raw;
  566. config->raw_size = nbytes;
  567. config->malloc = malloc;
  568. config->free = NULL;
  569. const char * errstring = NULL;
  570. ret = read_config(config, NULL, &errstring);
  571. if (ret < 0) {
  572. SGX_DBG(DBG_E, "Cannot read manifest: %s\n", errstring);
  573. goto out;
  574. }
  575. *config_ptr = config;
  576. ret = 0;
  577. out:
  578. if (ret < 0) {
  579. free(config);
  580. if (!IS_ERR_P(config_raw))
  581. INLINE_SYSCALL(munmap, 2, config_raw, nbytes);
  582. }
  583. return ret;
  584. }
  585. /*
  586. * Returns the number of online CPUs read from /sys/devices/system/cpu/online, -errno on failure.
  587. * Understands complex formats like "1,3-5,6".
  588. */
  589. static int get_cpu_count(void) {
  590. int fd = INLINE_SYSCALL(open, 3, "/sys/devices/system/cpu/online", O_RDONLY|O_CLOEXEC, 0);
  591. if (fd < 0)
  592. return unix_to_pal_error(ERRNO(fd));
  593. char buf[64];
  594. int ret = INLINE_SYSCALL(read, 3, fd, buf, sizeof(buf) - 1);
  595. if (ret < 0) {
  596. INLINE_SYSCALL(close, 1, fd);
  597. return unix_to_pal_error(ERRNO(ret));
  598. }
  599. buf[ret] = '\0'; /* ensure null-terminated buf even in partial read */
  600. char* end;
  601. char* ptr = buf;
  602. int cpu_count = 0;
  603. while (*ptr) {
  604. while (*ptr == ' ' || *ptr == '\t' || *ptr == ',')
  605. ptr++;
  606. int firstint = (int)strtol(ptr, &end, 10);
  607. if (ptr == end)
  608. break;
  609. if (*end == '\0' || *end == ',') {
  610. /* single CPU index, count as one more CPU */
  611. cpu_count++;
  612. } else if (*end == '-') {
  613. /* CPU range, count how many CPUs in range */
  614. ptr = end + 1;
  615. int secondint = (int)strtol(ptr, &end, 10);
  616. if (secondint > firstint)
  617. cpu_count += secondint - firstint + 1; // inclusive (e.g., 0-7, or 8-16)
  618. }
  619. ptr = end;
  620. }
  621. INLINE_SYSCALL(close, 1, fd);
  622. if (cpu_count == 0)
  623. return -PAL_ERROR_STREAMNOTEXIST;
  624. return cpu_count;
  625. }
/* Top-level enclave launch sequence: probe SGX support, seed the PRNG, read
 * the manifest, open the executable/sigstruct/token files, build the enclave
 * (initialize_enclave), set up signal handling and the alternate stack, and
 * finally enter the enclave via ecall_enclave_start(). On the success path
 * this function does not return normally -- the process exits via the exit
 * syscall after the enclave terminates. Returns negative errno on failure. */
static int load_enclave (struct pal_enclave * enclave,
                         int manifest_fd,
                         char * manifest_uri,
                         char * exec_uri,
                         char * args, size_t args_size,
                         char * env, size_t env_size,
                         bool exec_uri_inferred)
{
    struct pal_sec * pal_sec = &enclave->pal_sec;
    int ret;
    struct timeval tv;

#if PRINT_ENCLAVE_STAT == 1
    INLINE_SYSCALL(gettimeofday, 2, &tv, NULL);
    pal_sec->start_time = tv.tv_sec * 1000000UL + tv.tv_usec;
#endif

    ret = open_gsgx();
    if (ret < 0)
        return ret;

    if (!is_wrfsbase_supported())
        return -EPERM;

    /* seed the (non-cryptographic) PRNG used by getrand() from the clock */
    INLINE_SYSCALL(gettimeofday, 2, &tv, NULL);
    randval = tv.tv_sec * 1000000UL + tv.tv_usec;

    pal_sec->pid = INLINE_SYSCALL(getpid, 0);
    pal_sec->uid = INLINE_SYSCALL(getuid, 0);
    pal_sec->gid = INLINE_SYSCALL(getgid, 0);

    int num_cpus = get_cpu_count();
    if (num_cpus < 0) {
        return num_cpus;
    }
    pal_sec->num_cpus = num_cpus;

#ifdef DEBUG
    /* scan the environment block (NUL-separated strings): detect GDB and
     * scrub LD_PRELOAD entries before passing env into the enclave */
    size_t env_i = 0;
    while (env_i < env_size) {
        if (!strcmp_static(&env[env_i], "IN_GDB=1")) {
            SGX_DBG(DBG_I, "[ Running under GDB ]\n");
            pal_sec->in_gdb = true;
        } else if (strstartswith_static(&env[env_i], "LD_PRELOAD=")) {
            /* remove this entry in place and rescan the same offset */
            uint64_t env_i_size = strnlen(&env[env_i], env_size - env_i) + 1;
            memmove(&env[env_i], &env[env_i + env_i_size], env_size - env_i - env_i_size);
            env_size -= env_i_size;
            continue;
        }

        env_i += strnlen(&env[env_i], env_size - env_i) + 1;
    }
#endif

    enclave->manifest = manifest_fd;

    ret = load_manifest(enclave->manifest, &enclave->config);
    if (ret < 0) {
        SGX_DBG(DBG_E, "Invalid manifest: %s\n", manifest_uri);
        return -EINVAL;
    }

    char cfgbuf[CONFIG_MAX];
    const char * errstring;

    // A manifest can specify an executable with a different base name
    // than the manifest itself. Always give the exec field of the manifest
    // precedence if specified.
    if (get_config(enclave->config, "loader.exec", cfgbuf, sizeof(cfgbuf)) > 0) {
        /* NOTE(review): this overwrites the caller-provided exec_uri pointer
         * and the resolved string is never freed on later paths -- presumably
         * acceptable since the process lives until enclave exit; confirm. */
        exec_uri = resolve_uri(cfgbuf, &errstring);
        exec_uri_inferred = false;
        if (!exec_uri) {
            SGX_DBG(DBG_E, "%s: %s\n", errstring, cfgbuf);
            return -EINVAL;
        }
    }

    enclave->exec = INLINE_SYSCALL(open, 3, exec_uri + URI_PREFIX_FILE_LEN,
                                   O_RDONLY|O_CLOEXEC, 0);
    if (IS_ERR(enclave->exec)) {
        if (exec_uri_inferred) {
            // It is valid for an enclave not to have an executable.
            // We need to catch the case where we inferred the executable
            // from the manifest file name, but it doesn't exist, and let
            // the enclave go a bit further. Go ahead and warn the user,
            // though.
            SGX_DBG(DBG_I, "Inferred executable cannot be opened: %s. This may be ok, "
                    "or may represent a manifest misconfiguration. This typically "
                    "represents advanced usage, and if it is not what you intended, "
                    "try setting the loader.exec field in the manifest.\n", exec_uri);
            enclave->exec = -1;
        } else {
            SGX_DBG(DBG_E, "Cannot open executable %s\n", exec_uri);
            return -EINVAL;
        }
    }

    if (get_config(enclave->config, "sgx.sigfile", cfgbuf, sizeof(cfgbuf)) < 0) {
        SGX_DBG(DBG_E, "Sigstruct file not found ('sgx.sigfile' must be specified in manifest)\n");
        return -EINVAL;
    }

    char * sig_uri = resolve_uri(cfgbuf, &errstring);
    if (!sig_uri) {
        SGX_DBG(DBG_E, "%s: %s\n", errstring, cfgbuf);
        return -EINVAL;
    }

    if (!strendswith(sig_uri, ".sig")) {
        SGX_DBG(DBG_E, "Invalid sigstruct file URI as %s\n", cfgbuf);
        free(sig_uri);
        return -EINVAL;
    }

    enclave->sigfile = INLINE_SYSCALL(open, 3, sig_uri + URI_PREFIX_FILE_LEN,
                                      O_RDONLY|O_CLOEXEC, 0);
    if (IS_ERR(enclave->sigfile)) {
        SGX_DBG(DBG_E, "Cannot open sigstruct file %s\n", sig_uri);
        free(sig_uri);
        return -EINVAL;
    }

    /* the launch-token file sits next to the sigstruct: "X.sig" -> "X.token" */
    char * token_uri = alloc_concat(sig_uri, strlen(sig_uri) - static_strlen(".sig"), ".token", -1);
    free(sig_uri);
    if (!token_uri) {
        INLINE_SYSCALL(close, 1, enclave->sigfile);
        return -ENOMEM;
    }

    enclave->token = INLINE_SYSCALL(open, 3, token_uri + URI_PREFIX_FILE_LEN,
                                    O_RDONLY|O_CLOEXEC, 0);
    if (IS_ERR(enclave->token)) {
        /* NOTE(review): enclave->sigfile is left open on this error path --
         * confirm intentional (the process exits shortly after anyway). */
        SGX_DBG(DBG_E, "Cannot open token \'%s\'. Use \'"
                PAL_FILE("pal-sgx-get-token")
                "\' on the runtime host or run \'make SGX=1 sgx-tokens\' "
                "in the Graphene source to create the token file.\n",
                token_uri);
        free(token_uri);
        return -EINVAL;
    }
    SGX_DBG(DBG_I, "Token file: %s\n", token_uri);
    free(token_uri);

    ret = initialize_enclave(enclave);
    if (ret < 0)
        return ret;

    if (!pal_sec->instance_id)
        create_instance(&enclave->pal_sec);

    memcpy(pal_sec->manifest_name, manifest_uri, strlen(manifest_uri) + 1);

    if (enclave->exec == -1) {
        memset(pal_sec->exec_name, 0, sizeof(PAL_SEC_STR));
    } else {
        memcpy(pal_sec->exec_name, exec_uri, strlen(exec_uri) + 1);
    }

    ret = sgx_signal_setup();
    if (ret < 0)
        return ret;

    if (get_config(enclave->config, "sgx.ra_client_key", cfgbuf, sizeof(cfgbuf)) > 0) {
        /* initialize communication with AESM enclave only if app requests remote attestation */
        ret = init_aesm_targetinfo(&pal_sec->aesm_targetinfo);
        if (ret < 0)
            return ret;
    }

    /* alternate signal stack for the untrusted runtime; its top also hosts
     * the urts thread control block */
    void* alt_stack = (void*)INLINE_SYSCALL(mmap, 6, NULL, ALT_STACK_SIZE,
                                            PROT_READ | PROT_WRITE,
                                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (IS_ERR_P(alt_stack))
        return -ENOMEM;

    /* initialize TCB at the top of the alternative stack */
    PAL_TCB_URTS* tcb = alt_stack + ALT_STACK_SIZE - sizeof(PAL_TCB_URTS);
    pal_tcb_urts_init(
        tcb, /*stack=*/NULL, alt_stack); /* main thread uses the stack provided by Linux */

    pal_thread_init(tcb);

    /* start running trusted PAL; returns only when the enclave terminates */
    ecall_enclave_start(args, args_size, env, env_size);

#if PRINT_ENCLAVE_STAT == 1
    PAL_NUM exit_time = 0;
    INLINE_SYSCALL(gettimeofday, 2, &tv, NULL);
    exit_time = tv.tv_sec * 1000000UL + tv.tv_usec;
#endif

    unmap_tcs();
    INLINE_SYSCALL(munmap, 2, alt_stack, ALT_STACK_SIZE);
    INLINE_SYSCALL(exit, 0);
    return 0;
}
/* Grow stack of main thread to THREAD_STACK_SIZE by allocating a large dummy array and probing
 * each stack page (Linux dynamically grows the stack of the main thread but gets confused with
 * huge-jump stack accesses coming from within the enclave). Note that other, non-main threads
 * are created manually via clone(.., THREAD_STACK_SIZE, ..) and thus do not need this hack. */
static void __attribute__ ((noinline)) force_linux_to_grow_stack() {
    char dummy[THREAD_STACK_SIZE];
    for (uint64_t i = 0; i < sizeof(dummy); i += PRESET_PAGESIZE) {
        /* touch each page on the stack just to make sure it is not optimized away;
         * the volatile asm performs a real load from &dummy[i] (clobbering rbx),
         * which the compiler cannot elide the way it could a plain C read */
        __asm__ volatile("movq %0, %%rbx\r\n"
                         "movq (%%rbx), %%rbx\r\n"
                         : : "r"(&dummy[i]) : "%rbx");
    }
}
/* Entry point of the untrusted PAL loader.
 *
 * Usage: pal-sgx [executable|manifest] args...
 *
 * Determines whether the first argument refers to an ELF executable or to a
 * manifest file (by peeking at the "\177ELF" magic), derives the matching
 * "<base>.manifest.sgx" manifest name, opens the manifest, and hands control
 * to load_enclave(). If this process is a forked Graphene child, the
 * executable name comes from the inherited pal_sec state instead of argv.
 *
 * Returns 0 on success, a negative error code on failure. */
int main (int argc, char ** argv, char ** envp)
{
    char * manifest_uri = NULL;
    char * exec_uri = NULL;
    const char * pal_loader = argv[0];
    int fd = -1;
    int ret = 0;
    bool exec_uri_inferred = false; // Handle the case where the exec uri is
                                    // inferred from the manifest name somewhat
                                    // differently

    force_linux_to_grow_stack();

    /* skip the loader's own name so argv[0] is the executable/manifest path */
    argc--;
    argv++;

    /* negative on error; non-zero when this process is a forked child whose
     * pal_sec (including exec_name) was filled in by the parent */
    int is_child = sgx_init_child_process(&pal_enclave.pal_sec);
    if (is_child < 0) {
        ret = is_child;
        goto out;
    }

    if (!is_child) {
        /* occupy PROC_INIT_FD so no one will use it */
        INLINE_SYSCALL(dup2, 2, 0, PROC_INIT_FD);

        if (!argc)
            goto usage;

        /* normalize the first argument into a "file:" URI
         * NOTE(review): first branch taken when strcmp_static() reports a
         * match against URI_PREFIX_FILE, i.e. argv[0] already carries the
         * prefix — confirm strcmp_static semantics (prefix vs full compare) */
        if (!strcmp_static(argv[0], URI_PREFIX_FILE)) {
            exec_uri = alloc_concat(argv[0], -1, NULL, -1);
        } else {
            exec_uri = alloc_concat(URI_PREFIX_FILE, -1, argv[0], -1);
        }
    } else {
        /* forked child: reuse the executable name inherited from the parent */
        exec_uri = alloc_concat(pal_enclave.pal_sec.exec_name, -1, NULL, -1);
    }

    if (!exec_uri) {
        ret = -ENOMEM;
        goto out;
    }

    fd = INLINE_SYSCALL(open, 3, exec_uri + URI_PREFIX_FILE_LEN, O_RDONLY|O_CLOEXEC, 0);
    if (IS_ERR(fd)) {
        SGX_DBG(DBG_E, "Input file not found: %s\n", exec_uri);
        ret = fd;
        goto usage;
    }

    /* peek at the magic bytes to tell an ELF executable from a manifest;
     * note this advances the file offset (rewound below if fd is reused) */
    char file_first_four_bytes[4];
    ret = INLINE_SYSCALL(read, 3, fd, file_first_four_bytes, sizeof(file_first_four_bytes));
    if (IS_ERR(ret)) {
        goto out;
    }
    if (ret != sizeof(file_first_four_bytes)) {
        ret = -EINVAL;
        goto out;
    }

    /* derive the manifest file name from the argument's base name:
     * "<base>.manifest" -> append ".sgx"; any other name without a
     * ".manifest.sgx" suffix -> append ".manifest.sgx" */
    char manifest_base_name[URI_MAX];
    size_t manifest_base_name_len = sizeof(manifest_base_name);
    ret = get_base_name(exec_uri + URI_PREFIX_FILE_LEN, manifest_base_name,
                        &manifest_base_name_len);
    if (ret < 0) {
        goto out;
    }

    if (strendswith(manifest_base_name, ".manifest")) {
        if (!strcpy_static(manifest_base_name + manifest_base_name_len, ".sgx",
                           sizeof(manifest_base_name) - manifest_base_name_len)) {
            ret = -E2BIG;
            goto out;
        }
    } else if (!strendswith(manifest_base_name, ".manifest.sgx")) {
        if (!strcpy_static(manifest_base_name + manifest_base_name_len, ".manifest.sgx",
                           sizeof(manifest_base_name) - manifest_base_name_len)) {
            ret = -E2BIG;
            goto out;
        }
    }

    int manifest_fd = -1;

    if (memcmp(file_first_four_bytes, "\177ELF", sizeof(file_first_four_bytes))) {
        /* exec_uri doesn't refer to ELF executable, so it must refer to the
         * manifest. Verify this and update exec_uri with the manifest suffix
         * removed.
         */
        size_t exec_uri_len = strlen(exec_uri);
        if (strendswith(exec_uri, ".manifest")) {
            exec_uri[exec_uri_len - static_strlen(".manifest")] = '\0';
        } else if (strendswith(exec_uri, ".manifest.sgx")) {
            /* the already-open fd IS the SGX manifest; rewind it (the magic-byte
             * read above moved the offset) and reuse it as manifest_fd */
            INLINE_SYSCALL(lseek, 3, fd, 0, SEEK_SET);
            manifest_fd = fd;
            exec_uri[exec_uri_len - static_strlen(".manifest.sgx")] = '\0';
        } else {
            SGX_DBG(DBG_E, "Invalid manifest file specified: %s\n", exec_uri);
            goto usage;
        }
        exec_uri_inferred = true;
    }

    if (manifest_fd == -1) {
        /* the argument was the executable (or a plain ".manifest"); close it
         * and open the derived ".manifest.sgx" file instead — fd now aliases
         * manifest_fd, so the single close at 'out' suffices */
        INLINE_SYSCALL(close, 1, fd);
        fd = manifest_fd = INLINE_SYSCALL(open, 3, manifest_base_name, O_RDONLY|O_CLOEXEC, 0);
        if (IS_ERR(fd)) {
            SGX_DBG(DBG_E, "Cannot open manifest file: %s\n", manifest_base_name);
            goto usage;
        }
    }

    manifest_uri = alloc_concat(URI_PREFIX_FILE, URI_PREFIX_FILE_LEN, manifest_base_name, -1);
    if (!manifest_uri) {
        ret = -ENOMEM;
        goto out;
    }

    SGX_DBG(DBG_I, "Manifest file: %s\n", manifest_uri);
    if (exec_uri_inferred)
        SGX_DBG(DBG_I, "Inferred executable file: %s\n", exec_uri);
    else
        SGX_DBG(DBG_I, "Executable file: %s\n", exec_uri);

    /*
     * While C does not guarantee that the argv[i] and envp[i] strings are
     * continuous we know that we are running on Linux, which does this. This
     * saves us creating a copy of all argv and envp strings.
     */
    char * args = argv[0];
    size_t args_size = argc > 0 ? (argv[argc - 1] - argv[0]) + strlen(argv[argc - 1]) + 1: 0;

    /* envp is NULL-terminated; count entries to size the contiguous block */
    int envc = 0;
    while (envp[envc] != NULL) {
        envc++;
    }
    char * env = envp[0];
    size_t env_size = envc > 0 ? (envp[envc - 1] - envp[0]) + strlen(envp[envc - 1]) + 1: 0;

    ret = load_enclave(&pal_enclave, manifest_fd, manifest_uri, exec_uri, args, args_size, env, env_size,
                       exec_uri_inferred);

out:
    /* exec/sigfile/token descriptors are opened by load_enclave(); only close
     * the ones that were successfully opened (>= 0) */
    if (pal_enclave.exec >= 0)
        INLINE_SYSCALL(close, 1, pal_enclave.exec);
    if (pal_enclave.sigfile >= 0)
        INLINE_SYSCALL(close, 1, pal_enclave.sigfile);
    if (pal_enclave.token >= 0)
        INLINE_SYSCALL(close, 1, pal_enclave.token);
    /* fd also covers manifest_fd when the two alias (see above) */
    if (!IS_ERR(fd))
        INLINE_SYSCALL(close, 1, fd);
    free(exec_uri);
    free(manifest_uri);

    return ret;

usage:
    SGX_DBG(DBG_E, "USAGE: %s [executable|manifest] args ...\n", pal_loader);
    ret = -EINVAL;
    goto out;
}