sgx_main.c 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097
  1. #include <pal_linux.h>
  2. #include <pal_linux_error.h>
  3. #include <pal_rtld.h>
  4. #include <hex.h>
  5. #include "sgx_internal.h"
  6. #include "sgx_tls.h"
  7. #include "sgx_enclave.h"
  8. #include "debugger/sgx_gdb.h"
  9. #include <asm/fcntl.h>
  10. #include <asm/socket.h>
  11. #include <linux/fs.h>
  12. #include <linux/in.h>
  13. #include <linux/in6.h>
  14. #include <asm/errno.h>
  15. #include <ctype.h>
  16. #include <sysdep.h>
  17. #include <sysdeps/generic/ldsodefs.h>
/* Host page parameters, fixed at the preset page size (set before any
 * allocation helpers run). */
unsigned long pagesize = PRESET_PAGESIZE;
/* Mask selecting the page-aligned part of an address. */
unsigned long pagemask = ~(PRESET_PAGESIZE - 1);
/* NOTE(review): despite the name, this is an in-page offset mask
 * (PAGESIZE - 1), not a shift count — confirm before using it as one. */
unsigned long pageshift = PRESET_PAGESIZE - 1;
  21. static inline
  22. char * alloc_concat(const char * p, size_t plen,
  23. const char * s, size_t slen)
  24. {
  25. plen = (plen != (size_t)-1) ? plen : (p ? strlen(p) : 0);
  26. slen = (slen != (size_t)-1) ? slen : (s ? strlen(s) : 0);
  27. char * buf = malloc(plen + slen + 1);
  28. if (plen)
  29. memcpy(buf, p, plen);
  30. if (slen)
  31. memcpy(buf + plen, s, slen);
  32. buf[plen + slen] = '\0';
  33. return buf;
  34. }
  35. static unsigned long parse_int (const char * str)
  36. {
  37. unsigned long num = 0;
  38. int radix = 10;
  39. char c;
  40. if (str[0] == '0') {
  41. str++;
  42. radix = 8;
  43. if (str[0] == 'x') {
  44. str++;
  45. radix = 16;
  46. }
  47. }
  48. while ((c = *(str++))) {
  49. int8_t val = hex2dec(c);
  50. if (val < 0)
  51. break;
  52. if ((uint8_t) val >= radix)
  53. break;
  54. num = num * radix + (uint8_t) val;
  55. }
  56. if (c == 'G' || c == 'g')
  57. num *= 1024 * 1024 * 1024;
  58. else if (c == 'M' || c == 'm')
  59. num *= 1024 * 1024;
  60. else if (c == 'K' || c == 'k')
  61. num *= 1024;
  62. return num;
  63. }
  64. static char * resolve_uri (const char * uri, const char ** errstring)
  65. {
  66. if (!strpartcmp_static(uri, "file:")) {
  67. *errstring = "Invalid URI";
  68. return NULL;
  69. }
  70. char path_buf[URI_MAX];
  71. size_t len = URI_MAX;
  72. int ret = get_norm_path(uri + 5, path_buf, &len);
  73. if (ret < 0) {
  74. *errstring = "Invalid URI";
  75. return NULL;
  76. }
  77. return alloc_concat("file:", static_strlen("file:"), path_buf, len);
  78. }
  79. static
  80. int scan_enclave_binary (int fd, unsigned long * base, unsigned long * size,
  81. unsigned long * entry)
  82. {
  83. int ret = 0;
  84. if (IS_ERR(ret = INLINE_SYSCALL(lseek, 3, fd, 0, SEEK_SET)))
  85. return -ERRNO(ret);
  86. char filebuf[FILEBUF_SIZE];
  87. ret = INLINE_SYSCALL(read, 3, fd, filebuf, FILEBUF_SIZE);
  88. if (IS_ERR(ret))
  89. return -ERRNO(ret);
  90. const ElfW(Ehdr) * header = (void *) filebuf;
  91. const ElfW(Phdr) * phdr = (void *) filebuf + header->e_phoff;
  92. const ElfW(Phdr) * ph;
  93. struct loadcmd {
  94. ElfW(Addr) mapstart, mapend;
  95. } loadcmds[16], *c;
  96. int nloadcmds = 0;
  97. for (ph = phdr ; ph < &phdr[header->e_phnum] ; ph++)
  98. if (ph->p_type == PT_LOAD) {
  99. if (nloadcmds == 16)
  100. return -EINVAL;
  101. c = &loadcmds[nloadcmds++];
  102. c->mapstart = ALLOC_ALIGNDOWN(ph->p_vaddr);
  103. c->mapend = ALLOC_ALIGNUP(ph->p_vaddr + ph->p_memsz);
  104. }
  105. *base = loadcmds[0].mapstart;
  106. *size = loadcmds[nloadcmds - 1].mapend - loadcmds[0].mapstart;
  107. if (entry)
  108. *entry = header->e_entry;
  109. return 0;
  110. }
  111. static
  112. int load_enclave_binary (sgx_arch_secs_t * secs, int fd,
  113. unsigned long base, unsigned long prot)
  114. {
  115. int ret = 0;
  116. if (IS_ERR(ret = INLINE_SYSCALL(lseek, 3, fd, 0, SEEK_SET)))
  117. return -ERRNO(ret);
  118. char filebuf[FILEBUF_SIZE];
  119. ret = INLINE_SYSCALL(read, 3, fd, filebuf, FILEBUF_SIZE);
  120. if (IS_ERR(ret))
  121. return -ERRNO(ret);
  122. const ElfW(Ehdr) * header = (void *) filebuf;
  123. const ElfW(Phdr) * phdr = (void *) filebuf + header->e_phoff;
  124. const ElfW(Phdr) * ph;
  125. struct loadcmd {
  126. ElfW(Addr) mapstart, mapend, datastart, dataend, allocend;
  127. unsigned int mapoff;
  128. int prot;
  129. } loadcmds[16], *c;
  130. int nloadcmds = 0;
  131. for (ph = phdr ; ph < &phdr[header->e_phnum] ; ph++)
  132. if (ph->p_type == PT_LOAD) {
  133. if (nloadcmds == 16)
  134. return -EINVAL;
  135. c = &loadcmds[nloadcmds++];
  136. c->mapstart = ALLOC_ALIGNDOWN(ph->p_vaddr);
  137. c->mapend = ALLOC_ALIGNUP(ph->p_vaddr + ph->p_filesz);
  138. c->datastart = ph->p_vaddr;
  139. c->dataend = ph->p_vaddr + ph->p_filesz;
  140. c->allocend = ph->p_vaddr + ph->p_memsz;
  141. c->mapoff = ALLOC_ALIGNDOWN(ph->p_offset);
  142. c->prot = (ph->p_flags & PF_R ? PROT_READ : 0)|
  143. (ph->p_flags & PF_W ? PROT_WRITE : 0)|
  144. (ph->p_flags & PF_X ? PROT_EXEC : 0)|prot;
  145. }
  146. base -= loadcmds[0].mapstart;
  147. for (c = loadcmds; c < &loadcmds[nloadcmds] ; c++) {
  148. ElfW(Addr) zero = c->dataend;
  149. ElfW(Addr) zeroend = ALLOC_ALIGNUP(c->allocend);
  150. ElfW(Addr) zeropage = ALLOC_ALIGNUP(zero);
  151. if (zeroend < zeropage)
  152. zeropage = zeroend;
  153. if (c->mapend > c->mapstart) {
  154. void * addr = (void *) INLINE_SYSCALL(mmap, 6, NULL,
  155. c->mapend - c->mapstart,
  156. PROT_READ|PROT_WRITE,
  157. MAP_PRIVATE | MAP_FILE,
  158. fd, c->mapoff);
  159. if (IS_ERR_P(addr))
  160. return -ERRNO_P(addr);
  161. if (c->datastart > c->mapstart)
  162. memset(addr, 0, c->datastart - c->mapstart);
  163. if (zeropage > zero)
  164. memset(addr + zero - c->mapstart, 0, zeropage - zero);
  165. ret = add_pages_to_enclave(secs, (void *) base + c->mapstart, addr,
  166. c->mapend - c->mapstart,
  167. SGX_PAGE_REG, c->prot, 0,
  168. (c->prot & PROT_EXEC) ? "code" : "data");
  169. INLINE_SYSCALL(munmap, 2, addr, c->mapend - c->mapstart);
  170. if (ret < 0)
  171. return ret;
  172. }
  173. if (zeroend > zeropage) {
  174. ret = add_pages_to_enclave(secs, (void *) base + zeropage, NULL,
  175. zeroend - zeropage,
  176. SGX_PAGE_REG, c->prot, false, "bss");
  177. if (ret < 0)
  178. return ret;
  179. }
  180. }
  181. return 0;
  182. }
  183. int initialize_enclave (struct pal_enclave * enclave)
  184. {
  185. int ret = 0;
  186. int enclave_image = -1;
  187. int enclave_thread_num = 1;
  188. sgx_arch_token_t enclave_token;
  189. sgx_arch_sigstruct_t enclave_sigstruct;
  190. sgx_arch_secs_t enclave_secs;
  191. unsigned long enclave_entry_addr;
  192. void * tcs_addrs[MAX_DBG_THREADS];
  193. unsigned long heap_min = DEFAULT_HEAP_MIN;
  194. enclave_image = INLINE_SYSCALL(open, 3, ENCLAVE_FILENAME, O_RDONLY, 0);
  195. if (IS_ERR(enclave_image)) {
  196. SGX_DBG(DBG_E, "Cannot find %s\n", ENCLAVE_FILENAME);
  197. ret = -ERRNO(enclave_image);
  198. goto out;
  199. }
  200. char cfgbuf[CONFIG_MAX];
  201. /* Reading sgx.enclave_size from manifest */
  202. if (get_config(enclave->config, "sgx.enclave_size", cfgbuf, CONFIG_MAX) <= 0) {
  203. SGX_DBG(DBG_E, "Enclave size is not specified\n");
  204. ret = -EINVAL;
  205. goto out;
  206. }
  207. enclave->size = parse_int(cfgbuf);
  208. if (enclave->size & (enclave->size - 1)) {
  209. SGX_DBG(DBG_E, "Enclave size not a power of two (an SGX-imposed requirement)\n");
  210. ret = -EINVAL;
  211. goto out;
  212. }
  213. /* Reading sgx.thread_num from manifest */
  214. if (get_config(enclave->config, "sgx.thread_num", cfgbuf, CONFIG_MAX) > 0)
  215. enclave->thread_num = parse_int(cfgbuf);
  216. if (enclave_thread_num > MAX_DBG_THREADS) {
  217. SGX_DBG(DBG_E, "Too many threads to debug\n");
  218. ret = -EINVAL;
  219. goto out;
  220. }
  221. /* Reading sgx.static_address from manifest */
  222. if (get_config(enclave->config, "sgx.static_address", cfgbuf, CONFIG_MAX) > 0 && cfgbuf[0] == '1')
  223. enclave->baseaddr = heap_min;
  224. else
  225. enclave->baseaddr = heap_min = 0;
  226. ret = read_enclave_token(enclave->token, &enclave_token);
  227. if (ret < 0) {
  228. SGX_DBG(DBG_E, "Reading enclave token failed: %d\n", -ret);
  229. goto out;
  230. }
  231. ret = read_enclave_sigstruct(enclave->sigfile, &enclave_sigstruct);
  232. if (ret < 0) {
  233. SGX_DBG(DBG_E, "Reading enclave sigstruct failed: %d\n", -ret);
  234. goto out;
  235. }
  236. ret = create_enclave(&enclave_secs, enclave->baseaddr, enclave->size, &enclave_token);
  237. if (ret < 0) {
  238. SGX_DBG(DBG_E, "Creating enclave failed: %d\n", -ret);
  239. goto out;
  240. }
  241. enclave->baseaddr = enclave_secs.baseaddr;
  242. enclave->size = enclave_secs.size;
  243. enclave->ssaframesize = enclave_secs.ssaframesize * pagesize;
  244. struct stat stat;
  245. ret = INLINE_SYSCALL(fstat, 2, enclave->manifest, &stat);
  246. if (IS_ERR(ret)) {
  247. SGX_DBG(DBG_E, "Reading manifest file's size failed: %d\n", -ret);
  248. ret = -ERRNO(ret);
  249. goto out;
  250. }
  251. int manifest_size = stat.st_size;
  252. /* Start populating enclave memory */
  253. struct mem_area {
  254. const char * desc;
  255. bool skip_eextend;
  256. bool is_binary;
  257. int fd;
  258. unsigned long addr, size, prot;
  259. enum sgx_page_type type;
  260. };
  261. struct mem_area * areas =
  262. __alloca(sizeof(areas[0]) * (10 + enclave->thread_num));
  263. int area_num = 0;
  264. /* The manifest needs to be allocated at the upper end of the enclave
  265. * memory. That's used by pal_linux_main to find the manifest area. So add
  266. * it first to the list with memory areas. */
  267. areas[area_num] = (struct mem_area) {
  268. .desc = "manifest", .skip_eextend = false, .is_binary = false,
  269. .fd = enclave->manifest, .addr = 0, .size = ALLOC_ALIGNUP(manifest_size),
  270. .prot = PROT_READ, .type = SGX_PAGE_REG
  271. };
  272. area_num++;
  273. areas[area_num] = (struct mem_area) {
  274. .desc = "ssa", .skip_eextend = false, .is_binary = false,
  275. .fd = -1, .addr = 0, .size = enclave->thread_num * enclave->ssaframesize * SSAFRAMENUM,
  276. .prot = PROT_READ | PROT_WRITE, .type = SGX_PAGE_REG
  277. };
  278. struct mem_area* ssa_area = &areas[area_num++];
  279. areas[area_num] = (struct mem_area) {
  280. .desc = "tcs", .skip_eextend = false, .is_binary = false,
  281. .fd = -1, .addr = 0, .size = enclave->thread_num * pagesize,
  282. .prot = 0, .type = SGX_PAGE_TCS
  283. };
  284. struct mem_area* tcs_area = &areas[area_num++];
  285. areas[area_num] = (struct mem_area) {
  286. .desc = "tls", .skip_eextend = false, .is_binary = false,
  287. .fd = -1, .addr = 0, .size = enclave->thread_num * pagesize,
  288. .prot = PROT_READ | PROT_WRITE, .type = SGX_PAGE_REG
  289. };
  290. struct mem_area* tls_area = &areas[area_num++];
  291. struct mem_area* stack_areas = &areas[area_num]; /* memorize for later use */
  292. for (uint32_t t = 0; t < enclave->thread_num; t++) {
  293. areas[area_num] = (struct mem_area) {
  294. .desc = "stack", .skip_eextend = false, .is_binary = false,
  295. .fd = -1, .addr = 0, .size = ENCLAVE_STACK_SIZE,
  296. .prot = PROT_READ | PROT_WRITE, .type = SGX_PAGE_REG
  297. };
  298. area_num++;
  299. }
  300. areas[area_num] = (struct mem_area) {
  301. .desc = "pal", .skip_eextend = false, .is_binary = true,
  302. .fd = enclave_image, .addr = 0, .size = 0 /* set below */,
  303. .prot = 0, .type = SGX_PAGE_REG
  304. };
  305. struct mem_area* pal_area = &areas[area_num++];
  306. ret = scan_enclave_binary(enclave_image, &pal_area->addr, &pal_area->size, &enclave_entry_addr);
  307. if (ret < 0) {
  308. SGX_DBG(DBG_E, "Scanning Pal binary (%s) failed: %d\n", ENCLAVE_FILENAME, -ret);
  309. goto out;
  310. }
  311. struct mem_area* exec_area = NULL;
  312. if (enclave->exec != -1) {
  313. areas[area_num] = (struct mem_area) {
  314. .desc = "exec", .skip_eextend = false, .is_binary = true,
  315. .fd = enclave->exec, .addr = 0, .size = 0 /* set below */,
  316. .prot = PROT_WRITE, .type = SGX_PAGE_REG
  317. };
  318. exec_area = &areas[area_num++];
  319. ret = scan_enclave_binary(enclave->exec, &exec_area->addr, &exec_area->size, NULL);
  320. if (ret < 0) {
  321. SGX_DBG(DBG_E, "Scanning application binary failed: %d\n", -ret);
  322. goto out;
  323. }
  324. }
  325. unsigned long populating = enclave->size;
  326. for (int i = 0 ; i < area_num ; i++) {
  327. if (areas[i].addr)
  328. continue;
  329. areas[i].addr = populating - areas[i].size;
  330. if (&areas[i] == exec_area)
  331. populating = areas[i].addr;
  332. else
  333. populating = areas[i].addr - MEMORY_GAP;
  334. }
  335. enclave_entry_addr += pal_area->addr;
  336. if (exec_area) {
  337. if (exec_area->addr + exec_area->size > pal_area->addr) {
  338. SGX_DBG(DBG_E, "Application binary overlaps with Pal binary\n");
  339. ret = -EINVAL;
  340. goto out;
  341. }
  342. if (exec_area->addr + exec_area->size < populating) {
  343. if (populating > heap_min) {
  344. unsigned long addr = exec_area->addr + exec_area->size;
  345. if (addr < heap_min)
  346. addr = heap_min;
  347. areas[area_num] = (struct mem_area) {
  348. .desc = "free", .skip_eextend = true, .is_binary = false,
  349. .fd = -1, .addr = addr, .size = populating - addr,
  350. .prot = PROT_READ | PROT_WRITE | PROT_EXEC, .type = SGX_PAGE_REG
  351. };
  352. area_num++;
  353. }
  354. populating = exec_area->addr;
  355. }
  356. }
  357. if (populating > heap_min) {
  358. areas[area_num] = (struct mem_area) {
  359. .desc = "free", .skip_eextend = true, .is_binary = false,
  360. .fd = -1, .addr = heap_min, .size = populating - heap_min,
  361. .prot = PROT_READ | PROT_WRITE | PROT_EXEC, .type = SGX_PAGE_REG
  362. };
  363. area_num++;
  364. }
  365. for (int i = 0 ; i < area_num ; i++) {
  366. if (areas[i].fd != -1 && areas[i].is_binary) {
  367. ret = load_enclave_binary(&enclave_secs, areas[i].fd, areas[i].addr, areas[i].prot);
  368. if (ret < 0) {
  369. SGX_DBG(DBG_E, "Loading enclave binary failed: %d\n", -ret);
  370. goto out;
  371. }
  372. continue;
  373. }
  374. void * data = NULL;
  375. if (strcmp_static(areas[i].desc, "tls")) {
  376. data = (void *) INLINE_SYSCALL(mmap, 6, NULL, areas[i].size,
  377. PROT_READ|PROT_WRITE,
  378. MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
  379. if (data == (void *)-1 || data == NULL) {
  380. /* Note that Graphene currently doesn't handle 0x0 addresses */
  381. SGX_DBG(DBG_E, "Allocating memory for tls pages failed\n");
  382. goto out;
  383. }
  384. for (uint32_t t = 0 ; t < enclave->thread_num ; t++) {
  385. struct enclave_tls * gs = data + pagesize * t;
  386. memset(gs, 0, pagesize);
  387. assert(sizeof(*gs) <= pagesize);
  388. gs->common.self = (PAL_TCB *)(
  389. tls_area->addr + pagesize * t + enclave_secs.baseaddr);
  390. gs->enclave_size = enclave->size;
  391. gs->tcs_offset = tcs_area->addr + pagesize * t;
  392. gs->initial_stack_offset =
  393. stack_areas[t].addr + ENCLAVE_STACK_SIZE;
  394. gs->ssa = (void *) ssa_area->addr +
  395. enclave->ssaframesize * SSAFRAMENUM * t +
  396. enclave_secs.baseaddr;
  397. gs->gpr = gs->ssa +
  398. enclave->ssaframesize - sizeof(sgx_arch_gpr_t);
  399. gs->manifest_size = manifest_size;
  400. gs->heap_min = (void *) enclave_secs.baseaddr + heap_min;
  401. gs->heap_max = (void *) enclave_secs.baseaddr + pal_area->addr - MEMORY_GAP;
  402. if (exec_area) {
  403. gs->exec_addr = (void *) enclave_secs.baseaddr + exec_area->addr;
  404. gs->exec_size = exec_area->size;
  405. }
  406. gs->thread = NULL;
  407. }
  408. } else if (strcmp_static(areas[i].desc, "tcs")) {
  409. data = (void *) INLINE_SYSCALL(mmap, 6, NULL, areas[i].size,
  410. PROT_READ|PROT_WRITE,
  411. MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
  412. if (data == (void *)-1 || data == NULL) {
  413. /* Note that Graphene currently doesn't handle 0x0 addresses */
  414. SGX_DBG(DBG_E, "Allocating memory for tcs pages failed\n");
  415. goto out;
  416. }
  417. for (uint32_t t = 0 ; t < enclave->thread_num ; t++) {
  418. sgx_arch_tcs_t * tcs = data + pagesize * t;
  419. memset(tcs, 0, pagesize);
  420. tcs->ossa = ssa_area->addr +
  421. enclave->ssaframesize * SSAFRAMENUM * t;
  422. tcs->nssa = SSAFRAMENUM;
  423. tcs->oentry = enclave_entry_addr;
  424. tcs->ofsbasgx = 0;
  425. tcs->ogsbasgx = tls_area->addr + t * pagesize;
  426. tcs->fslimit = 0xfff;
  427. tcs->gslimit = 0xfff;
  428. tcs_addrs[t] = (void *) enclave_secs.baseaddr + tcs_area->addr
  429. + pagesize * t;
  430. }
  431. } else if (areas[i].fd != -1) {
  432. data = (void *) INLINE_SYSCALL(mmap, 6, NULL, areas[i].size,
  433. PROT_READ,
  434. MAP_FILE|MAP_PRIVATE,
  435. areas[i].fd, 0);
  436. if (data == (void *)-1 || data == NULL) {
  437. /* Note that Graphene currently doesn't handle 0x0 addresses */
  438. SGX_DBG(DBG_E, "Allocating memory for file %s failed\n", areas[i].desc);
  439. goto out;
  440. }
  441. }
  442. ret = add_pages_to_enclave(&enclave_secs, (void *) areas[i].addr, data, areas[i].size,
  443. areas[i].type, areas[i].prot, areas[i].skip_eextend, areas[i].desc);
  444. if (data)
  445. INLINE_SYSCALL(munmap, 2, data, areas[i].size);
  446. if (ret < 0) {
  447. SGX_DBG(DBG_E, "Adding pages (%s) to enclave failed: %d\n", areas[i].desc, -ret);
  448. goto out;
  449. }
  450. }
  451. ret = init_enclave(&enclave_secs, &enclave_sigstruct, &enclave_token);
  452. if (ret < 0) {
  453. SGX_DBG(DBG_E, "Initializing enclave failed: %d\n", -ret);
  454. goto out;
  455. }
  456. create_tcs_mapper((void *) enclave_secs.baseaddr + tcs_area->addr,
  457. enclave->thread_num);
  458. struct enclave_dbginfo * dbg = (void *)
  459. INLINE_SYSCALL(mmap, 6, DBGINFO_ADDR,
  460. sizeof(struct enclave_dbginfo),
  461. PROT_READ|PROT_WRITE,
  462. MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
  463. -1, 0);
  464. if (IS_ERR_P(dbg)) {
  465. SGX_DBG(DBG_E, "Cannot allocate debug information (GDB will not work)\n");
  466. } else {
  467. dbg->pid = INLINE_SYSCALL(getpid, 0);
  468. dbg->base = enclave->baseaddr;
  469. dbg->size = enclave->size;
  470. dbg->ssaframesize = enclave->ssaframesize;
  471. dbg->aep = async_exit_pointer;
  472. dbg->thread_tids[0] = dbg->pid;
  473. for (int i = 0 ; i < MAX_DBG_THREADS ; i++)
  474. dbg->tcs_addrs[i] = tcs_addrs[i];
  475. }
  476. ret = 0;
  477. out:
  478. if (enclave_image >= 0)
  479. INLINE_SYSCALL(close, 1, enclave_image);
  480. return ret;
  481. }
  482. static int mcast_s (int port)
  483. {
  484. struct sockaddr_in addr;
  485. int ret = 0;
  486. addr.sin_family = AF_INET;
  487. addr.sin_addr.s_addr = INADDR_ANY;
  488. addr.sin_port = htons(port);
  489. int fd = INLINE_SYSCALL(socket, 3, AF_INET, SOCK_DGRAM, 0);
  490. if (IS_ERR(fd))
  491. return -ERRNO(fd);
  492. ret = INLINE_SYSCALL(setsockopt, 5, fd, IPPROTO_IP, IP_MULTICAST_IF,
  493. &addr.sin_addr.s_addr, sizeof(addr.sin_addr.s_addr));
  494. if (IS_ERR(ret))
  495. return -ERRNO(ret);
  496. return fd;
  497. }
  498. static int mcast_c (int port)
  499. {
  500. int ret = 0, fd;
  501. struct sockaddr_in addr;
  502. addr.sin_family = AF_INET;
  503. addr.sin_addr.s_addr = INADDR_ANY;
  504. addr.sin_port = htons(port);
  505. fd = INLINE_SYSCALL(socket, 3, AF_INET, SOCK_DGRAM, 0);
  506. if (IS_ERR(fd))
  507. return -ERRNO(fd);
  508. int reuse = 1;
  509. INLINE_SYSCALL(setsockopt, 5, fd, SOL_SOCKET, SO_REUSEADDR,
  510. &reuse, sizeof(reuse));
  511. ret = INLINE_SYSCALL(bind, 3, fd, &addr, sizeof(addr));
  512. if (IS_ERR(ret))
  513. return -ERRNO(ret);
  514. ret = INLINE_SYSCALL(setsockopt, 5, fd, IPPROTO_IP, IP_MULTICAST_IF,
  515. &addr.sin_addr.s_addr, sizeof(addr.sin_addr.s_addr));
  516. if (IS_ERR(ret))
  517. return -ERRNO(ret);
  518. inet_pton4(MCAST_GROUP, sizeof(MCAST_GROUP) - 1,
  519. &addr.sin_addr.s_addr);
  520. struct ip_mreq group;
  521. group.imr_multiaddr.s_addr = addr.sin_addr.s_addr;
  522. group.imr_interface.s_addr = INADDR_ANY;
  523. ret = INLINE_SYSCALL(setsockopt, 5, fd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
  524. &group, sizeof(group));
  525. if (IS_ERR(ret))
  526. return -ERRNO(ret);
  527. return fd;
  528. }
/* State of the pseudo-random stream below; seeded from gettimeofday() in
 * load_enclave(). NOTE(review): this is a deterministic hash chain, NOT a
 * cryptographically secure RNG — do not rely on it for secrets. */
static unsigned long randval = 0;

/* Fill `buffer` with `size` pseudo-random bytes by repeatedly advancing
 * the global `randval` state through hash64(). Not thread-safe (shared
 * unsynchronized state). */
void getrand (void * buffer, size_t size)
{
    size_t bytes = 0;

    /* Emit whole 64-bit words while they fit. */
    while (bytes + sizeof(uint64_t) <= size) {
        *(uint64_t*) (buffer + bytes) = randval;
        randval = hash64(randval);
        bytes += sizeof(uint64_t);
    }

    /* Copy the sub-word tail (< 8 bytes) from the next state value. */
    if (bytes < size) {
        memcpy(buffer + bytes, &randval, size - bytes);
        randval = hash64(randval);
    }
}
/* Generate a fresh instance id and derive the "/graphene/<id>/" pipe-name
 * prefix from it. The id comes from getrand(), which is seeded from
 * wall-clock time — predictable, so this is identification, not a
 * security boundary. */
static void create_instance (struct pal_sec * pal_sec)
{
    PAL_NUM id;
    getrand(&id, sizeof(id));
    snprintf(pal_sec->pipe_prefix, sizeof(pal_sec->pipe_prefix), "/graphene/%016lx/", id);
    pal_sec->instance_id = id;
}
  550. int load_manifest (int fd, struct config_store ** config_ptr)
  551. {
  552. int ret = 0;
  553. int nbytes = INLINE_SYSCALL(lseek, 3, fd, 0, SEEK_END);
  554. if (IS_ERR(nbytes)) {
  555. SGX_DBG(DBG_E, "Cannot detect size of manifest file\n");
  556. return -ERRNO(nbytes);
  557. }
  558. struct config_store * config = malloc(sizeof(struct config_store));
  559. if (!config) {
  560. SGX_DBG(DBG_E, "Not enough memory for config_store of manifest\n");
  561. return -ENOMEM;
  562. }
  563. void * config_raw = (void *)
  564. INLINE_SYSCALL(mmap, 6, NULL, nbytes, PROT_READ, MAP_PRIVATE, fd, 0);
  565. if (IS_ERR_P(config_raw)) {
  566. SGX_DBG(DBG_E, "Cannot mmap manifest file\n");
  567. ret = -ERRNO_P(config_raw);
  568. goto out;
  569. }
  570. config->raw_data = config_raw;
  571. config->raw_size = nbytes;
  572. config->malloc = malloc;
  573. config->free = NULL;
  574. const char * errstring = NULL;
  575. ret = read_config(config, NULL, &errstring);
  576. if (ret < 0) {
  577. SGX_DBG(DBG_E, "Cannot read manifest: %s\n", errstring);
  578. goto out;
  579. }
  580. *config_ptr = config;
  581. ret = 0;
  582. out:
  583. if (ret < 0) {
  584. if (config)
  585. free(config);
  586. if (!IS_ERR_P(config_raw))
  587. INLINE_SYSCALL(munmap, 2, config_raw, nbytes);
  588. }
  589. return ret;
  590. }
  591. /*
  592. * Returns the number of online CPUs read from /sys/devices/system/cpu/online, -errno on failure.
  593. * Understands complex formats like "1,3-5,6".
  594. */
  595. int get_cpu_count(void) {
  596. int fd = INLINE_SYSCALL(open, 3, "/sys/devices/system/cpu/online", O_RDONLY|O_CLOEXEC, 0);
  597. if (fd < 0)
  598. return unix_to_pal_error(ERRNO(fd));
  599. char buf[64];
  600. int ret = INLINE_SYSCALL(read, 3, fd, buf, sizeof(buf) - 1);
  601. if (ret < 0) {
  602. INLINE_SYSCALL(close, 1, fd);
  603. return unix_to_pal_error(ERRNO(ret));
  604. }
  605. buf[ret] = '\0'; /* ensure null-terminated buf even in partial read */
  606. char* end;
  607. char* ptr = buf;
  608. int cpu_count = 0;
  609. while (*ptr) {
  610. while (*ptr == ' ' || *ptr == '\t' || *ptr == ',')
  611. ptr++;
  612. int firstint = (int)strtol(ptr, &end, 10);
  613. if (ptr == end)
  614. break;
  615. if (*end == '\0' || *end == ',') {
  616. /* single CPU index, count as one more CPU */
  617. cpu_count++;
  618. } else if (*end == '-') {
  619. /* CPU range, count how many CPUs in range */
  620. ptr = end + 1;
  621. int secondint = (int)strtol(ptr, &end, 10);
  622. if (secondint > firstint)
  623. cpu_count += secondint - firstint + 1; // inclusive (e.g., 0-7, or 8-16)
  624. }
  625. ptr = end;
  626. }
  627. INLINE_SYSCALL(close, 1, fd);
  628. if (cpu_count == 0)
  629. return -PAL_ERROR_STREAMNOTEXIST;
  630. return cpu_count;
  631. }
/* Top-level per-process setup: open the manifest / executable / sigstruct
 * / token files, build the enclave (initialize_enclave), set up multicast
 * sockets, signals and AESM target info, and finally transfer control into
 * the enclave via ecall_enclave_start. On success this function never
 * returns to the caller in the normal sense — it exits the process after
 * the enclave finishes. Returns -errno on setup failure.
 * NOTE(review): on error paths, fds and allocated URI strings acquired
 * earlier are not released; presumably tolerated because the process exits
 * on failure — confirm before reusing this in a long-lived context. */
static int load_enclave (struct pal_enclave * enclave,
                         char * manifest_uri,
                         char * exec_uri,
                         char * args, size_t args_size,
                         char * env, size_t env_size,
                         bool exec_uri_inferred)
{
    struct pal_sec * pal_sec = &enclave->pal_sec;
    int ret;
    const char * errstring;
    struct timeval tv;

#if PRINT_ENCLAVE_STAT == 1
    INLINE_SYSCALL(gettimeofday, 2, &tv, NULL);
    pal_sec->start_time = tv.tv_sec * 1000000UL + tv.tv_usec;
#endif

    ret = open_gsgx();
    if (ret < 0)
        return ret;

    if (!is_wrfsbase_supported())
        return -EPERM;

    /* Seed the (non-cryptographic) getrand() stream from wall-clock time. */
    INLINE_SYSCALL(gettimeofday, 2, &tv, NULL);
    randval = tv.tv_sec * 1000000UL + tv.tv_usec;

    pal_sec->pid = INLINE_SYSCALL(getpid, 0);
    pal_sec->uid = INLINE_SYSCALL(getuid, 0);
    pal_sec->gid = INLINE_SYSCALL(getgid, 0);

    int num_cpus = get_cpu_count();
    if (num_cpus < 0) {
        return num_cpus;
    }
    pal_sec->num_cpus = num_cpus;

#ifdef DEBUG
    /* Walk the NUL-separated env block: detect IN_GDB=1, and strip
     * LD_PRELOAD entries so they do not leak into the enclave. */
    size_t env_i = 0;
    while (env_i < env_size) {
        if (strcmp_static(&env[env_i], "IN_GDB=1")) {
            SGX_DBG(DBG_I, "[ Running under GDB ]\n");
            pal_sec->in_gdb = true;
        }

        if (strcmp_static(&env[env_i], "LD_PRELOAD=")) {
            /* Remove the whole entry by shifting the rest of the block down. */
            uint64_t env_i_size = strnlen(&env[env_i], env_size - env_i) + 1;
            memmove(&env[env_i], &env[env_i + env_i_size], env_size - env_i - env_i_size);
            env_size -= env_i_size;
            continue;
        }

        env_i += strnlen(&env[env_i], env_size - env_i) + 1;
    }
#endif

    char cfgbuf[CONFIG_MAX];

    /* manifest_uri is a "file:" URI; skip the 5-byte scheme prefix. */
    enclave->manifest = INLINE_SYSCALL(open, 3, manifest_uri + 5,
                                       O_RDONLY|O_CLOEXEC, 0);
    if (IS_ERR(enclave->manifest)) {
        SGX_DBG(DBG_E, "Cannot open manifest %s\n", manifest_uri);
        return -EINVAL;
    }

    ret = load_manifest(enclave->manifest, &enclave->config);
    if (ret < 0) {
        SGX_DBG(DBG_E, "Invalid manifest: %s\n", manifest_uri);
        return -EINVAL;
    }

    // A manifest can specify an executable with a different base name
    // than the manifest itself. Always give the exec field of the manifest
    // precedence if specified.
    if (get_config(enclave->config, "loader.exec", cfgbuf, CONFIG_MAX) > 0) {
        exec_uri = resolve_uri(cfgbuf, &errstring);
        exec_uri_inferred = false;
        if (!exec_uri) {
            SGX_DBG(DBG_E, "%s: %s\n", errstring, cfgbuf);
            return -EINVAL;
        }
    }

    if (exec_uri) {
        enclave->exec = INLINE_SYSCALL(open, 3,
                                       exec_uri + static_strlen("file:"),
                                       O_RDONLY|O_CLOEXEC, 0);
        if (IS_ERR(enclave->exec)) {
            if (exec_uri_inferred) {
                // It is valid for an enclave not to have an executable.
                // We need to catch the case where we inferred the executable
                // from the manifest file name, but it doesn't exist, and let
                // the enclave go a bit further. Go ahead and warn the user,
                // though.
                SGX_DBG(DBG_I, "Inferred executable cannot be opened: %s. This may be ok, "
                        "or may represent a manifest misconfiguration. This typically "
                        "represents advanced usage, and if it is not what you intended, "
                        "try setting the loader.exec field in the manifest.\n", exec_uri);
                enclave->exec = -1;
            } else {
                SGX_DBG(DBG_E, "Cannot open executable %s\n", exec_uri);
                return -EINVAL;
            }
        }
    } else {
        enclave->exec = -1;
    }

    /* The sigstruct file (".sig") is mandatory for SGX. */
    if (get_config(enclave->config, "sgx.sigfile", cfgbuf, CONFIG_MAX) < 0) {
        SGX_DBG(DBG_E, "Sigstruct file not found ('sgx.sigfile' must be specified in manifest)\n");
        return -EINVAL;
    }

    char * sig_uri = resolve_uri(cfgbuf, &errstring);
    if (!sig_uri) {
        SGX_DBG(DBG_E, "%s: %s\n", errstring, cfgbuf);
        return -EINVAL;
    }

    /* Enforce the ".sig" suffix so the ".token" sibling path below is valid. */
    if (!strcmp_static(sig_uri + strlen(sig_uri) - 4, ".sig")) {
        SGX_DBG(DBG_E, "Invalid sigstruct file URI as %s\n", cfgbuf);
        free(sig_uri);
        return -EINVAL;
    }

    enclave->sigfile = INLINE_SYSCALL(open, 3, sig_uri + 5, O_RDONLY|O_CLOEXEC, 0);
    if (IS_ERR(enclave->sigfile)) {
        SGX_DBG(DBG_E, "Cannot open sigstruct file %s\n", sig_uri);
        free(sig_uri);
        return -EINVAL;
    }

    /* Launch token lives next to the sigstruct: "<name>.token". */
    char * token_uri = alloc_concat(sig_uri, strlen(sig_uri) - 4, ".token", -1);
    free(sig_uri);

    enclave->token = INLINE_SYSCALL(open, 3, token_uri + 5, O_RDONLY|O_CLOEXEC, 0);
    if (IS_ERR(enclave->token)) {
        SGX_DBG(DBG_E, "Cannot open token \'%s\'. Use \'"
                PAL_FILE("pal-sgx-get-token")
                "\' on the runtime host or run \'make SGX_RUN=1\' "
                "in the Graphene source to create the token file.\n",
                token_uri);
        free(token_uri);
        return -EINVAL;
    }
    SGX_DBG(DBG_I, "Token file: %s\n", token_uri);
    free(token_uri);

    /* Build, measure, and EINIT the enclave. */
    ret = initialize_enclave(enclave);
    if (ret < 0)
        return ret;

    if (!pal_sec->instance_id)
        create_instance(&enclave->pal_sec);

    memcpy(pal_sec->manifest_name, manifest_uri, strlen(manifest_uri) + 1);

    if (enclave->exec == -1) {
        memset(pal_sec->exec_name, 0, sizeof(PAL_SEC_STR));
    } else {
        memcpy(pal_sec->exec_name, exec_uri, strlen(exec_uri) + 1);
    }

    /* Pick a multicast port once per instance (kept above 1024). */
    if (!pal_sec->mcast_port) {
        unsigned short mcast_port;
        getrand(&mcast_port, sizeof(unsigned short));
        pal_sec->mcast_port = mcast_port > 1024 ? mcast_port : mcast_port + 1024;
    }

    /* Best-effort multicast setup; on client failure the server socket is
     * torn down again so both are either set or unset. */
    if ((ret = mcast_s(pal_sec->mcast_port)) >= 0) {
        pal_sec->mcast_srv = ret;
        if ((ret = mcast_c(pal_sec->mcast_port)) >= 0) {
            pal_sec->mcast_cli = ret;
        } else {
            INLINE_SYSCALL(close, 1, pal_sec->mcast_srv);
            pal_sec->mcast_srv = 0;
        }
    }

    ret = sgx_signal_setup();
    if (ret < 0)
        return ret;

    ret = init_aesm_targetinfo(&pal_sec->aesm_targetinfo);
    if (ret < 0)
        return ret;

    current_enclave = enclave;
    map_tcs(INLINE_SYSCALL(gettid, 0), /* created_by_pthread=*/false);

    /* start running trusted PAL */
    ecall_enclave_start(args, args_size, env, env_size);

#if PRINT_ENCLAVE_STAT == 1
    PAL_NUM exit_time = 0;
    INLINE_SYSCALL(gettimeofday, 2, &tv, NULL);
    exit_time = tv.tv_sec * 1000000UL + tv.tv_usec;
#endif

    unmap_tcs();
    INLINE_SYSCALL(exit, 0);
    return 0;
}
  803. int main (int argc, char ** argv, char ** envp)
  804. {
  805. char * manifest_uri = NULL;
  806. char * exec_uri = NULL;
  807. const char * pal_loader = argv[0];
  808. int ret = 0;
  809. bool exec_uri_inferred = false; // Handle the case where the exec uri is
  810. // inferred from the manifest name somewhat
  811. // differently
  812. argc--;
  813. argv++;
  814. struct pal_enclave * enclave = malloc(sizeof(struct pal_enclave));
  815. if (!enclave)
  816. return -ENOMEM;
  817. memset(enclave, 0, sizeof(struct pal_enclave));
  818. int is_child = sgx_init_child_process(&enclave->pal_sec);
  819. if (is_child < 0) {
  820. ret = is_child;
  821. goto out;
  822. }
  823. if (!is_child) {
  824. /* occupy PROC_INIT_FD so no one will use it */
  825. INLINE_SYSCALL(dup2, 2, 0, PROC_INIT_FD);
  826. if (!argc)
  827. goto usage;
  828. if (strcmp_static(argv[0], "file:")) {
  829. exec_uri = alloc_concat(argv[0], -1, NULL, -1);
  830. } else {
  831. exec_uri = alloc_concat("file:", -1, argv[0], -1);
  832. }
  833. } else {
  834. exec_uri = alloc_concat(enclave->pal_sec.exec_name, -1, NULL, -1);
  835. }
  836. int fd = INLINE_SYSCALL(open, 3, exec_uri + 5, O_RDONLY|O_CLOEXEC, 0);
  837. if (IS_ERR(fd)) {
  838. SGX_DBG(DBG_E, "Executable not found\n");
  839. goto usage;
  840. }
  841. char filebuf[4];
  842. /* Check if the first argument is a executable. If it is, try finding
  843. all the possible manifest files. */
  844. INLINE_SYSCALL(read, 3, fd, filebuf, 4);
  845. INLINE_SYSCALL(close, 1, fd);
  846. char sgx_manifest[URI_MAX];
  847. size_t len = sizeof(sgx_manifest);
  848. ret = get_base_name(exec_uri + static_strlen("file:"), sgx_manifest, &len);
  849. if (ret < 0) {
  850. goto out;
  851. }
  852. if (strcmp_static(sgx_manifest + len - strlen(".manifest"), ".manifest")) {
  853. strcpy_static(sgx_manifest + len, ".sgx", sizeof(sgx_manifest) - len);
  854. } else if (!strcmp_static(sgx_manifest + len - strlen(".manifest.sgx"),
  855. ".manifest.sgx")) {
  856. strcpy_static(sgx_manifest + len, ".manifest.sgx", sizeof(sgx_manifest) - len);
  857. }
  858. if (memcmp(filebuf, "\177ELF", 4)) {
  859. // In this case the manifest is given as the executable. Set
  860. // manifest_uri to sgx_manifest (should be the same), and
  861. // and drop the .manifest* from exec_uri, so that the program
  862. // loads properly.
  863. manifest_uri = sgx_manifest;
  864. size_t exec_len = strlen(exec_uri);
  865. if (strcmp_static(exec_uri + exec_len - strlen(".manifest"), ".manifest")) {
  866. exec_uri[exec_len - strlen(".manifest")] = '\0';
  867. exec_uri_inferred = true;
  868. } else if (strcmp_static(exec_uri + exec_len - strlen(".manifest.sgx"), ".manifest.sgx")) {
  869. exec_uri[exec_len - strlen(".manifest.sgx")] = '\0';
  870. exec_uri_inferred = true;
  871. }
  872. }
  873. fd = INLINE_SYSCALL(open, 3, sgx_manifest, O_RDONLY|O_CLOEXEC, 0);
  874. if (!IS_ERR(fd)) {
  875. manifest_uri = alloc_concat("file:", static_strlen("file:"),
  876. sgx_manifest, -1);
  877. INLINE_SYSCALL(close, 1, fd);
  878. } else if (!manifest_uri) {
  879. SGX_DBG(DBG_E, "Cannot open manifest file: %s\n", sgx_manifest);
  880. goto usage;
  881. }
  882. SGX_DBG(DBG_I, "Manifest file: %s\n", manifest_uri);
  883. if (exec_uri)
  884. SGX_DBG(DBG_I, "Executable file: %s\n", exec_uri);
  885. else
  886. SGX_DBG(DBG_I, "Executable file not found\n");
  887. /*
  888. * While C does not guarantee that the argv[i] and envp[i] strings are
  889. * continuous we know that we are running on Linux, which does this. This
  890. * saves us creating a copy of all argv and envp strings.
  891. */
  892. char * args = argv[0];
  893. size_t args_size = argc > 0 ? (argv[argc - 1] - argv[0]) + strlen(argv[argc - 1]) + 1: 0;
  894. int envc = 0;
  895. while (envp[envc] != NULL) {
  896. envc++;
  897. }
  898. char * env = envp[0];
  899. size_t env_size = envc > 0 ? (envp[envc - 1] - envp[0]) + strlen(envp[envc - 1]) + 1: 0;
  900. ret = load_enclave(enclave, manifest_uri, exec_uri, args, args_size,
  901. env, env_size, exec_uri_inferred);
  902. out:
  903. if (enclave->manifest >= 0)
  904. INLINE_SYSCALL(close, 1, enclave->manifest);
  905. if (enclave->exec >= 0)
  906. INLINE_SYSCALL(close, 1, enclave->exec);
  907. if (enclave->sigfile >= 0)
  908. INLINE_SYSCALL(close, 1, enclave->sigfile);
  909. if (enclave->token >= 0)
  910. INLINE_SYSCALL(close, 1, enclave->token);
  911. if (enclave)
  912. free(enclave);
  913. if (exec_uri)
  914. free(exec_uri);
  915. if (manifest_uri && manifest_uri != sgx_manifest)
  916. free(manifest_uri);
  917. return ret;
  918. usage:
  919. SGX_DBG(DBG_E, "USAGE: %s [executable|manifest] args ...\n", pal_loader);
  920. ret = -EINVAL;
  921. goto out;
  922. }