/* enclave_ocalls.c */
  1. /*
  2. * This is for enclave to make ocalls to untrusted runtime.
  3. */
  4. #include "pal_linux.h"
  5. #include "pal_internal.h"
  6. #include "pal_debug.h"
  7. #include "enclave_ocalls.h"
  8. #include "ocall_types.h"
  9. #include "ecall_types.h"
  10. #include <api.h>
  11. #include <asm/errno.h>
  12. /* TODO: revise return value as long sgx_ocall(void*) */
  13. /* Check against this limit if the buffer to be allocated fits on the untrusted stack; if not,
  14. * buffer will be allocated on untrusted heap. Conservatively set this limit to 1/4 of the
  15. * actual stack size. Currently THREAD_STACK_SIZE = 2MB, so this limit is 512KB.
  16. * Note that the main thread is special in that it is handled by Linux, with the typical stack
  17. * size of 8MB. Thus, 512KB limit also works well for the main thread. */
  18. #define MAX_UNTRUSTED_STACK_BUF (THREAD_STACK_SIZE / 4)
/*
 * Terminate the current thread (or the whole process if is_exitgroup is set)
 * via the untrusted runtime. Never returns.
 */
noreturn void ocall_exit(int exitcode, int is_exitgroup)
{
    ms_ocall_exit_t * ms;

    /* NOTE(review): the allocation result is not checked here; if the
     * untrusted stack is exhausted this dereferences NULL. Confirm this is
     * acceptable on the (unrecoverable) exit path. */
    ms = sgx_alloc_on_ustack(sizeof(*ms));
    ms->ms_exitcode = exitcode;
    ms->ms_is_exitgroup = is_exitgroup;

    // There are two reasons for this loop:
    //  1. Ocalls can be interrupted.
    //  2. We can't trust the outside to actually exit, so we need to ensure
    //     that we never return even when the outside tries to trick us (this
    //     case should be already caught by enclave_entry.S).
    while (true) {
        sgx_ocall(OCALL_EXIT, ms);
    }
}
  34. int ocall_mmap_untrusted (int fd, uint64_t offset,
  35. uint64_t size, unsigned short prot,
  36. void ** mem)
  37. {
  38. int retval = 0;
  39. ms_ocall_mmap_untrusted_t * ms;
  40. ms = sgx_alloc_on_ustack(sizeof(*ms));
  41. if (!ms) {
  42. sgx_reset_ustack();
  43. return -EPERM;
  44. }
  45. ms->ms_fd = fd;
  46. ms->ms_offset = offset;
  47. ms->ms_size = size;
  48. ms->ms_prot = prot;
  49. retval = sgx_ocall(OCALL_MMAP_UNTRUSTED, ms);
  50. if (!retval) {
  51. if (!sgx_copy_ptr_to_enclave(mem, ms->ms_mem, size)) {
  52. sgx_reset_ustack();
  53. return -EPERM;
  54. }
  55. }
  56. sgx_reset_ustack();
  57. return retval;
  58. }
  59. int ocall_munmap_untrusted (const void * mem, uint64_t size)
  60. {
  61. int retval = 0;
  62. ms_ocall_munmap_untrusted_t * ms;
  63. if (!sgx_is_completely_outside_enclave(mem, size)) {
  64. sgx_reset_ustack();
  65. return -EINVAL;
  66. }
  67. ms = sgx_alloc_on_ustack(sizeof(*ms));
  68. if (!ms) {
  69. sgx_reset_ustack();
  70. return -EPERM;
  71. }
  72. ms->ms_mem = mem;
  73. ms->ms_size = size;
  74. retval = sgx_ocall(OCALL_MUNMAP_UNTRUSTED, ms);
  75. sgx_reset_ustack();
  76. return retval;
  77. }
  78. /*
  79. * Memorize untrusted memory area to avoid mmap/munmap per each read/write IO. Because this cache
  80. * is per-thread, we don't worry about concurrency. The cache will be carried over thread
  81. * exit/creation. On fork/exec emulation, untrusted code does vfork/exec, so the mmapped cache
  82. * will be released by exec host syscall.
  83. *
  84. * In case of AEX and consequent signal handling, current thread may be interrupted in the middle
  85. * of using the cache. If there are OCALLs during signal handling, they could interfere with the
  86. * normal-execution use of the cache, so 'in_use' atomic protects against it. OCALLs during signal
  87. * handling do not use the cache and always explicitly mmap/munmap untrusted memory; 'need_munmap'
  88. * indicates whether explicit munmap is needed at the end of such OCALL.
  89. */
/*
 * Obtain an untrusted memory area of at least `size` bytes: reuse the
 * per-thread cached mapping when available and large enough, otherwise mmap
 * a fresh area. Sets *need_munmap when the caller must explicitly munmap
 * (i.e., the cache could not be used, e.g. during AEX signal handling).
 * Returns 0 on success or a negative errno value.
 */
static int ocall_mmap_untrusted_cache(uint64_t size, void** mem, bool* need_munmap) {
    *need_munmap = false;
    struct untrusted_area* cache = &get_tcb_trts()->untrusted_area_cache;

    /* try to take the 'in_use' lock (0 -> 1) */
    uint64_t in_use = 0;
    if (!__atomic_compare_exchange_n(&cache->in_use, &in_use, 1, false, __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
        /* AEX signal handling case: cache is in use, so make explicit mmap/munmap */
        int retval = ocall_mmap_untrusted(-1, 0, size, PROT_READ | PROT_WRITE, mem);
        if (IS_ERR(retval)) {
            return retval;
        }
        *need_munmap = true;
        return 0;
    }

    /* normal execution case: cache was not in use, so use it/allocate new one for reuse */
    if (cache->valid) {
        if (cache->size >= size) {
            /* cached area is large enough: hand it out (lock stays held until
             * ocall_munmap_untrusted_cache releases it) */
            *mem = cache->mem;
            return 0;
        }
        /* cached area too small: discard it before allocating a bigger one */
        int retval = ocall_munmap_untrusted(cache->mem, cache->size);
        if (IS_ERR(retval)) {
            cache->valid = false;
            __atomic_store_n(&cache->in_use, 0, __ATOMIC_RELAXED);
            return retval;
        }
    }

    int retval = ocall_mmap_untrusted(-1, 0, size, PROT_READ | PROT_WRITE, mem);
    if (IS_ERR(retval)) {
        /* on failure, invalidate and release the cache */
        cache->valid = false;
        __atomic_store_n(&cache->in_use, 0, __ATOMIC_RELAXED);
    } else {
        /* remember the new area for reuse; 'in_use' remains set */
        cache->valid = true;
        cache->mem = *mem;
        cache->size = size;
    }
    return retval;
}
  127. static void ocall_munmap_untrusted_cache(void* mem, uint64_t size, bool need_munmap) {
  128. if (need_munmap) {
  129. ocall_munmap_untrusted(mem, size);
  130. /* there is not much we can do in case of error */
  131. } else {
  132. struct untrusted_area* cache = &get_tcb_trts()->untrusted_area_cache;
  133. __atomic_store_n(&cache->in_use, 0, __ATOMIC_RELAXED);
  134. }
  135. }
  136. int ocall_cpuid (unsigned int leaf, unsigned int subleaf,
  137. unsigned int values[4])
  138. {
  139. int retval = 0;
  140. ms_ocall_cpuid_t * ms;
  141. ms = sgx_alloc_on_ustack(sizeof(*ms));
  142. if (!ms) {
  143. sgx_reset_ustack();
  144. return -EPERM;
  145. }
  146. ms->ms_leaf = leaf;
  147. ms->ms_subleaf = subleaf;
  148. retval = sgx_ocall(OCALL_CPUID, ms);
  149. if (!retval) {
  150. values[0] = ms->ms_values[0];
  151. values[1] = ms->ms_values[1];
  152. values[2] = ms->ms_values[2];
  153. values[3] = ms->ms_values[3];
  154. }
  155. sgx_reset_ustack();
  156. return retval;
  157. }
  158. int ocall_open (const char * pathname, int flags, unsigned short mode)
  159. {
  160. int retval = 0;
  161. int len = pathname ? strlen(pathname) + 1 : 0;
  162. ms_ocall_open_t * ms;
  163. ms = sgx_alloc_on_ustack(sizeof(*ms));
  164. if (!ms) {
  165. sgx_reset_ustack();
  166. return -EPERM;
  167. }
  168. ms->ms_flags = flags;
  169. ms->ms_mode = mode;
  170. ms->ms_pathname = sgx_copy_to_ustack(pathname, len);
  171. if (!ms->ms_pathname) {
  172. sgx_reset_ustack();
  173. return -EPERM;
  174. }
  175. retval = sgx_ocall(OCALL_OPEN, ms);
  176. sgx_reset_ustack();
  177. return retval;
  178. }
  179. int ocall_close (int fd)
  180. {
  181. int retval = 0;
  182. ms_ocall_close_t *ms;
  183. ms = sgx_alloc_on_ustack(sizeof(*ms));
  184. if (!ms) {
  185. sgx_reset_ustack();
  186. return -EPERM;
  187. }
  188. ms->ms_fd = fd;
  189. retval = sgx_ocall(OCALL_CLOSE, ms);
  190. sgx_reset_ustack();
  191. return retval;
  192. }
/*
 * Read up to `count` bytes from host fd into enclave buffer `buf`.
 * Data travels through an untrusted staging buffer: on the untrusted stack
 * for small reads, via the per-thread untrusted mmap cache for large ones.
 * Returns the number of bytes read or a negative errno value.
 */
int ocall_read(int fd, void* buf, unsigned int count) {
    int retval = 0;
    void* obuf = NULL;
    ms_ocall_read_t* ms;
    void* ms_buf;
    bool need_munmap = false;

    if (count > MAX_UNTRUSTED_STACK_BUF) {
        /* buf is too big and may overflow untrusted stack, so use untrusted heap */
        retval = ocall_mmap_untrusted_cache(ALLOC_ALIGN_UP(count), &obuf, &need_munmap);
        if (IS_ERR(retval))
            return retval;
        ms_buf = obuf;
    } else {
        ms_buf = sgx_alloc_on_ustack(count);
        if (!ms_buf) {
            retval = -EPERM;
            goto out;
        }
    }

    ms = sgx_alloc_on_ustack(sizeof(*ms));
    if (!ms) {
        retval = -EPERM;
        goto out;
    }

    ms->ms_fd = fd;
    ms->ms_count = count;
    ms->ms_buf = ms_buf;

    retval = sgx_ocall(OCALL_READ, ms);

    if (retval > 0) {
        /* copy only `retval` bytes back in; fails (returns 0) if the
         * untrusted side claims more than `count` bytes were read */
        if (!sgx_copy_to_enclave(buf, count, ms->ms_buf, retval)) {
            retval = -EPERM;
            goto out;
        }
    }

out:
    sgx_reset_ustack();
    if (obuf)
        ocall_munmap_untrusted_cache(obuf, ALLOC_ALIGN_UP(count), need_munmap);
    return retval;
}
/*
 * Write `count` bytes from `buf` to host fd. `buf` may be entirely inside
 * the enclave (copied out to the untrusted stack or heap) or already
 * entirely in untrusted memory (used in place); buffers straddling the
 * enclave boundary are rejected with -EPERM.
 * Returns the number of bytes written or a negative errno value.
 */
int ocall_write(int fd, const void* buf, unsigned int count) {
    int retval = 0;
    void* obuf = NULL;
    ms_ocall_write_t* ms;
    const void* ms_buf;
    bool need_munmap = false;

    if (sgx_is_completely_outside_enclave(buf, count)) {
        /* buf is in untrusted memory (e.g., allowed file mmaped in untrusted memory) */
        ms_buf = buf;
    } else if (sgx_is_completely_within_enclave(buf, count)) {
        /* typical case of buf inside of enclave memory */
        if (count > MAX_UNTRUSTED_STACK_BUF) {
            /* buf is too big and may overflow untrusted stack, so use untrusted heap */
            retval = ocall_mmap_untrusted_cache(ALLOC_ALIGN_UP(count), &obuf, &need_munmap);
            if (IS_ERR(retval))
                return retval;
            memcpy(obuf, buf, count);
            ms_buf = obuf;
        } else {
            ms_buf = sgx_copy_to_ustack(buf, count);
        }
    } else {
        /* buf is partially in/out of enclave memory */
        ms_buf = NULL;
    }

    if (!ms_buf) {
        retval = -EPERM;
        goto out;
    }

    ms = sgx_alloc_on_ustack(sizeof(*ms));
    if (!ms) {
        retval = -EPERM;
        goto out;
    }

    ms->ms_fd = fd;
    ms->ms_count = count;
    ms->ms_buf = ms_buf;

    retval = sgx_ocall(OCALL_WRITE, ms);

out:
    sgx_reset_ustack();
    if (obuf)
        ocall_munmap_untrusted_cache(obuf, ALLOC_ALIGN_UP(count), need_munmap);
    return retval;
}
/*
 * Positional read: read up to `count` bytes at `offset` from host fd into
 * enclave buffer `buf`, staging through untrusted stack (small reads) or the
 * untrusted mmap cache (large reads).
 * Returns the number of bytes read or a negative errno value.
 */
ssize_t ocall_pread(int fd, void* buf, size_t count, off_t offset) {
    long retval = 0;
    void* obuf = NULL;
    ms_ocall_pread_t* ms;
    void* ms_buf;
    bool need_munmap = false;

    if (count > MAX_UNTRUSTED_STACK_BUF) {
        /* buf is too big and may overflow untrusted stack, so use untrusted heap */
        retval = ocall_mmap_untrusted_cache(ALLOC_ALIGN_UP(count), &obuf, &need_munmap);
        if (IS_ERR(retval))
            return retval;
        ms_buf = obuf;
    } else {
        ms_buf = sgx_alloc_on_ustack(count);
        if (!ms_buf) {
            retval = -EPERM;
            goto out;
        }
    }

    ms = sgx_alloc_on_ustack(sizeof(*ms));
    if (!ms) {
        retval = -EPERM;
        goto out;
    }

    ms->ms_fd = fd;
    ms->ms_count = count;
    ms->ms_offset = offset;
    ms->ms_buf = ms_buf;

    retval = sgx_ocall(OCALL_PREAD, ms);
    if (retval > 0) {
        /* fails if the untrusted side claims more than `count` bytes */
        if (!sgx_copy_to_enclave(buf, count, ms->ms_buf, retval)) {
            retval = -EPERM;
        }
    }

out:
    sgx_reset_ustack();
    if (obuf)
        ocall_munmap_untrusted_cache(obuf, ALLOC_ALIGN_UP(count), need_munmap);
    return retval;
}
/*
 * Positional write: write `count` bytes from `buf` at `offset` to host fd.
 * `buf` may be entirely inside the enclave (copied out) or already entirely
 * in untrusted memory (used in place); straddling buffers are rejected.
 * Returns the number of bytes written or a negative errno value.
 */
ssize_t ocall_pwrite(int fd, const void* buf, size_t count, off_t offset) {
    long retval = 0;
    void* obuf = NULL;
    ms_ocall_pwrite_t* ms;
    const void* ms_buf;
    bool need_munmap = false;

    if (sgx_is_completely_outside_enclave(buf, count)) {
        /* buf is in untrusted memory (e.g., allowed file mmaped in untrusted memory) */
        ms_buf = buf;
    } else if (sgx_is_completely_within_enclave(buf, count)) {
        /* typical case of buf inside of enclave memory */
        if (count > MAX_UNTRUSTED_STACK_BUF) {
            /* buf is too big and may overflow untrusted stack, so use untrusted heap */
            retval = ocall_mmap_untrusted_cache(ALLOC_ALIGN_UP(count), &obuf, &need_munmap);
            if (IS_ERR(retval))
                return retval;
            memcpy(obuf, buf, count);
            ms_buf = obuf;
        } else {
            ms_buf = sgx_copy_to_ustack(buf, count);
        }
    } else {
        /* buf is partially in/out of enclave memory */
        ms_buf = NULL;
    }

    if (!ms_buf) {
        retval = -EPERM;
        goto out;
    }

    ms = sgx_alloc_on_ustack(sizeof(*ms));
    if (!ms) {
        retval = -EPERM;
        goto out;
    }

    ms->ms_fd = fd;
    ms->ms_count = count;
    ms->ms_offset = offset;
    ms->ms_buf = ms_buf;

    retval = sgx_ocall(OCALL_PWRITE, ms);

out:
    sgx_reset_ustack();
    if (obuf)
        ocall_munmap_untrusted_cache(obuf, ALLOC_ALIGN_UP(count), need_munmap);
    return retval;
}
  360. int ocall_fstat (int fd, struct stat * buf)
  361. {
  362. int retval = 0;
  363. ms_ocall_fstat_t * ms;
  364. ms = sgx_alloc_on_ustack(sizeof(*ms));
  365. if (!ms) {
  366. sgx_reset_ustack();
  367. return -EPERM;
  368. }
  369. ms->ms_fd = fd;
  370. retval = sgx_ocall(OCALL_FSTAT, ms);
  371. if (!retval)
  372. memcpy(buf, &ms->ms_stat, sizeof(struct stat));
  373. sgx_reset_ustack();
  374. return retval;
  375. }
  376. int ocall_fionread (int fd)
  377. {
  378. int retval = 0;
  379. ms_ocall_fionread_t * ms;
  380. ms = sgx_alloc_on_ustack(sizeof(*ms));
  381. if (!ms) {
  382. sgx_reset_ustack();
  383. return -EPERM;
  384. }
  385. ms->ms_fd = fd;
  386. retval = sgx_ocall(OCALL_FIONREAD, ms);
  387. sgx_reset_ustack();
  388. return retval;
  389. }
  390. int ocall_fsetnonblock (int fd, int nonblocking)
  391. {
  392. int retval = 0;
  393. ms_ocall_fsetnonblock_t * ms;
  394. ms = sgx_alloc_on_ustack(sizeof(*ms));
  395. if (!ms) {
  396. sgx_reset_ustack();
  397. return -EPERM;
  398. }
  399. ms->ms_fd = fd;
  400. ms->ms_nonblocking = nonblocking;
  401. retval = sgx_ocall(OCALL_FSETNONBLOCK, ms);
  402. sgx_reset_ustack();
  403. return retval;
  404. }
  405. int ocall_fchmod (int fd, unsigned short mode)
  406. {
  407. int retval = 0;
  408. ms_ocall_fchmod_t * ms;
  409. ms = sgx_alloc_on_ustack(sizeof(*ms));
  410. if (!ms) {
  411. sgx_reset_ustack();
  412. return -EPERM;
  413. }
  414. ms->ms_fd = fd;
  415. ms->ms_mode = mode;
  416. retval = sgx_ocall(OCALL_FCHMOD, ms);
  417. sgx_reset_ustack();
  418. return retval;
  419. }
  420. int ocall_fsync (int fd)
  421. {
  422. int retval = 0;
  423. ms_ocall_fsync_t * ms;
  424. ms = sgx_alloc_on_ustack(sizeof(*ms));
  425. if (!ms) {
  426. sgx_reset_ustack();
  427. return -EPERM;
  428. }
  429. ms->ms_fd = fd;
  430. retval = sgx_ocall(OCALL_FSYNC, ms);
  431. sgx_reset_ustack();
  432. return retval;
  433. }
  434. int ocall_ftruncate (int fd, uint64_t length)
  435. {
  436. int retval = 0;
  437. ms_ocall_ftruncate_t * ms;
  438. ms = sgx_alloc_on_ustack(sizeof(*ms));
  439. if (!ms) {
  440. sgx_reset_ustack();
  441. return -EPERM;
  442. }
  443. ms->ms_fd = fd;
  444. ms->ms_length = length;
  445. retval = sgx_ocall(OCALL_FTRUNCATE, ms);
  446. sgx_reset_ustack();
  447. return retval;
  448. }
  449. int ocall_mkdir (const char * pathname, unsigned short mode)
  450. {
  451. int retval = 0;
  452. int len = pathname ? strlen(pathname) + 1 : 0;
  453. ms_ocall_mkdir_t * ms;
  454. ms = sgx_alloc_on_ustack(sizeof(*ms));
  455. if (!ms) {
  456. sgx_reset_ustack();
  457. return -EPERM;
  458. }
  459. ms->ms_mode = mode;
  460. ms->ms_pathname = sgx_copy_to_ustack(pathname, len);
  461. if (!ms->ms_pathname) {
  462. sgx_reset_ustack();
  463. return -EPERM;
  464. }
  465. retval = sgx_ocall(OCALL_MKDIR, ms);
  466. sgx_reset_ustack();
  467. return retval;
  468. }
  469. int ocall_getdents (int fd, struct linux_dirent64 * dirp, unsigned int size)
  470. {
  471. int retval = 0;
  472. ms_ocall_getdents_t * ms;
  473. ms = sgx_alloc_on_ustack(sizeof(*ms));
  474. if (!ms) {
  475. sgx_reset_ustack();
  476. return -EPERM;
  477. }
  478. ms->ms_fd = fd;
  479. ms->ms_size = size;
  480. ms->ms_dirp = sgx_alloc_on_ustack(size);
  481. if (!ms->ms_dirp) {
  482. sgx_reset_ustack();
  483. return -EPERM;
  484. }
  485. retval = sgx_ocall(OCALL_GETDENTS, ms);
  486. if (retval > 0) {
  487. if (!sgx_copy_to_enclave(dirp, size, ms->ms_dirp, retval)) {
  488. sgx_reset_ustack();
  489. return -EPERM;
  490. }
  491. }
  492. sgx_reset_ustack();
  493. return retval;
  494. }
/* Resume a thread identified by its TCS pointer; the pointer is passed to
 * the untrusted runtime as-is (no marshalling buffer needed). */
int ocall_resume_thread (void * tcs)
{
    return sgx_ocall(OCALL_RESUME_THREAD, tcs);
}
/* Ask the untrusted runtime to create a new thread; no arguments are passed
 * (a NULL dummy pointer is used as the ocall argument). */
int ocall_clone_thread (void)
{
    void* dummy = NULL;
    return sgx_ocall(OCALL_CLONE_THREAD, dummy);
}
/*
 * Ask the untrusted runtime to spawn a child process executing `uri` with
 * `nargs` arguments. On success, optionally returns the child pid and the
 * stream/cargo fds connected to the child (each out-parameter may be NULL).
 * Returns 0 or a negative errno value.
 */
int ocall_create_process(const char* uri, int nargs, const char** args, int* stream_fd,
                         int* cargo_fd, unsigned int* pid) {
    int retval = 0;
    int ulen = uri ? strlen(uri) + 1 : 0;
    ms_ocall_create_process_t * ms;

    /* trailing array of nargs untrusted argument pointers follows the struct */
    ms = sgx_alloc_on_ustack(sizeof(*ms) + nargs * sizeof(char *));
    if (!ms) {
        sgx_reset_ustack();
        return -EPERM;
    }

    ms->ms_uri = uri ? sgx_copy_to_ustack(uri, ulen) : NULL;
    if (uri && !ms->ms_uri) {
        sgx_reset_ustack();
        return -EPERM;
    }

    ms->ms_nargs = nargs;
    for (int i = 0 ; i < nargs ; i++) {
        /* copy each argument string (NUL included) to untrusted memory */
        int len = args[i] ? strlen(args[i]) + 1 : 0;
        ms->ms_args[i] = args[i] ? sgx_copy_to_ustack(args[i], len) : NULL;
        if (args[i] && !ms->ms_args[i]) {
            sgx_reset_ustack();
            return -EPERM;
        }
    }

    retval = sgx_ocall(OCALL_CREATE_PROCESS, ms);

    if (!retval) {
        /* all out-parameters are optional */
        if (pid)
            *pid = ms->ms_pid;
        if (stream_fd)
            *stream_fd = ms->ms_stream_fd;
        if (cargo_fd)
            *cargo_fd = ms->ms_cargo_fd;
    }

    sgx_reset_ustack();
    return retval;
}
  540. int ocall_futex(int* futex, int op, int val, int64_t timeout_us) {
  541. int retval = 0;
  542. ms_ocall_futex_t * ms;
  543. if (!sgx_is_completely_outside_enclave(futex, sizeof(int))) {
  544. sgx_reset_ustack();
  545. return -EINVAL;
  546. }
  547. ms = sgx_alloc_on_ustack(sizeof(*ms));
  548. if (!ms) {
  549. sgx_reset_ustack();
  550. return -EPERM;
  551. }
  552. ms->ms_futex = futex;
  553. ms->ms_op = op;
  554. ms->ms_val = val;
  555. ms->ms_timeout_us = timeout_us;
  556. retval = sgx_ocall(OCALL_FUTEX, ms);
  557. sgx_reset_ustack();
  558. return retval;
  559. }
  560. int ocall_socketpair (int domain, int type, int protocol,
  561. int sockfds[2])
  562. {
  563. int retval = 0;
  564. ms_ocall_socketpair_t * ms;
  565. ms = sgx_alloc_on_ustack(sizeof(*ms));
  566. if (!ms) {
  567. sgx_reset_ustack();
  568. return -EPERM;
  569. }
  570. ms->ms_domain = domain;
  571. ms->ms_type = type;
  572. ms->ms_protocol = protocol;
  573. retval = sgx_ocall(OCALL_SOCKETPAIR, ms);
  574. if (!retval) {
  575. sockfds[0] = ms->ms_sockfds[0];
  576. sockfds[1] = ms->ms_sockfds[1];
  577. }
  578. sgx_reset_ustack();
  579. return retval;
  580. }
  581. int ocall_listen(int domain, int type, int protocol, int ipv6_v6only,
  582. struct sockaddr* addr, unsigned int* addrlen, struct sockopt* sockopt) {
  583. int retval = 0;
  584. unsigned int copied;
  585. unsigned int len = addrlen ? *addrlen : 0;
  586. ms_ocall_listen_t* ms;
  587. ms = sgx_alloc_on_ustack(sizeof(*ms));
  588. if (!ms) {
  589. sgx_reset_ustack();
  590. return -EPERM;
  591. }
  592. ms->ms_domain = domain;
  593. ms->ms_type = type;
  594. ms->ms_protocol = protocol;
  595. ms->ms_ipv6_v6only = ipv6_v6only;
  596. ms->ms_addrlen = len;
  597. ms->ms_addr = (addr && len) ? sgx_copy_to_ustack(addr, len) : NULL;
  598. if (addr && len && !ms->ms_addr) {
  599. sgx_reset_ustack();
  600. return -EPERM;
  601. }
  602. retval = sgx_ocall(OCALL_LISTEN, ms);
  603. if (retval >= 0) {
  604. if (addr && len) {
  605. copied = sgx_copy_to_enclave(addr, len, ms->ms_addr, ms->ms_addrlen);
  606. if (!copied) {
  607. sgx_reset_ustack();
  608. return -EPERM;
  609. }
  610. *addrlen = copied;
  611. }
  612. if (sockopt) {
  613. *sockopt = ms->ms_sockopt;
  614. }
  615. }
  616. sgx_reset_ustack();
  617. return retval;
  618. }
  619. int ocall_accept (int sockfd, struct sockaddr * addr,
  620. unsigned int * addrlen, struct sockopt * sockopt)
  621. {
  622. int retval = 0;
  623. unsigned int copied;
  624. unsigned int len = addrlen ? *addrlen : 0;
  625. ms_ocall_accept_t * ms;
  626. ms = sgx_alloc_on_ustack(sizeof(*ms));
  627. if (!ms) {
  628. sgx_reset_ustack();
  629. return -EPERM;
  630. }
  631. ms->ms_sockfd = sockfd;
  632. ms->ms_addrlen = len;
  633. ms->ms_addr = (addr && len) ? sgx_copy_to_ustack(addr, len) : NULL;
  634. if (addr && len && !ms->ms_addr) {
  635. sgx_reset_ustack();
  636. return -EPERM;
  637. }
  638. retval = sgx_ocall(OCALL_ACCEPT, ms);
  639. if (retval >= 0) {
  640. if (addr && len) {
  641. copied = sgx_copy_to_enclave(addr, len, ms->ms_addr, ms->ms_addrlen);
  642. if (!copied) {
  643. sgx_reset_ustack();
  644. return -EPERM;
  645. }
  646. *addrlen = copied;
  647. }
  648. if (sockopt) {
  649. *sockopt = ms->ms_sockopt;
  650. }
  651. }
  652. sgx_reset_ustack();
  653. return retval;
  654. }
/*
 * Create a host socket, optionally bind it to `bind_addr`, and connect it to
 * `addr`. On success the actually-bound address is copied back into
 * bind_addr/bind_addrlen and the host socket options into sockopt.
 * Returns the connected fd (>= 0) or a negative errno value.
 */
int ocall_connect(int domain, int type, int protocol, int ipv6_v6only,
                  const struct sockaddr* addr, unsigned int addrlen,
                  struct sockaddr* bind_addr, unsigned int* bind_addrlen,
                  struct sockopt* sockopt) {
    int retval = 0;
    unsigned int copied;
    unsigned int bind_len = bind_addrlen ? *bind_addrlen : 0;
    ms_ocall_connect_t* ms;

    ms = sgx_alloc_on_ustack(sizeof(*ms));
    if (!ms) {
        sgx_reset_ustack();
        return -EPERM;
    }

    ms->ms_domain = domain;
    ms->ms_type = type;
    ms->ms_protocol = protocol;
    ms->ms_ipv6_v6only = ipv6_v6only;
    ms->ms_addrlen = addrlen;
    ms->ms_bind_addrlen = bind_len;
    ms->ms_addr = addr ? sgx_copy_to_ustack(addr, addrlen) : NULL;
    ms->ms_bind_addr = bind_addr ? sgx_copy_to_ustack(bind_addr, bind_len) : NULL;
    if ((addr && !ms->ms_addr) || (bind_addr && !ms->ms_bind_addr)) {
        sgx_reset_ustack();
        return -EPERM;
    }

    retval = sgx_ocall(OCALL_CONNECT, ms);

    if (retval >= 0) {
        if (bind_addr && bind_len) {
            /* copy back the address the host actually bound to */
            copied = sgx_copy_to_enclave(bind_addr, bind_len, ms->ms_bind_addr, ms->ms_bind_addrlen);
            if (!copied) {
                sgx_reset_ustack();
                return -EPERM;
            }
            *bind_addrlen = copied;
        }
        if (sockopt) {
            *sockopt = ms->ms_sockopt;
        }
    }

    sgx_reset_ustack();
    return retval;
}
  697. int ocall_recv (int sockfd, void * buf, unsigned int count,
  698. struct sockaddr * addr, unsigned int * addrlenptr,
  699. void * control, uint64_t * controllenptr)
  700. {
  701. int retval = 0;
  702. void * obuf = NULL;
  703. unsigned int copied;
  704. unsigned int addrlen = addrlenptr ? *addrlenptr : 0;
  705. uint64_t controllen = controllenptr ? *controllenptr : 0;
  706. ms_ocall_recv_t * ms;
  707. bool need_munmap = false;
  708. if ((count + addrlen + controllen) > MAX_UNTRUSTED_STACK_BUF) {
  709. retval = ocall_mmap_untrusted_cache(ALLOC_ALIGN_UP(count), &obuf, &need_munmap);
  710. if (IS_ERR(retval))
  711. return retval;
  712. }
  713. ms = sgx_alloc_on_ustack(sizeof(*ms));
  714. if (!ms) {
  715. retval = -EPERM;
  716. goto out;
  717. }
  718. ms->ms_sockfd = sockfd;
  719. ms->ms_count = count;
  720. ms->ms_addrlen = addrlen;
  721. ms->ms_addr = addr ? sgx_alloc_on_ustack(addrlen) : NULL;
  722. ms->ms_controllen = controllen;
  723. ms->ms_control = control ? sgx_alloc_on_ustack(controllen) : NULL;
  724. if (obuf)
  725. ms->ms_buf = obuf;
  726. else
  727. ms->ms_buf = sgx_alloc_on_ustack(count);
  728. if (!ms->ms_buf || (addr && !ms->ms_addr)) {
  729. retval = -EPERM;
  730. goto out;
  731. }
  732. retval = sgx_ocall(OCALL_RECV, ms);
  733. if (retval >= 0) {
  734. if (addr && addrlen) {
  735. copied = sgx_copy_to_enclave(addr, addrlen, ms->ms_addr, ms->ms_addrlen);
  736. if (!copied) {
  737. retval = -EPERM;
  738. goto out;
  739. }
  740. *addrlenptr = copied;
  741. }
  742. if (control && controllen) {
  743. copied = sgx_copy_to_enclave(control, controllen, ms->ms_control, ms->ms_controllen);
  744. if (!copied) {
  745. retval = -EPERM;
  746. goto out;
  747. }
  748. *controllenptr = copied;
  749. }
  750. if (retval > 0 && !sgx_copy_to_enclave(buf, count, ms->ms_buf, retval)) {
  751. retval = -EPERM;
  752. goto out;
  753. }
  754. }
  755. out:
  756. sgx_reset_ustack();
  757. if (obuf)
  758. ocall_munmap_untrusted_cache(obuf, ALLOC_ALIGN_UP(count), need_munmap);
  759. return retval;
  760. }
  761. int ocall_send (int sockfd, const void * buf, unsigned int count,
  762. const struct sockaddr * addr, unsigned int addrlen,
  763. void * control, uint64_t controllen)
  764. {
  765. int retval = 0;
  766. void * obuf = NULL;
  767. ms_ocall_send_t * ms;
  768. bool need_munmap;
  769. if (sgx_is_completely_outside_enclave(buf, count)) {
  770. /* buf is in untrusted memory (e.g., allowed file mmaped in untrusted memory) */
  771. obuf = (void*)buf;
  772. } else if (sgx_is_completely_within_enclave(buf, count)) {
  773. /* typical case of buf inside of enclave memory */
  774. if ((count + addrlen + controllen) > MAX_UNTRUSTED_STACK_BUF) {
  775. /* buf is too big and may overflow untrusted stack, so use untrusted heap */
  776. retval = ocall_mmap_untrusted_cache(ALLOC_ALIGN_UP(count), &obuf, &need_munmap);
  777. if (IS_ERR(retval))
  778. return retval;
  779. memcpy(obuf, buf, count);
  780. }
  781. } else {
  782. /* buf is partially in/out of enclave memory */
  783. return -EPERM;
  784. }
  785. ms = sgx_alloc_on_ustack(sizeof(*ms));
  786. if (!ms) {
  787. retval = -EPERM;
  788. goto out;
  789. }
  790. ms->ms_sockfd = sockfd;
  791. ms->ms_count = count;
  792. ms->ms_addrlen = addrlen;
  793. ms->ms_addr = addr ? sgx_copy_to_ustack(addr, addrlen) : NULL;
  794. ms->ms_controllen = controllen;
  795. ms->ms_control = control ? sgx_copy_to_ustack(control, controllen) : NULL;
  796. if (obuf)
  797. ms->ms_buf = obuf;
  798. else
  799. ms->ms_buf = sgx_copy_to_ustack(buf, count);
  800. if (!ms->ms_buf || (addr && !ms->ms_addr)) {
  801. retval = -EPERM;
  802. goto out;
  803. }
  804. retval = sgx_ocall(OCALL_SEND, ms);
  805. out:
  806. sgx_reset_ustack();
  807. if (obuf && obuf != buf)
  808. ocall_munmap_untrusted_cache(obuf, ALLOC_ALIGN_UP(count), need_munmap);
  809. return retval;
  810. }
  811. int ocall_setsockopt (int sockfd, int level, int optname,
  812. const void * optval, unsigned int optlen)
  813. {
  814. int retval = 0;
  815. ms_ocall_setsockopt_t * ms;
  816. ms = sgx_alloc_on_ustack(sizeof(*ms));
  817. if (!ms) {
  818. sgx_reset_ustack();
  819. return -EPERM;
  820. }
  821. ms->ms_sockfd = sockfd;
  822. ms->ms_level = level;
  823. ms->ms_optname = optname;
  824. ms->ms_optlen = 0;
  825. ms->ms_optval = NULL;
  826. if (optval && optlen > 0) {
  827. ms->ms_optlen = optlen;
  828. ms->ms_optval = sgx_copy_to_ustack(optval, optlen);
  829. if (!ms->ms_optval) {
  830. sgx_reset_ustack();
  831. return -EPERM;
  832. }
  833. }
  834. retval = sgx_ocall(OCALL_SETSOCKOPT, ms);
  835. sgx_reset_ustack();
  836. return retval;
  837. }
  838. int ocall_shutdown (int sockfd, int how)
  839. {
  840. int retval = 0;
  841. ms_ocall_shutdown_t * ms;
  842. ms = sgx_alloc_on_ustack(sizeof(*ms));
  843. if (!ms) {
  844. sgx_reset_ustack();
  845. return -EPERM;
  846. }
  847. ms->ms_sockfd = sockfd;
  848. ms->ms_how = how;
  849. retval = sgx_ocall(OCALL_SHUTDOWN, ms);
  850. sgx_reset_ustack();
  851. return retval;
  852. }
  853. int ocall_gettime (unsigned long * microsec)
  854. {
  855. int retval = 0;
  856. ms_ocall_gettime_t * ms;
  857. ms = sgx_alloc_on_ustack(sizeof(*ms));
  858. if (!ms) {
  859. sgx_reset_ustack();
  860. return -EPERM;
  861. }
  862. do {
  863. retval = sgx_ocall(OCALL_GETTIME, ms);
  864. } while(retval == -EINTR);
  865. if (!retval)
  866. *microsec = ms->ms_microsec;
  867. sgx_reset_ustack();
  868. return retval;
  869. }
/*
 * Sleep for *microsec microseconds (0 if microsec is NULL). On normal
 * completion *microsec is set to 0; if interrupted (-EINTR), *microsec is
 * updated with the remaining time reported by the host.
 * Returns 0 or a negative errno value.
 */
int ocall_sleep (unsigned long * microsec)
{
    int retval = 0;
    ms_ocall_sleep_t * ms;

    ms = sgx_alloc_on_ustack(sizeof(*ms));
    if (!ms) {
        sgx_reset_ustack();
        return -EPERM;
    }

    /* NULL microsec means a zero-duration sleep */
    ms->ms_microsec = microsec ? *microsec : 0;

    retval = sgx_ocall(OCALL_SLEEP, ms);
    if (microsec) {
        if (!retval)
            *microsec = 0;
        else if (retval == -EINTR)
            *microsec = ms->ms_microsec; /* remaining time from the host */
    }

    sgx_reset_ustack();
    return retval;
}
  890. int ocall_poll(struct pollfd* fds, int nfds, int64_t timeout_us) {
  891. int retval = 0;
  892. unsigned int nfds_bytes = nfds * sizeof(struct pollfd);
  893. ms_ocall_poll_t * ms;
  894. ms = sgx_alloc_on_ustack(sizeof(*ms));
  895. if (!ms) {
  896. sgx_reset_ustack();
  897. return -EPERM;
  898. }
  899. ms->ms_nfds = nfds;
  900. ms->ms_timeout_us = timeout_us;
  901. ms->ms_fds = sgx_copy_to_ustack(fds, nfds_bytes);
  902. if (!ms->ms_fds) {
  903. sgx_reset_ustack();
  904. return -EPERM;
  905. }
  906. retval = sgx_ocall(OCALL_POLL, ms);
  907. if (retval >= 0) {
  908. if (!sgx_copy_to_enclave(fds, nfds_bytes, ms->ms_fds, nfds_bytes)) {
  909. sgx_reset_ustack();
  910. return -EPERM;
  911. }
  912. }
  913. sgx_reset_ustack();
  914. return retval;
  915. }
  916. int ocall_rename (const char * oldpath, const char * newpath)
  917. {
  918. int retval = 0;
  919. int oldlen = oldpath ? strlen(oldpath) + 1 : 0;
  920. int newlen = newpath ? strlen(newpath) + 1 : 0;
  921. ms_ocall_rename_t * ms;
  922. ms = sgx_alloc_on_ustack(sizeof(*ms));
  923. if (!ms) {
  924. sgx_reset_ustack();
  925. return -EPERM;
  926. }
  927. ms->ms_oldpath = sgx_copy_to_ustack(oldpath, oldlen);
  928. ms->ms_newpath = sgx_copy_to_ustack(newpath, newlen);
  929. if (!ms->ms_oldpath || !ms->ms_newpath) {
  930. sgx_reset_ustack();
  931. return -EPERM;
  932. }
  933. retval = sgx_ocall(OCALL_RENAME, ms);
  934. sgx_reset_ustack();
  935. return retval;
  936. }
  937. int ocall_delete (const char * pathname)
  938. {
  939. int retval = 0;
  940. int len = pathname ? strlen(pathname) + 1 : 0;
  941. ms_ocall_delete_t * ms;
  942. ms = sgx_alloc_on_ustack(sizeof(*ms));
  943. if (!ms) {
  944. sgx_reset_ustack();
  945. return -EPERM;
  946. }
  947. ms->ms_pathname = sgx_copy_to_ustack(pathname, len);
  948. if (!ms->ms_pathname) {
  949. sgx_reset_ustack();
  950. return -EPERM;
  951. }
  952. retval = sgx_ocall(OCALL_DELETE, ms);
  953. sgx_reset_ustack();
  954. return retval;
  955. }
  956. int ocall_load_debug(const char * command)
  957. {
  958. int retval = 0;
  959. int len = strlen(command) + 1;
  960. const char * ms = sgx_copy_to_ustack(command, len);
  961. if (!ms) {
  962. sgx_reset_ustack();
  963. return -EPERM;
  964. }
  965. retval = sgx_ocall(OCALL_LOAD_DEBUG, (void *) ms);
  966. sgx_reset_ustack();
  967. return retval;
  968. }
  969. /*
  970. * ocall_get_attestation() triggers remote attestation in untrusted PAL (see sgx_platform.c:
  971. * retrieve_verified_quote()). If the OCall returns successfully, the function returns
  972. * attestation data required for platform verification (i.e., sgx_attestation_t). Except the
  973. * QE report, most data fields of the attestation need to be copied into the enclave.
  974. *
  975. * @spid: The client SPID registered with the IAS.
  976. * @subkey: SPID subscription key.
  977. * @linkable: Whether the SPID is linkable.
  978. * @report: Local attestation report for the quoting enclave.
  979. * @nonce: Randomly-generated nonce for freshness.
  980. * @attestation: Returns the attestation data (QE report, quote, IAS report, signature,
  981. * and certificate chain).
  982. */
int ocall_get_attestation (const sgx_spid_t* spid, const char* subkey, bool linkable,
                           const sgx_report_t* report, const sgx_quote_nonce_t* nonce,
                           sgx_attestation_t* attestation) {
    ms_ocall_get_attestation_t * ms;
    int retval = -EPERM;

    ms = sgx_alloc_on_ustack(sizeof(*ms));
    if (!ms)
        goto reset;

    /* Marshal the inputs onto the untrusted stack. The fixed-size structs are
     * inputs only, so plain memcpy into the ms frame suffices.
     * NOTE(review): ms_subkey is not checked for NULL here; if
     * sgx_copy_to_ustack() fails, a NULL subkey pointer reaches the host —
     * confirm the untrusted handler tolerates that. */
    memcpy(&ms->ms_spid, spid, sizeof(sgx_spid_t));
    ms->ms_subkey = sgx_copy_to_ustack(subkey, strlen(subkey) + 1);
    memcpy(&ms->ms_report, report, sizeof(sgx_report_t));
    memcpy(&ms->ms_nonce, nonce, sizeof(sgx_quote_nonce_t));
    ms->ms_linkable = linkable;

    retval = sgx_ocall(OCALL_GET_ATTESTATION, ms);

    if (retval >= 0) {
        // First, try to copy the whole ms->ms_attestation inside
        if (!sgx_copy_to_enclave(attestation, sizeof(sgx_attestation_t), &ms->ms_attestation,
                                 sizeof(sgx_attestation_t))) {
            retval = -EACCES;
            goto reset;
        }

        // For calling ocall_munmap_untrusted, need to reset the untrusted stack
        sgx_reset_ustack();

        /* Copy each variable-length field into enclave heap memory and unmap the
         * corresponding untrusted buffer. A failed copy-in sets retval = -EACCES
         * but processing continues, so every untrusted buffer still gets unmapped;
         * the aggregated error is handled after all four fields.
         * NOTE(review): the malloc() results below are not checked; on OOM a NULL
         * destination is passed to sgx_copy_to_enclave() — presumably that copy
         * then fails and the error path triggers, but confirm it rejects NULL. */

        // Copy each field inside and free the untrusted buffers
        if (attestation->quote) {
            size_t len = attestation->quote_len;
            sgx_quote_t* quote = malloc(len);
            if (!sgx_copy_to_enclave(quote, len, attestation->quote, len))
                retval = -EACCES;
            ocall_munmap_untrusted(attestation->quote, ALLOC_ALIGN_UP(len));
            attestation->quote = quote;
        }

        if (attestation->ias_report) {
            size_t len = attestation->ias_report_len;
            /* +1 for the forced NUL terminator appended below */
            char* ias_report = malloc(len + 1);
            if (!sgx_copy_to_enclave(ias_report, len, attestation->ias_report, len))
                retval = -EACCES;
            ocall_munmap_untrusted(attestation->ias_report, ALLOC_ALIGN_UP(len));
            ias_report[len] = 0; // Ensure null-ending
            attestation->ias_report = ias_report;
        }

        if (attestation->ias_sig) {
            size_t len = attestation->ias_sig_len;
            uint8_t* ias_sig = malloc(len);
            if (!sgx_copy_to_enclave(ias_sig, len, attestation->ias_sig, len))
                retval = -EACCES;
            ocall_munmap_untrusted(attestation->ias_sig, ALLOC_ALIGN_UP(len));
            attestation->ias_sig = ias_sig;
        }

        if (attestation->ias_certs) {
            size_t len = attestation->ias_certs_len;
            /* +1 for the forced NUL terminator appended below */
            char* ias_certs = malloc(len + 1);
            if (!sgx_copy_to_enclave(ias_certs, len, attestation->ias_certs, len))
                retval = -EACCES;
            ocall_munmap_untrusted(attestation->ias_certs, ALLOC_ALIGN_UP(len));
            ias_certs[len] = 0; // Ensure null-ending
            attestation->ias_certs = ias_certs;
        }

        // At this point, no field should point to outside the enclave
        if (retval < 0) {
            /* Partial failure: release whatever enclave copies were made.
             * NOTE(review): the freed pointers are left dangling in
             * *attestation — callers must not touch them on error. */
            if (attestation->quote) free(attestation->quote);
            if (attestation->ias_report) free(attestation->ias_report);
            if (attestation->ias_sig) free(attestation->ias_sig);
            if (attestation->ias_certs) free(attestation->ias_certs);
        }

        /* ustack was already reset above; skip the reset label */
        goto out;
    }

reset:
    sgx_reset_ustack();
out:
    return retval;
}
  1055. int ocall_eventfd (unsigned int initval, int flags)
  1056. {
  1057. int retval = 0;
  1058. ms_ocall_eventfd_t * ms;
  1059. ms = sgx_alloc_on_ustack(sizeof(*ms));
  1060. if (!ms) {
  1061. sgx_reset_ustack();
  1062. return -EPERM;
  1063. }
  1064. ms->ms_initval = initval;
  1065. ms->ms_flags = flags;
  1066. retval = sgx_ocall(OCALL_EVENTFD, ms);
  1067. sgx_reset_ustack();
  1068. return retval;
  1069. }