/* sgx_framework.c */

#include <hex.h>
#include <pal_linux.h>
#include <pal_rtld.h>

#include "sgx_internal.h"
#include "sgx_arch.h"
#include "sgx_enclave.h"
#include "graphene-sgx.h"

#include <asm/errno.h>

int gsgx_device = -1;
int isgx_device = -1;
#define ISGX_FILE "/dev/isgx"

void * zero_page;
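/* Open the Graphene-SGX helper device (GSGX_FILE) and the Intel SGX driver
 * device (ISGX_FILE), storing the file descriptors in the globals above.
 * Returns 0 on success or a negative errno value on failure. */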
int open_gsgx(void)
{
    gsgx_device = INLINE_SYSCALL(open, 3, GSGX_FILE, O_RDWR | O_CLOEXEC, 0);
    if (IS_ERR(gsgx_device)) {
        SGX_DBG(DBG_E, "Cannot open device " GSGX_FILE ". Please make sure the"
                " 'graphene_sgx' kernel module is loaded.\n");
        return -ERRNO(gsgx_device);
    }

    isgx_device = INLINE_SYSCALL(open, 3, ISGX_FILE, O_RDWR | O_CLOEXEC, 0);
    if (IS_ERR(isgx_device)) {
        SGX_DBG(DBG_E, "Cannot open device " ISGX_FILE ". Please make sure the"
                " Intel SGX kernel module is loaded.\n");
        INLINE_SYSCALL(close, 1, gsgx_device);
        gsgx_device = -1;
        return -ERRNO(isgx_device);
    }

    return 0;
}
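/* Read an EINIT token (sgx_arch_token_t) from token_file into *token after
 * verifying that the file size matches exactly, and dump its fields at
 * debug level. Returns 0 on success or a negative errno value. */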
int read_enclave_token(int token_file, sgx_arch_token_t * token)
{
    struct stat stat;
    int ret;

    ret = INLINE_SYSCALL(fstat, 2, token_file, &stat);
    if (IS_ERR(ret))
        return -ERRNO(ret);

    if (stat.st_size != sizeof(sgx_arch_token_t)) {
        SGX_DBG(DBG_I, "size of token file does not match\n");
        return -EINVAL;
    }

    int bytes = INLINE_SYSCALL(read, 3, token_file, token, sizeof(sgx_arch_token_t));
    if (IS_ERR(bytes))
        return -ERRNO(bytes);

    SGX_DBG(DBG_I, "read token:\n");
    SGX_DBG(DBG_I, " valid: 0x%08x\n", token->body.valid);
    SGX_DBG(DBG_I, " attr.flags: 0x%016lx\n", token->body.attributes.flags);
    SGX_DBG(DBG_I, " attr.xfrm: 0x%016lx\n", token->body.attributes.xfrm);
    SGX_DBG(DBG_I, " mr_enclave: %s\n", ALLOCA_BYTES2HEXSTR(token->body.mr_enclave.m));
    SGX_DBG(DBG_I, " mr_signer: %s\n", ALLOCA_BYTES2HEXSTR(token->body.mr_signer.m));
    SGX_DBG(DBG_I, " LE cpu_svn: %s\n", ALLOCA_BYTES2HEXSTR(token->cpu_svn_le.svn));
    SGX_DBG(DBG_I, " LE isv_prod_id: %02x\n", token->isv_prod_id_le);
    SGX_DBG(DBG_I, " LE isv_svn: %02x\n", token->isv_svn_le);
    SGX_DBG(DBG_I, " LE masked_misc_select: 0x%08x\n", token->masked_misc_select_le);
    SGX_DBG(DBG_I, " LE attr.flags: 0x%016lx\n", token->attributes_le.flags);
    SGX_DBG(DBG_I, " LE attr.xfrm: 0x%016lx\n", token->attributes_le.xfrm);

    return 0;
}
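/* Read an enclave SIGSTRUCT (sgx_arch_enclave_css_t) from sigfile into *sig
 * after verifying that the file size matches exactly. Returns 0 on success
 * or a negative errno value. */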
int read_enclave_sigstruct(int sigfile, sgx_arch_enclave_css_t * sig)
{
    struct stat stat;
    int ret;

    ret = INLINE_SYSCALL(fstat, 2, sigfile, &stat);
    if (IS_ERR(ret))
        return -ERRNO(ret);

    if ((size_t)stat.st_size != sizeof(sgx_arch_enclave_css_t)) {
        SGX_DBG(DBG_I, "size of sigstruct file does not match\n");
        return -EINVAL;
    }

    int bytes = INLINE_SYSCALL(read, 3, sigfile, sig, sizeof(sgx_arch_enclave_css_t));
    if (IS_ERR(bytes))
        return -ERRNO(bytes);

    return 0;
}
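/* CPUID leaf 0x12 enumerates SGX capabilities; subleaf 1 reports the XFRM
 * bits the platform allows in SECS.ATTRIBUTES (EDX:ECX). The cpuid() helper
 * below is a thin wrapper around the CPUID instruction. */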
#define SE_LEAF 0x12

static inline void cpuid(uint32_t leaf, uint32_t subleaf, uint32_t info[4])
{
    __asm__ volatile("cpuid"
                     : "=a"(info[0]),
                       "=b"(info[1]),
                       "=c"(info[2]),
                       "=d"(info[3])
                     : "a"(leaf),
                       "c"(subleaf));
}
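/* Compute the SSA frame size required for the requested XFRM: take the union
 * of the requested bits and the platform-allowed bits (CPUID 0x12, subleaf 1),
 * query CPUID leaf 0xD for the offset + size of each enabled extended-state
 * component, keep the maximum, add room for the saved GPR area
 * (sgx_pal_gpr_t), and round up to the allocation granularity. */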
static size_t get_ssaframesize (uint64_t xfrm)
{
    uint32_t cpuinfo[4];
    uint64_t xfrm_ex;
    size_t xsave_size = 0;

    cpuid(SE_LEAF, 1, cpuinfo);
    xfrm_ex = ((uint64_t) cpuinfo[3] << 32) + cpuinfo[2];

    for (int i = 2; i < 64; i++)
        if ((xfrm & (1ULL << i)) || (xfrm_ex & (1ULL << i))) {
            cpuid(0xd, i, cpuinfo);
            if (cpuinfo[0] + cpuinfo[1] > xsave_size)
                xsave_size = cpuinfo[0] + cpuinfo[1];
        }

    return ALLOC_ALIGN_UP(xsave_size + sizeof(sgx_pal_gpr_t) + 1);
}
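/* Check CPUID.(EAX=7,ECX=0):EBX bit 0 (FSGSBASE). Enclave code relies on
 * WRFSBASE; on kernels that do not enable FSGSBASE, the graphene_sgx module
 * is expected to turn it on (hence the hint in the error message). */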
bool is_wrfsbase_supported (void)
{
    uint32_t cpuinfo[4];
    cpuid(7, 0, cpuinfo);

    if (!(cpuinfo[1] & 0x1)) {
        SGX_DBG(DBG_E, "The WRFSBASE instruction is not permitted on this"
                " platform. Please make sure the 'graphene_sgx' kernel module"
                " is loaded properly.\n");
        return false;
    }

    return true;
}
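/* ECREATE: reserve the enclave's virtual address range by mmap()ing the SGX
 * device at secs->base, fill in the remaining SECS fields from the EINIT
 * token, and issue the enclave-create ioctl (SGX_IOC_ENCLAVE_CREATE on SDK
 * drivers >= 1.8, GSGX_IOCTL_ENCLAVE_CREATE otherwise). Returns 0 on success
 * or a negative errno value.
 *
 * Rough call order for the functions in this file (a sketch only; the actual
 * driver of these calls lives elsewhere in the PAL loader):
 *
 *     open_gsgx();
 *     read_enclave_token(token_fd, &token);
 *     read_enclave_sigstruct(sig_fd, &sig);
 *     create_enclave(&secs, &token);
 *     add_pages_to_enclave(&secs, ...);    // repeated per enclave region
 *     init_enclave(&secs, &sig, &token);
 */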
int create_enclave(sgx_arch_secs_t * secs,
                   sgx_arch_token_t * token)
{
    assert(secs->size && IS_POWER_OF_2(secs->size));
    assert(IS_ALIGNED(secs->base, secs->size));

    int flags = MAP_SHARED;

    if (!zero_page) {
        zero_page = (void *)
            INLINE_SYSCALL(mmap, 6, NULL, g_page_size,
                           PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS,
                           -1, 0);
        if (IS_ERR_P(zero_page))
            return -ENOMEM;
    }

    secs->ssa_frame_size = get_ssaframesize(token->body.attributes.xfrm) / g_page_size;
    secs->misc_select = token->masked_misc_select_le;
    memcpy(&secs->attributes, &token->body.attributes, sizeof(sgx_attributes_t));

    /* Do not initialize secs->mr_signer and secs->mr_enclave here as they are
     * not used by ECREATE to populate the internal SECS. SECS's mr_enclave is
     * computed dynamically and SECS's mr_signer is populated based on the
     * SIGSTRUCT during EINIT (see pp21 for ECREATE and pp34 for
     * EINIT in https://software.intel.com/sites/default/files/managed/48/88/329298-002.pdf). */

    uint64_t addr = INLINE_SYSCALL(mmap, 6, secs->base, secs->size,
                                   PROT_READ|PROT_WRITE|PROT_EXEC,
                                   flags|MAP_FIXED, isgx_device, 0);

    if (IS_ERR_P(addr)) {
        if (ERRNO_P(addr) == EPERM && (flags & MAP_FIXED))
            pal_printf("Permission denied on mapping enclave. "
                       "You may need to set sysctl vm.mmap_min_addr to zero\n");

        SGX_DBG(DBG_I, "enclave ECREATE failed in allocating EPC memory "
                "(errno = %ld)\n", ERRNO_P(addr));
        return -ENOMEM;
    }

    assert(secs->base == addr);

#if SDK_DRIVER_VERSION >= KERNEL_VERSION(1, 8, 0)
    struct sgx_enclave_create param = {
        .src = (uint64_t) secs,
    };
    int ret = INLINE_SYSCALL(ioctl, 3, isgx_device, SGX_IOC_ENCLAVE_CREATE,
                             &param);
#else
    struct gsgx_enclave_create param = {
        .src = (uint64_t) secs,
    };
    int ret = INLINE_SYSCALL(ioctl, 3, gsgx_device, GSGX_IOCTL_ENCLAVE_CREATE,
                             &param);
#endif

    if (IS_ERR(ret)) {
        SGX_DBG(DBG_I, "enclave ECREATE failed in enclave creation ioctl - %d\n", ERRNO(ret));
        return -ERRNO(ret);
    }

    if (ret) {
        SGX_DBG(DBG_I, "enclave ECREATE failed - %d\n", ret);
        return -EPERM;
    }

    secs->attributes.flags |= SGX_FLAGS_INITIALIZED;

    SGX_DBG(DBG_I, "enclave created:\n");
    SGX_DBG(DBG_I, " base: 0x%016lx\n", secs->base);
    SGX_DBG(DBG_I, " size: 0x%016lx\n", secs->size);
    SGX_DBG(DBG_I, " misc_select: 0x%08x\n", secs->misc_select);
    SGX_DBG(DBG_I, " attr.flags: 0x%016lx\n", secs->attributes.flags);
    SGX_DBG(DBG_I, " attr.xfrm: 0x%016lx\n", secs->attributes.xfrm);
    SGX_DBG(DBG_I, " ssa_frame_size: %d\n", secs->ssa_frame_size);
    SGX_DBG(DBG_I, " isv_prod_id: 0x%08x\n", secs->isv_prod_id);
    SGX_DBG(DBG_I, " isv_svn: 0x%08x\n", secs->isv_svn);

    return 0;
}
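/* EADD (and optionally EEXTEND): build a SECINFO from the page type and
 * protection bits, then add [addr, addr + size) to the enclave. With the
 * SDK driver (>= 1.8) this loops over SGX_IOC_ENCLAVE_ADD_PAGE one page at
 * a time; with the older gsgx driver a single GSGX_IOCTL_ENCLAVE_ADD_PAGES
 * call covers the range. Pages without a user_addr source are backed by the
 * shared zero page; skip_eextend suppresses measurement. */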
int add_pages_to_enclave(sgx_arch_secs_t * secs,
                         void * addr, void * user_addr,
                         unsigned long size,
                         enum sgx_page_type type, int prot,
                         bool skip_eextend,
                         const char * comment)
{
    sgx_arch_sec_info_t secinfo;
    int ret;

    memset(&secinfo, 0, sizeof(sgx_arch_sec_info_t));

    switch (type) {
        case SGX_PAGE_SECS:
            return -EPERM;
        case SGX_PAGE_TCS:
            secinfo.flags |= SGX_SECINFO_FLAGS_TCS;
            break;
        case SGX_PAGE_REG:
            secinfo.flags |= SGX_SECINFO_FLAGS_REG;
            if (prot & PROT_READ)
                secinfo.flags |= SGX_SECINFO_FLAGS_R;
            if (prot & PROT_WRITE)
                secinfo.flags |= SGX_SECINFO_FLAGS_W;
            if (prot & PROT_EXEC)
                secinfo.flags |= SGX_SECINFO_FLAGS_X;
            break;
    }

    char p[4] = "---";
    const char * t = (type == SGX_PAGE_TCS) ? "TCS" : "REG";
    const char * m = skip_eextend ? "" : " measured";

    if (type == SGX_PAGE_REG) {
        if (prot & PROT_READ)
            p[0] = 'R';
        if (prot & PROT_WRITE)
            p[1] = 'W';
        if (prot & PROT_EXEC)
            p[2] = 'X';
    }

    if (size == g_page_size)
        SGX_DBG(DBG_I, "adding page to enclave: %p [%s:%s] (%s)%s\n",
                addr, t, p, comment, m);
    else
        SGX_DBG(DBG_I, "adding pages to enclave: %p-%p [%s:%s] (%s)%s\n",
                addr, addr + size, t, p, comment, m);

#if SDK_DRIVER_VERSION >= KERNEL_VERSION(1, 8, 0)
    struct sgx_enclave_add_page param = {
        .addr = secs->base + (uint64_t) addr,
        .src = (uint64_t) (user_addr ? : zero_page),
        .secinfo = (uint64_t) &secinfo,
        .mrmask = skip_eextend ? 0 : (uint16_t) -1,
    };

    uint64_t added_size = 0;
    while (added_size < size) {
        ret = INLINE_SYSCALL(ioctl, 3, isgx_device,
                             SGX_IOC_ENCLAVE_ADD_PAGE, &param);
        if (IS_ERR(ret)) {
            SGX_DBG(DBG_I, "Enclave add page returned %d\n", ret);
            return -ERRNO(ret);
        }

        param.addr += g_page_size;
        if (param.src != (uint64_t) zero_page)
            param.src += g_page_size;
        added_size += g_page_size;
    }
#else
    struct gsgx_enclave_add_pages param = {
        .addr = secs->base + (uint64_t) addr,
        .user_addr = (uint64_t) user_addr,
        .size = size,
        .secinfo = (uint64_t) &secinfo,
        .flags = skip_eextend ? GSGX_ENCLAVE_ADD_PAGES_SKIP_EEXTEND : 0,
    };

    if (!user_addr) {
        param.user_addr = (unsigned long) zero_page;
        param.flags |= GSGX_ENCLAVE_ADD_PAGES_REPEAT_SRC;
    }

    ret = INLINE_SYSCALL(ioctl, 3, gsgx_device,
                         GSGX_IOCTL_ENCLAVE_ADD_PAGES,
                         &param);
    if (IS_ERR(ret)) {
        SGX_DBG(DBG_I, "Enclave add page returned %d\n", ret);
        return -ERRNO(ret);
    }
#endif

    return 0;
}
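/* EINIT: pass the SIGSTRUCT and EINIT token to the driver to finalize the
 * enclave measurement and make the enclave executable. A positive return
 * value from the ioctl is an SGX error code, which is translated into a
 * readable message below. */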
int init_enclave(sgx_arch_secs_t * secs,
                 sgx_arch_enclave_css_t * sigstruct,
                 sgx_arch_token_t * token)
{
    unsigned long enclave_valid_addr =
        secs->base + secs->size - g_page_size;

    SGX_DBG(DBG_I, "enclave initializing:\n");
    SGX_DBG(DBG_I, " enclave id: 0x%016lx\n", enclave_valid_addr);
    SGX_DBG(DBG_I, " enclave hash:");
    for (size_t i = 0 ; i < sizeof(sgx_measurement_t) ; i++)
        SGX_DBG(DBG_I, " %02x", sigstruct->body.enclave_hash.m[i]);
    SGX_DBG(DBG_I, "\n");

#if SDK_DRIVER_VERSION >= KERNEL_VERSION(1, 8, 0)
    struct sgx_enclave_init param = {
        .addr = enclave_valid_addr,
        .sigstruct = (uint64_t) sigstruct,
        .einittoken = (uint64_t) token,
    };
    int ret = INLINE_SYSCALL(ioctl, 3, isgx_device, SGX_IOC_ENCLAVE_INIT,
                             &param);
#else
    struct gsgx_enclave_init param = {
        .addr = enclave_valid_addr,
        .sigstruct = (uint64_t) sigstruct,
        .einittoken = (uint64_t) token,
    };
    int ret = INLINE_SYSCALL(ioctl, 3, gsgx_device, GSGX_IOCTL_ENCLAVE_INIT,
                             &param);
#endif

    if (IS_ERR(ret)) {
        return -ERRNO(ret);
    }

    if (ret) {
        const char * error;
        /* DEP 3/22/17: Try to improve error messages */
        switch (ret) {
            case SGX_INVALID_SIG_STRUCT:
                error = "Invalid SIGSTRUCT";         break;
            case SGX_INVALID_ATTRIBUTE:
                error = "Invalid enclave attribute"; break;
            case SGX_INVALID_MEASUREMENT:
                error = "Invalid measurement";       break;
            case SGX_INVALID_SIGNATURE:
                error = "Invalid signature";         break;
            case SGX_INVALID_LICENSE:
                error = "Invalid EINIT token";       break;
            case SGX_INVALID_CPUSVN:
                error = "Invalid CPU SVN";           break;
            default:
                error = "Unknown reason";            break;
        }
        SGX_DBG(DBG_I, "enclave EINIT failed - %s\n", error);
        return -EPERM;
    }

    return 0;
}
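/* Tear down the enclave by unmapping its address range; the SGX driver is
 * expected to reclaim the backing EPC pages when the mapping goes away. */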
int destroy_enclave(void * base_addr, size_t length)
{
    SGX_DBG(DBG_I, "destroying enclave...\n");

    int ret = INLINE_SYSCALL(munmap, 2, base_addr, length);
    if (IS_ERR(ret)) {
        SGX_DBG(DBG_I, "enclave EDESTROY failed\n");
        return -ERRNO(ret);
    }

    return 0;
}