sgx_framework.c

#include <pal_linux.h>
#include <pal_rtld.h>

#include "sgx_internal.h"
#include "sgx_arch.h"
#include "sgx_enclave.h"
#include "graphene-sgx.h"

#include <asm/errno.h>

int gsgx_device = -1;
int isgx_device = -1;
#define ISGX_FILE "/dev/isgx"

void * zero_page;
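
/* Open the Graphene SGX helper device (GSGX_FILE) and the Intel SGX driver
 * device (ISGX_FILE). Both descriptors are kept in globals for later ioctls;
 * on failure the already-opened descriptor is closed and a negative errno is
 * returned. */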
int open_gsgx(void)
{
    gsgx_device = INLINE_SYSCALL(open, 3, GSGX_FILE, O_RDWR | O_CLOEXEC, 0);
    if (IS_ERR(gsgx_device)) {
        SGX_DBG(DBG_E, "Cannot open device " GSGX_FILE ". Please make sure the"
                " \'graphene_sgx\' kernel module is loaded.\n");
        return -ERRNO(gsgx_device);
    }

    isgx_device = INLINE_SYSCALL(open, 3, ISGX_FILE, O_RDWR | O_CLOEXEC, 0);
    if (IS_ERR(isgx_device)) {
        SGX_DBG(DBG_E, "Cannot open device " ISGX_FILE ". Please make sure the"
                " Intel SGX kernel module is loaded.\n");
        INLINE_SYSCALL(close, 1, gsgx_device);
        gsgx_device = -1;
        return -ERRNO(isgx_device);
    }

    return 0;
}
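
/* Read a launch token (sgx_arch_token_t) from an already-opened token file.
 * The file must be exactly the size of the token structure; the parsed fields
 * are dumped at debug level for inspection. */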
int read_enclave_token(int token_file, sgx_arch_token_t * token)
{
    struct stat stat;
    int ret;

    ret = INLINE_SYSCALL(fstat, 2, token_file, &stat);
    if (IS_ERR(ret))
        return -ERRNO(ret);

    if (stat.st_size != sizeof(sgx_arch_token_t)) {
        SGX_DBG(DBG_I, "size of token file does not match size of sgx_arch_token_t\n");
        return -EINVAL;
    }

    int bytes = INLINE_SYSCALL(read, 3, token_file, token, sizeof(sgx_arch_token_t));
    if (IS_ERR(bytes))
        return -ERRNO(bytes);

    SGX_DBG(DBG_I, "read token:\n");
    SGX_DBG(DBG_I, "    valid:     0x%08x\n",   token->valid);
    SGX_DBG(DBG_I, "    attr:      0x%016lx\n", token->attributes.flags);
    SGX_DBG(DBG_I, "    xfrm:      0x%016lx\n", token->attributes.xfrm);
    SGX_DBG(DBG_I, "    miscmask:  0x%08x\n",   token->miscselect_mask);
    SGX_DBG(DBG_I, "    attr_mask: 0x%016lx\n", token->attribute_mask.flags);
    SGX_DBG(DBG_I, "    xfrm_mask: 0x%016lx\n", token->attribute_mask.xfrm);

    return 0;
}
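
/* Read the enclave signature structure (SIGSTRUCT) from an already-opened
 * signature file. The file must be at least sizeof(sgx_arch_sigstruct_t)
 * bytes long. */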
int read_enclave_sigstruct(int sigfile, sgx_arch_sigstruct_t * sig)
{
    struct stat stat;
    int ret;

    ret = INLINE_SYSCALL(fstat, 2, sigfile, &stat);
    if (IS_ERR(ret))
        return -ERRNO(ret);

    if ((size_t)stat.st_size < sizeof(sgx_arch_sigstruct_t)) {
        SGX_DBG(DBG_I, "size of sigstruct file is smaller than sizeof(sgx_arch_sigstruct_t)\n");
        return -EINVAL;
    }

    int bytes = INLINE_SYSCALL(read, 3, sigfile, sig, sizeof(sgx_arch_sigstruct_t));
    if (IS_ERR(bytes))
        return -ERRNO(bytes);

    return 0;
}
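
/* CPUID leaf 0x12 is the Intel SGX capability enumeration leaf. The helper
 * below is a thin wrapper around the CPUID instruction. */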
#define SE_LEAF 0x12

static inline void cpuid(uint32_t leaf, uint32_t subleaf, uint32_t info[4])
{
    __asm__ volatile("cpuid"
                     : "=a"(info[0]),
                       "=b"(info[1]),
                       "=c"(info[2]),
                       "=d"(info[3])
                     : "a"(leaf),
                       "c"(subleaf));
}
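
/* Compute the SSA frame size (page-aligned, in bytes) for the requested XFRM.
 * CPUID leaf 0x12, subleaf 1 reports in EDX:ECX which XFRM bits an enclave may
 * set; for every feature enabled there or in the requested XFRM, CPUID leaf
 * 0xD gives the offset and size of its XSAVE area. The SSA frame must cover
 * the end of the largest such area plus the GPR region. */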
static size_t get_ssaframesize (uint64_t xfrm)
{
    uint32_t cpuinfo[4];
    uint64_t xfrm_ex;
    size_t xsave_size = 0;

    cpuid(SE_LEAF, 1, cpuinfo);
    xfrm_ex = ((uint64_t) cpuinfo[3] << 32) + cpuinfo[2];

    for (int i = 2; i < 64; i++)
        if ((xfrm & (1ULL << i)) || (xfrm_ex & (1ULL << i))) {
            cpuid(0xd, i, cpuinfo);
            if (cpuinfo[0] + cpuinfo[1] > xsave_size)
                xsave_size = cpuinfo[0] + cpuinfo[1];
        }

    return ALLOC_ALIGNUP(xsave_size + sizeof(sgx_arch_gpr_t) + 1);
}
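
/* CPUID leaf 7, subleaf 0: EBX bit 0 reports FSGSBASE support, which is
 * needed so the enclave can set its FS base with WRFSBASE. */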
bool is_wrfsbase_supported (void)
{
    uint32_t cpuinfo[4];
    cpuid(7, 0, cpuinfo);

    if (!(cpuinfo[1] & 0x1)) {
        SGX_DBG(DBG_E, "The WRFSBASE instruction is not permitted on this"
                " platform. Please make sure the \'graphene_sgx\' kernel module"
                " is loaded properly.\n");
        return false;
    }

    return true;
}
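
/* ECREATE: reserve the enclave's virtual address range and ask the driver to
 * create the enclave. The SECS is filled in from the launch token (attributes,
 * MISCSELECT, XFRM-derived SSA frame size); the enclave size is rounded up to
 * the next power of two as SGX requires, and the base address is aligned to
 * the enclave size. */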
int create_enclave(sgx_arch_secs_t * secs,
                   unsigned long baseaddr,
                   unsigned long size,
                   sgx_arch_token_t * token)
{
    int flags = MAP_SHARED;

    if (!zero_page) {
        zero_page = (void *)
            INLINE_SYSCALL(mmap, 6, NULL, pagesize,
                           PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS,
                           -1, 0);
        if (IS_ERR_P(zero_page))
            return -ENOMEM;
    }

    memset(secs, 0, sizeof(sgx_arch_secs_t));
    secs->size = pagesize;
    while (secs->size < size)
        secs->size <<= 1;
    secs->ssaframesize = get_ssaframesize(token->attributes.xfrm) / pagesize;
    secs->miscselect = token->miscselect_mask;
    memcpy(&secs->attributes, &token->attributes,
           sizeof(sgx_arch_attributes_t));

    // Enable AVX and AVX512
    // [2019-09-18] TODO(dep): This alone is not enough to get the fully optional behavior we will want.
    // Leave this here for future work in another PR
    // secs->attributes.xfrm |= SGX_XFRM_AVX;

    /* Do not initialize secs->mrsigner and secs->mrenclave here as they are
     * not used by ECREATE to populate the internal SECS. SECS's mrenclave is
     * computed dynamically and SECS's mrsigner is populated based on the
     * SIGSTRUCT during EINIT (see pp21 for ECREATE and pp34 for
     * EINIT in https://software.intel.com/sites/default/files/managed/48/88/329298-002.pdf). */

    if (baseaddr) {
        secs->baseaddr = (uint64_t) baseaddr & ~(secs->size - 1);
    } else {
        secs->baseaddr = ENCLAVE_HIGH_ADDRESS;
    }

    uint64_t addr = INLINE_SYSCALL(mmap, 6, secs->baseaddr, secs->size,
                                   PROT_READ|PROT_WRITE|PROT_EXEC,
                                   flags|MAP_FIXED, isgx_device, 0);

    if (IS_ERR_P(addr)) {
        if (ERRNO_P(addr) == EPERM && (flags & MAP_FIXED))
            pal_printf("Permission denied on mapping enclave. "
                       "You may need to set sysctl vm.mmap_min_addr to zero\n");

        SGX_DBG(DBG_I, "enclave ECREATE failed in allocating EPC memory "
                "(errno = %ld)\n", ERRNO_P(addr));
        return -ENOMEM;
    }

    secs->baseaddr = addr;

#if SDK_DRIVER_VERSION >= KERNEL_VERSION(1, 8, 0)
    struct sgx_enclave_create param = {
        .src = (uint64_t) secs,
    };
    int ret = INLINE_SYSCALL(ioctl, 3, isgx_device, SGX_IOC_ENCLAVE_CREATE,
                             &param);
#else
    struct gsgx_enclave_create param = {
        .src = (uint64_t) secs,
    };
    int ret = INLINE_SYSCALL(ioctl, 3, gsgx_device, GSGX_IOCTL_ENCLAVE_CREATE,
                             &param);
#endif

    if (IS_ERR(ret)) {
        SGX_DBG(DBG_I, "enclave ECREATE failed in enclave creation ioctl - %d\n", ERRNO(ret));
        return -ERRNO(ret);
    }

    if (ret) {
        SGX_DBG(DBG_I, "enclave ECREATE failed - %d\n", ret);
        return -EPERM;
    }

    secs->attributes.flags |= SGX_FLAGS_INITIALIZED;

    SGX_DBG(DBG_I, "enclave created:\n");
    SGX_DBG(DBG_I, "    base:         0x%016lx\n", secs->baseaddr);
    SGX_DBG(DBG_I, "    size:         0x%016lx\n", secs->size);
    SGX_DBG(DBG_I, "    miscselect:   0x%08x\n",   secs->miscselect);
    SGX_DBG(DBG_I, "    attr:         0x%016lx\n", secs->attributes.flags);
    SGX_DBG(DBG_I, "    xfrm:         0x%016lx\n", secs->attributes.xfrm);
    SGX_DBG(DBG_I, "    ssaframesize: %d\n",       secs->ssaframesize);
    SGX_DBG(DBG_I, "    isvprodid:    0x%08x\n",   secs->isvprodid);
    SGX_DBG(DBG_I, "    isvsvn:       0x%08x\n",   secs->isvsvn);

    return 0;
}
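
/* EADD (plus optional EEXTEND): copy pages into the enclave at the given
 * enclave offset. user_addr is the source buffer; if it is NULL, the shared
 * zero page is used as the source for every target page. skip_eextend leaves
 * the pages out of the enclave measurement. */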
int add_pages_to_enclave(sgx_arch_secs_t * secs,
                         void * addr, void * user_addr,
                         unsigned long size,
                         enum sgx_page_type type, int prot,
                         bool skip_eextend,
                         const char * comment)
{
    sgx_arch_secinfo_t secinfo;
    int ret;

    memset(&secinfo, 0, sizeof(sgx_arch_secinfo_t));

    switch (type) {
        case SGX_PAGE_SECS:
            return -EPERM;
        case SGX_PAGE_TCS:
            secinfo.flags |= SGX_SECINFO_FLAGS_TCS;
            break;
        case SGX_PAGE_REG:
            secinfo.flags |= SGX_SECINFO_FLAGS_REG;
            if (prot & PROT_READ)
                secinfo.flags |= SGX_SECINFO_FLAGS_R;
            if (prot & PROT_WRITE)
                secinfo.flags |= SGX_SECINFO_FLAGS_W;
            if (prot & PROT_EXEC)
                secinfo.flags |= SGX_SECINFO_FLAGS_X;
            break;
    }

    char p[4] = "---";
    const char * t = (type == SGX_PAGE_TCS) ? "TCS" : "REG";
    const char * m = skip_eextend ? "" : " measured";

    if (type == SGX_PAGE_REG) {
        if (prot & PROT_READ)
            p[0] = 'R';
        if (prot & PROT_WRITE)
            p[1] = 'W';
        if (prot & PROT_EXEC)
            p[2] = 'X';
    }

    if (size == pagesize)
        SGX_DBG(DBG_I, "adding page to enclave: %p [%s:%s] (%s)%s\n",
                addr, t, p, comment, m);
    else
        SGX_DBG(DBG_I, "adding pages to enclave: %p-%p [%s:%s] (%s)%s\n",
                addr, addr + size, t, p, comment, m);

#if SDK_DRIVER_VERSION >= KERNEL_VERSION(1, 8, 0)
    struct sgx_enclave_add_page param = {
        .addr    = secs->baseaddr + (uint64_t) addr,
        .src     = (uint64_t) (user_addr ? : zero_page),
        .secinfo = (uint64_t) &secinfo,
        .mrmask  = skip_eextend ? 0 : (uint16_t) -1,
    };

    uint64_t added_size = 0;
    while (added_size < size) {
        ret = INLINE_SYSCALL(ioctl, 3, isgx_device,
                             SGX_IOC_ENCLAVE_ADD_PAGE, &param);
        if (IS_ERR(ret)) {
            SGX_DBG(DBG_I, "Enclave add page returned %d\n", ret);
            return -ERRNO(ret);
        }

        param.addr += pagesize;
        if (param.src != (uint64_t) zero_page) param.src += pagesize;
        added_size += pagesize;
    }
#else
    struct gsgx_enclave_add_pages param = {
        .addr      = secs->baseaddr + (uint64_t) addr,
        .user_addr = (uint64_t) user_addr,
        .size      = size,
        .secinfo   = (uint64_t) &secinfo,
        .flags     = skip_eextend ? GSGX_ENCLAVE_ADD_PAGES_SKIP_EEXTEND : 0,
    };

    if (!user_addr) {
        param.user_addr = (unsigned long) zero_page;
        param.flags |= GSGX_ENCLAVE_ADD_PAGES_REPEAT_SRC;
    }

    ret = INLINE_SYSCALL(ioctl, 3, gsgx_device,
                         GSGX_IOCTL_ENCLAVE_ADD_PAGES,
                         &param);
    if (IS_ERR(ret)) {
        SGX_DBG(DBG_I, "Enclave add page returned %d\n", ret);
        return -ERRNO(ret);
    }
#endif

    return 0;
}
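
/* EINIT: finalize the enclave measurement and move the enclave to the
 * initialized state, passing the SIGSTRUCT and the launch token to the
 * driver. A nonzero return value from the ioctl is an SGX error code; the
 * switch below maps the common ones to readable messages. */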
int init_enclave(sgx_arch_secs_t * secs,
                 sgx_arch_sigstruct_t * sigstruct,
                 sgx_arch_token_t * token)
{
    unsigned long enclave_valid_addr =
                secs->baseaddr + secs->size - pagesize;

    SGX_DBG(DBG_I, "enclave initializing:\n");
    SGX_DBG(DBG_I, "    enclave id:   0x%016lx\n", enclave_valid_addr);
    SGX_DBG(DBG_I, "    enclave hash:");
    for (size_t i = 0 ; i < sizeof(sgx_arch_hash_t) ; i++)
        SGX_DBG(DBG_I, " %02x", sigstruct->enclave_hash[i]);
    SGX_DBG(DBG_I, "\n");

#if SDK_DRIVER_VERSION >= KERNEL_VERSION(1, 8, 0)
    struct sgx_enclave_init param = {
        .addr       = enclave_valid_addr,
        .sigstruct  = (uint64_t) sigstruct,
        .einittoken = (uint64_t) token,
    };
    int ret = INLINE_SYSCALL(ioctl, 3, isgx_device, SGX_IOC_ENCLAVE_INIT,
                             &param);
#else
    struct gsgx_enclave_init param = {
        .addr       = enclave_valid_addr,
        .sigstruct  = (uint64_t) sigstruct,
        .einittoken = (uint64_t) token,
    };
    int ret = INLINE_SYSCALL(ioctl, 3, gsgx_device, GSGX_IOCTL_ENCLAVE_INIT,
                             &param);
#endif

    if (IS_ERR(ret)) {
        return -ERRNO(ret);
    }

    if (ret) {
        const char * error;
        /* DEP 3/22/17: Try to improve error messages */
        switch (ret) {
            case SGX_INVALID_SIG_STRUCT:
                error = "Invalid SIGSTRUCT";          break;
            case SGX_INVALID_ATTRIBUTE:
                error = "Invalid enclave attribute";  break;
            case SGX_INVALID_MEASUREMENT:
                error = "Invalid measurement";        break;
            case SGX_INVALID_SIGNATURE:
                error = "Invalid signature";          break;
            case SGX_INVALID_LICENSE:
                error = "Invalid EINIT token";        break;
            case SGX_INVALID_CPUSVN:
                error = "Invalid CPU SVN";            break;
            default:
                error = "Unknown reason";             break;
        }
        SGX_DBG(DBG_I, "enclave EINIT failed - %s\n", error);
        return -EPERM;
    }

    return 0;
}
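
/* Unmap the enclave's address range; releasing the mapping is what lets the
 * SGX driver tear down the enclave and reclaim its EPC pages. */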
int destroy_enclave(void * base_addr, size_t length)
{
    SGX_DBG(DBG_I, "destroying enclave...\n");

    int ret = INLINE_SYSCALL(munmap, 2, base_addr, length);

    if (IS_ERR(ret)) {
        SGX_DBG(DBG_I, "enclave EDESTROY failed\n");
        return -ERRNO(ret);
    }

    return 0;
}