// tcs.cpp
  1. /*
  2. * Copyright (C) 2011-2018 Intel Corporation. All rights reserved.
  3. *
  4. * Redistribution and use in source and binary forms, with or without
  5. * modification, are permitted provided that the following conditions
  6. * are met:
  7. *
  8. * * Redistributions of source code must retain the above copyright
  9. * notice, this list of conditions and the following disclaimer.
  10. * * Redistributions in binary form must reproduce the above copyright
  11. * notice, this list of conditions and the following disclaimer in
  12. * the documentation and/or other materials provided with the
  13. * distribution.
  14. * * Neither the name of Intel Corporation nor the names of its
  15. * contributors may be used to endorse or promote products derived
  16. * from this software without specific prior written permission.
  17. *
  18. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  19. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  20. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  21. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  22. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  23. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  24. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  25. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  26. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  27. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  28. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  29. *
  30. */
#include "tcs.h"
#include "se_trace.h"
#include "sgx_error.h"
#include "se_memory.h"
#include "se_thread.h"
#include "routine.h"
#include "enclave_creator.h"
#include "rts.h"
#include "enclave.h"
#include <assert.h>
#include <algorithm>
  41. extern se_thread_id_t get_thread_id();
  42. int do_ecall(const int fn, const void *ocall_table, const void *ms, CTrustThread *trust_thread);
// Wraps one TCS page of `enclave` in a host-side bookkeeping object.
// The reference count starts at 0 and the wait event is created lazily
// by get_event().
CTrustThread::CTrustThread(tcs_t *tcs, CEnclave* enclave)
    : m_tcs(tcs)
    , m_enclave(enclave)
    , m_reference(0)
    , m_event(NULL)
{
    // Zero the debug info block and record which TCS it describes, so a
    // debugger can later walk the per-thread OCALL frame chain
    // (see push_ocall_frame()/pop_ocall_frame()).
    memset(&m_tcs_info, 0, sizeof(debug_tcs_info_t));
    m_tcs_info.TCS_address = reinterpret_cast<void*>(tcs);
    m_tcs_info.ocall_frame = 0;
    m_tcs_info.thread_id = 0;
}
CTrustThread::~CTrustThread()
{
    // m_event may still be NULL if get_event() was never called;
    // se_event_destroy() is expected to tolerate that.
    // NOTE(review): confirm against the se_event_destroy() implementation.
    se_event_destroy(m_event);
    m_event = NULL;
}
  59. se_handle_t CTrustThread::get_event()
  60. {
  61. if (m_event == NULL)
  62. m_event = se_event_init();
  63. return m_event;
  64. }
  65. void CTrustThread::push_ocall_frame(ocall_frame_t* frame_point)
  66. {
  67. frame_point->index = this->get_reference();
  68. frame_point->pre_last_frame = m_tcs_info.ocall_frame;
  69. m_tcs_info.ocall_frame = reinterpret_cast<uintptr_t>(frame_point);
  70. m_tcs_info.thread_id = get_thread_id();
  71. }
  72. void CTrustThread::pop_ocall_frame()
  73. {
  74. ocall_frame_t* last_ocall_frame = reinterpret_cast<ocall_frame_t*>(m_tcs_info.ocall_frame);
  75. if (last_ocall_frame)
  76. {
  77. m_tcs_info.ocall_frame = last_ocall_frame->pre_last_frame;
  78. }
  79. }
  80. CTrustThreadPool::CTrustThreadPool(uint32_t tcs_min_pool)
  81. {
  82. m_thread_list = NULL;
  83. m_utility_thread = NULL;
  84. m_tcs_min_pool = tcs_min_pool;
  85. m_need_to_wait_for_new_thread = false;
  86. }
// Tear down the pool, freeing every CTrustThread it owns.  The four
// ownership sets (free list, unallocated list, bound-thread cache, and
// the reserved utility thread) are disjoint, so each object is deleted
// exactly once.
CTrustThreadPool::~CTrustThreadPool()
{
    LockGuard lock(&m_thread_mutex);
    //destroy free tcs list
    for(vector<CTrustThread *>::iterator it=m_free_thread_vector.begin(); it!=m_free_thread_vector.end(); it++)
    {
        delete *it;
    }
    m_free_thread_vector.clear();
    //destroy unallocated tcs list
    for(vector<CTrustThread *>::iterator it=m_unallocated_threads.begin(); it!=m_unallocated_threads.end(); it++)
    {
        delete *it;
    }
    m_unallocated_threads.clear();
    //destroy thread cache: free both the CTrustThread objects and the
    //list nodes that point at them.
    Node<se_thread_id_t, CTrustThread*>* it = m_thread_list, *tmp = NULL;
    while (it != NULL)
    {
        delete it->value;
        tmp = it;
        it = it->next;
        delete tmp;
    }
    m_thread_list = NULL;
    //the utility thread is never stored in any of the containers above
    //(see add_thread()), so release it separately.
    if (m_utility_thread)
    {
        delete m_utility_thread;
        m_utility_thread = NULL;
    }
}
  118. void get_thread_set(vector<se_thread_id_t> &thread_vector);
  119. inline int CTrustThreadPool::find_thread(vector<se_thread_id_t> &thread_vector, se_thread_id_t thread_id)
  120. {
  121. for(vector<se_thread_id_t>::iterator it=thread_vector.begin(); it!=thread_vector.end(); it++)
  122. if(*it == thread_id)
  123. return TRUE;
  124. return FALSE;
  125. }
  126. inline CTrustThread * CTrustThreadPool::get_free_thread()
  127. {
  128. LockGuard lock(&m_free_thread_mutex);
  129. if(true == m_free_thread_vector.empty())
  130. {
  131. return NULL;
  132. }
  133. //if there is free tcs, remove it from free list
  134. CTrustThread *thread_node = m_free_thread_vector.back();
  135. m_free_thread_vector.pop_back();
  136. return thread_node;
  137. }
  138. //This tcs policy is bind tcs with one thread.
  139. int CTrustThreadPool::bind_thread(const se_thread_id_t thread_id, CTrustThread * const trust_thread)
  140. {
  141. if (m_thread_list == NULL) {
  142. m_thread_list = new Node<se_thread_id_t, CTrustThread*>(thread_id, trust_thread);
  143. } else {
  144. Node<se_thread_id_t, CTrustThread*>* it = new Node<se_thread_id_t, CTrustThread*>(thread_id, trust_thread);
  145. if (m_thread_list->InsertNext(it) == false) {
  146. delete it;
  147. SE_TRACE(SE_TRACE_WARNING, "trust thread %x is already added to the list\n", trust_thread);
  148. return FALSE;
  149. }
  150. }
  151. return TRUE;
  152. }
// Detach the trust thread bound to thread_id (if any), clear its
// reference count, and return it to the free list for reuse.
void CTrustThreadPool::unbind_thread(const se_thread_id_t thread_id)
{
    CTrustThread *trust_thread = nullptr;
    if (m_thread_list)
    {
        auto it = m_thread_list->Remove(thread_id);
        if(it)
        {
            trust_thread = it->value;
            trust_thread->reset_ref();
            add_to_free_thread_vector(trust_thread);
            //Remove() may have unlinked the list head itself; advance the
            //head pointer before freeing the node.
            if(it == m_thread_list)
            {
                m_thread_list = it->next;
            }
            delete it;
        }
    }
}
  172. CTrustThread * CTrustThreadPool::get_bound_thread(const se_thread_id_t thread_id)
  173. {
  174. CTrustThread *trust_thread = nullptr;
  175. if (m_thread_list)
  176. {
  177. auto it = m_thread_list->Find(thread_id);
  178. if (it)
  179. trust_thread = it->value;
  180. }
  181. return trust_thread;
  182. }
// Wrap a raw TCS in a CTrustThread and register it with the pool.
// is_unallocated == true means the TCS is dynamic and not yet usable;
// it is parked in m_unallocated_threads until new_thread() converts it
// via ECMD_MKTCS.  Ownership of the new object stays with the pool.
CTrustThread * CTrustThreadPool::add_thread(tcs_t * const tcs, CEnclave * const enclave, bool is_unallocated)
{
    CTrustThread *trust_thread = new CTrustThread(tcs, enclave);
    LockGuard lock(&m_thread_mutex);
    //add tcs to free list
    if(!is_unallocated)
    {
        //For an EDMM-capable enclave that has dynamic TCSes, the first
        //static thread is reserved as the "utility thread" (used to run
        //ECMD_MKTCS and fill the mini pool) rather than handed to callers.
        if (g_enclave_creator->is_EDMM_supported(enclave->get_enclave_id()) && !m_utility_thread && (enclave->get_dynamic_tcs_list_size() != 0))
            m_utility_thread = trust_thread;
        else
            m_free_thread_vector.push_back(trust_thread);
    }
    else
    {
        m_unallocated_threads.push_back(trust_thread);
    }
    return trust_thread;
}
  201. CTrustThread *CTrustThreadPool::get_bound_thread(const tcs_t *tcs)
  202. {
  203. //Since now this function will be call outside, we need get lock to protect map
  204. LockGuard lock(&m_thread_mutex);
  205. CTrustThread *trust_thread = NULL;
  206. if (m_thread_list == NULL)
  207. return NULL;
  208. Node<se_thread_id_t, CTrustThread*>* it = m_thread_list;
  209. while (it != NULL) {
  210. trust_thread = it->value;
  211. if(trust_thread->get_tcs() == tcs) {
  212. return trust_thread;
  213. }
  214. it = it->next;
  215. }
  216. return NULL;
  217. }
  218. std::vector<CTrustThread *> CTrustThreadPool::get_thread_list()
  219. {
  220. LockGuard lock(&m_thread_mutex);
  221. vector<CTrustThread *> threads;
  222. for(vector<CTrustThread *>::iterator it = m_free_thread_vector.begin(); it != m_free_thread_vector.end(); it++)
  223. {
  224. threads.push_back(*it);
  225. }
  226. Node<se_thread_id_t, CTrustThread*>* it = m_thread_list;
  227. while (it != NULL) {
  228. threads.push_back(it->value);
  229. it = it->next;
  230. }
  231. return threads;
  232. }
// Discard every thread-id binding and return all cached trust threads to
// the free list with their reference counts cleared.
void CTrustThreadPool::reset()
{
    //get lock at the begin of list walk.
    LockGuard lock(&m_thread_mutex);
    //walk through thread cache to free every element;
    Node<se_thread_id_t, CTrustThread*>* it = m_thread_list, *tmp = NULL;
    while(it != NULL)
    {
        tmp = it;
        it = it->next;
        CTrustThread *trust_thread = tmp->value;
        //remove from thread cache (only the node is freed; the trust
        //thread object itself is recycled below)
        delete tmp;
        trust_thread->reset_ref();
        add_to_free_thread_vector(trust_thread);
    }
    m_thread_list = NULL;
    return;
}
  252. void CTrustThreadPool::wake_threads()
  253. {
  254. LockGuard lock(&m_thread_mutex);
  255. Node<se_thread_id_t, CTrustThread*>* it = m_thread_list;
  256. while (it != NULL) {
  257. CTrustThread *thread = it->value;
  258. se_handle_t event = thread->get_event();
  259. se_event_wake(event);
  260. it = it->next;
  261. }
  262. }
// Internal helper: find or allocate a trust thread for the calling OS
// thread.  Caller must hold m_thread_mutex (acquire_thread() does).
// Returns NULL when no TCS is available even after garbage collection.
CTrustThread * CTrustThreadPool::_acquire_thread()
{
    //try to get tcs from thread cache
    se_thread_id_t thread_id = get_thread_id();
    CTrustThread *trust_thread = get_bound_thread(thread_id);
    //a cache hit is reused directly — unless it is the reserved utility
    //thread, which must stay dedicated to pool maintenance
    if(NULL != trust_thread && m_utility_thread != trust_thread)
    {
        return trust_thread;
    }
    //try get tcs from free list;
    trust_thread = get_free_thread();
    //if there is no free tcs, collect useless tcs.
    if(NULL == trust_thread)
    {
        if(!garbage_collect())
            return NULL;
        //get tcs from free list again.
        trust_thread = get_free_thread();
        assert(NULL != trust_thread);
    }
    //we have got a free tcs. add the tcs to thread cache
    //NOTE(review): bind_thread()'s FALSE return (duplicate key) is ignored
    //here — confirm that is intentional.
    bind_thread(thread_id, trust_thread);
    return trust_thread;
}
// Pick (and reference-count) a trust thread for the calling OS thread.
// ECMD_INIT_ENCLAVE / ECMD_UNINIT_ENCLAVE are special: they run on the
// reserved utility thread when one exists.  For ordinary ecalls, if no
// TCS is available and EDMM is active, block until fill_tcs_mini_pool()
// creates one.  Returns NULL when no thread can be obtained.
CTrustThread * CTrustThreadPool::acquire_thread(int ecall_cmd)
{
    LockGuard lock(&m_thread_mutex);
    CTrustThread *trust_thread = NULL;
    bool is_special_ecall = (ecall_cmd == ECMD_INIT_ENCLAVE) || (ecall_cmd == ECMD_UNINIT_ENCLAVE) ;
    if(is_special_ecall == true)
    {
        if (m_utility_thread)
        {
            trust_thread = m_utility_thread;
            assert(trust_thread != NULL);
            if(ecall_cmd == ECMD_UNINIT_ENCLAVE)
            {
                //on enclave destruction the utility thread is returned to
                //the normal pool: bind it to the current OS thread and
                //drop the reservation
                se_thread_id_t thread_id = get_thread_id();
                unbind_thread(thread_id);
                bind_thread(thread_id, trust_thread);
                m_utility_thread = NULL;
            }
        }
        else
        {
            trust_thread = _acquire_thread();
        }
    }
    else
    {
        trust_thread = _acquire_thread();
        // for edmm feature, we don't support simulation mode yet
        // m_utility_thread will be NULL in simulation mode
        if(NULL == trust_thread && NULL != m_utility_thread)
        {
            //no TCS available: ask the enclave to grow the pool, then wait
            //for fill_tcs_mini_pool() to clear the flag and signal us
            m_need_to_wait_for_new_thread_cond.lock();
            m_utility_thread->get_enclave()->fill_tcs_mini_pool_fn();
            m_need_to_wait_for_new_thread = true;
            while(m_need_to_wait_for_new_thread != false)
            {
                m_need_to_wait_for_new_thread_cond.wait();
            }
            m_need_to_wait_for_new_thread_cond.unlock();
            trust_thread = _acquire_thread();
        }
    }
    if(trust_thread)
    {
        trust_thread->increase_ref();
    }
    //proactively top up the mini pool when it is running low (only for
    //ordinary ecalls; need_to_new_thread() implies a utility thread exists
    //in hardware mode)
    if(is_special_ecall != true &&
        need_to_new_thread() == true)
    {
        m_utility_thread->get_enclave()->fill_tcs_mini_pool_fn();
    }
    return trust_thread;
}
//Do nothing for bind mode, the tcs is always bound to a thread.
// Only drops the reference taken by acquire_thread(); the trust thread
// stays bound to its OS thread until garbage_collect()/unbind_thread().
void CTrustThreadPool::release_thread(CTrustThread * const trust_thread)
{
    LockGuard lock(&m_thread_mutex);
    trust_thread->decrease_ref();
    return;
}
  347. bool CTrustThreadPool::is_dynamic_thread_exist()
  348. {
  349. if (m_unallocated_threads.empty())
  350. {
  351. return false;
  352. }
  353. else
  354. {
  355. return true;
  356. }
  357. }
// Decide whether another dynamic thread should be created to refill the
// TCS mini pool.  Policy: with m_tcs_min_pool == 0, allocate only when
// the free list is empty; otherwise keep at least m_tcs_min_pool free
// threads available.  Always false once the unallocated list is drained.
bool CTrustThreadPool::need_to_new_thread()
{
    LockGuard lock(&m_free_thread_mutex);
    //no raw dynamic TCSes left to convert
    if (m_unallocated_threads.empty())
    {
        return false;
    }
    //m_tcs_min_pool == 0: size() > 0 means the free list is non-empty
    if(m_tcs_min_pool == 0 && m_free_thread_vector.size() > m_tcs_min_pool)
    {
        return false;
    }
    //m_tcs_min_pool != 0: the minimum pool level is already satisfied
    if(m_tcs_min_pool != 0 && m_free_thread_vector.size() >= m_tcs_min_pool)
    {
        return false;
    }
    return true;
}
// OCALL trampoline used by new_thread()'s one-entry ocall table: during
// ECMD_MKTCS the enclave calls back out here with the page address, and
// the enclave creator converts that page into a TCS.
static int make_tcs(size_t tcs)
{
    return g_enclave_creator->mktcs(tcs);
}
// Minimal marshaling struct for do_ecall(): ECMD_MKTCS only needs to
// pass a single pointer (the raw TCS address) into the enclave.
struct ms_str
{
    void * ms;
};
  383. #define fastcall __attribute__((regparm(3),noinline,visibility("default")))
  384. //this function is used to notify GDB scripts
  385. //GDB is supposed to have a breakpoint on urts_add_tcs to receive debug interupt
  386. //once the breakpoint has been hit, GDB extracts the address of tcs and sets DBGOPTIN for the tcs
  387. extern "C" void fastcall urts_add_tcs(tcs_t * const tcs)
  388. {
  389. UNUSED(tcs);
  390. SE_TRACE(SE_TRACE_WARNING, "urts_add_tcs %x\n", tcs);
  391. }
// Convert one unallocated (dynamic) TCS into a usable trust thread by
// running the ECMD_MKTCS ecall on the reserved utility thread.
// Returns SGX_SUCCESS when there is nothing left to allocate or the
// conversion succeeded; SGX_ERROR_UNEXPECTED when no utility thread
// exists (e.g. simulation mode); otherwise the do_ecall() status.
sgx_status_t CTrustThreadPool::new_thread()
{
    sgx_status_t ret = SGX_ERROR_UNEXPECTED;
    if(!m_utility_thread)
    {
        return ret;
    }
    if (m_unallocated_threads.empty())
    {
        return SGX_SUCCESS;
    }
    //build a one-entry ocall table on the stack; its single slot is
    //make_tcs(), which the enclave calls back during ECMD_MKTCS
    size_t octbl_buf[ROUND_TO(sizeof(sgx_ocall_table_t) + sizeof(void*), sizeof(size_t)) / sizeof(size_t)];
    sgx_ocall_table_t *octbl = reinterpret_cast<sgx_ocall_table_t*>(octbl_buf);
    octbl->count = 1;
    void **ocalls = octbl->ocall;
    *ocalls = reinterpret_cast<void*>(make_tcs);
    CTrustThread *trust_thread = m_unallocated_threads.back();
    tcs_t *tcsp = trust_thread->get_tcs();
    //the marshaling struct just carries the raw TCS address
    struct ms_str ms1;
    ms1.ms = tcsp;
    ret = (sgx_status_t)do_ecall(ECMD_MKTCS, octbl, &ms1, m_utility_thread);
    if (SGX_SUCCESS == ret )
    {
        //add tcs to debug tcs info list
        trust_thread->get_enclave()->add_thread(trust_thread);
        add_to_free_thread_vector(trust_thread);
        m_unallocated_threads.pop_back();
        //notify the debugger (GDB breakpoint hook) about the new TCS
        urts_add_tcs(tcsp);
    }
    return ret;
}
  423. void CTrustThreadPool::add_to_free_thread_vector(CTrustThread* it)
  424. {
  425. LockGuard lock(&m_free_thread_mutex);
  426. m_free_thread_vector.push_back(it);
  427. }
// Keep creating dynamic threads until the mini pool is full or creation
// fails.  After every iteration it performs the condvar handshake with
// acquire_thread(): if a caller is blocked waiting for a new TCS, clear
// the flag and signal it.  Returns the last new_thread() status.
sgx_status_t CTrustThreadPool::fill_tcs_mini_pool()
{
    sgx_status_t ret = SGX_SUCCESS;
    bool stop = false;
    while(stop != true)
    {
        if(need_to_new_thread() == true)
        {
            ret = new_thread();
            if(ret != SGX_SUCCESS)
            {
                //stop on the first failure; ret carries the error out
                stop= true;
            }
        }
        else
        {
            stop = true;
        }
        //hand-shake with acquire_thread(): flag and signal are protected
        //by the condition's own lock
        m_need_to_wait_for_new_thread_cond.lock();
        if(m_need_to_wait_for_new_thread == true)
        {
            m_need_to_wait_for_new_thread = false;
            m_need_to_wait_for_new_thread_cond.signal();
        }
        m_need_to_wait_for_new_thread_cond.unlock();
    }
    return ret;
}
//The return value stands for the number of trust threads recycled to the free list.
// Bind-mode GC: scan the bound-thread cache for OS threads that have
// exited and reclaim their TCSes.  A dead binding with reference count 0
// is recycled onto the free list; a non-zero count indicates abnormal
// termination, so that trust thread is destroyed instead of reused.
int CThreadPoolBindMode::garbage_collect()
{
    int nr_free = 0;
    //if free list is NULL, recycle tcs.
    //get thread id set of current process
    vector<se_thread_id_t> thread_vector;
    get_thread_set(thread_vector);
    //walk through thread cache to see if there is any thread that has exited
    Node<se_thread_id_t, CTrustThread*>* it = m_thread_list, *pre = NULL, *tmp = NULL;
    while(it != NULL)
    {
        se_thread_id_t thread_id = it->key;
        //if the thread has exited
        if(FALSE == find_thread(thread_vector, thread_id))
        {
            //if the reference is not 0, there must be some wrong termination, so we can't recycle such trust thread.
            //return to free_tcs list
            if(0 == it->value->get_reference())
            {
                add_to_free_thread_vector(it->value);
                nr_free++;
            }
            else
            {
                //the list only record the pointer of trust thread, so we can delete it first and then erase from map.
                delete it->value;
            }
            //unlink the current node: fix up the head pointer and/or the
            //predecessor's next link before freeing the node itself
            tmp = it;
            it = it->next;
            if (tmp == m_thread_list)
                m_thread_list = it;
            if (pre != NULL)
                pre->next = it;
            //remove from thread cache
            delete tmp;
        }
        else
        {
            pre = it;
            it = it->next;
        }
    }
    return nr_free;
}
// Unbind-mode GC: any bound trust thread whose reference count is 0 is
// not in use, so unlink it from the cache and return it to the free
// list.  Returns the number of threads recycled.
int CThreadPoolUnBindMode::garbage_collect()
{
    int nr_free = 0;
    //walk through to free unused trust thread
    Node<se_thread_id_t, CTrustThread*>* it = m_thread_list, *pre = NULL, *tmp = NULL;
    while(it != NULL)
    {
        //if the reference is 0, then the trust thread is not in use, so return to free_tcs list
        if(0 == it->value->get_reference())
        {
            add_to_free_thread_vector(it->value);
            nr_free++;
            //unlink the node: fix up the head pointer and/or the
            //predecessor's next link before freeing it
            tmp = it;
            it = it->next;
            if (tmp == m_thread_list)
                m_thread_list = it;
            if (pre != NULL)
                pre->next = it;
            //remove from thread cache
            delete tmp;
        }
        else
        {
            pre = it;
            it = it->next;
        }
    }
    return nr_free;
}