enclave.cpp

/*
 * Copyright (C) 2011-2018 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include "enclave.h"
#include "util.h"
#include "se_detect.h"
#include "enclave_creator.h"
#include "sgx_error.h"
#include "se_error_internal.h"
#include "debugger_support.h"
#include "se_memory.h"
#include "urts_trim.h"
#include "urts_emodpr.h"
#include <assert.h>
#include "rts.h"

using namespace std;

int do_ecall(const int fn, const void *ocall_table, const void *ms, CTrustThread *trust_thread);
int do_ocall(const bridge_fn_t bridge, void *ms);
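
// CEnclave is the untrusted-side bookkeeping object for one loaded enclave:
// it owns the debug info handed to the debugger, the TCS thread pool used to
// dispatch ECALLs, and the reader/writer lock that serializes ECALLs against
// CEnclave::destroy().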
CEnclave::CEnclave(CLoader &ldr)
    : m_loader(ldr)
    , m_enclave_id(0)
    , m_start_addr(NULL)
    , m_size(0)
    , m_power_event_flag(0)
    , m_ref(0)
    , m_zombie(false)
    , m_thread_pool(NULL)
    , m_dbg_flag(false)
    , m_destroyed(false)
    , m_version(0)
    , m_ocall_table(NULL)
    , m_pthread_is_valid(false)
    , m_new_thread_event(NULL)
    , m_sealed_key(NULL)
{
    memset(&m_enclave_info, 0, sizeof(debug_enclave_info_t));
    se_init_rwlock(&m_rwlock);
}
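
// initialize() records the enclave identity (id, base address, size, version),
// copies the enclave file name into the debug info structure for the debugger,
// creates the event used by the TCS mini-pool filler thread, and builds the
// TCS thread pool according to the TCS binding policy from the metadata.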
sgx_status_t CEnclave::initialize(const se_file_t& file, const sgx_enclave_id_t enclave_id, void * const start_addr, const uint64_t enclave_size,
                                  const uint32_t tcs_policy, const uint32_t enclave_version, const uint32_t tcs_min_pool)
{
    uint32_t name_len = file.name_len;
    if (file.unicode)
        name_len *= (uint32_t)sizeof(wchar_t);

    const int buf_len = name_len + 4; //+4, because we need to copy the string terminator ('\0').

    m_enclave_info.lpFileName = calloc(1, buf_len);
    if (m_enclave_info.lpFileName == NULL)
        return SGX_ERROR_OUT_OF_MEMORY;

    memcpy_s(m_enclave_info.lpFileName, name_len, file.name, name_len);
    m_enclave_info.unicode = file.unicode?0:1;
    m_enclave_info.file_name_size = name_len;
    m_enclave_info.struct_version = DEBUG_INFO_STRUCT_VERSION;

    m_enclave_id = enclave_id;
    m_start_addr = start_addr;
    m_size = enclave_size;
    m_version = enclave_version;

    m_new_thread_event = se_event_init();
    if(m_new_thread_event == NULL)
    {
        free(m_enclave_info.lpFileName);
        m_enclave_info.lpFileName = NULL;
        return SGX_ERROR_OUT_OF_MEMORY;
    }

    if(TCS_POLICY_BIND == tcs_policy)
    {
        m_thread_pool = new CThreadPoolBindMode(tcs_min_pool);
    }
    else if(TCS_POLICY_UNBIND == tcs_policy)
    {
        //unbind mode: a TCS is not bound to a specific calling thread.
        m_thread_pool = new CThreadPoolUnBindMode(tcs_min_pool);
    }
    else
    {
        SE_TRACE(SE_TRACE_WARNING, "BUG: unknown tcs policy\n");
        //Should NOT reach here, because we have validated the metadata before.
        free(m_enclave_info.lpFileName);
        m_enclave_info.lpFileName = NULL;
        return SGX_ERROR_INVALID_PARAMETER;
    }

    return SGX_SUCCESS;
}
CEnclave::~CEnclave()
{
    if (m_thread_pool)
    {
        delete m_thread_pool;
        m_thread_pool = NULL;
    }

    m_ocall_table = NULL;

    destory_debug_info(&m_enclave_info);

    se_fini_rwlock(&m_rwlock);

    se_event_destroy(m_new_thread_event);
    m_new_thread_event = NULL;
}
void * CEnclave::get_symbol_address(const char * const symbol)
{
    return m_loader.get_symbol_address(symbol);
}

sgx_enclave_id_t CEnclave::get_enclave_id()
{
    return m_enclave_id;
}

uint32_t CEnclave::get_enclave_version()
{
    return m_version;
}

size_t CEnclave::get_dynamic_tcs_list_size()
{
    std::vector<std::pair<tcs_t *, bool>> tcs_list = m_loader.get_tcs_list();
    size_t count = 0;
    for (size_t idx = 0; idx < tcs_list.size(); ++idx)
    {
        if(tcs_list[idx].second == true)
        {
            count++;
        }
    }
    return count;
}

uint8_t *CEnclave::get_sealed_key()
{
    return m_sealed_key;
}

void CEnclave::set_sealed_key(uint8_t *sealed_key)
{
    m_sealed_key = sealed_key;
}
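
// Map a tRTS-internal error code to a public sgx_status_t. A failed read lock
// means the enclave was destroyed while the call was in flight, which is
// reported as SGX_ERROR_ENCLAVE_LOST.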
sgx_status_t CEnclave::error_trts2urts(unsigned int trts_error)
{
    if(trts_error == (unsigned int)SE_ERROR_READ_LOCK_FAIL)
    {
        return SGX_ERROR_ENCLAVE_LOST;
    }

    //tRTS may directly return the external error code, so we don't need to translate it.
    if(EXTERNAL_ERROR != (trts_error >> MAIN_MOD_SHIFT))
    {
        SE_TRACE(SE_TRACE_WARNING, "trts return error %x, it should be urts/trts bug\n", trts_error);
        return SGX_ERROR_UNEXPECTED;
    }

    return (sgx_status_t)trts_error;
}
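
// ecall() takes the read lock (so destroy() cannot run concurrently), acquires
// a trust thread/TCS from the pool, and enters the enclave via do_ecall().
// For ECMD_UNINIT_ENCLAVE it first stops the TCS mini-pool filler thread and,
// when EDMM is supported, trims the pages of every other TCS.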
sgx_status_t CEnclave::ecall(const int proc, const void *ocall_table, void *ms)
{
    if(se_try_rdlock(&m_rwlock))
    {
        //Maybe the enclave has been destroyed after acquiring/releasing m_rwlock. See CEnclave::destroy().
        if(m_destroyed)
        {
            se_rdunlock(&m_rwlock);
            return SGX_ERROR_ENCLAVE_LOST;
        }

        //do sgx_ecall
        CTrustThread *trust_thread = get_tcs(proc);
        unsigned ret = SGX_ERROR_OUT_OF_TCS;

        if(NULL != trust_thread)
        {
            if (NULL == m_ocall_table)
            {
                m_ocall_table = (sgx_ocall_table_t *)ocall_table;
            }
            if (proc == ECMD_UNINIT_ENCLAVE)
            {
                if(m_pthread_is_valid == true)
                {
                    m_pthread_is_valid = false;
                    se_event_wake(m_new_thread_event);
                    pthread_join(m_pthread_tid, NULL);
                }
                ocall_table = m_ocall_table;

                std::vector<CTrustThread *> threads = m_thread_pool->get_thread_list();
                for (unsigned idx = 0; idx < threads.size(); ++idx)
                {
                    if (trust_thread->get_tcs() == threads[idx]->get_tcs())
                    {
                        continue;
                    }
                    uint64_t start = (uint64_t)(threads[idx]->get_tcs());
                    uint64_t end = start + (1 << SE_PAGE_SHIFT);
                    if (get_enclave_creator()->is_EDMM_supported(CEnclave::get_enclave_id()))
                    {
                        if (SGX_SUCCESS != (ret = get_enclave_creator()->trim_range(start, end)))
                        {
                            se_rdunlock(&m_rwlock);
                            return (sgx_status_t)ret;
                        }
                    }
                }
            }
            ret = do_ecall(proc, ocall_table, ms, trust_thread);
        }
        put_tcs(trust_thread);

        //Release the read lock; the only exception is that the enclave was already removed during an ocall.
        if(AbnormalTermination() || ret != SE_ERROR_READ_LOCK_FAIL)
        {
            se_rdunlock(&m_rwlock);
        }

        return error_trts2urts(ret);
    }
    else
    {
        return SGX_ERROR_ENCLAVE_LOST;
    }
}
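
// ocall() runs on the way out of the enclave: the read lock is dropped while
// the untrusted code executes (either an EDMM helper or a bridge function from
// the OCALL table) and re-acquired afterwards. If the lock cannot be retaken,
// or the enclave was destroyed in the meantime, SE_ERROR_READ_LOCK_FAIL is
// returned so the ECALL path reports SGX_ERROR_ENCLAVE_LOST.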
int CEnclave::ocall(const unsigned int proc, const sgx_ocall_table_t *ocall_table, void *ms)
{
    int error = SGX_ERROR_UNEXPECTED;

    if ((int)proc == EDMM_TRIM || (int)proc == EDMM_TRIM_COMMIT || (int)proc == EDMM_MODPR)
    {
        se_rdunlock(&m_rwlock);
        if((int)proc == EDMM_TRIM)
            error = ocall_trim_range(ms);
        else if ((int)proc == EDMM_TRIM_COMMIT)
            error = ocall_trim_accept(ms);
        else if ((int)proc == EDMM_MODPR)
            error = ocall_emodpr(ms);
    }
    else
    {
        //validate that proc is within the ocall_table
        if(NULL == ocall_table ||
            (proc >= ocall_table->count))
        {
            return SGX_ERROR_INVALID_FUNCTION;
        }
        se_rdunlock(&m_rwlock);
        bridge_fn_t bridge = reinterpret_cast<bridge_fn_t>(ocall_table->ocall[proc]);
        error = do_ocall(bridge, ms);
    }

    if (!se_try_rdlock(&m_rwlock))
    {
        //Probably the enclave has been destroyed, so we can't get the read lock.
        error = SE_ERROR_READ_LOCK_FAIL;
    }
    //We use m_destroyed to determine if the enclave has been destroyed.
    else if(m_destroyed)
    {
        //Enclave has been destroyed; emulate that we failed to get the read lock.
        se_rdunlock(&m_rwlock);
        error = SE_ERROR_READ_LOCK_FAIL;
    }

    return error;
}
const debug_enclave_info_t* CEnclave::get_debug_info()
{
    return &m_enclave_info;
}

CTrustThread * CEnclave::get_tcs(int ecall_cmd)
{
    CTrustThread *trust_thread = m_thread_pool->acquire_thread(ecall_cmd);
    return trust_thread;
}
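
// Entry point of the background thread that keeps the TCS mini pool filled;
// it simply forwards to CEnclave::fill_tcs_mini_pool().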
void *fill_tcs_mini_pool_func(void *args)
{
    CEnclave *it = (CEnclave*)(args);
    if(it != NULL)
    {
        it->fill_tcs_mini_pool();
    }
    return NULL;
}

sgx_status_t CEnclave::fill_tcs_mini_pool_fn()
{
    pthread_t tid;
    if(m_pthread_is_valid == false)
    {
        m_pthread_is_valid = true;
        int ret = pthread_create(&tid, NULL, fill_tcs_mini_pool_func, (void *)(this));
        if(ret != 0)
        {
            m_pthread_is_valid = false;
            return SGX_ERROR_UNEXPECTED;
        }
        m_pthread_tid = tid;
    }
    else if(m_pthread_is_valid == true)
    {
        if(se_event_wake(m_new_thread_event) != SE_MUTEX_SUCCESS)
        {
            return SGX_ERROR_UNEXPECTED;
        }
    }
    return SGX_SUCCESS;
}
sgx_status_t CEnclave::fill_tcs_mini_pool()
{
    do
    {
        if(se_try_rdlock(&m_rwlock))
        {
            //Maybe the enclave has been destroyed after acquiring/releasing m_rwlock. See CEnclave::destroy().
            if(m_destroyed)
            {
                se_rdunlock(&m_rwlock);
                return SGX_ERROR_ENCLAVE_LOST;
            }
            if(m_pthread_is_valid == false)
            {
                se_rdunlock(&m_rwlock);
                return SGX_SUCCESS;
            }
            m_thread_pool->fill_tcs_mini_pool();
            se_rdunlock(&m_rwlock);
        }
        else
        {
            return SGX_ERROR_ENCLAVE_LOST;
        }
    } while(se_event_wait(m_new_thread_event) == SE_MUTEX_SUCCESS);
    return SGX_ERROR_UNEXPECTED;
}
void CEnclave::put_tcs(CTrustThread *trust_thread)
{
    if(NULL == trust_thread)
    {
        return;
    }
    m_thread_pool->release_thread(trust_thread);
}
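
// destroy() takes the write lock, so it waits for in-flight ECALLs to leave the
// enclave, then destroys the HW enclave and sets m_destroyed so that any later
// lock acquisition in ecall()/ocall() bails out with SGX_ERROR_ENCLAVE_LOST.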
void CEnclave::destroy()
{
    se_wtlock(&m_rwlock);

    get_enclave_creator()->destroy_enclave(ENCLAVE_ID_IOCTL, m_size);

    m_destroyed = true;
    //We are going to destroy m_rwlock. At this point an ecall may be in progress and trying to take m_rwlock.
    //To prevent such an ecall, we use m_destroyed to indicate that no ecall should go on. See CEnclave::ecall(...).
    //A new ecall to the enclave will return SGX_ERROR_INVALID_ENCLAVE_ID immediately.
    se_wtunlock(&m_rwlock);
    //We should not use the loader to destroy the enclave, because the loader has been removed after successful enclave loading.
    //m_loader.destroy_enclave();
}
void CEnclave::add_thread(tcs_t * const tcs, bool is_unallocated)
{
    CTrustThread *trust_thread = m_thread_pool->add_thread(tcs, this, is_unallocated);
    if(!is_unallocated)
    {
        insert_debug_tcs_info_head(&m_enclave_info, trust_thread->get_debug_info());
    }
}

void CEnclave::add_thread(CTrustThread * const trust_thread)
{
    insert_debug_tcs_info_head(&m_enclave_info, trust_thread->get_debug_info());
}

int CEnclave::set_extra_debug_info(secs_t& secs)
{
    void *g_peak_heap_used_addr = get_symbol_address("g_peak_heap_used");
    m_enclave_info.g_peak_heap_used_addr = g_peak_heap_used_addr;
    m_enclave_info.start_addr = secs.base;
    m_enclave_info.misc_select = secs.misc_select;

    if(g_peak_heap_used_addr == NULL)
    {
        SE_TRACE(SE_TRACE_DEBUG, "Symbol 'g_peak_heap_used' is not found\n");
        //This error should not break the loader and debugger, so the upper-layer function will ignore it.
        return SGX_ERROR_INVALID_ENCLAVE;
    }
    return SGX_SUCCESS;
}
void CEnclave::push_ocall_frame(ocall_frame_t* frame_point, CTrustThread *trust_thread)
{
    if(NULL == trust_thread)
    {
        return;
    }
    trust_thread->push_ocall_frame(frame_point);
}

void CEnclave::pop_ocall_frame(CTrustThread *trust_thread)
{
    if(NULL == trust_thread)
    {
        return;
    }
    trust_thread->pop_ocall_frame();
}
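
// CEnclavePool is the process-wide singleton that maps enclave IDs to CEnclave
// instances. The instances are kept in a singly linked list protected by
// m_enclave_mutex.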
CEnclavePool CEnclavePool::m_instance;

CEnclavePool::CEnclavePool()
{
    m_enclave_list = NULL;
    se_mutex_init(&m_enclave_mutex);
    SE_TRACE(SE_TRACE_NOTICE, "enter CEnclavePool constructor\n");
}

CEnclavePool *CEnclavePool::instance()
{
    return &m_instance;
}
int CEnclavePool::add_enclave(CEnclave *enclave)
{
    int result = TRUE;

    se_mutex_lock(&m_enclave_mutex);
    if (m_enclave_list == NULL) {
        m_enclave_list = new Node<sgx_enclave_id_t, CEnclave*>(enclave->get_enclave_id(), enclave);
    } else {
        Node<sgx_enclave_id_t, CEnclave*>* node = new Node<sgx_enclave_id_t, CEnclave*>(enclave->get_enclave_id(), enclave);
        if (m_enclave_list->InsertNext(node) == false) {
            delete node;
            SE_TRACE(SE_TRACE_WARNING, "the enclave %llx has already been added\n", enclave->get_enclave_id());
            result = FALSE;
        }
    }
    se_mutex_unlock(&m_enclave_mutex);
    return result;
}
CEnclave * CEnclavePool::get_enclave(const sgx_enclave_id_t enclave_id)
{
    se_mutex_lock(&m_enclave_mutex);
    Node<sgx_enclave_id_t, CEnclave*>* it = m_enclave_list->Find(enclave_id);
    if(it != NULL)
    {
        se_mutex_unlock(&m_enclave_mutex);
        return it->value;
    }
    else
    {
        se_mutex_unlock(&m_enclave_mutex);
        return NULL;
    }
}
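
// ref_enclave() additionally bumps the reference count under the pool lock;
// callers must pair it with unref_enclave() so a zombie enclave instance can
// be freed once the last reference is dropped.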
CEnclave * CEnclavePool::ref_enclave(const sgx_enclave_id_t enclave_id)
{
    se_mutex_lock(&m_enclave_mutex);
    Node<sgx_enclave_id_t, CEnclave*>* it = m_enclave_list->Find(enclave_id);
    if(it != NULL)
    {
        it->value->atomic_inc_ref();
        se_mutex_unlock(&m_enclave_mutex);
        return it->value;
    }
    else
    {
        se_mutex_unlock(&m_enclave_mutex);
        return NULL;
    }
}
void CEnclavePool::unref_enclave(CEnclave *enclave)
{
    //We use the enclave pool lock to protect the data. The lock is coarse, but more secure.
    se_mutex_lock(&m_enclave_mutex);

    //The ref count was increased in ref_enclave.
    uint32_t ref = enclave->atomic_dec_ref();

    //If the enclave is in the zombie state, the HW enclave must already have been destroyed.
    //And if the ref count is zero, the enclave instance will not be referenced any more,
    //so we delete the instance.
    //The other code path that deletes the enclave instance is CEnclavePool::remove_enclave.
    if(enclave->is_zombie() && !ref)
        delete enclave;

    se_mutex_unlock(&m_enclave_mutex);
}
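
// get_event() locates the enclave whose [base, base + size) range contains the
// given TCS and returns the untrusted event handle of the trust thread bound to
// that TCS; it is used by the untrusted event wait/wake paths.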
se_handle_t CEnclavePool::get_event(const void * const tcs)
{
    se_handle_t hevent = NULL;
    CEnclave *enclave = NULL;

    assert(tcs != NULL);

    se_mutex_lock(&m_enclave_mutex);
    Node<sgx_enclave_id_t, CEnclave*>* it = m_enclave_list;
    for(; it != NULL; it = it->next)
    {
        void *start = it->value->get_start_address();
        void *end = GET_PTR(void, start, it->value->get_size());
        /* check start & end */
        if (tcs >= start && tcs < end) {
            enclave = it->value;
            break;
        }
    }

    if (NULL != enclave)
    {
        CTrustThreadPool *pool = enclave->get_thread_pool();
        if (pool != NULL)
        {
            CTrustThread *thread = pool->get_bound_thread((const tcs_t *)tcs);
            if (thread != NULL)
                hevent = thread->get_event();
        }
    }
    se_mutex_unlock(&m_enclave_mutex);
    return hevent;
}
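
// remove_enclave() destroys the HW enclave and unlinks the CEnclave from the
// list. If the reference count is still non-zero (threads are still inside
// sgx_ocall), the instance is marked as a zombie and deleted later by
// unref_enclave(); threads blocked on untrusted events are woken so they can
// observe the loss. Note that get_enclave() locks m_enclave_mutex again while
// it is already held here, so the mutex is expected to be re-entrant.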
CEnclave* CEnclavePool::remove_enclave(const sgx_enclave_id_t enclave_id, sgx_status_t &status)
{
    status = SGX_SUCCESS;
    se_mutex_lock(&m_enclave_mutex);

    CEnclave *enclave = get_enclave(enclave_id);
    if(NULL == enclave)
    {
        status = SGX_ERROR_INVALID_ENCLAVE_ID;
        SE_TRACE(SE_TRACE_WARNING, "remove an unknown enclave\n");
        se_mutex_unlock(&m_enclave_mutex);
        return enclave;
    }

    enclave->destroy();

    //If the ref count is not 0, some thread may still be in sgx_ocall, so we can NOT delete the enclave instance.
    if(enclave->get_ref())
    {
        enclave->mark_zombie();

        /* When destroying the enclave, all threads that are waiting/about to wait
         * on an untrusted event need to be woken. Otherwise, they will stay
         * pending on the untrusted events forever, and the app would need to kill
         * the threads manually.
         */
        CTrustThreadPool *pool = enclave->get_thread_pool();
        pool->wake_threads();
        enclave = NULL;
    }

    Node<sgx_enclave_id_t, CEnclave*>* it = m_enclave_list->Remove(enclave_id);
    if (it == m_enclave_list)
        m_enclave_list = it->next;
    delete it;

    se_mutex_unlock(&m_enclave_mutex);
    return enclave;
}
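
// Send a pre-remove-enclave debug event for every loaded enclave so an attached
// debugger is informed before the enclaves are removed.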
void CEnclavePool::notify_debugger()
{
    se_mutex_lock(&m_enclave_mutex);
    if(m_enclave_list != NULL)
    {
        Node<sgx_enclave_id_t, CEnclave*>* it = m_enclave_list;
        for(; it != NULL; it = it->next)
        {
            //send a debug event to the debugger, whether the enclave is in debug mode or release mode
            debug_enclave_info_t * debug_info = const_cast<debug_enclave_info_t*>((it->value)->get_debug_info());
            generate_enclave_debug_event(URTS_EXCEPTION_PREREMOVEENCLAVE, debug_info);
        }
    }
    se_mutex_unlock(&m_enclave_mutex);
}
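
// For debug-mode enclaves, write the flag into the TCS FLAGS field (the 8-byte
// word right after the first 8 bytes of the TCS, whose lowest bit is DBGOPTIN)
// of the current process via se_write_process_mem(), so the debugger can opt
// the thread in or out of debugging.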
bool CEnclave::update_trust_thread_debug_flag(void* tcs_address, uint8_t debug_flag)
{
    uint64_t debug_flag2 = (uint64_t)debug_flag;
    debug_enclave_info_t *debug_info = NULL;

    debug_info = const_cast<debug_enclave_info_t *>(get_debug_info());
    pid_t pid = getpid();

    if(debug_info->enclave_type == ET_DEBUG)
    {
        if(!se_write_process_mem(pid, reinterpret_cast<unsigned char *>(tcs_address) + sizeof(uint64_t), &debug_flag2, sizeof(uint64_t), NULL))
            return FALSE;
    }

    return TRUE;
}

bool CEnclave::update_debug_flag(uint8_t debug_flag)
{
    debug_tcs_info_t* tcs_list_entry = m_enclave_info.tcs_list;

    while(tcs_list_entry)
    {
        if(!update_trust_thread_debug_flag(tcs_list_entry->TCS_address, debug_flag))
            return FALSE;

        tcs_list_entry = tcs_list_entry->next_tcs_info;
    }

    return TRUE;
}