/*
 * Copyright (C) 2011-2018 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include "enclave.h"
#include "util.h"
#include "se_detect.h"
#include "enclave_creator.h"
#include "sgx_error.h"
#include "se_error_internal.h"
#include "debugger_support.h"
#include "se_memory.h"
#include "urts_trim.h"
#include "urts_emodpr.h"
#include <assert.h>
#include "rts.h"

using namespace std;
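
// Forward declarations of the low-level ECALL/OCALL entry points. They are
// defined elsewhere in the uRTS (the enclave enter/exit path); this file
// only dispatches to them.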
int do_ecall(const int fn, const void *ocall_table, const void *ms, CTrustThread *trust_thread);
int do_ocall(const bridge_fn_t bridge, void *ms);

CEnclave::CEnclave(CLoader &ldr)
    : m_loader(ldr)
    , m_enclave_id(0)
    , m_start_addr(NULL)
    , m_size(0)
    , m_power_event_flag(0)
    , m_ref(0)
    , m_zombie(false)
    , m_thread_pool(NULL)
    , m_dbg_flag(false)
    , m_destroyed(false)
    , m_version(0)
    , m_ocall_table(NULL)
    , m_pthread_is_valid(false)
    , m_new_thread_event(NULL)
{
    memset(&m_enclave_info, 0, sizeof(debug_enclave_info_t));
    se_init_rwlock(&m_rwlock);
}

sgx_status_t CEnclave::initialize(const se_file_t& file, const sgx_enclave_id_t enclave_id, void * const start_addr, const uint64_t enclave_size,
                                  const uint32_t tcs_policy, const uint32_t enclave_version, const uint32_t tcs_min_pool)
{
    uint32_t name_len = file.name_len;
    if (file.unicode)
        name_len *= (uint32_t)sizeof(wchar_t);

    const int buf_len = name_len + 4; //+4 so the buffer can hold the string terminator ('\0').
    m_enclave_info.lpFileName = calloc(1, buf_len);
    if (m_enclave_info.lpFileName == NULL)
        return SGX_ERROR_OUT_OF_MEMORY;

    memcpy_s(m_enclave_info.lpFileName, name_len, file.name, name_len);
    m_enclave_info.unicode = file.unicode?0:1;
    m_enclave_info.file_name_size = name_len;
    m_enclave_info.struct_version = DEBUG_INFO_STRUCT_VERSION;

    m_enclave_id = enclave_id;
    m_start_addr = start_addr;
    m_size = enclave_size;
    m_version = enclave_version;

    m_new_thread_event = se_event_init();
    if(m_new_thread_event == NULL)
    {
        free(m_enclave_info.lpFileName);
        m_enclave_info.lpFileName = NULL;
        return SGX_ERROR_OUT_OF_MEMORY;
    }

    if(TCS_POLICY_BIND == tcs_policy)
    {
        m_thread_pool = new CThreadPoolBindMode(tcs_min_pool);
    }
    else if(TCS_POLICY_UNBIND == tcs_policy)
    {
        //TCS_POLICY_UNBIND: untrusted threads are not bound to a fixed TCS.
        m_thread_pool = new CThreadPoolUnBindMode(tcs_min_pool);
    }
    else
    {
        SE_TRACE(SE_TRACE_WARNING, "BUG: unknown tcs policy\n");
        //Should never reach here, because the metadata has already been validated.
        free(m_enclave_info.lpFileName);
        m_enclave_info.lpFileName = NULL;
        return SGX_ERROR_INVALID_PARAMETER;
    }

    return SGX_SUCCESS;
}

CEnclave::~CEnclave()
{
    if (m_thread_pool)
    {
        delete m_thread_pool;
        m_thread_pool = NULL;
    }

    m_ocall_table = NULL;
    destory_debug_info(&m_enclave_info);
    se_fini_rwlock(&m_rwlock);
    se_event_destroy(m_new_thread_event);
    m_new_thread_event = NULL;
}

void * CEnclave::get_symbol_address(const char * const symbol)
{
    return m_loader.get_symbol_address(symbol);
}

sgx_enclave_id_t CEnclave::get_enclave_id()
{
    return m_enclave_id;
}

uint32_t CEnclave::get_enclave_version()
{
    return m_version;
}
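
// Count the entries of the loader's TCS list whose flag is set. Judging by
// the function name and how the list is built, these are the dynamically
// added TCS pages (assumption: the bool in each pair marks a dynamic TCS).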
size_t CEnclave::get_dynamic_tcs_list_size()
{
    std::vector<std::pair<tcs_t *, bool>> tcs_list = m_loader.get_tcs_list();
    size_t count = 0;
    for (size_t idx = 0; idx < tcs_list.size(); ++idx)
    {
        if(tcs_list[idx].second == true)
        {
            count++;
        }
    }
    return count;
}
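
// Map an error code returned by the tRTS to a public sgx_status_t.
// A lost read lock is reported as SGX_ERROR_ENCLAVE_LOST, and any code
// outside the EXTERNAL_ERROR module range indicates a uRTS/tRTS bug and is
// collapsed to SGX_ERROR_UNEXPECTED.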
sgx_status_t CEnclave::error_trts2urts(unsigned int trts_error)
{
    if(trts_error == (unsigned int)SE_ERROR_READ_LOCK_FAIL)
    {
        return SGX_ERROR_ENCLAVE_LOST;
    }

    //tRTS may return an external error code directly, so no translation is needed for those.
    if(EXTERNAL_ERROR != (trts_error >> MAIN_MOD_SHIFT))
    {
        SE_TRACE(SE_TRACE_WARNING, "trts returned error %x; this should be a urts/trts bug\n", trts_error);
        return SGX_ERROR_UNEXPECTED;
    }
    return (sgx_status_t)trts_error;
}
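
// Dispatch an ECALL into the enclave.
// The read side of m_rwlock is held across the call so that destroy()
// (which takes the write side) cannot tear the enclave down mid-ECALL.
// For ECMD_UNINIT_ENCLAVE, the TCS-mini-pool thread is joined first and,
// when EDMM is supported, the TCS pages of all other trusted threads are
// trimmed before the uninit ECALL itself is issued.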
sgx_status_t CEnclave::ecall(const int proc, const void *ocall_table, void *ms)
{
    if(se_try_rdlock(&m_rwlock))
    {
        //The enclave may have been destroyed while we were acquiring m_rwlock. See CEnclave::destroy().
        if(m_destroyed)
        {
            se_rdunlock(&m_rwlock);
            return SGX_ERROR_ENCLAVE_LOST;
        }

        //do sgx_ecall
        CTrustThread *trust_thread = get_tcs(proc);
        unsigned ret = SGX_ERROR_OUT_OF_TCS;

        if(NULL != trust_thread)
        {
            if (NULL == m_ocall_table)
            {
                m_ocall_table = (sgx_ocall_table_t *)ocall_table;
            }
            if (proc == ECMD_UNINIT_ENCLAVE)
            {
                if(m_pthread_is_valid == true)
                {
                    m_pthread_is_valid = false;
                    se_event_wake(m_new_thread_event);
                    pthread_join(m_pthread_tid, NULL);
                }
                ocall_table = m_ocall_table;

                std::vector<CTrustThread *> threads = m_thread_pool->get_thread_list();
                for (unsigned idx = 0; idx < threads.size(); ++idx)
                {
                    if (trust_thread->get_tcs() == threads[idx]->get_tcs())
                    {
                        continue;
                    }
                    uint64_t start = (uint64_t)(threads[idx]->get_tcs());
                    uint64_t end = start + (1 << SE_PAGE_SHIFT);
                    if (get_enclave_creator()->is_EDMM_supported(CEnclave::get_enclave_id()))
                    {
                        if (SGX_SUCCESS != (ret = get_enclave_creator()->trim_range(start, end)))
                        {
                            se_rdunlock(&m_rwlock);
                            return (sgx_status_t)ret;
                        }
                    }
                }
            }
            ret = do_ecall(proc, ocall_table, ms, trust_thread);
        }
        put_tcs(trust_thread);

        //Release the read lock; the only exception is when the enclave was already removed inside an OCALL.
        if(AbnormalTermination() || ret != SE_ERROR_READ_LOCK_FAIL)
        {
            se_rdunlock(&m_rwlock);
        }
        return error_trts2urts(ret);
    }
    else
    {
        return SGX_ERROR_ENCLAVE_LOST;
    }
}
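
// Dispatch an OCALL out of the enclave.
// The read lock taken in ecall() is released while the untrusted function
// runs and re-acquired afterwards; if re-acquisition fails, or the enclave
// was destroyed in the meantime, SE_ERROR_READ_LOCK_FAIL is returned so the
// ECALL path knows the lock is no longer held. The EDMM service OCALLs
// (trim, trim-commit, EMODPR) are handled internally rather than through
// the application's ocall table.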
int CEnclave::ocall(const unsigned int proc, const sgx_ocall_table_t *ocall_table, void *ms)
{
    int error = SGX_ERROR_UNEXPECTED;

    if ((int)proc == EDMM_TRIM || (int)proc == EDMM_TRIM_COMMIT || (int)proc == EDMM_MODPR)
    {
        se_rdunlock(&m_rwlock);
        if((int)proc == EDMM_TRIM)
            error = ocall_trim_range(ms);
        else if ((int)proc == EDMM_TRIM_COMMIT)
            error = ocall_trim_accept(ms);
        else if ((int)proc == EDMM_MODPR)
            error = ocall_emodpr(ms);
    }
    else
    {
        //Validate that proc is within ocall_table.
        if(NULL == ocall_table ||
            (proc >= ocall_table->count))
        {
            return SGX_ERROR_INVALID_FUNCTION;
        }
        se_rdunlock(&m_rwlock);
        bridge_fn_t bridge = reinterpret_cast<bridge_fn_t>(ocall_table->ocall[proc]);
        error = do_ocall(bridge, ms);
    }

    if (!se_try_rdlock(&m_rwlock))
    {
        //Probably the enclave has been destroyed, so we cannot get the read lock.
        error = SE_ERROR_READ_LOCK_FAIL;
    }
    //m_destroyed tells us whether the enclave has been destroyed.
    else if(m_destroyed)
    {
        //The enclave has been destroyed; behave as if we failed to get the read lock.
        se_rdunlock(&m_rwlock);
        error = SE_ERROR_READ_LOCK_FAIL;
    }

    return error;
}

const debug_enclave_info_t* CEnclave::get_debug_info()
{
    return &m_enclave_info;
}

CTrustThread * CEnclave::get_tcs(int ecall_cmd)
{
    CTrustThread *trust_thread = m_thread_pool->acquire_thread(ecall_cmd);

    return trust_thread;
}
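
// Entry point of the background thread started by fill_tcs_mini_pool_fn().
// It simply forwards to CEnclave::fill_tcs_mini_pool(), which keeps the
// pool of dynamically created TCSs topped up.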
void *fill_tcs_mini_pool_func(void *args)
{
    CEnclave *it = (CEnclave*)(args);
    if(it != NULL)
    {
        it->fill_tcs_mini_pool();
    }
    return NULL;
}

sgx_status_t CEnclave::fill_tcs_mini_pool_fn()
{
    pthread_t tid;
    if(m_pthread_is_valid == false)
    {
        m_pthread_is_valid = true;
        int ret = pthread_create(&tid, NULL, fill_tcs_mini_pool_func, (void *)(this));
        if(ret != 0)
        {
            m_pthread_is_valid = false;
            return SGX_ERROR_UNEXPECTED;
        }
        m_pthread_tid = tid;
    }
    else if(m_pthread_is_valid == true)
    {
        if(se_event_wake(m_new_thread_event) != SE_MUTEX_SUCCESS)
        {
            return SGX_ERROR_UNEXPECTED;
        }
    }
    return SGX_SUCCESS;
}

sgx_status_t CEnclave::fill_tcs_mini_pool()
{
    do
    {
        if(se_try_rdlock(&m_rwlock))
        {
            //The enclave may have been destroyed while we were acquiring m_rwlock. See CEnclave::destroy().
            if(m_destroyed)
            {
                se_rdunlock(&m_rwlock);
                return SGX_ERROR_ENCLAVE_LOST;
            }
            if(m_pthread_is_valid == false)
            {
                se_rdunlock(&m_rwlock);
                return SGX_SUCCESS;
            }
            m_thread_pool->fill_tcs_mini_pool();
            se_rdunlock(&m_rwlock);
        }
        else
        {
            return SGX_ERROR_ENCLAVE_LOST;
        }
    } while(se_event_wait(m_new_thread_event) == SE_MUTEX_SUCCESS);
    return SGX_ERROR_UNEXPECTED;
}

void CEnclave::put_tcs(CTrustThread *trust_thread)
{
    if(NULL == trust_thread)
    {
        return;
    }
    m_thread_pool->release_thread(trust_thread);
}
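
// Destroy the hardware enclave. Taking the write side of m_rwlock
// guarantees no ECALL is in flight, and m_destroyed makes any later reader
// back out instead of entering the now-destroyed enclave.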
void CEnclave::destroy()
{
    se_wtlock(&m_rwlock);

    get_enclave_creator()->destroy_enclave(ENCLAVE_ID_IOCTL, m_size);

    m_destroyed = true;
    //We are about to destroy m_rwlock. At this point an ECALL may be in progress and trying to take m_rwlock.
    //To stop such ECALLs, m_destroyed marks that no further ECALL should proceed. See CEnclave::ecall(...).
    //A new ECALL into this enclave will return SGX_ERROR_INVALID_ENCLAVE_ID immediately.
    se_wtunlock(&m_rwlock);
    //We should not use the loader to destroy the enclave, because the loader has been released after successful enclave loading.
    //m_loader.destroy_enclave();
}

void CEnclave::add_thread(tcs_t * const tcs, bool is_unallocated)
{
    CTrustThread *trust_thread = m_thread_pool->add_thread(tcs, this, is_unallocated);
    if(!is_unallocated)
    {
        insert_debug_tcs_info_head(&m_enclave_info, trust_thread->get_debug_info());
    }
}

void CEnclave::add_thread(CTrustThread * const trust_thread)
{
    insert_debug_tcs_info_head(&m_enclave_info, trust_thread->get_debug_info());
}

int CEnclave::set_extra_debug_info(secs_t& secs)
{
    void *g_peak_heap_used_addr = get_symbol_address("g_peak_heap_used");
    m_enclave_info.g_peak_heap_used_addr = g_peak_heap_used_addr;
    m_enclave_info.start_addr = secs.base;
    m_enclave_info.misc_select = secs.misc_select;

    if(g_peak_heap_used_addr == NULL)
    {
        SE_TRACE(SE_TRACE_DEBUG, "Symbol 'g_peak_heap_used' is not found\n");
        //This error should not break the loader or the debugger, so the caller will ignore it.
        return SGX_ERROR_INVALID_ENCLAVE;
    }
    return SGX_SUCCESS;
}

void CEnclave::push_ocall_frame(ocall_frame_t* frame_point, CTrustThread *trust_thread)
{
    if(NULL == trust_thread)
    {
        return;
    }
    trust_thread->push_ocall_frame(frame_point);
}

void CEnclave::pop_ocall_frame(CTrustThread *trust_thread)
{
    if(NULL == trust_thread)
    {
        return;
    }
    trust_thread->pop_ocall_frame();
}
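
// CEnclavePool is a process-wide singleton that tracks every loaded enclave
// in a linked list keyed by enclave id and protected by a single mutex. It
// also implements the reference counting that decides when a CEnclave
// instance may finally be deleted.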
CEnclavePool CEnclavePool::m_instance;

CEnclavePool::CEnclavePool()
{
    m_enclave_list = NULL;
    se_mutex_init(&m_enclave_mutex);
    SE_TRACE(SE_TRACE_NOTICE, "enter CEnclavePool constructor\n");
}

CEnclavePool *CEnclavePool::instance()
{
    return &m_instance;
}

int CEnclavePool::add_enclave(CEnclave *enclave)
{
    int result = TRUE;

    se_mutex_lock(&m_enclave_mutex);

    if (m_enclave_list == NULL) {
        m_enclave_list = new Node<sgx_enclave_id_t, CEnclave*>(enclave->get_enclave_id(), enclave);
    } else {
        Node<sgx_enclave_id_t, CEnclave*>* node = new Node<sgx_enclave_id_t, CEnclave*>(enclave->get_enclave_id(), enclave);
        if (m_enclave_list->InsertNext(node) == false) {
            delete node;
            SE_TRACE(SE_TRACE_WARNING, "the enclave %llx has already been added\n", enclave->get_enclave_id());
            result = FALSE;
        }
    }
    se_mutex_unlock(&m_enclave_mutex);
    return result;
}

CEnclave * CEnclavePool::get_enclave(const sgx_enclave_id_t enclave_id)
{
    se_mutex_lock(&m_enclave_mutex);
    //Defensive check: the pool may still be empty.
    if(m_enclave_list == NULL)
    {
        se_mutex_unlock(&m_enclave_mutex);
        return NULL;
    }
    Node<sgx_enclave_id_t, CEnclave*>* it = m_enclave_list->Find(enclave_id);
    if(it != NULL)
    {
        se_mutex_unlock(&m_enclave_mutex);
        return it->value;
    }
    else
    {
        se_mutex_unlock(&m_enclave_mutex);
        return NULL;
    }
}

CEnclave * CEnclavePool::ref_enclave(const sgx_enclave_id_t enclave_id)
{
    se_mutex_lock(&m_enclave_mutex);
    //Defensive check: the pool may still be empty.
    if(m_enclave_list == NULL)
    {
        se_mutex_unlock(&m_enclave_mutex);
        return NULL;
    }
    Node<sgx_enclave_id_t, CEnclave*>* it = m_enclave_list->Find(enclave_id);
    if(it != NULL)
    {
        it->value->atomic_inc_ref();
        se_mutex_unlock(&m_enclave_mutex);
        return it->value;
    }
    else
    {
        se_mutex_unlock(&m_enclave_mutex);
        return NULL;
    }
}
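
// Drop a reference taken by ref_enclave(). If the enclave is already marked
// as a zombie (the hardware enclave is gone) and this was the last
// reference, the CEnclave instance is deleted here; otherwise deletion
// happens in CEnclavePool::remove_enclave().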
void CEnclavePool::unref_enclave(CEnclave *enclave)
{
    //The enclave pool lock protects this data. It is coarse-grained, but safer.
    se_mutex_lock(&m_enclave_mutex);

    //The ref was increased in ref_enclave.
    uint32_t ref = enclave->atomic_dec_ref();

    //If the enclave is in the zombie state, the HW enclave has already been destroyed,
    //and once the ref count drops to zero the instance will never be referenced again,
    //so we delete it here.
    //The other code path that deletes the enclave instance is CEnclavePool::remove_enclave().
    if(enclave->is_zombie() && !ref)
        delete enclave;

    se_mutex_unlock(&m_enclave_mutex);
}

se_handle_t CEnclavePool::get_event(const void * const tcs)
{
    se_handle_t hevent = NULL;
    CEnclave *enclave = NULL;

    assert(tcs != NULL);
    se_mutex_lock(&m_enclave_mutex);

    Node<sgx_enclave_id_t, CEnclave*>* it = m_enclave_list;
    for(; it != NULL; it = it->next)
    {
        void *start = it->value->get_start_address();
        void *end = GET_PTR(void, start, it->value->get_size());

        /* find the enclave whose address range contains this TCS */
        if (tcs >= start && tcs < end) {
            enclave = it->value;
            break;
        }
    }

    if (NULL != enclave)
    {
        CTrustThreadPool *pool = enclave->get_thread_pool();
        if (pool != NULL)
        {
            CTrustThread *thread = pool->get_bound_thread((const tcs_t *)tcs);
            if (thread != NULL)
                hevent = thread->get_event();
        }
    }

    se_mutex_unlock(&m_enclave_mutex);
    return hevent;
}
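
// Destroy the hardware enclave and remove it from the pool. If other
// threads still hold references (for example, they are blocked in an
// OCALL), the instance is only marked as a zombie and its waiting threads
// are woken; the final unref_enclave() call deletes it later.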
CEnclave* CEnclavePool::remove_enclave(const sgx_enclave_id_t enclave_id, sgx_status_t &status)
{
    status = SGX_SUCCESS;

    se_mutex_lock(&m_enclave_mutex);
    CEnclave *enclave = get_enclave(enclave_id);
    if(NULL == enclave)
    {
        status = SGX_ERROR_INVALID_ENCLAVE_ID;
        SE_TRACE(SE_TRACE_WARNING, "remove an unknown enclave\n");
        se_mutex_unlock(&m_enclave_mutex);
        return enclave;
    }

    enclave->destroy();
    //If the ref count is not 0, some thread may still be in sgx_ocall, so we can NOT delete the enclave instance yet.
    if(enclave->get_ref())
    {
        enclave->mark_zombie();

        /* When the enclave is destroyed, all threads that are waiting (or about
         * to wait) on an untrusted event need to be woken. Otherwise they would
         * stay pending on those events forever and the application would have
         * to kill the threads manually.
         */
        CTrustThreadPool *pool = enclave->get_thread_pool();
        pool->wake_threads();
        enclave = NULL;
    }

    Node<sgx_enclave_id_t, CEnclave*>* it = m_enclave_list->Remove(enclave_id);
    if (it == m_enclave_list)
        m_enclave_list = it->next;
    delete it;

    se_mutex_unlock(&m_enclave_mutex);
    return enclave;
}

void CEnclavePool::notify_debugger()
{
    se_mutex_lock(&m_enclave_mutex);

    if(m_enclave_list != NULL)
    {
        Node<sgx_enclave_id_t, CEnclave*>* it = m_enclave_list;
        for(; it != NULL; it = it->next)
        {
            //Send a debug event to the debugger, whether the enclave is in debug or release mode.
            debug_enclave_info_t * debug_info = const_cast<debug_enclave_info_t*>((it->value)->get_debug_info());
            generate_enclave_debug_event(URTS_EXCEPTION_PREREMOVEENCLAVE, debug_info);
        }
    }

    se_mutex_unlock(&m_enclave_mutex);
}
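
// For a debug enclave, write the new debug flag into a trusted thread's TCS
// in this process. The write targets offset sizeof(uint64_t) from the TCS
// base, which corresponds to the TCS FLAGS field (DBGOPTIN) in the SGX TCS
// layout.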
bool CEnclave::update_trust_thread_debug_flag(void* tcs_address, uint8_t debug_flag)
{
    uint64_t debug_flag2 = (uint64_t)debug_flag;
    debug_enclave_info_t *debug_info = NULL;
    debug_info = const_cast<debug_enclave_info_t *>(get_debug_info());
    pid_t pid = getpid();

    if(debug_info->enclave_type == ET_DEBUG)
    {
        if(!se_write_process_mem(pid, reinterpret_cast<unsigned char *>(tcs_address) + sizeof(uint64_t), &debug_flag2, sizeof(uint64_t), NULL))
            return FALSE;
    }

    return TRUE;
}

bool CEnclave::update_debug_flag(uint8_t debug_flag)
{
    debug_tcs_info_t* tcs_list_entry = m_enclave_info.tcs_list;

    //Walk the debug TCS list and update the flag on every trusted thread.
    while(tcs_list_entry)
    {
        if(!update_trust_thread_debug_flag(tcs_list_entry->TCS_address, debug_flag))
            return FALSE;

        tcs_list_entry = tcs_list_entry->next_tcs_info;
    }

    return TRUE;
}