aesm_long_lived_thread.cpp

/*
 * Copyright (C) 2011-2017 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include "aesm_long_lived_thread.h"
#include "pve_logic.h"
#include "pse_op_logic.h"
#include "platform_info_logic.h"
#include "oal/internal_log.h"
#include "se_time.h"
#include "se_wrapper.h"
#include <time.h>
#include <assert.h>
#include <list>
#include "LEClass.h"

enum _thread_state
{
    ths_idle,
    ths_busy,
    ths_stop    //the thread is to be stopped and no new job will be accepted
};

enum _io_cache_state
{
    ioc_idle,   //the worker thread has finished
    ioc_busy,   //the worker thread has not finished yet
    ioc_stop    //a thread stop has been requested
};

#define MAX_OUTPUT_CACHE 50
#define THREAD_INFINITE_TICK_COUNT 0xFFFFFFFFFFFFFFFFLL
class ThreadStatus;
class BaseThreadIOCache;
typedef ae_error_t (*long_lived_thread_func_t)(BaseThreadIOCache *cache);

//Base class for the cached data of each thread to fork
class BaseThreadIOCache: private Uncopyable{
    time_t timeout;         //the data times out after this time if the state is not busy
    int ref_count;          //tracks how many threads are currently referencing this data
    _io_cache_state status;
    //Handle of the thread. Some threads are waited on by other threads, so the handle cannot
    //be freed until all waiting threads have been notified that the thread has terminated.
    aesm_thread_t thread_handle;
    friend class ThreadStatus;
protected:
    ae_error_t ae_ret;
    BaseThreadIOCache():ref_count(0),status(ioc_busy){
        timeout=0;
        thread_handle=NULL;
        ae_ret = AE_FAILURE;
    }
    virtual ThreadStatus& get_thread()=0;
public:
    virtual ae_error_t entry(void)=0;
    virtual bool operator==(const BaseThreadIOCache& oc)const=0;
    ae_error_t start(BaseThreadIOCache *&out_ioc, uint32_t timeout=THREAD_TIMEOUT);
    void deref(void);
    void set_status_finish();
public:
    virtual ~BaseThreadIOCache(){}
};
class ThreadStatus: private Uncopyable
{
private:
    AESMLogicMutex thread_mutex;
    _thread_state thread_state;
    uint64_t status_clock;
    BaseThreadIOCache *cur_iocache;
    std::list<BaseThreadIOCache *> output_cache;
protected:
    friend class BaseThreadIOCache;
    //Function to look up cached output; no real thread is associated with the input ioc yet.
    //If a match is found, the input parameter is freed automatically and the matched value is returned.
    //Returns true if a thread is to be forked for out_ioc.
    bool find_or_insert_iocache(BaseThreadIOCache* ioc, BaseThreadIOCache *&out_ioc)
    {
        AESMLogicLock locker(thread_mutex);
        std::list<BaseThreadIOCache *>::reverse_iterator it;
        out_ioc=NULL;
        if(thread_state == ths_stop){
            AESM_DBG_TRACE("thread %p has been stopped and ioc %p not inserted", this,ioc);
            delete ioc;
            return false;//never visit any item after the thread is stopped
        }
        time_t cur=time(NULL);
        AESM_DBG_TRACE("cache size %d",(int)output_cache.size());
        BaseThreadIOCache *remove_candidate = NULL;
        for(it=output_cache.rbegin();it!=output_cache.rend();++it){//visit the cache in reverse order so that the newest item is visited first
            BaseThreadIOCache *pioc=*it;
            if((pioc->status==ioc_idle)&&(pioc->timeout<cur)){
                if(pioc->ref_count==0&&remove_candidate==NULL){
                    remove_candidate = pioc;
                }
                continue;//value timed out
            }
            if(*pioc==*ioc){//matching value found
                pioc->ref_count++;//reference it
                AESM_DBG_TRACE("IOC %p matching input IOC %p (ref_count:%d,status:%d,timeout:%d) in thread %p",pioc, ioc,(int)pioc->ref_count,(int)pioc->status, (int)pioc->timeout, this);
                out_ioc= pioc;
                delete ioc;
                return false;
            }
        }
        if(thread_state == ths_busy){//insertion is not permitted while the thread is busy
            AESM_DBG_TRACE("thread busy when trying to insert input ioc %p",ioc);
            delete ioc;
            return false;
        }
        if(remove_candidate!=NULL){
            output_cache.remove(remove_candidate);
            delete remove_candidate;
        }
        if(output_cache.size()>=MAX_OUTPUT_CACHE){
            std::list<BaseThreadIOCache *>::iterator fit;
            bool erased=false;
            for(fit = output_cache.begin(); fit!=output_cache.end();++fit){
                BaseThreadIOCache *pioc=*fit;
                if(pioc->ref_count==0){//found an unreferenced item to remove
                    assert(pioc->status==ioc_idle);
                    AESM_DBG_TRACE("erase idle ioc %p", pioc);
                    output_cache.erase(fit);
                    erased = true;
                    AESM_DBG_TRACE("thread %p cache size %d",this, (int)output_cache.size());
                    delete pioc;
                    break;
                }
            }
            if(!erased){//no item could be removed
                AESM_DBG_TRACE("no free ioc found and cannot insert ioc %p",ioc);
                delete ioc;
                return false;//treated the same as busy status
            }
        }
        output_cache.push_back(ioc);
        out_ioc = cur_iocache = ioc;
        cur_iocache->ref_count=2;//initialized to 2: referenced by the parent thread and by the worker thread itself
        thread_state = ths_busy;//mark the thread busy; the thread is about to be started
        AESM_DBG_TRACE("successfully add ioc %p (status=%d,timeout=%d) into thread %p",out_ioc, (int)out_ioc->status, (int)out_ioc->timeout, this);
        return true;
    }
public:
    ThreadStatus():output_cache()
    {
        thread_state = ths_idle;
        status_clock = 0;
        cur_iocache = NULL;
    }
    void set_status_finish(BaseThreadIOCache* ioc);//only called at the end of aesm_long_lived_thread_entry
    void deref(BaseThreadIOCache* iocache);
    ae_error_t wait_iocache_timeout(BaseThreadIOCache* ioc, uint64_t stop_tick_count);

    //Create a thread and wait at most 'timeout' for the thread to finish.
    //It first looks up whether there is a previous run with the same input before starting the thread.
    //The caller must not delete ioc after calling this function.
    ae_error_t set_thread_start(BaseThreadIOCache* ioc, BaseThreadIOCache *&out_ioc, uint32_t timeout=THREAD_TIMEOUT);

    void stop_thread(uint64_t stop_tick_count);//wait for the thread to terminate and for all thread_handles in the list to be closed

    //ThreadStatus instances should be global objects. Otherwise the object could be destroyed
    //before a thread waiting for an IOCache is notified, causing an exception.
    ~ThreadStatus(){stop_thread(THREAD_INFINITE_TICK_COUNT);}

    ae_error_t wait_for_cur_thread(uint64_t millisecond);

    //Query whether the current thread is idle;
    //if it is idle, return true and reset status_clock to the current tick count.
    bool query_status_and_reset_clock(void);
};
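
//Lifecycle note, summarizing the code below: a newly inserted BaseThreadIOCache starts with
//ref_count == 2 in find_or_insert_iocache(), one reference for the calling thread and one for
//the worker thread started in set_thread_start() (a cache hit instead just adds one reference
//for the new caller). The worker drops its reference in set_status_finish() when entry()
//returns; the caller drops its reference through deref(), either directly, via
//wait_iocache_timeout(), or via the FINI_THREAD() macro near the end of this file. When
//ref_count reaches 0, the aesm_thread_t handle is freed, and the cache entry itself is deleted
//once it has been stopped or its timeout has expired.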
ae_error_t BaseThreadIOCache::start(BaseThreadIOCache *&out_ioc, uint32_t timeout_value)
{
    return get_thread().set_thread_start(this, out_ioc, timeout_value);
}

void BaseThreadIOCache::deref(void)
{
    get_thread().deref(this);
}

void BaseThreadIOCache::set_status_finish(void)
{
    get_thread().set_status_finish(this);
}

//This is the thread entry wrapper for all long lived threads
static ae_error_t aesm_long_lived_thread_entry(aesm_thread_arg_type_t arg)
{
    BaseThreadIOCache *cache=(BaseThreadIOCache *)arg;
    ae_error_t ae_err = cache->entry();
    cache->set_status_finish();
    return ae_err;
}
void ThreadStatus::stop_thread(uint64_t stop_tick_count)
{
    //change state to stop
    thread_mutex.lock();
    thread_state = ths_stop;
    do{
        std::list<BaseThreadIOCache *>::iterator it;
        for(it=output_cache.begin(); it!=output_cache.end();++it){
            BaseThreadIOCache *p=*it;
            if(p->status != ioc_stop){//it has not been processed yet
                p->status = ioc_stop;
                break;
            }
        }
        if(it!=output_cache.end()){//found an item to stop
            BaseThreadIOCache *p=*it;
            p->ref_count++;
            thread_mutex.unlock();
            wait_iocache_timeout(p, stop_tick_count);
            thread_mutex.lock();
        }else{
            break;
        }
    }while(1);
    thread_mutex.unlock();
    //This function should only be called at AESM exit.
    //Leaking memory here is acceptable; the remaining BaseThreadIOCache pointers are never released.
}
ae_error_t ThreadStatus::wait_for_cur_thread(uint64_t millisecond)
{
    BaseThreadIOCache *ioc=NULL;
    uint64_t stop_tick_count;
    if(millisecond == AESM_THREAD_INFINITE){
        stop_tick_count = THREAD_INFINITE_TICK_COUNT;
    }else{
        stop_tick_count = se_get_tick_count() + (millisecond*se_get_tick_count_freq()+500)/1000;
    }
    thread_mutex.lock();
    if(cur_iocache!=NULL){
        ioc = cur_iocache;
        ioc->ref_count++;
    }
    thread_mutex.unlock();
    if(ioc!=NULL){
        return wait_iocache_timeout(ioc, stop_tick_count);
    }
    return AE_SUCCESS;
}
ae_error_t ThreadStatus::wait_iocache_timeout(BaseThreadIOCache* ioc, uint64_t stop_tick_count)
{
    ae_error_t ae_ret=AE_SUCCESS;
    uint64_t cur_tick_count = se_get_tick_count();
    uint64_t freq = se_get_tick_count_freq();
    bool need_wait=false;
    aesm_thread_t handle=NULL;
    thread_mutex.lock();
    if(ioc->thread_handle!=NULL&&(cur_tick_count<stop_tick_count||stop_tick_count==THREAD_INFINITE_TICK_COUNT)){
        AESM_DBG_TRACE("wait for busy ioc %p(refcount=%d)",ioc,ioc->ref_count);
        need_wait = true;
        handle = ioc->thread_handle;
    }
    thread_mutex.unlock();
    if(need_wait){
        unsigned long diff_time;
        if(stop_tick_count == THREAD_INFINITE_TICK_COUNT){
            diff_time = AESM_THREAD_INFINITE;
        }else{
            double wtime=(double)(stop_tick_count-cur_tick_count)*1000.0/(double)freq;
            diff_time = (unsigned long)(wtime+0.5);
        }
        ae_ret= aesm_wait_thread(handle, &ae_ret, diff_time);
    }
    deref(ioc);
    return ae_ret;
}
void ThreadStatus::deref(BaseThreadIOCache *ioc)
{
    aesm_thread_t handle = NULL;
    time_t cur=time(NULL);
    {
        AESMLogicLock locker(thread_mutex);
        AESM_DBG_TRACE("deref ioc %p (ref_count=%d,status=%d,timeout=%d) of thread %p",ioc,(int)ioc->ref_count,(int)ioc->status,(int)ioc->timeout, this);
        --ioc->ref_count;
        if(ioc->ref_count == 0){//try to free the thread handle now
            handle = ioc->thread_handle;
            ioc->thread_handle = NULL;
            if(ioc->status == ioc_busy){
                ioc->status = ioc_idle;
            }
            AESM_DBG_TRACE("free thread handle for ioc %p",ioc);
        }
        if(ioc->ref_count==0 &&(ioc->status==ioc_stop||ioc->timeout<cur)){
            AESM_DBG_TRACE("free ioc %p",ioc);
            output_cache.remove(ioc);
            AESM_DBG_TRACE("thread %p cache's size is %d",this, (int)output_cache.size());
            delete ioc;
        }
    }
    if(handle!=NULL){
        aesm_free_thread(handle);
    }
}
ae_error_t ThreadStatus::set_thread_start(BaseThreadIOCache* ioc, BaseThreadIOCache *&out_ioc, uint32_t timeout)
{
    ae_error_t ae_ret = AE_SUCCESS;
    ae_error_t ret = AE_FAILURE;
    out_ioc=NULL;
    bool fork_required = find_or_insert_iocache(ioc, out_ioc);
    if(fork_required){
        ae_ret = aesm_create_thread(aesm_long_lived_thread_entry, (aesm_thread_arg_type_t)out_ioc, &out_ioc->thread_handle);
        if (ae_ret != AE_SUCCESS)
        {
            AESM_DBG_TRACE("fail to create thread for ioc %p",out_ioc);
            AESMLogicLock locker(thread_mutex);
            thread_state = ths_idle;
            out_ioc->status = ioc_idle;//set to finished status
            cur_iocache = NULL;
            deref(out_ioc);
            return ae_ret;
        }else{
            AESM_DBG_TRACE("succ create thread %p for ioc %p",this, out_ioc);
        }
    }
    if(out_ioc == NULL){
        AESM_DBG_TRACE("no ioc created for input ioc %p in thread %p",ioc, this);
        return OAL_THREAD_TIMEOUT_ERROR;
    }
    {//check whether the thread has already finished
        AESMLogicLock locker(thread_mutex);
        if(out_ioc->status!=ioc_busy){//job is done
            AESM_DBG_TRACE("job done for ioc %p (status=%d,timeout=%d,ref_count=%d) in thread %p",out_ioc, (int)out_ioc->status,(int)out_ioc->timeout,(int)out_ioc->ref_count,this);
            return AE_SUCCESS;
        }
    }
    if(timeout >= AESM_THREAD_INFINITE){
        ae_ret = aesm_join_thread(out_ioc->thread_handle, &ret);
    }else{
        uint64_t now = se_get_tick_count();
        double timediff = static_cast<double>(timeout) - (static_cast<double>(now - status_clock))/static_cast<double>(se_get_tick_count_freq())*1000;
        if (timediff <= 0.0) {
            AESM_DBG_ERROR("long flow thread timeout");
            return OAL_THREAD_TIMEOUT_ERROR;
        }
        else{
            AESM_DBG_TRACE("timeout:%u,timediff: %f", timeout,timediff);
            ae_ret = aesm_wait_thread(out_ioc->thread_handle, &ret, (unsigned long)timediff);
        }
    }
    AESM_DBG_TRACE("wait for ioc %p (status=%d,timeout=%d,ref_count=%d) result:%d",out_ioc,(int)out_ioc->status,(int)out_ioc->timeout,(int)out_ioc->ref_count, ae_ret);
    return ae_ret;
}
#define TIMEOUT_SHORT_TIME  60
#define TIMEOUT_FOR_A_WHILE (5*60)
#define TIMEOUT_LONG_TIME   (3600*24) //at most once a day

static time_t get_timeout_via_ae_error(ae_error_t ae)
{
    time_t cur=time(NULL);
    switch(ae){
    case AE_SUCCESS:
    case OAL_PROXY_SETTING_ASSIST:
    case OAL_NETWORK_RESEND_REQUIRED:
        return cur-1;//always timed out, so this cached result is never reused
    case PVE_INTEGRITY_CHECK_ERROR:
    case PSE_OP_ERROR_EPH_SESSION_ESTABLISHMENT_INTEGRITY_ERROR:
    case AESM_PSDA_LT_SESSION_INTEGRITY_ERROR:
    case OAL_NETWORK_UNAVAILABLE_ERROR:
    case OAL_NETWORK_BUSY:
    case PVE_SERVER_BUSY_ERROR:
        return cur+TIMEOUT_SHORT_TIME; //retry after a short time
    case QE_REVOKED_ERROR:
    case PVE_REVOKED_ERROR:
    case PVE_MSG_ERROR:
    case PVE_PERFORMANCE_REKEY_NOT_SUPPORTED:
    case AESM_PSDA_PLATFORM_KEYS_REVOKED:
    case AESM_PSDA_PROTOCOL_NOT_SUPPORTED:
    case PSW_UPDATE_REQUIRED:
        return cur+TIMEOUT_LONG_TIME;
    default:
        return cur+TIMEOUT_SHORT_TIME;//retry quickly for unknown errors
    }
}
void ThreadStatus::set_status_finish(BaseThreadIOCache* ioc)
{
    aesm_thread_t handle = NULL;
    {
        AESMLogicLock locker(thread_mutex);
        assert(thread_state==ths_busy||thread_state==ths_stop);
        assert(ioc->status == ioc_busy);
        AESM_DBG_TRACE("set finish status for ioc %p(status=%d,timeout=%d,ref_count=%d) of thread %p",ioc, (int)ioc->status,(int)ioc->timeout,(int)ioc->ref_count,this);
        if(thread_state==ths_busy){
            AESM_DBG_TRACE("set thread %p to idle", this);
            thread_state=ths_idle;
            cur_iocache = NULL;
        }
        ioc->status=ioc_idle;
        ioc->ref_count--;
        ioc->timeout = get_timeout_via_ae_error(ioc->ae_ret);
        if(ioc->ref_count==0){//try to free the thread handle
            handle = ioc->thread_handle;
            ioc->thread_handle = NULL;
            AESM_DBG_TRACE("thread handle release for ioc %p and status to idle of thread %p",ioc, this);
        }
    }
    if(handle!=NULL){
        aesm_free_thread(handle);
    }
}

bool ThreadStatus::query_status_and_reset_clock(void)
{
    AESMLogicLock locker(thread_mutex);
    if(thread_state == ths_busy || thread_state == ths_stop)
        return false;
    status_clock = se_get_tick_count();
    return true;
}
//The code above implements the thread logic of the AESM service.
//The code below defines the IOCache of each thread.
static ThreadStatus epid_thread;
static ThreadStatus long_term_paring_thread;
static ThreadStatus white_list_thread;

class EpidProvIOCache:public BaseThreadIOCache{
    bool performance_rekey;//input
protected:
    EpidProvIOCache(bool perf_rekey){
        this->performance_rekey = perf_rekey;
    }
    virtual ae_error_t entry(void);
    virtual ThreadStatus& get_thread();
    friend ae_error_t start_epid_provision_thread(bool performance_rekey, unsigned long timeout);
public:
    virtual bool operator==(const BaseThreadIOCache& oc)const{
        const EpidProvIOCache *p=dynamic_cast<const EpidProvIOCache *>(&oc);
        if(p==NULL)return false;
        return performance_rekey==p->performance_rekey;//only compare input
    }
};

class WhiteListIOCache :public BaseThreadIOCache{
    //no input to be cached for white list pulling
protected:
    WhiteListIOCache(void){
    }
    virtual ae_error_t entry(void);
    virtual ThreadStatus& get_thread();
    friend ae_error_t start_white_list_thread(unsigned long timeout);
public:
    virtual bool operator==(const BaseThreadIOCache& oc)const{
        const WhiteListIOCache *p = dynamic_cast<const WhiteListIOCache*>(&oc);
        if (p == NULL) return false;
        return true;
    }
};
class CheckLtpIOCache:public BaseThreadIOCache{
    bool is_new_pairing;//extra output
protected:
    CheckLtpIOCache(){
        is_new_pairing=false;
    }
    virtual ae_error_t entry();
    virtual ThreadStatus& get_thread();
    friend ae_error_t start_check_ltp_thread(bool& is_new_pairing, unsigned long timeout);
public:
    virtual bool operator==(const BaseThreadIOCache& oc)const{
        const CheckLtpIOCache *p=dynamic_cast<const CheckLtpIOCache *>(&oc);
        if(p==NULL)return false;
        return true;//no input, always equal
    }
};

class UpdatePseIOCache:public BaseThreadIOCache{
    platform_info_blob_wrapper_t pib;//input
    uint32_t attestation_status;//input
protected:
    UpdatePseIOCache(const platform_info_blob_wrapper_t& pib_info, uint32_t attst_status){
        (void)memcpy_s(&this->pib, sizeof(this->pib), &pib_info, sizeof(pib_info));
        attestation_status=attst_status;
    }
    virtual ae_error_t entry();
    virtual ThreadStatus& get_thread();
    friend ae_error_t start_update_pse_thread(const platform_info_blob_wrapper_t* update_blob, uint32_t attestation_status, unsigned long timeout);
public:
    virtual bool operator==(const BaseThreadIOCache& oc)const{
        const UpdatePseIOCache *p=dynamic_cast<const UpdatePseIOCache *>(&oc);
        if(p==NULL)return false;
        return attestation_status==p->attestation_status&&memcmp(&pib, &p->pib, sizeof(pib))==0;
    }
};

class CertProvLtpIOCache:public BaseThreadIOCache{
    bool is_new_pairing;//extra output
protected:
    CertProvLtpIOCache(){
        is_new_pairing = false;
    }
    virtual ae_error_t entry();
    virtual ThreadStatus& get_thread();
    friend ae_error_t start_long_term_pairing_thread(bool& is_new_pairing, unsigned long timeout);
public:
    virtual bool operator==(const BaseThreadIOCache& oc)const{
        const CertProvLtpIOCache *p=dynamic_cast<const CertProvLtpIOCache *>(&oc);
        if(p==NULL)return false;
        return true;
    }
};
ThreadStatus& EpidProvIOCache::get_thread()
{
    return epid_thread;
}
ThreadStatus& CheckLtpIOCache::get_thread()
{
    return long_term_paring_thread;
}
ThreadStatus& UpdatePseIOCache::get_thread()
{
    return long_term_paring_thread;
}
ThreadStatus& CertProvLtpIOCache::get_thread()
{
    return long_term_paring_thread;
}
ThreadStatus& WhiteListIOCache::get_thread()
{
    return white_list_thread;
}

ae_error_t EpidProvIOCache::entry()
{
    return ae_ret = PvEAESMLogic::epid_provision_thread_func(performance_rekey);
}
ae_error_t CheckLtpIOCache::entry()
{
    return ae_ret = PlatformInfoLogic::check_ltp_thread_func(is_new_pairing);
}
ae_error_t UpdatePseIOCache::entry()
{
    return ae_ret = PlatformInfoLogic::update_pse_thread_func(&pib, attestation_status);
}
ae_error_t CertProvLtpIOCache::entry()
{
    return ae_ret = PSEOPAESMLogic::certificate_provisioning_and_long_term_pairing_func(is_new_pairing);
}
ae_error_t WhiteListIOCache::entry()
{
    return ae_ret = CLEClass::update_white_list_by_url();
}
//start implementation of external functions
#define INIT_THREAD(cache_type, timeout, init_list) \
    BaseThreadIOCache *ioc = new cache_type init_list; \
    BaseThreadIOCache *out_ioc = NULL; \
    ae_error_t ae_ret = AE_FAILURE; \
    ae_ret = ioc->start(out_ioc, (uint32_t)(timeout)); \
    if(ae_ret != AE_SUCCESS){ \
        if(out_ioc!=NULL){out_ioc->deref();}\
        return ae_ret; \
    }\
    assert(out_ioc!=NULL);\
    cache_type *pioc = dynamic_cast<cache_type *>(out_ioc);\
    assert(pioc!=NULL);
//now the thread has finished its execution and we can read the result without holding the lock
#define COPY_OUTPUT(x) x=pioc->x
#define FINI_THREAD() \
    ae_ret = pioc->ae_ret;\
    pioc->deref();/*dereference the cache object after use*/ \
    return ae_ret;
//Usage model:
//  INIT_THREAD(cache_type, timeout, init_list)
//  COPY_OUTPUT(x);   //copy each output parameter (other than the return value) from the pioc object, e.g. is_new_pairing
//  FINI_THREAD()
//A hand-expanded example is given in the comment below.
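
//For reference, one wrapper is hand-expanded here as a comment (not compiled):
//start_check_ltp_thread() below is roughly equivalent to
//
//  ae_error_t start_check_ltp_thread(bool& is_new_pairing, unsigned long timeout)
//  {
//      BaseThreadIOCache *ioc = new CheckLtpIOCache();               //INIT_THREAD
//      BaseThreadIOCache *out_ioc = NULL;
//      ae_error_t ae_ret = ioc->start(out_ioc, (uint32_t)(timeout)); //may reuse a cached run
//      if(ae_ret != AE_SUCCESS){
//          if(out_ioc!=NULL){out_ioc->deref();}
//          return ae_ret;
//      }
//      assert(out_ioc!=NULL);
//      CheckLtpIOCache *pioc = dynamic_cast<CheckLtpIOCache *>(out_ioc);
//      assert(pioc!=NULL);
//      is_new_pairing = pioc->is_new_pairing;                        //COPY_OUTPUT(is_new_pairing)
//      ae_ret = pioc->ae_ret;                                        //FINI_THREAD
//      pioc->deref();
//      return ae_ret;
//  }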
ae_error_t start_epid_provision_thread(bool performance_rekey, unsigned long timeout)
{
    INIT_THREAD(EpidProvIOCache, timeout, (performance_rekey))
    FINI_THREAD()
}

ae_error_t start_white_list_thread(unsigned long timeout)
{
    INIT_THREAD(WhiteListIOCache, timeout, ())
    FINI_THREAD()
}

ae_error_t start_check_ltp_thread(bool& is_new_pairing, unsigned long timeout)
{
    INIT_THREAD(CheckLtpIOCache, timeout, ())
    COPY_OUTPUT(is_new_pairing);
    FINI_THREAD()
}

ae_error_t start_update_pse_thread(const platform_info_blob_wrapper_t* update_blob, uint32_t attestation_status, unsigned long timeout)
{
    INIT_THREAD(UpdatePseIOCache, timeout, (*update_blob, attestation_status))
    FINI_THREAD()
}

ae_error_t start_long_term_pairing_thread(bool& is_new_pairing, unsigned long timeout)
{
    INIT_THREAD(CertProvLtpIOCache, timeout, ())
    COPY_OUTPUT(is_new_pairing);
    FINI_THREAD()
}

bool query_pve_thread_status(void)
{
    return epid_thread.query_status_and_reset_clock();
}

bool query_pse_thread_status(void)
{
    return long_term_paring_thread.query_status_and_reset_clock();
}

ae_error_t wait_pve_thread(uint64_t time_out_milliseconds)
{
    return epid_thread.wait_for_cur_thread(time_out_milliseconds);
}

void stop_all_long_lived_threads(uint64_t time_out_milliseconds)
{
    uint64_t freq = se_get_tick_count_freq();
    uint64_t stop_tick_count = se_get_tick_count()+(time_out_milliseconds*freq+500)/1000;
    epid_thread.stop_thread(stop_tick_count);
    long_term_paring_thread.stop_thread(stop_tick_count);
    white_list_thread.stop_thread(stop_tick_count);
}
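
//Illustrative calling pattern for the external functions above. The call sites and the
//timeout values here are hypothetical examples, not the actual AESM service code:
//
//  bool is_new_pairing = false;
//  ae_error_t ret = start_check_ltp_thread(is_new_pairing, THREAD_TIMEOUT); //blocks up to the timeout (in ms, per set_thread_start()), may return a cached result
//  ...
//  stop_all_long_lived_threads(500); //at service shutdown: give the threads until roughly 500ms from now to terminate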