file_read_write.cpp 20 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657
  1. /*
  2. * Copyright (C) 2011-2018 Intel Corporation. All rights reserved.
  3. *
  4. * Redistribution and use in source and binary forms, with or without
  5. * modification, are permitted provided that the following conditions
  6. * are met:
  7. *
  8. * * Redistributions of source code must retain the above copyright
  9. * notice, this list of conditions and the following disclaimer.
  10. * * Redistributions in binary form must reproduce the above copyright
  11. * notice, this list of conditions and the following disclaimer in
  12. * the documentation and/or other materials provided with the
  13. * distribution.
  14. * * Neither the name of Intel Corporation nor the names of its
  15. * contributors may be used to endorse or promote products derived
  16. * from this software without specific prior written permission.
  17. *
  18. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  19. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  20. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  21. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  22. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  23. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  24. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  25. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  26. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  27. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  28. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  29. *
  30. */
  31. #include "sgx_tprotected_fs_t.h"
  32. #include "protected_fs_file.h"
  33. #include <sgx_trts.h>
// Write 'count' items of 'size' bytes each from 'ptr' into the file at the current
// offset (or at EOF when opened in append mode). Returns the number of whole items
// actually written; 0 on error, with the cause stored in last_error.
size_t protected_fs_file::write(const void* ptr, size_t size, size_t count)
{
	if (ptr == NULL || size == 0 || count == 0)
		return 0;

	int32_t result32 = sgx_thread_mutex_lock(&mutex);
	if (result32 != 0)
	{
		// a failed lock means the object state can no longer be trusted
		last_error = result32;
		file_status = SGX_FILE_STATUS_MEMORY_CORRUPTED;
		return 0;
	}

	size_t data_left_to_write = size * count;

	// prevent overlap... (size*count must not overflow size_t)
#if defined(_WIN64) || defined(__x86_64__)
	// on 64 bit, limiting each operand to 32 bits guarantees the product fits
	if (size > UINT32_MAX || count > UINT32_MAX)
	{
		last_error = EINVAL;
		sgx_thread_mutex_unlock(&mutex);
		return 0;
	}
#else
	// on 32 bit, redo the multiplication in 64 bits and compare to detect wrap-around
	if (((uint64_t)((uint64_t)size * (uint64_t)count)) != (uint64_t)data_left_to_write)
	{
		last_error = EINVAL;
		sgx_thread_mutex_unlock(&mutex);
		return 0;
	}
#endif

	// reject a source buffer that lies entirely outside the enclave
	// NOTE(review): read() reports EINVAL for the same condition - confirm which error code is intended
	if (sgx_is_outside_enclave(ptr, data_left_to_write))
	{
		last_error = SGX_ERROR_INVALID_PARAMETER;
		sgx_thread_mutex_unlock(&mutex);
		return 0;
	}

	if (file_status != SGX_FILE_STATUS_OK)
	{
		last_error = SGX_ERROR_FILE_BAD_STATUS;
		sgx_thread_mutex_unlock(&mutex);
		return 0;
	}

	if (open_mode.append == 0 && open_mode.update == 0 && open_mode.write == 0)
	{
		last_error = EACCES;
		sgx_thread_mutex_unlock(&mutex);
		return 0;
	}

	if (open_mode.append == 1)
		offset = encrypted_part_plain.size; // add at the end of the file

	const unsigned char* data_to_write = (const unsigned char*)ptr;

	// the first block of user data is written in the meta-data encrypted part
	if (offset < MD_USER_DATA_SIZE)
	{
		size_t empty_place_left_in_md = MD_USER_DATA_SIZE - (size_t)offset; // offset is smaller than MD_USER_DATA_SIZE
		if (data_left_to_write <= empty_place_left_in_md)
		{
			memcpy(&encrypted_part_plain.data[offset], data_to_write, data_left_to_write);
			offset += data_left_to_write;
			data_to_write += data_left_to_write; // not needed, to prevent future errors
			data_left_to_write = 0;
		}
		else
		{
			memcpy(&encrypted_part_plain.data[offset], data_to_write, empty_place_left_in_md);
			offset += empty_place_left_in_md;
			data_to_write += empty_place_left_in_md;
			data_left_to_write -= empty_place_left_in_md;
		}

		if (offset > encrypted_part_plain.size)
			encrypted_part_plain.size = offset; // file grew, update the new file size

		need_writing = true;
	}

	// the rest of the data goes into data nodes, filled one node at a time
	while (data_left_to_write > 0)
	{
		file_data_node_t* file_data_node = NULL;
		file_data_node = get_data_node(); // return the data node of the current offset, will read it from disk or create new one if needed (and also the mht node if needed)
		if (file_data_node == NULL)
			break; // get_data_node already set last_error; fall through to return a partial count

		size_t offset_in_node = (size_t)((offset - MD_USER_DATA_SIZE) % NODE_SIZE);
		size_t empty_place_left_in_node = NODE_SIZE - offset_in_node;

		if (data_left_to_write <= empty_place_left_in_node)
		{ // this will be the last write
			memcpy(&file_data_node->plain.data[offset_in_node], data_to_write, data_left_to_write);
			offset += data_left_to_write;
			data_to_write += data_left_to_write; // not needed, to prevent future errors
			data_left_to_write = 0;
		}
		else
		{
			memcpy(&file_data_node->plain.data[offset_in_node], data_to_write, empty_place_left_in_node);
			offset += empty_place_left_in_node;
			data_to_write += empty_place_left_in_node;
			data_left_to_write -= empty_place_left_in_node;
		}

		if (offset > encrypted_part_plain.size)
			encrypted_part_plain.size = offset; // file grew, update the new file size

		if (file_data_node->need_writing == false)
		{
			file_data_node->need_writing = true;
			file_mht_node_t* file_mht_node = file_data_node->parent;
			while (file_mht_node->mht_node_number != 0) // set all the mht parent nodes as 'need writing'
			{
				file_mht_node->need_writing = true;
				file_mht_node = file_mht_node->parent;
			}
			root_mht.need_writing = true;
			need_writing = true;
		}
	}

	sgx_thread_mutex_unlock(&mutex);

	// report whole items only; a partial last item counts as not written
	size_t ret_count = ((size * count) - data_left_to_write) / size;
	return ret_count;
}
// Read up to 'count' items of 'size' bytes each from the current offset into 'ptr'.
// The request is shrunk to what is left in the file. Returns the number of whole
// items read; 0 on error or at end-of-file (last_error / end_of_file distinguish).
size_t protected_fs_file::read(void* ptr, size_t size, size_t count)
{
	if (ptr == NULL || size == 0 || count == 0)
		return 0;

	int32_t result32 = sgx_thread_mutex_lock(&mutex);
	if (result32 != 0)
	{
		// a failed lock means the object state can no longer be trusted
		last_error = result32;
		file_status = SGX_FILE_STATUS_MEMORY_CORRUPTED;
		return 0;
	}

	size_t data_left_to_read = size * count;

	// prevent overlap... (size*count must not overflow size_t)
#if defined(_WIN64) || defined(__x86_64__)
	// on 64 bit, limiting each operand to 32 bits guarantees the product fits
	if (size > UINT32_MAX || count > UINT32_MAX)
	{
		last_error = EINVAL;
		sgx_thread_mutex_unlock(&mutex);
		return 0;
	}
#else
	// on 32 bit, redo the multiplication in 64 bits and compare to detect wrap-around
	if (((uint64_t)((uint64_t)size * (uint64_t)count)) != (uint64_t)data_left_to_read)
	{
		last_error = EINVAL;
		sgx_thread_mutex_unlock(&mutex);
		return 0;
	}
#endif

	// reject a destination buffer that lies entirely outside the enclave
	if (sgx_is_outside_enclave(ptr, data_left_to_read))
	{
		last_error = EINVAL;
		sgx_thread_mutex_unlock(&mutex);
		return 0;
	}

	if (file_status != SGX_FILE_STATUS_OK)
	{
		last_error = SGX_ERROR_FILE_BAD_STATUS;
		sgx_thread_mutex_unlock(&mutex);
		return 0;
	}

	if (open_mode.read == 0 && open_mode.update == 0)
	{
		last_error = EACCES;
		sgx_thread_mutex_unlock(&mutex);
		return 0;
	}

	if (end_of_file == true)
	{// not an error
		sgx_thread_mutex_unlock(&mutex);
		return 0;
	}

	// this check is not really needed, can go on with the code and it will do nothing until the end, but it's more 'right' to check it here
	if (offset == encrypted_part_plain.size)
	{
		end_of_file = true;
		sgx_thread_mutex_unlock(&mutex);
		return 0;
	}

	if (((uint64_t)data_left_to_read) > (uint64_t)(encrypted_part_plain.size - offset)) // the request is bigger than what's left in the file
	{
		data_left_to_read = (size_t)(encrypted_part_plain.size - offset);
	}

	size_t data_attempted_to_read = data_left_to_read; // used at the end to return how much we actually read

	unsigned char* out_buffer = (unsigned char*)ptr;

	// the first block of user data is read from the meta-data encrypted part
	if (offset < MD_USER_DATA_SIZE)
	{
		size_t data_left_in_md = MD_USER_DATA_SIZE - (size_t)offset; // offset is smaller than MD_USER_DATA_SIZE
		if (data_left_to_read <= data_left_in_md)
		{
			memcpy(out_buffer, &encrypted_part_plain.data[offset], data_left_to_read);
			offset += data_left_to_read;
			out_buffer += data_left_to_read; // not needed, to prevent future errors
			data_left_to_read = 0;
		}
		else
		{
			memcpy(out_buffer, &encrypted_part_plain.data[offset], data_left_in_md);
			offset += data_left_in_md;
			out_buffer += data_left_in_md;
			data_left_to_read -= data_left_in_md;
		}
	}

	// the rest is copied out of data nodes, one node at a time
	while (data_left_to_read > 0)
	{
		file_data_node_t* file_data_node = NULL;
		file_data_node = get_data_node(); // return the data node of the current offset, will read it from disk if needed (and also the mht node if needed)
		if (file_data_node == NULL)
			break; // get_data_node already set last_error; fall through to return a partial count

		size_t offset_in_node = (offset - MD_USER_DATA_SIZE) % NODE_SIZE;
		size_t data_left_in_node = NODE_SIZE - offset_in_node;

		if (data_left_to_read <= data_left_in_node)
		{
			memcpy(out_buffer, &file_data_node->plain.data[offset_in_node], data_left_to_read);
			offset += data_left_to_read;
			out_buffer += data_left_to_read; // not needed, to prevent future errors
			data_left_to_read = 0;
		}
		else
		{
			memcpy(out_buffer, &file_data_node->plain.data[offset_in_node], data_left_in_node);
			offset += data_left_in_node;
			out_buffer += data_left_in_node;
			data_left_to_read -= data_left_in_node;
		}
	}

	sgx_thread_mutex_unlock(&mutex);

	// NOTE(review): end_of_file is written after the mutex was released - confirm this is safe
	if (data_left_to_read == 0 &&
		data_attempted_to_read != (size * count)) // user wanted to read more and we had to shrink the request
	{
		assert(offset == encrypted_part_plain.size);
		end_of_file = true;
	}

	size_t ret_count = (data_attempted_to_read - data_left_to_read) / size;
	return ret_count;
}
  262. // this is a very 'specific' function, tied to the architecture of the file layout, returning the node numbers according to the offset in the file
  263. void get_node_numbers(uint64_t offset, uint64_t* mht_node_number, uint64_t* data_node_number,
  264. uint64_t* physical_mht_node_number, uint64_t* physical_data_node_number)
  265. {
  266. // node 0 - meta data node
  267. // node 1 - mht
  268. // nodes 2-97 - data (ATTACHED_DATA_NODES_COUNT == 96)
  269. // node 98 - mht
  270. // node 99-195 - data
  271. // etc.
  272. uint64_t _mht_node_number;
  273. uint64_t _data_node_number;
  274. uint64_t _physical_mht_node_number;
  275. uint64_t _physical_data_node_number;
  276. assert(offset >= MD_USER_DATA_SIZE);
  277. _data_node_number = (offset - MD_USER_DATA_SIZE) / NODE_SIZE;
  278. _mht_node_number = _data_node_number / ATTACHED_DATA_NODES_COUNT;
  279. _physical_data_node_number = _data_node_number
  280. + 1 // meta data node
  281. + 1 // mht root
  282. + _mht_node_number; // number of mht nodes in the middle (the root mht mht_node_number is 0)
  283. _physical_mht_node_number = _physical_data_node_number
  284. - _data_node_number % ATTACHED_DATA_NODES_COUNT // now we are at the first data node attached to this mht node
  285. - 1; // and now at the mht node itself!
  286. if (mht_node_number != NULL) *mht_node_number = _mht_node_number;
  287. if (data_node_number != NULL) *data_node_number = _data_node_number;
  288. if (physical_mht_node_number != NULL) *physical_mht_node_number = _physical_mht_node_number;
  289. if (physical_data_node_number != NULL) *physical_data_node_number = _physical_data_node_number;
  290. }
// Return the (decrypted, cached) data node covering the current offset.
// A brand-new node is created when appending exactly at EOF on a node boundary;
// otherwise the node is fetched from cache/disk. Afterwards the LRU cache is
// trimmed back to MAX_PAGES_IN_CACHE, flushing first if a dirty node would be
// evicted. Returns NULL on error with last_error set.
file_data_node_t* protected_fs_file::get_data_node()
{
	file_data_node_t* file_data_node = NULL;

	if (offset < MD_USER_DATA_SIZE)
	{
		// offsets below MD_USER_DATA_SIZE live in the meta-data block, not in a data node
		last_error = SGX_ERROR_UNEXPECTED;
		return NULL;
	}

	if ((offset - MD_USER_DATA_SIZE) % NODE_SIZE == 0 &&
		offset == encrypted_part_plain.size)
	{// new node
		file_data_node = append_data_node();
	}
	else
	{// existing node
		file_data_node = read_data_node();
	}

	// bump all the parents mht to reside before the data node in the cache
	if (file_data_node != NULL)
	{
		file_mht_node_t* file_mht_node = file_data_node->parent;
		while (file_mht_node->mht_node_number != 0) // stop at the root (the root is handled outside the cache)
		{
			cache.get(file_mht_node->physical_node_number); // bump the mht node to the head of the lru
			file_mht_node = file_mht_node->parent;
		}
	}

	// even if we didn't get the required data_node, we might have read other nodes in the process
	while (cache.size() > MAX_PAGES_IN_CACHE)
	{
		void* data = cache.get_last();
		assert(data != NULL);
		// for production -
		if (data == NULL)
		{
			last_error = SGX_ERROR_UNEXPECTED;
			return NULL;
		}

		if (((file_data_node_t*)data)->need_writing == false) // need_writing is in the same offset in both node types
		{
			// clean node - safe to drop it from the cache
			cache.remove_last();

			// before deleting the memory, need to scrub the plain secrets
			if (((file_data_node_t*)data)->type == FILE_DATA_NODE_TYPE) // type is in the same offset in both node types
			{
				file_data_node_t* file_data_node1 = (file_data_node_t*)data;
				memset_s(&file_data_node1->plain, sizeof(data_node_t), 0, sizeof(data_node_t));
				delete file_data_node1;
			}
			else
			{
				file_mht_node_t* file_mht_node = (file_mht_node_t*)data;
				memset_s(&file_mht_node->plain, sizeof(mht_node_t), 0, sizeof(mht_node_t));
				delete file_mht_node;
			}
		}
		else
		{
			// dirty node at the eviction end - flush everything before evicting
			if (internal_flush(/*false,*/ false) == false) // error, can't flush cache, file status changed to error
			{
				assert(file_status != SGX_FILE_STATUS_OK);
				if (file_status == SGX_FILE_STATUS_OK)
					file_status = SGX_FILE_STATUS_FLUSH_ERROR; // for release set this anyway
				return NULL; // even if we got the data_node!
			}
		}
	}

	return file_data_node;
}
  359. file_data_node_t* protected_fs_file::append_data_node()
  360. {
  361. file_mht_node_t* file_mht_node = get_mht_node();
  362. if (file_mht_node == NULL) // some error happened
  363. return NULL;
  364. file_data_node_t* new_file_data_node = NULL;
  365. try {
  366. new_file_data_node = new file_data_node_t;
  367. }
  368. catch (std::bad_alloc& e) {
  369. (void)e; // remove warning
  370. last_error = ENOMEM;
  371. return NULL;
  372. }
  373. memset(new_file_data_node, 0, sizeof(file_data_node_t));
  374. new_file_data_node->type = FILE_DATA_NODE_TYPE;
  375. new_file_data_node->new_node = true;
  376. new_file_data_node->parent = file_mht_node;
  377. get_node_numbers(offset, NULL, &new_file_data_node->data_node_number, NULL, &new_file_data_node->physical_node_number);
  378. if (cache.add(new_file_data_node->physical_node_number, new_file_data_node) == false)
  379. {
  380. delete new_file_data_node;
  381. last_error = ENOMEM;
  382. return NULL;
  383. }
  384. return new_file_data_node;
  385. }
// Fetch the data node for the current offset: return the cached copy if present,
// otherwise read the ciphertext from disk, decrypt and verify it against the
// key/GMAC stored in its parent mht node, and insert it into the cache.
// Returns NULL on error with last_error set.
file_data_node_t* protected_fs_file::read_data_node()
{
	uint64_t data_node_number;
	uint64_t physical_node_number;
	file_mht_node_t* file_mht_node;
	int32_t result32;
	sgx_status_t status;

	get_node_numbers(offset, NULL, &data_node_number, NULL, &physical_node_number);

	file_data_node_t* file_data_node = (file_data_node_t*)cache.get(physical_node_number);
	if (file_data_node != NULL)
		return file_data_node;

	// need to read the data node from the disk

	file_mht_node = get_mht_node(); // the parent mht holds this node's key and gmac
	if (file_mht_node == NULL) // some error happened
		return NULL;

	try {
		file_data_node = new file_data_node_t;
	}
	catch (std::bad_alloc& e) {
		(void)e; // remove warning
		last_error = ENOMEM;
		return NULL;
	}
	memset(file_data_node, 0, sizeof(file_data_node_t));
	file_data_node->type = FILE_DATA_NODE_TYPE;
	file_data_node->data_node_number = data_node_number;
	file_data_node->physical_node_number = physical_node_number;
	file_data_node->parent = file_mht_node;

	// read the encrypted node via the untrusted side
	status = u_sgxprotectedfs_fread_node(&result32, file, file_data_node->physical_node_number, file_data_node->encrypted.cipher, NODE_SIZE);
	if (status != SGX_SUCCESS || result32 != 0)
	{
		delete file_data_node;
		// pass through result32 unless it's the generic -1, then report EIO
		last_error = (status != SGX_SUCCESS) ? status :
					 (result32 != -1) ? result32 : EIO;
		return NULL;
	}

	gcm_crypto_data_t* gcm_crypto_data = &file_data_node->parent->plain.data_nodes_crypto[file_data_node->data_node_number % ATTACHED_DATA_NODES_COUNT];

	// this function decrypt the data _and_ checks the integrity of the data against the gmac
	status = sgx_rijndael128GCM_decrypt(&gcm_crypto_data->key, file_data_node->encrypted.cipher, NODE_SIZE, file_data_node->plain.data, empty_iv, SGX_AESGCM_IV_SIZE, NULL, 0, &gcm_crypto_data->gmac);
	if (status != SGX_SUCCESS)
	{
		delete file_data_node;
		last_error = status;
		if (status == SGX_ERROR_MAC_MISMATCH)
		{
			// integrity failure - mark the whole file as corrupted
			file_status = SGX_FILE_STATUS_CORRUPTED;
		}
		return NULL;
	}

	if (cache.add(file_data_node->physical_node_number, file_data_node) == false)
	{
		memset_s(&file_data_node->plain, sizeof(data_node_t), 0, sizeof(data_node_t)); // scrub the plaintext data
		delete file_data_node;
		last_error = ENOMEM;
		return NULL;
	}

	return file_data_node;
}
  444. file_mht_node_t* protected_fs_file::get_mht_node()
  445. {
  446. file_mht_node_t* file_mht_node;
  447. uint64_t mht_node_number;
  448. uint64_t physical_mht_node_number;
  449. if (offset < MD_USER_DATA_SIZE)
  450. {
  451. last_error = SGX_ERROR_UNEXPECTED;
  452. return NULL;
  453. }
  454. get_node_numbers(offset, &mht_node_number, NULL, &physical_mht_node_number, NULL);
  455. if (mht_node_number == 0)
  456. return &root_mht;
  457. // file is constructed from 128*4KB = 512KB per MHT node.
  458. if ((offset - MD_USER_DATA_SIZE) % (ATTACHED_DATA_NODES_COUNT * NODE_SIZE) == 0 &&
  459. offset == encrypted_part_plain.size)
  460. {
  461. file_mht_node = append_mht_node(mht_node_number);
  462. }
  463. else
  464. {
  465. file_mht_node = read_mht_node(mht_node_number);
  466. }
  467. return file_mht_node;
  468. }
  469. file_mht_node_t* protected_fs_file::append_mht_node(uint64_t mht_node_number)
  470. {
  471. file_mht_node_t* parent_file_mht_node = read_mht_node((mht_node_number - 1) / CHILD_MHT_NODES_COUNT);
  472. if (parent_file_mht_node == NULL) // some error happened
  473. return NULL;
  474. uint64_t physical_node_number = 1 + // meta data node
  475. mht_node_number * (1 + ATTACHED_DATA_NODES_COUNT); // the '1' is for the mht node preceding every 96 data nodes
  476. file_mht_node_t* new_file_mht_node = NULL;
  477. try {
  478. new_file_mht_node = new file_mht_node_t;
  479. }
  480. catch (std::bad_alloc& e) {
  481. (void)e; // remove warning
  482. last_error = ENOMEM;
  483. return NULL;
  484. }
  485. memset(new_file_mht_node, 0, sizeof(file_mht_node_t));
  486. new_file_mht_node->type = FILE_MHT_NODE_TYPE;
  487. new_file_mht_node->new_node = true;
  488. new_file_mht_node->parent = parent_file_mht_node;
  489. new_file_mht_node->mht_node_number = mht_node_number;
  490. new_file_mht_node->physical_node_number = physical_node_number;
  491. if (cache.add(new_file_mht_node->physical_node_number, new_file_mht_node) == false)
  492. {
  493. delete new_file_mht_node;
  494. last_error = ENOMEM;
  495. return NULL;
  496. }
  497. return new_file_mht_node;
  498. }
// Fetch an mht node by its logical number: the root comes straight from the
// object, cached nodes come from the cache, and anything else is read from
// disk (recursing first to materialize its parent), decrypted and verified
// against the key/GMAC stored in the parent, then cached.
// Returns NULL on error with last_error set.
file_mht_node_t* protected_fs_file::read_mht_node(uint64_t mht_node_number)
{
	int32_t result32;
	sgx_status_t status;

	if (mht_node_number == 0)
		return &root_mht; // the root mht is a member of this object

	uint64_t physical_node_number = 1 + // meta data node
									mht_node_number * (1 + ATTACHED_DATA_NODES_COUNT); // the '1' is for the mht node preceding every 96 data nodes

	file_mht_node_t* file_mht_node = (file_mht_node_t*)cache.find(physical_node_number);
	if (file_mht_node != NULL)
		return file_mht_node;

	// recurse to the parent - it holds this node's key and gmac
	file_mht_node_t* parent_file_mht_node = read_mht_node((mht_node_number - 1) / CHILD_MHT_NODES_COUNT);
	if (parent_file_mht_node == NULL) // some error happened
		return NULL;

	try {
		file_mht_node = new file_mht_node_t;
	}
	catch (std::bad_alloc& e) {
		(void)e; // remove warning
		last_error = ENOMEM;
		return NULL;
	}
	memset(file_mht_node, 0, sizeof(file_mht_node_t));
	file_mht_node->type = FILE_MHT_NODE_TYPE;
	file_mht_node->mht_node_number = mht_node_number;
	file_mht_node->physical_node_number = physical_node_number;
	file_mht_node->parent = parent_file_mht_node;

	// read the encrypted node via the untrusted side
	status = u_sgxprotectedfs_fread_node(&result32, file, file_mht_node->physical_node_number, file_mht_node->encrypted.cipher, NODE_SIZE);
	if (status != SGX_SUCCESS || result32 != 0)
	{
		delete file_mht_node;
		// pass through result32 unless it's the generic -1, then report EIO
		last_error = (status != SGX_SUCCESS) ? status :
					 (result32 != -1) ? result32 : EIO;
		return NULL;
	}

	gcm_crypto_data_t* gcm_crypto_data = &file_mht_node->parent->plain.mht_nodes_crypto[(file_mht_node->mht_node_number - 1) % CHILD_MHT_NODES_COUNT];

	// this function decrypt the data _and_ checks the integrity of the data against the gmac
	status = sgx_rijndael128GCM_decrypt(&gcm_crypto_data->key, file_mht_node->encrypted.cipher, NODE_SIZE, (uint8_t*)&file_mht_node->plain, empty_iv, SGX_AESGCM_IV_SIZE, NULL, 0, &gcm_crypto_data->gmac);
	if (status != SGX_SUCCESS)
	{
		delete file_mht_node;
		last_error = status;
		if (status == SGX_ERROR_MAC_MISMATCH)
		{
			// integrity failure - mark the whole file as corrupted
			file_status = SGX_FILE_STATUS_CORRUPTED;
		}
		return NULL;
	}

	if (cache.add(file_mht_node->physical_node_number, file_mht_node) == false)
	{
		memset_s(&file_mht_node->plain, sizeof(mht_node_t), 0, sizeof(mht_node_t)); // scrub the plaintext before freeing
		delete file_mht_node;
		last_error = ENOMEM;
		return NULL;
	}

	return file_mht_node;
}