/* monotonic_counter_database_sqlite_cache.cpp */
  1. /*
  2. * Copyright (C) 2011-2018 Intel Corporation. All rights reserved.
  3. *
  4. * Redistribution and use in source and binary forms, with or without
  5. * modification, are permitted provided that the following conditions
  6. * are met:
  7. *
  8. * * Redistributions of source code must retain the above copyright
  9. * notice, this list of conditions and the following disclaimer.
  10. * * Redistributions in binary form must reproduce the above copyright
  11. * notice, this list of conditions and the following disclaimer in
  12. * the documentation and/or other materials provided with the
  13. * distribution.
  14. * * Neither the name of Intel Corporation nor the names of its
  15. * contributors may be used to endorse or promote products derived
  16. * from this software without specific prior written permission.
  17. *
  18. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  19. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  20. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  21. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  22. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  23. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  24. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  25. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  26. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  27. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  28. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  29. *
  30. */
#include "string.h"
#include "monotonic_counter_database_types.h"
#include "monotonic_counter_database_sqlite_access_hw_mc.h"
#include "monotonic_counter_database_sqlite_cache.h"
#include "monotonic_counter_database_sqlite_bin_hash_tree_utility.h"
#include <stdlib.h>
#include <assert.h>

// Debug switch: when defined, the caching layer is compiled out and every
// cache access reports failure/no-op (see the #ifdef blocks below).
//#define DEBUG_WITHOUT_CACHE

// will cache a maximum number of 256 leaves
#define MAX_LEAF_CACHE_NUM 256

// define an empty hash tree node pointer array in PSE's memory to cache all hash nodes.
// NOTE(review): slot [i] appears to cache the tree node with 1-based id (i+1)
// (all accesses below index with [node_id - 1]); entries are malloc'd on demand
// by cache_helper() and released when their ref_counter drops to zero.
static tree_node_cache_t* g_hash_tree_nodes[INIT_MAX_LEAF_NODE_ID] = {0};

// cache IDs of leaves, to limit the size of the whole cache
// (singly-linked list, head = most recently used; size tracks its length).
static leaf_cache_t g_leaf_cache;
  45. /*******************************************************************
  46. ** Function name: flush_hash_tree_cache
  47. ** Descrption: free memory allocated by cache tree.
  48. **
  49. *******************************************************************/
  50. void flush_hash_tree_cache()
  51. {
  52. #ifndef DEBUG_WITHOUT_CACHE
  53. for(uint32_t index=0; index < INIT_MAX_LEAF_NODE_ID; index++)
  54. {
  55. SAFE_FREE(g_hash_tree_nodes[index]);
  56. }
  57. // clear cached leaves list
  58. while (g_leaf_cache.list)
  59. {
  60. leaf_cache_node_t* node = g_leaf_cache.list;
  61. g_leaf_cache.list = g_leaf_cache.list->next;
  62. SAFE_FREE(node);
  63. }
  64. g_leaf_cache.size = 0;
  65. #endif
  66. }
  67. /*******************************************************************
  68. ** Function name: cache_helper
  69. ** Descrption: helper function to insert node to cache or retrieve node from cache
  70. ** Returns: OP_SUCCESS when success
  71. ** or OP_ERROR_CACHE_MISS when CACHE_OP_READ failed
  72. ** or OP_ERROR_MALLOC for when CACHE_OP_UPDATE failed
  73. *******************************************************************/
  74. static pse_op_error_t cache_helper(const cache_op_t cache_op, // [IN] Read/Update cache
  75. tree_node_cache_t** tree_node, // [IN,OUT] pointer to a single tree_node_cache_t instance
  76. const uint32_t tree_node_sz, // [IN] size of tree_node->node
  77. uint8_t* data, // [IN, OUT] pointer to bufer that stores the value of the (tree_node->node)[]
  78. const uint32_t data_sz) // [IN] size of data[]
  79. {
  80. assert(tree_node != NULL && data != NULL);
  81. if(NULL == *tree_node) // this node has not been allocated.
  82. {
  83. if(cache_op == CACHE_OP_UPDATE) // for insert a new node to cache
  84. {
  85. *tree_node = (tree_node_cache_t*)malloc(tree_node_sz); // allocate memory
  86. if(NULL == *tree_node)
  87. {
  88. return OP_ERROR_MALLOC;
  89. }
  90. (*tree_node)->ref_counter = 0; // initialize the ref_counter with 0
  91. }
  92. else // the node is not cached
  93. {
  94. return OP_ERROR_CACHE_MISS;
  95. }
  96. }
  97. if(cache_op == CACHE_OP_UPDATE) // update cache
  98. {
  99. memcpy((*tree_node)->node, data, data_sz);
  100. }
  101. else // retrieve data from cahce
  102. {
  103. memcpy(data, (*tree_node)->node, data_sz);
  104. }
  105. return OP_SUCCESS;
  106. }
  107. static inline void update_node_ref_counter(const uint32_t node_index, const int value_to_add)
  108. {
  109. // The node should not be NULL
  110. assert(g_hash_tree_nodes[node_index-1] != NULL);
  111. // Increase or decrease ref counter
  112. g_hash_tree_nodes[node_index-1]->ref_counter += value_to_add;
  113. // The ref counter cannot < 0
  114. assert(g_hash_tree_nodes[node_index-1]->ref_counter <= 8192);
  115. // free memory only when ref_counter is 0.
  116. if(g_hash_tree_nodes[node_index-1]->ref_counter == 0)
  117. {
  118. SAFE_FREE(g_hash_tree_nodes[node_index-1]);
  119. }
  120. }
  121. /*******************************************************************
  122. ** Function name: update_related_nodes_ref_count
  123. ** Descrption: Increase or decrease the reference counter of all related nodes.
  124. ** If ref counter reaches zero, release the cached node.
  125. **
  126. *******************************************************************/
  127. static void update_related_nodes_ref_counter(const uint32_t leaf_id, const int value_to_add)
  128. {
  129. #ifdef DEBUG_WITHOUT_CACHE
  130. return;
  131. #else
  132. assert(value_to_add == 1 || value_to_add == -1);
  133. uint32_t ancestor_index = leaf_id;
  134. uint32_t i = 0;
  135. // update leaf's ref counter
  136. update_node_ref_counter(leaf_id, value_to_add);
  137. // update brother's ref counter
  138. update_node_ref_counter(IS_LEFT_CHILD(leaf_id) ? (leaf_id+1) : (leaf_id-1), value_to_add);
  139. // update ancestors and brothers' ref counter
  140. ancestor_index = ( ancestor_index - ancestor_index%2 ) >> 1 ;
  141. while (ancestor_index != 1)
  142. {
  143. update_node_ref_counter(ancestor_index, value_to_add);
  144. update_node_ref_counter(IS_LEFT_CHILD(ancestor_index) ? (ancestor_index+1) : (ancestor_index-1),
  145. value_to_add);
  146. ancestor_index = ( ancestor_index - ancestor_index%2 ) >> 1 ;
  147. i++;
  148. }
  149. #endif
  150. }
  151. static void find_leaf_node_in_cache(const uint32_t leaf_id,
  152. leaf_cache_node_t** cached_leaf_node_prev,
  153. leaf_cache_node_t** cached_leaf_node)
  154. {
  155. // look for the leaf node in the cached list
  156. *cached_leaf_node = g_leaf_cache.list;
  157. while (*cached_leaf_node != NULL) {
  158. if ((*cached_leaf_node)->leaf_id == leaf_id)
  159. break;
  160. *cached_leaf_node_prev = *cached_leaf_node;
  161. *cached_leaf_node = (*cached_leaf_node)->next;
  162. }
  163. }
  164. /*******************************************************************
  165. ** Function name: update_cached_leaf_list
  166. ** Descrption: update cached_leaf_list which maintains a list of all
  167. ** leaves that are cached in memory. If there is no empty
  168. ** slot to cache new leaf node, will remove the last one
  169. ** in the cache list and put the new leaf at the head (FIFO)
  170. *******************************************************************/
  171. static pse_op_error_t update_cached_leaf_list(const uint32_t leaf_id)
  172. {
  173. leaf_cache_node_t* cached_leaf_node_prev = NULL;
  174. leaf_cache_node_t* cached_leaf_node = NULL;
  175. // look for the leaf node in the cached list
  176. find_leaf_node_in_cache(leaf_id, &cached_leaf_node_prev, &cached_leaf_node);
  177. // leaf node not in the cache
  178. if (cached_leaf_node == NULL)
  179. {
  180. // malloc cache node first
  181. leaf_cache_node_t* temp = (leaf_cache_node_t*)malloc(sizeof(leaf_cache_node_t));
  182. if (temp == NULL)
  183. {
  184. return OP_ERROR_MALLOC;
  185. }
  186. // increase ref counter for all related nodes of the leaf node
  187. update_related_nodes_ref_counter(leaf_id, 1);
  188. if (g_leaf_cache.size == MAX_LEAF_CACHE_NUM) // g_leaf_cache.size reaches the limitation.
  189. {
  190. // cache is full, remove the tail node in the list
  191. leaf_cache_node_t* tail_prev = NULL;
  192. // check the pointer g_leaf_cache.list for defense in depth.
  193. if(NULL == g_leaf_cache.list)
  194. {
  195. // head of the list is NULL
  196. SAFE_FREE(temp);
  197. return OP_ERROR_INTERNAL;
  198. }
  199. leaf_cache_node_t* tail = g_leaf_cache.list; // head of the list
  200. while(tail->next != NULL)
  201. {
  202. tail_prev = tail;
  203. tail = tail->next;
  204. }
  205. // update reference counter for related nodes
  206. update_related_nodes_ref_counter(tail->leaf_id, -1);
  207. SAFE_FREE(tail);
  208. if(tail_prev != NULL)
  209. {
  210. tail_prev->next = NULL;
  211. }
  212. // update cached list length
  213. g_leaf_cache.size--;
  214. }
  215. // insert the new node at head
  216. temp->leaf_id = leaf_id;
  217. temp->next = g_leaf_cache.list;
  218. g_leaf_cache.list = temp;
  219. // update cache size
  220. g_leaf_cache.size++;
  221. }
  222. else
  223. {
  224. if (cached_leaf_node_prev == NULL)
  225. {
  226. // already at head
  227. return OP_SUCCESS;
  228. }
  229. else
  230. {
  231. // move the leaf to head
  232. cached_leaf_node_prev->next = cached_leaf_node->next;
  233. cached_leaf_node->next = g_leaf_cache.list;
  234. g_leaf_cache.list = cached_leaf_node;
  235. }
  236. }
  237. return OP_SUCCESS;
  238. }
  239. /*******************************************************************
  240. ** Function name: remove_from_cached_leaf_list
  241. ** Descrption: Remove specified leaf node from cached leaves list
  242. *******************************************************************/
  243. static void remove_from_cached_leaf_list(const uint32_t leaf_id)
  244. {
  245. #ifdef DEBUG_WITHOUT_CACHE
  246. return;
  247. #else
  248. leaf_cache_node_t* cached_leaf_node_prev = NULL;
  249. leaf_cache_node_t* cached_leaf_node = NULL;
  250. // look for the leaf node in the cached list
  251. find_leaf_node_in_cache(leaf_id, &cached_leaf_node_prev, &cached_leaf_node);
  252. if (cached_leaf_node != NULL)
  253. {
  254. if (cached_leaf_node_prev != NULL)
  255. {
  256. cached_leaf_node_prev->next = cached_leaf_node->next;
  257. }
  258. else
  259. {
  260. // node is at the head
  261. g_leaf_cache.list = cached_leaf_node->next;
  262. }
  263. SAFE_FREE(cached_leaf_node);
  264. g_leaf_cache.size--;
  265. // update reference counter
  266. update_related_nodes_ref_counter(leaf_id, -1);
  267. }
  268. #endif
  269. }
/*******************************************************************
** Function name: access_hash_tree_cache
** Descrption: Read a VMC operation's working set (root, ancestors,
**             siblings of ancestors, the leaf and its sibling) from
**             the cache, or write it back into the cache, as one
**             transaction-like sequence. On CACHE_OP_READ the cached
**             root hash is validated against root_hash (the value
**             read from PSDA's RPDATA); a mismatch flushes the whole
**             cache and reports a miss. On CACHE_OP_UPDATE the
**             cached-leaf list is maintained (inserted/refreshed, or
**             removed for RPDB_OP_DELETE).
** Returns: OP_SUCCESS, OP_ERROR_CACHE_MISS (read path only),
**          or OP_ERROR_MALLOC (update path only; cache is flushed).
*******************************************************************/
pse_op_error_t access_hash_tree_cache(const rpdb_op_t rpdb_op, // vmc operation type
                                      const cache_op_t cache_op, // read/update cache
                                      pse_vmc_hash_tree_cache_t *cache, // buffer that stores tree nodes required by a VMC operation
                                      const uint8_t *root_hash) // current root hash in rpdata read from PSDA
{
    assert(cache);
    if(cache_op == CACHE_OP_READ)
    {
        // reads must be validated against the authoritative root hash
        assert(root_hash);
    }
#ifdef DEBUG_WITHOUT_CACHE
    return OP_ERROR_INTERNAL;
#else
    pse_op_error_t ret = OP_SUCCESS;

    // ROOT node first (slot 0 caches node id 1):
    // store it if CACHE_OP_UPDATE, retrieve it if CACHE_OP_READ
    ret = cache_helper(cache_op,
                       &(g_hash_tree_nodes[0]),
                       TREE_NODE_CACHE_SIZE + ROOT_NODE_SIZE,
                       (uint8_t*)&(cache->root),
                       ROOT_NODE_SIZE);
    if(OP_SUCCESS != ret)
    {
        goto end;
    }

    // the cached root hash must match the root hash retrieved from PSDA's RPDATA
    if(cache_op == CACHE_OP_READ && 0 != memcmp(cache->root.hash, root_hash, ROOT_HASH_SIZE))
    {
        // the cache is out of date, or possibly under attack:
        // drop the entire existing cache and report a miss
        flush_hash_tree_cache();
        return OP_ERROR_CACHE_MISS;
    }

    // internal nodes: for each level, the ancestor on the leaf's path and
    // that ancestor's sibling (store on UPDATE, retrieve on READ)
    for(uint32_t index = 0; index < INIT_INTERNAL_NODE_NR; index++)
    {
        // ancestor nodes
        ret = cache_helper(cache_op,
                           &(g_hash_tree_nodes[cache->ancestors[index].node_id - 1]),
                           TREE_NODE_CACHE_SIZE + INTERNAL_NODE_SIZE,
                           (uint8_t*)&(cache->ancestors[index].internal),
                           INTERNAL_NODE_SIZE);
        if(OP_SUCCESS != ret)
        {
            goto end;
        }
        // brothers (siblings) of ancestors
        ret = cache_helper(cache_op,
                           &(g_hash_tree_nodes[cache->brother_of_ancestors[index].node_id - 1]),
                           TREE_NODE_CACHE_SIZE + INTERNAL_NODE_SIZE,
                           (uint8_t*)&(cache->brother_of_ancestors[index].internal),
                           INTERNAL_NODE_SIZE);
        if(OP_SUCCESS != ret)
        {
            goto end;
        }
    }

    // the leaf itself (store on UPDATE, retrieve on READ)
    ret = cache_helper(cache_op,
                       &(g_hash_tree_nodes[cache->self.node_id - 1]),
                       TREE_NODE_CACHE_SIZE + LEAF_NODE_SIZE,
                       (uint8_t*)&cache->self.leaf,
                       LEAF_NODE_SIZE);
    if(OP_SUCCESS != ret)
    {
        goto end;
    }
    // the leaf's sibling
    ret = cache_helper(cache_op,
                       &(g_hash_tree_nodes[cache->brother.node_id - 1]),
                       TREE_NODE_CACHE_SIZE + LEAF_NODE_SIZE,
                       (uint8_t*)&cache->brother.leaf,
                       LEAF_NODE_SIZE);
    if(OP_SUCCESS != ret)
    {
        goto end;
    }

    // cached-leaf list bookkeeping, done only after all nodes are in place
    if (rpdb_op != RPDB_OP_DELETE)
    {
        // insert the leaf into the MRU list (or move it to the head)
        ret = update_cached_leaf_list(cache->self.node_id);
    }
    else if (cache_op == CACHE_OP_UPDATE)
    {
        // RPDB_OP_DELETE && CACHE_OP_UPDATE: the counter is gone,
        // so drop its leaf from the cached list
        remove_from_cached_leaf_list(cache->self.node_id);
    }

end:
    if (OP_SUCCESS != ret)
    {
        switch(ret)
        {
        case OP_ERROR_MALLOC: /* only possible for CACHE_OP_UPDATE */
            {
                // a partial update would leave the cache inconsistent;
                // drop everything so the next read rebuilds from the DB
                flush_hash_tree_cache();
                break;
            }
        case OP_ERROR_CACHE_MISS: /* only possible for CACHE_OP_READ */
            {
                break;
            }
        default:
            // NOTE(review): update_cached_leaf_list can also return
            // OP_ERROR_INTERNAL (defense-in-depth path); in release builds
            // that value falls through here unflushed -- confirm intended
            assert(0); /* should not happen */
        }
    }
    return ret;
#endif
}