/* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */
/* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */

/* Copyright (C) 2014 Stony Brook University
   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */

/*
 * shim_handle.c
 *
 * This file contains code for maintaining the bookkeeping of handles in the
 * library OS.
 */

#include <shim_internal.h>
#include <shim_thread.h>
#include <shim_handle.h>
#include <shim_checkpoint.h>
#include <shim_fs.h>

#include <pal.h>
#include <pal_error.h>

static LOCKTYPE handle_mgr_lock;

#define HANDLE_MGR_ALLOC    32

#define system_lock()       lock(handle_mgr_lock)
#define system_unlock()     unlock(handle_mgr_lock)
#define PAGE_SIZE           allocsize
#define OBJ_TYPE struct shim_handle
#include <memmgr.h>

static MEM_MGR handle_mgr = NULL;

#define INIT_HANDLE_MAP_SIZE 32

//#define DEBUG_REF
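
/* Open "/dev/tty" through the current thread's root mount and bind it to
 * hdl; the write flag selects read-only vs. write-only access.  Used below
 * to back the standard file descriptors. */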
static inline int init_tty_handle (struct shim_handle * hdl, bool write)
{
    struct shim_dentry * dent = NULL;
    int ret;
    struct shim_thread * cur_thread = get_cur_thread();

    /* XXX: Try getting the root FS from current thread? */
    assert(cur_thread);
    assert(cur_thread->root);
    if ((ret = path_lookupat(cur_thread->root, "/dev/tty", LOOKUP_OPEN, &dent,
                             cur_thread->root->fs)) < 0)
        return ret;

    int flags = (write ? O_WRONLY : O_RDONLY)|O_APPEND;
    struct shim_mount * fs = dent->fs;
    ret = fs->d_ops->open(hdl, dent, flags);
    if (ret < 0)
        return ret;

    set_handle_fs(hdl, fs);
    hdl->dentry = dent;
    hdl->flags = O_RDWR|O_APPEND|0100000;

    int size;
    char * path = dentry_get_path(dent, true, &size);
    if (path)
        qstrsetstr(&hdl->path, path, size);
    else
        qstrsetstr(&hdl->path, "/dev/tty", 8);

    return 0;
}
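
/* Create a read-only handle for the executable reported by the PAL
 * (PAL_CB(executable)), resolve its dentry if the URI maps to a mounted
 * filesystem, and record the handle as thread->exec. */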
static inline int init_exec_handle (struct shim_thread * thread)
{
    if (!PAL_CB(executable))
        return 0;

    struct shim_handle * exec = get_new_handle();
    if (!exec)
        return -ENOMEM;

    qstrsetstr(&exec->uri, PAL_CB(executable), strlen(PAL_CB(executable)));
    exec->type     = TYPE_FILE;
    exec->flags    = O_RDONLY;
    exec->acc_mode = MAY_READ;

    struct shim_mount * fs = find_mount_from_uri(PAL_CB(executable));
    if (fs) {
        path_lookupat(fs->root, PAL_CB(executable) + fs->uri.len, 0,
                      &exec->dentry, fs);
        set_handle_fs(exec, fs);
        if (exec->dentry) {
            int len;
            const char * path = dentry_get_path(exec->dentry, true, &len);
            qstrsetstr(&exec->path, path, len);
        }
        put_mount(fs);
    } else {
        set_handle_fs(exec, &chroot_builtin_fs);
    }

    lock(thread->lock);
    thread->exec = exec;
    unlock(thread->lock);

    return 0;
}

static struct shim_handle_map * get_new_handle_map (FDTYPE size);

PAL_HANDLE shim_stdio = NULL;

static int __set_new_fd_handle(struct shim_fd_handle ** fdhdl, FDTYPE fd,
                               struct shim_handle * hdl, int flags);

static struct shim_handle_map * __enlarge_handle_map
                            (struct shim_handle_map * map, FDTYPE size);

int init_handle (void)
{
    create_lock(handle_mgr_lock);
    handle_mgr = create_mem_mgr(init_align_up(HANDLE_MGR_ALLOC));
    if (!handle_mgr)
        return -ENOMEM;
    return 0;
}
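
/* Set up the "important" file descriptors 0, 1 and 2: fd 0 receives a
 * read-only tty handle, while fd 1 and fd 2 share a write-only tty handle.
 * Also make sure the current thread has a handle map and an exec handle. */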
int init_important_handles (void)
{
    struct shim_thread * thread = get_cur_thread();

    if (thread->handle_map)
        goto done;

    struct shim_handle_map * handle_map = get_cur_handle_map(thread);

    if (!handle_map) {
        handle_map = get_new_handle_map(INIT_HANDLE_MAP_SIZE);
        if (!handle_map)
            return -ENOMEM;

        set_handle_map(thread, handle_map);
    }

    lock(handle_map->lock);

    if (handle_map->fd_size < 3) {
        if (!__enlarge_handle_map(handle_map, INIT_HANDLE_MAP_SIZE)) {
            unlock(handle_map->lock);
            return -ENOMEM;
        }
    }

    struct shim_handle * hdl = NULL;
    int ret;

    for (int fd = 0 ; fd < 3 ; fd++)
        if (!HANDLE_ALLOCATED(handle_map->map[fd])) {
            if (!hdl) {
                hdl = get_new_handle();
                if (!hdl) {
                    unlock(handle_map->lock);
                    return -ENOMEM;
                }

                if ((ret = init_tty_handle(hdl, fd)) < 0) {
                    unlock(handle_map->lock);
                    put_handle(hdl);
                    return ret;
                }
            } else {
                get_handle(hdl);
            }

            __set_new_fd_handle(&handle_map->map[fd], fd, hdl, 0);
            put_handle(hdl);
            if (fd != 1)
                hdl = NULL;
        } else {
            if (fd == 1)
                hdl = handle_map->map[fd]->handle;
        }

    if (handle_map->fd_top == FD_NULL || handle_map->fd_top < 2)
        handle_map->fd_top = 2;

    unlock(handle_map->lock);

done:
    init_exec_handle(thread);
    return 0;
}

struct shim_handle * __get_fd_handle (FDTYPE fd, int * flags,
                                      struct shim_handle_map * map)
{
    struct shim_fd_handle * fd_handle = NULL;

    if (map->fd_top != FD_NULL &&
        fd <= map->fd_top) {
        fd_handle = map->map[fd];
        if (!HANDLE_ALLOCATED(fd_handle))
            return NULL;
        if (flags)
            *flags = fd_handle->flags;
        return fd_handle->handle;
    }
    return NULL;
}

struct shim_handle * get_fd_handle (FDTYPE fd, int * flags,
                                    struct shim_handle_map * map)
{
    if (!map)
        map = get_cur_handle_map(NULL);

    struct shim_handle * hdl = NULL;
    lock(map->lock);
    if ((hdl = __get_fd_handle(fd, flags, map)))
        get_handle(hdl);
    unlock(map->lock);
    return hdl;
}

struct shim_handle *
__detach_fd_handle (struct shim_fd_handle * fd, int * flags,
                    struct shim_handle_map * map)
{
    struct shim_handle * handle = NULL;

    if (HANDLE_ALLOCATED(fd)) {
        int vfd = fd->vfd;
        handle = fd->handle;
        if (flags)
            *flags = fd->flags;

        fd->vfd    = FD_NULL;
        fd->handle = NULL;
        fd->flags  = 0;

        if (vfd == map->fd_top)
            do {
                map->fd_top = vfd ? vfd - 1 : FD_NULL;
                vfd--;
            } while (vfd >= 0 &&
                     !HANDLE_ALLOCATED(map->map[vfd]));
    }

    return handle;
}

struct shim_handle * detach_fd_handle (FDTYPE fd, int * flags,
                                       struct shim_handle_map * handle_map)
{
    struct shim_handle * handle = NULL;

    if (!handle_map && !(handle_map = get_cur_handle_map(NULL)))
        return NULL;

    lock(handle_map->lock);

    if (fd < handle_map->fd_size)
        handle = __detach_fd_handle(handle_map->map[fd], flags,
                                    handle_map);

    unlock(handle_map->lock);
    return handle;
}
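
/* Allocate a zero-initialized handle from the handle memory manager, with
 * its reference count set to 1 and its owner set to the current process. */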
struct shim_handle * get_new_handle (void)
{
    struct shim_handle * new_handle =
        get_mem_obj_from_mgr_enlarge(handle_mgr,
                                     size_align_up(HANDLE_MGR_ALLOC));
    if (!new_handle)
        return NULL;

    memset(new_handle, 0, sizeof(struct shim_handle));
    REF_SET(new_handle->ref_count, 1);
    create_lock(new_handle->lock);
    new_handle->owner = cur_process.vmid;
    INIT_LISTP(&new_handle->epolls);
    return new_handle;
}

static int __set_new_fd_handle(struct shim_fd_handle ** fdhdl, FDTYPE fd,
                               struct shim_handle * hdl, int flags)
{
    struct shim_fd_handle * new_handle = *fdhdl;

    if (!new_handle) {
        new_handle = malloc(sizeof(struct shim_fd_handle));
        if (!new_handle)
            return -ENOMEM;
        *fdhdl = new_handle;
    }

    new_handle->vfd    = fd;
    new_handle->flags  = flags;
    open_handle(hdl);
    new_handle->handle = hdl;
    return 0;
}
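
/* Install hdl at an unused file descriptor found by scanning the map,
 * growing the map if necessary.  Returns the chosen fd, or a negative
 * error code on failure. */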
int set_new_fd_handle (struct shim_handle * hdl, int flags,
                       struct shim_handle_map * handle_map)
{
    FDTYPE fd = 0;
    int new_size = 0;
    int ret = 0;

    if (!handle_map && !(handle_map = get_cur_handle_map(NULL)))
        return -EBADF;

    lock(handle_map->lock);

    if (!handle_map->map ||
        handle_map->fd_size < INIT_HANDLE_MAP_SIZE)
        new_size = INIT_HANDLE_MAP_SIZE;

    if (!handle_map->map)
        goto extend;

    if (handle_map->fd_top != FD_NULL)
        do {
            ++fd;
            if (fd == handle_map->fd_size) {
                new_size = handle_map->fd_size < new_size ? new_size :
                           handle_map->fd_size * 2;
extend:
                if (!__enlarge_handle_map(handle_map, new_size)) {
                    ret = -ENOMEM;
                    goto out;
                }
            }
        } while (handle_map->fd_top != FD_NULL &&
                 fd <= handle_map->fd_top &&
                 HANDLE_ALLOCATED(handle_map->map[fd]));

    if (handle_map->fd_top == FD_NULL ||
        fd > handle_map->fd_top)
        handle_map->fd_top = fd;

    ret = __set_new_fd_handle(&handle_map->map[fd], fd, hdl, flags);

    if (ret < 0) {
        if (fd == handle_map->fd_top)
            handle_map->fd_top = fd ? fd - 1 : FD_NULL;
    } else
        ret = fd;
out:
    unlock(handle_map->lock);
    return ret;
}
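
/* Install hdl at a caller-chosen descriptor fd.  Fails with -EBADF if that
 * descriptor is already allocated; the map is doubled in size until it can
 * hold fd. */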
int set_new_fd_handle_by_fd (FDTYPE fd, struct shim_handle * hdl, int flags,
                             struct shim_handle_map * handle_map)
{
    int new_size = 0;
    int ret = 0;

    if (!handle_map && !(handle_map = get_cur_handle_map(NULL)))
        return -EBADF;

    lock(handle_map->lock);

    if (!handle_map->map ||
        handle_map->fd_size < INIT_HANDLE_MAP_SIZE)
        new_size = INIT_HANDLE_MAP_SIZE;

    if (!handle_map->map)
        goto extend;

    if (fd >= handle_map->fd_size) {
        new_size = handle_map->fd_size < new_size ? new_size :
                   handle_map->fd_size;
extend:
        while (new_size <= fd)
            new_size *= 2;

        if (!__enlarge_handle_map(handle_map, new_size)) {
            ret = -ENOMEM;
            goto out;
        }
    }

    if (handle_map->fd_top != FD_NULL &&
        fd <= handle_map->fd_top &&
        HANDLE_ALLOCATED(handle_map->map[fd])) {
        ret = -EBADF;
        goto out;
    }

    if (handle_map->fd_top == FD_NULL ||
        fd > handle_map->fd_top)
        handle_map->fd_top = fd;

    struct shim_fd_handle * new_handle = handle_map->map[fd];

    if (!new_handle) {
        new_handle = malloc(sizeof(struct shim_fd_handle));
        if (!new_handle) {
            ret = -ENOMEM;
            goto out;
        }
        handle_map->map[fd] = new_handle;
    }

    ret = __set_new_fd_handle(&handle_map->map[fd], fd, hdl, flags);
    if (ret < 0) {
        if (fd == handle_map->fd_top)
            handle_map->fd_top = fd ? fd - 1 : FD_NULL;
    } else
        ret = fd;
out:
    unlock(handle_map->lock);
    return ret;
}

void flush_handle (struct shim_handle * hdl)
{
    if (hdl->fs && hdl->fs->fs_ops &&
        hdl->fs->fs_ops->flush)
        hdl->fs->fs_ops->flush(hdl);
}

static inline __attribute__((unused))
const char * __handle_name (struct shim_handle * hdl)
{
    if (!qstrempty(&hdl->path))
        return qstrgetstr(&hdl->path);
    if (!qstrempty(&hdl->uri))
        return qstrgetstr(&hdl->uri);
    if (hdl->fs_type[0])
        return hdl->fs_type;
    return "(unknown)";
}
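
/* A handle carries two counters: ref_count tracks references to the handle
 * object itself (get_handle/put_handle), while opened tracks how many file
 * descriptors currently name it (open_handle/close_handle).  Closing the
 * last descriptor releases filesystem-level resources; dropping the last
 * reference frees the handle. */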
void open_handle (struct shim_handle * hdl)
{
    get_handle(hdl);

#ifdef DEBUG_REF
    int opened = REF_INC(hdl->opened);
    debug("open handle %p(%s) (opened = %d)\n", hdl, __handle_name(hdl),
          opened);
#else
    REF_INC(hdl->opened);
#endif
}

extern int delete_from_epoll_handles (struct shim_handle * handle);

void close_handle (struct shim_handle * hdl)
{
    int opened = REF_DEC(hdl->opened);

#ifdef DEBUG_REF
    debug("close handle %p(%s) (opened = %d)\n", hdl, __handle_name(hdl),
          opened);
#endif

    if (!opened) {
        if (hdl->type == TYPE_DIR) {
            struct shim_dir_handle * dir = &hdl->info.dir;

            if (dir->dot) {
                put_dentry(dir->dot);
                dir->dot = NULL;
            }

            if (dir->dotdot) {
                put_dentry(dir->dotdot);
                dir->dotdot = NULL;
            }

            if (dir->ptr != (void *) -1) {
                while (dir->ptr && *dir->ptr) {
                    struct shim_dentry * dent = *dir->ptr;
                    put_dentry(dent);
                    *(dir->ptr++) = NULL;
                }
            }
        } else {
            if (hdl->fs && hdl->fs->fs_ops &&
                hdl->fs->fs_ops->close)
                hdl->fs->fs_ops->close(hdl);
        }

        delete_from_epoll_handles(hdl);
    }

    put_handle(hdl);
}

void get_handle (struct shim_handle * hdl)
{
#ifdef DEBUG_REF
    int ref_count = REF_INC(hdl->ref_count);
    debug("get handle %p(%s) (ref_count = %d)\n", hdl, __handle_name(hdl),
          ref_count);
#else
    REF_INC(hdl->ref_count);
#endif
}

static void destroy_handle (struct shim_handle * hdl)
{
    destroy_lock(hdl->lock);

    if (MEMORY_MIGRATED(hdl))
        memset(hdl, 0, sizeof(struct shim_handle));
    else
        free_mem_obj_to_mgr(handle_mgr, hdl);
}

void put_handle (struct shim_handle * hdl)
{
    int ref_count = REF_DEC(hdl->ref_count);

#ifdef DEBUG_REF
    debug("put handle %p(%s) (ref_count = %d)\n", hdl, __handle_name(hdl),
          ref_count);
#endif

    if (!ref_count) {
        if (hdl->fs && hdl->fs->fs_ops &&
            hdl->fs->fs_ops->hput)
            hdl->fs->fs_ops->hput(hdl);

        qstrfree(&hdl->path);
        qstrfree(&hdl->uri);

        if (hdl->pal_handle)
            DkObjectClose(hdl->pal_handle);

        if (hdl->dentry)
            put_dentry(hdl->dentry);

        if (hdl->fs)
            put_mount(hdl->fs);

        destroy_handle(hdl);
    }
}

size_t get_file_size (struct shim_handle * hdl)
{
    if (!hdl->fs || !hdl->fs->fs_ops)
        return -EINVAL;

    if (hdl->fs->fs_ops->poll)
        return hdl->fs->fs_ops->poll(hdl, FS_POLL_SZ);

    if (hdl->fs->fs_ops->hstat) {
        struct stat stat;
        int ret = hdl->fs->fs_ops->hstat(hdl, &stat);
        if (ret < 0)
            return ret;
        return stat.st_size;
    }

    return 0;
}

void dup_fd_handle (struct shim_handle_map * map,
                    const struct shim_fd_handle * old,
                    struct shim_fd_handle * new)
{
    struct shim_handle * replaced = NULL;

    lock(map->lock);
    if (old->vfd != FD_NULL) {
        open_handle(old->handle);
        replaced = new->handle;
        new->handle = old->handle;
    }
    unlock(map->lock);

    if (replaced)
        close_handle(replaced);
}
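
/* Helpers for allocating and growing a handle map: get_new_handle_map()
 * creates an empty map of the given size, and __enlarge_handle_map()
 * replaces the fd array with a larger, zero-filled one while preserving
 * existing entries.  Its callers in this file hold map->lock while doing
 * so. */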
static struct shim_handle_map * get_new_handle_map (FDTYPE size)
{
    struct shim_handle_map * handle_map =
        malloc(sizeof(struct shim_handle_map));

    if (handle_map == NULL)
        return NULL;

    memset(handle_map, 0, sizeof(struct shim_handle_map));

    handle_map->map = malloc(sizeof(struct shim_fd_handle *) * size);

    if (handle_map->map == NULL) {
        free(handle_map);
        return NULL;
    }

    memset(handle_map->map, 0,
           sizeof(struct shim_fd_handle *) * size);

    handle_map->fd_top  = FD_NULL;
    handle_map->fd_size = size;
    create_lock(handle_map->lock);

    return handle_map;
}

static struct shim_handle_map * __enlarge_handle_map
                            (struct shim_handle_map * map, FDTYPE size)
{
    if (size <= map->fd_size)
        return NULL;

    struct shim_fd_handle ** old_map = map->map;

    map->map = malloc(sizeof(struct shim_fd_handle *) * size);
    if (map->map == NULL) {
        map->map = old_map;
        return NULL;
    }

    size_t copy_size = sizeof(struct shim_fd_handle *) * map->fd_size;
    map->fd_size = size;
    memset(map->map, 0, sizeof(struct shim_fd_handle *) * size);

    if (old_map) {
        if (copy_size)
            memcpy(map->map, old_map, copy_size);
        free(old_map);
    }

    return map;
}
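
/* Duplicate old_map into a freshly allocated handle map, taking an extra
 * open count on every copied handle so the copies stay alive as long as the
 * new map does. */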
int dup_handle_map (struct shim_handle_map ** new,
                    struct shim_handle_map * old_map)
{
    lock(old_map->lock);

    /* allocate a new handle mapping with the same size as the old one */
    struct shim_handle_map * new_map =
        get_new_handle_map(old_map->fd_size);

    if (!new_map) {
        unlock(old_map->lock);
        return -ENOMEM;
    }

    new_map->fd_top = old_map->fd_top;

    if (old_map->fd_top == FD_NULL)
        goto done;

    for (int i = 0 ; i <= old_map->fd_top ; i++) {
        struct shim_fd_handle * fd_old = old_map->map[i];
        struct shim_fd_handle * fd_new;

        /* now we go through the handle map and reassign each
           of them being allocated */
        if (HANDLE_ALLOCATED(fd_old)) {
            /* first, get the handle to prevent it from being deleted */
            struct shim_handle * hdl = fd_old->handle;
            open_handle(hdl);

            /* DP: I assume we really need a deep copy of the handle map? */
            fd_new = malloc(sizeof(struct shim_fd_handle));
            new_map->map[i] = fd_new;
            fd_new->vfd    = fd_old->vfd;
            fd_new->handle = hdl;
            fd_new->flags  = fd_old->flags;
        }
    }

done:
    unlock(old_map->lock);
    *new = new_map;
    return 0;
}

void get_handle_map (struct shim_handle_map * map)
{
    REF_INC(map->ref_count);
}

void put_handle_map (struct shim_handle_map * map)
{
    int ref_count = REF_DEC(map->ref_count);

    if (!ref_count) {
        if (map->fd_top == FD_NULL)
            goto done;

        for (int i = 0 ; i <= map->fd_top ; i++) {
            if (!map->map[i])
                continue;

            if (map->map[i]->vfd != FD_NULL) {
                struct shim_handle * handle = map->map[i]->handle;
                if (handle)
                    close_handle(handle);
            }

            free(map->map[i]);
        }
done:
        destroy_lock(map->lock);
        free(map->map);
        free(map);
    }
}

int flush_handle_map (struct shim_handle_map * map)
{
    get_handle_map(map);
    lock(map->lock);

    if (map->fd_top == FD_NULL)
        goto done;

    /* now we go through the handle map and flush each handle */
    for (int i = 0 ; i <= map->fd_top ; i++) {
        if (!HANDLE_ALLOCATED(map->map[i]))
            continue;

        struct shim_handle * handle = map->map[i]->handle;
        if (handle)
            flush_handle(handle);
    }

done:
    unlock(map->lock);
    put_handle_map(map);
    return 0;
}

int walk_handle_map (int (*callback) (struct shim_fd_handle *,
                                      struct shim_handle_map *, void *),
                     struct shim_handle_map * map, void * arg)
{
    int ret = 0;

    lock(map->lock);

    if (map->fd_top == FD_NULL)
        goto done;

    for (int i = 0 ; i <= map->fd_top ; i++) {
        if (!HANDLE_ALLOCATED(map->map[i]))
            continue;

        if ((ret = (*callback) (map->map[i], map, arg)) < 0)
            break;
    }

done:
    unlock(map->lock);
    return ret;
}
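
/* The checkpoint (BEGIN_CP_FUNC) and restore (BEGIN_RS_FUNC) routines below
 * serialize handles, fd entries and whole handle maps into a checkpoint
 * region, and rebuild them on the receiving side by rebasing pointers,
 * re-creating locks, and re-opening handles. */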
BEGIN_CP_FUNC(handle)
{
    assert(size == sizeof(struct shim_handle));

    struct shim_handle * hdl     = (struct shim_handle *) obj;
    struct shim_handle * new_hdl = NULL;

    ptr_t off = GET_FROM_CP_MAP(obj);

    if (!off) {
        off = ADD_CP_OFFSET(sizeof(struct shim_handle));
        ADD_TO_CP_MAP(obj, off);
        new_hdl = (struct shim_handle *) (base + off);

        lock(hdl->lock);
        struct shim_mount * fs = hdl->fs;
        *new_hdl = *hdl;

        if (fs && fs->fs_ops && fs->fs_ops->checkout)
            fs->fs_ops->checkout(new_hdl);

        new_hdl->dentry = NULL;
        REF_SET(new_hdl->opened, 0);
        REF_SET(new_hdl->ref_count, 0);
        clear_lock(new_hdl->lock);

        DO_CP_IN_MEMBER(qstr, new_hdl, path);
        DO_CP_IN_MEMBER(qstr, new_hdl, uri);

        if (fs && hdl->dentry) {
            DO_CP_MEMBER(mount, hdl, new_hdl, fs);
        } else {
            new_hdl->fs = NULL;
        }

        if (hdl->dentry)
            DO_CP_MEMBER(dentry, hdl, new_hdl, dentry);

        if (new_hdl->pal_handle) {
            struct shim_palhdl_entry * entry;
            DO_CP(palhdl, hdl->pal_handle, &entry);
            entry->uri     = &new_hdl->uri;
            entry->phandle = &new_hdl->pal_handle;
        }

        if (hdl->type == TYPE_EPOLL)
            DO_CP(epoll_fd, &hdl->info.epoll.fds, &new_hdl->info.epoll.fds);

        INIT_LISTP(&new_hdl->epolls);

        unlock(hdl->lock);
        ADD_CP_FUNC_ENTRY(off);
    } else {
        new_hdl = (struct shim_handle *) (base + off);
    }

    if (objp)
        *objp = (void *) new_hdl;
}
END_CP_FUNC(handle)

BEGIN_RS_FUNC(handle)
{
    struct shim_handle * hdl = (void *) (base + GET_CP_FUNC_ENTRY());

    CP_REBASE(hdl->fs);
    CP_REBASE(hdl->dentry);
    CP_REBASE(hdl->epolls);

    create_lock(hdl->lock);

    if (!hdl->fs) {
        assert(hdl->fs_type);
        search_builtin_fs(hdl->fs_type, &hdl->fs);
        if (!hdl->fs)
            return -EINVAL;
    }

    if (hdl->fs && hdl->fs->fs_ops &&
        hdl->fs->fs_ops->checkin)
        hdl->fs->fs_ops->checkin(hdl);

    DEBUG_RS("path=%s,type=%s,uri=%s,flags=%03o",
             qstrgetstr(&hdl->path), hdl->fs_type, qstrgetstr(&hdl->uri),
             hdl->flags);
}
END_RS_FUNC(handle)

BEGIN_CP_FUNC(fd_handle)
{
    assert(size == sizeof(struct shim_fd_handle));

    struct shim_fd_handle * fdhdl     = (struct shim_fd_handle *) obj;
    struct shim_fd_handle * new_fdhdl = NULL;

    ptr_t off = ADD_CP_OFFSET(sizeof(struct shim_fd_handle));
    new_fdhdl = (struct shim_fd_handle *) (base + off);
    memcpy(new_fdhdl, fdhdl, sizeof(struct shim_fd_handle));
    DO_CP(handle, fdhdl->handle, &new_fdhdl->handle);
    ADD_CP_FUNC_ENTRY(off);

    if (objp)
        *objp = (void *) new_fdhdl;
}
END_CP_FUNC_NO_RS(fd_handle)

BEGIN_CP_FUNC(handle_map)
{
    assert(size >= sizeof(struct shim_handle_map));

    struct shim_handle_map * handle_map     = (struct shim_handle_map *) obj;
    struct shim_handle_map * new_handle_map = NULL;
    struct shim_fd_handle ** ptr_array;

    lock(handle_map->lock);

    int fd_size = handle_map->fd_top != FD_NULL ?
                  handle_map->fd_top + 1 : 0;

    size = sizeof(struct shim_handle_map) +
           (sizeof(struct shim_fd_handle *) * fd_size);

    ptr_t off = GET_FROM_CP_MAP(obj);

    if (!off) {
        off = ADD_CP_OFFSET(size);
        new_handle_map = (struct shim_handle_map *) (base + off);

        memcpy(new_handle_map, handle_map,
               sizeof(struct shim_handle_map));

        ptr_array = (void *) new_handle_map + sizeof(struct shim_handle_map);

        new_handle_map->fd_size = fd_size;
        new_handle_map->map     = fd_size ? ptr_array : NULL;

        REF_SET(new_handle_map->ref_count, 0);
        clear_lock(new_handle_map->lock);

        for (int i = 0 ; i < fd_size ; i++) {
            if (HANDLE_ALLOCATED(handle_map->map[i]))
                DO_CP(fd_handle, handle_map->map[i], &ptr_array[i]);
            else
                ptr_array[i] = NULL;
        }

        ADD_CP_FUNC_ENTRY(off);
    } else {
        new_handle_map = (struct shim_handle_map *) (base + off);
    }

    unlock(handle_map->lock);

    if (objp)
        *objp = (void *) new_handle_map;
}
END_CP_FUNC(handle_map)

BEGIN_RS_FUNC(handle_map)
{
    struct shim_handle_map * handle_map = (void *) (base + GET_CP_FUNC_ENTRY());

    CP_REBASE(handle_map->map);
    assert(handle_map->map);

    DEBUG_RS("size=%d,top=%d", handle_map->fd_size, handle_map->fd_top);

    create_lock(handle_map->lock);
    lock(handle_map->lock);

    if (handle_map->fd_top != FD_NULL)
        for (int i = 0 ; i <= handle_map->fd_top ; i++) {
            CP_REBASE(handle_map->map[i]);
            if (HANDLE_ALLOCATED(handle_map->map[i])) {
                CP_REBASE(handle_map->map[i]->handle);
                struct shim_handle * hdl = handle_map->map[i]->handle;
                assert(hdl);
                open_handle(hdl);
                DEBUG_RS("[%d]%s", i, qstrempty(&hdl->uri) ? hdl->fs_type :
                         qstrgetstr(&hdl->uri));
            }
        }

    unlock(handle_map->lock);
}
END_RS_FUNC(handle_map)