/* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */
/* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */

/* Copyright (C) 2014 Stony Brook University
   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */

/*
 * shim_handle.c
 *
 * This file contains code to maintain bookkeeping of handles in the library OS.
 */

#include <shim_internal.h>
#include <shim_thread.h>
#include <shim_handle.h>
#include <shim_checkpoint.h>
#include <shim_fs.h>

#include <pal.h>
#include <pal_error.h>

static LOCKTYPE handle_mgr_lock;

#define HANDLE_MGR_ALLOC        32

#define system_lock()       lock(handle_mgr_lock)
#define system_unlock()     unlock(handle_mgr_lock)
#define PAGE_SIZE           allocsize

#define OBJ_TYPE struct shim_handle
#include <memmgr.h>

static MEM_MGR handle_mgr = NULL;

#define INIT_HANDLE_MAP_SIZE    32

//#define DEBUG_REF
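
/* Open /dev/tty through the current thread's root mount and attach it to
 * hdl; `write` selects the read or the write side of the terminal. */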
static inline int init_tty_handle (struct shim_handle * hdl, bool write)
{
    struct shim_dentry * dent = NULL;
    int ret;
    struct shim_thread * cur_thread = get_cur_thread();

    /* XXX: Try getting the root FS from current thread? */
    assert(cur_thread);
    assert(cur_thread->root);
    if ((ret = path_lookupat(cur_thread->root, "/dev/tty", LOOKUP_OPEN, &dent,
                             cur_thread->root->fs)) < 0)
        return ret;

    int flags = (write ? O_WRONLY : O_RDONLY) | O_APPEND;
    struct shim_mount * fs = dent->fs;

    ret = fs->d_ops->open(hdl, dent, flags);
    if (ret < 0)
        return ret;

    set_handle_fs(hdl, fs);
    hdl->dentry = dent;
    hdl->flags = O_RDWR | O_APPEND | 0100000;

    int size;
    char * path = dentry_get_path(dent, true, &size);
    if (path)
        qstrsetstr(&hdl->path, path, size);
    else
        qstrsetstr(&hdl->path, "/dev/tty", 8);

    return 0;
}
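
/* Create a read-only handle for the executable reported by the PAL and
 * store it as thread->exec. */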
static inline int init_exec_handle (struct shim_thread * thread)
{
    if (!PAL_CB(executable))
        return 0;

    struct shim_handle * exec = get_new_handle();
    if (!exec)
        return -ENOMEM;

    qstrsetstr(&exec->uri, PAL_CB(executable), strlen(PAL_CB(executable)));
    exec->type     = TYPE_FILE;
    exec->flags    = O_RDONLY;
    exec->acc_mode = MAY_READ;

    struct shim_mount * fs = find_mount_from_uri(PAL_CB(executable));
    if (fs) {
        path_lookupat(fs->root, PAL_CB(executable) + fs->uri.len, 0,
                      &exec->dentry, fs);
        set_handle_fs(exec, fs);
        if (exec->dentry) {
            int len;
            const char * path = dentry_get_path(exec->dentry, true, &len);
            qstrsetstr(&exec->path, path, len);
        }
        put_mount(fs);
    } else {
        set_handle_fs(exec, &chroot_builtin_fs);
    }

    lock(thread->lock);
    thread->exec = exec;
    unlock(thread->lock);

    return 0;
}

static struct shim_handle_map * get_new_handle_map (FDTYPE size);

PAL_HANDLE shim_stdio = NULL;

static int __set_new_fd_handle(struct shim_fd_handle ** fdhdl, FDTYPE fd,
                               struct shim_handle * hdl, int flags);

static struct shim_handle_map * __enlarge_handle_map
                            (struct shim_handle_map * map, FDTYPE size);

int init_handle (void)
{
    create_lock(handle_mgr_lock);
    handle_mgr = create_mem_mgr(init_align_up(HANDLE_MGR_ALLOC));
    if (!handle_mgr)
        return -ENOMEM;
    return 0;
}
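
/* Populate fds 0, 1 and 2 of the current handle map with tty handles, then
 * set up the executable handle for the current thread. */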
int init_important_handles (void)
{
    struct shim_thread * thread = get_cur_thread();

    if (thread->handle_map)
        goto done;

    struct shim_handle_map * handle_map = get_cur_handle_map(thread);

    if (!handle_map) {
        handle_map = get_new_handle_map(INIT_HANDLE_MAP_SIZE);
        if (!handle_map)
            return -ENOMEM;

        set_handle_map(thread, handle_map);
    }

    lock(handle_map->lock);

    if (handle_map->fd_size < 3) {
        if (!__enlarge_handle_map(handle_map, INIT_HANDLE_MAP_SIZE)) {
            unlock(handle_map->lock);
            return -ENOMEM;
        }
    }

    /* fd 0 gets a tty handle opened for reading; fd 1 and fd 2 share a tty
       handle opened for writing (init_tty_handle treats a nonzero fd as a
       request for the write side). */
    struct shim_handle * hdl = NULL;
    int ret;

    for (int fd = 0 ; fd < 3 ; fd++)
        if (!HANDLE_ALLOCATED(handle_map->map[fd])) {
            if (!hdl) {
                hdl = get_new_handle();
                if (!hdl) {
                    unlock(handle_map->lock);
                    return -ENOMEM;
                }
                if ((ret = init_tty_handle(hdl, fd)) < 0) {
                    put_handle(hdl);
                    unlock(handle_map->lock);
                    return ret;
                }
            } else {
                get_handle(hdl);
            }

            __set_new_fd_handle(&handle_map->map[fd], fd, hdl, 0);
            put_handle(hdl);
            if (fd != 1)
                hdl = NULL;
        } else {
            if (fd == 1)
                hdl = handle_map->map[fd]->handle;
        }

    if (handle_map->fd_top == FD_NULL || handle_map->fd_top < 2)
        handle_map->fd_top = 2;

    unlock(handle_map->lock);

done:
    init_exec_handle(thread);
    return 0;
}
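
/* Look up the handle installed at a file descriptor.  The __-prefixed
 * variant expects map->lock to be held; get_fd_handle also takes a
 * reference on the returned handle. */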
struct shim_handle * __get_fd_handle (FDTYPE fd, int * flags,
                                      struct shim_handle_map * map)
{
    struct shim_fd_handle * fd_handle = NULL;

    if (map->fd_top != FD_NULL &&
        fd <= map->fd_top) {
        fd_handle = map->map[fd];
        if (!HANDLE_ALLOCATED(fd_handle))
            return NULL;
        if (flags)
            *flags = fd_handle->flags;
        return fd_handle->handle;
    }
    return NULL;
}

struct shim_handle * get_fd_handle (FDTYPE fd, int * flags,
                                    struct shim_handle_map * map)
{
    if (!map)
        map = get_cur_handle_map(NULL);

    struct shim_handle * hdl = NULL;
    lock(map->lock);
    if ((hdl = __get_fd_handle(fd, flags, map)))
        get_handle(hdl);
    unlock(map->lock);
    return hdl;
}
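
/* Remove a handle from a file descriptor slot and return it; the caller
 * inherits the reference that the map previously held. */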
struct shim_handle *
__detach_fd_handle (struct shim_fd_handle * fd, int * flags,
                    struct shim_handle_map * map)
{
    struct shim_handle * handle = NULL;

    if (HANDLE_ALLOCATED(fd)) {
        int vfd = fd->vfd;
        handle = fd->handle;
        if (flags)
            *flags = fd->flags;

        fd->vfd    = FD_NULL;
        fd->handle = NULL;
        fd->flags  = 0;

        if (vfd == map->fd_top)
            do {
                map->fd_top = vfd ? vfd - 1 : FD_NULL;
                vfd--;
            } while (vfd >= 0 &&
                     !HANDLE_ALLOCATED(map->map[vfd]));
    }

    return handle;
}

struct shim_handle * detach_fd_handle (FDTYPE fd, int * flags,
                                       struct shim_handle_map * handle_map)
{
    struct shim_handle * handle = NULL;

    if (!handle_map && !(handle_map = get_cur_handle_map(NULL)))
        return NULL;

    lock(handle_map->lock);

    if (fd < handle_map->fd_size)
        handle = __detach_fd_handle(handle_map->map[fd], flags,
                                    handle_map);

    unlock(handle_map->lock);
    return handle;
}
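
/* Allocate a zeroed handle from the handle memory manager with an initial
 * reference count of 1. */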
struct shim_handle * get_new_handle (void)
{
    struct shim_handle * new_handle =
                get_mem_obj_from_mgr_enlarge(handle_mgr,
                                             size_align_up(HANDLE_MGR_ALLOC));
    if (!new_handle)
        return NULL;

    memset(new_handle, 0, sizeof(struct shim_handle));
    REF_SET(new_handle->ref_count, 1);
    create_lock(new_handle->lock);
    new_handle->owner = cur_process.vmid;
    INIT_LISTP(&new_handle->epolls);
    return new_handle;
}

static int __set_new_fd_handle(struct shim_fd_handle ** fdhdl, FDTYPE fd,
                               struct shim_handle * hdl, int flags)
{
    struct shim_fd_handle * new_handle = *fdhdl;

    if (!new_handle) {
        new_handle = malloc(sizeof(struct shim_fd_handle));
        if (!new_handle)
            return -ENOMEM;
        *fdhdl = new_handle;
    }

    new_handle->vfd   = fd;
    new_handle->flags = flags;
    open_handle(hdl);
    new_handle->handle = hdl;
    return 0;
}
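
/* Install a handle at the first available file descriptor, enlarging the
 * map as needed; returns the chosen fd or a negative error code. */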
int set_new_fd_handle (struct shim_handle * hdl, int flags,
                       struct shim_handle_map * handle_map)
{
    FDTYPE fd = 0;
    int new_size = 0;
    int ret = 0;

    if (!handle_map && !(handle_map = get_cur_handle_map(NULL)))
        return -EBADF;

    lock(handle_map->lock);

    if (!handle_map->map ||
        handle_map->fd_size < INIT_HANDLE_MAP_SIZE)
        new_size = INIT_HANDLE_MAP_SIZE;

    if (!handle_map->map)
        goto extend;

    if (handle_map->fd_top != FD_NULL)
        do {
            ++fd;
            if (fd == handle_map->fd_size) {
                new_size = handle_map->fd_size < new_size ? new_size :
                           handle_map->fd_size * 2;
extend:
                if (!__enlarge_handle_map(handle_map, new_size)) {
                    ret = -ENOMEM;
                    goto out;
                }
            }
        } while (handle_map->fd_top != FD_NULL &&
                 fd <= handle_map->fd_top &&
                 HANDLE_ALLOCATED(handle_map->map[fd]));

    if (handle_map->fd_top == FD_NULL ||
        fd > handle_map->fd_top)
        handle_map->fd_top = fd;

    ret = __set_new_fd_handle(&handle_map->map[fd], fd, hdl, flags);

    if (ret < 0) {
        if (fd == handle_map->fd_top)
            handle_map->fd_top = fd ? fd - 1 : FD_NULL;
    } else
        ret = fd;
out:
    unlock(handle_map->lock);
    return ret;
}
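
/* Install a handle at a specific file descriptor; fails with -EBADF if the
 * slot is already in use. */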
int set_new_fd_handle_by_fd (FDTYPE fd, struct shim_handle * hdl, int flags,
                             struct shim_handle_map * handle_map)
{
    int new_size = 0;
    int ret = 0;

    if (!handle_map && !(handle_map = get_cur_handle_map(NULL)))
        return -EBADF;

    lock(handle_map->lock);

    if (!handle_map->map ||
        handle_map->fd_size < INIT_HANDLE_MAP_SIZE)
        new_size = INIT_HANDLE_MAP_SIZE;

    if (!handle_map->map)
        goto extend;

    if (fd >= handle_map->fd_size) {
        new_size = handle_map->fd_size < new_size ? new_size :
                   handle_map->fd_size;
extend:
        while (new_size <= fd)
            new_size *= 2;

        if (!__enlarge_handle_map(handle_map, new_size)) {
            ret = -ENOMEM;
            goto out;
        }
    }

    if (handle_map->fd_top != FD_NULL &&
        fd <= handle_map->fd_top &&
        HANDLE_ALLOCATED(handle_map->map[fd])) {
        ret = -EBADF;
        goto out;
    }

    if (handle_map->fd_top == FD_NULL ||
        fd > handle_map->fd_top)
        handle_map->fd_top = fd;

    struct shim_fd_handle * new_handle = handle_map->map[fd];

    if (!new_handle) {
        new_handle = malloc(sizeof(struct shim_fd_handle));
        if (!new_handle) {
            ret = -ENOMEM;
            goto out;
        }
        handle_map->map[fd] = new_handle;
    }

    ret = __set_new_fd_handle(&handle_map->map[fd], fd, hdl, flags);
    if (ret < 0) {
        if (fd == handle_map->fd_top)
            handle_map->fd_top = fd ? fd - 1 : FD_NULL;
    } else
        ret = fd;
out:
    unlock(handle_map->lock);
    return ret;
}

void flush_handle (struct shim_handle * hdl)
{
    if (hdl->fs && hdl->fs->fs_ops &&
        hdl->fs->fs_ops->flush)
        hdl->fs->fs_ops->flush(hdl);
}

static inline __attribute__((unused))
const char * __handle_name (struct shim_handle * hdl)
{
    if (!qstrempty(&hdl->path))
        return qstrgetstr(&hdl->path);
    if (!qstrempty(&hdl->uri))
        return qstrgetstr(&hdl->uri);
    if (hdl->fs_type[0])
        return hdl->fs_type;
    return "(unknown)";
}
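
/* A handle carries two counters: `opened` tracks open file descriptors
 * referring to it, and `ref_count` tracks all in-memory references.
 * open_handle/close_handle manage the former; get_handle/put_handle manage
 * the latter. */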
void open_handle (struct shim_handle * hdl)
{
    get_handle(hdl);

#ifdef DEBUG_REF
    int opened = REF_INC(hdl->opened);
    debug("open handle %p(%s) (opened = %d)\n", hdl, __handle_name(hdl),
          opened);
#else
    REF_INC(hdl->opened);
#endif
}

extern int delete_from_epoll_handles (struct shim_handle * handle);

void close_handle (struct shim_handle * hdl)
{
    int opened = REF_DEC(hdl->opened);

#ifdef DEBUG_REF
    debug("close handle %p(%s) (opened = %d)\n", hdl, __handle_name(hdl),
          opened);
#endif

    if (!opened) {
        if (hdl->type == TYPE_DIR) {
            struct shim_dir_handle * dir = &hdl->info.dir;

            if (dir->dot) {
                put_dentry(dir->dot);
                dir->dot = NULL;
            }

            if (dir->dotdot) {
                put_dentry(dir->dotdot);
                dir->dotdot = NULL;
            }

            if (dir->ptr != (void *) -1) {
                while (dir->ptr && *dir->ptr) {
                    struct shim_dentry * dent = *dir->ptr;
                    put_dentry(dent);
                    *(dir->ptr++) = NULL;
                }
            }
        } else {
            if (hdl->fs && hdl->fs->fs_ops &&
                hdl->fs->fs_ops->close)
                hdl->fs->fs_ops->close(hdl);
        }

        delete_from_epoll_handles(hdl);
    }

    put_handle(hdl);
}

void get_handle (struct shim_handle * hdl)
{
#ifdef DEBUG_REF
    int ref_count = REF_INC(hdl->ref_count);
    debug("get handle %p(%s) (ref_count = %d)\n", hdl, __handle_name(hdl),
          ref_count);
#else
    REF_INC(hdl->ref_count);
#endif
}

static void destroy_handle (struct shim_handle * hdl)
{
    destroy_lock(hdl->lock);

    if (MEMORY_MIGRATED(hdl))
        memset(hdl, 0, sizeof(struct shim_handle));
    else
        free_mem_obj_to_mgr(handle_mgr, hdl);
}
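
/* Drop a reference; when the count reaches zero, release the path, URI,
 * PAL handle, dentry and mount, and return the handle to the manager. */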
void put_handle (struct shim_handle * hdl)
{
    int ref_count = REF_DEC(hdl->ref_count);

#ifdef DEBUG_REF
    debug("put handle %p(%s) (ref_count = %d)\n", hdl, __handle_name(hdl),
          ref_count);
#endif

    if (!ref_count) {
        if (hdl->fs && hdl->fs->fs_ops &&
            hdl->fs->fs_ops->hput)
            hdl->fs->fs_ops->hput(hdl);

        qstrfree(&hdl->path);
        qstrfree(&hdl->uri);

        if (hdl->pal_handle) {
#ifdef DEBUG_REF
            debug("handle %p closes PAL handle %p\n", hdl, hdl->pal_handle);
#endif
            DkObjectClose(hdl->pal_handle);
            hdl->pal_handle = NULL;
        }

        if (hdl->dentry)
            put_dentry(hdl->dentry);

        if (hdl->fs)
            put_mount(hdl->fs);

        destroy_handle(hdl);
    }
}

size_t get_file_size (struct shim_handle * hdl)
{
    if (!hdl->fs || !hdl->fs->fs_ops)
        return -EINVAL;

    if (hdl->fs->fs_ops->poll)
        return hdl->fs->fs_ops->poll(hdl, FS_POLL_SZ);

    if (hdl->fs->fs_ops->hstat) {
        struct stat stat;
        int ret = hdl->fs->fs_ops->hstat(hdl, &stat);
        if (ret < 0)
            return ret;
        return stat.st_size;
    }

    return 0;
}

void dup_fd_handle (struct shim_handle_map * map,
                    const struct shim_fd_handle * old,
                    struct shim_fd_handle * new)
{
    struct shim_handle * replaced = NULL;

    lock(map->lock);
    if (old->vfd != FD_NULL) {
        open_handle(old->handle);
        replaced = new->handle;
        new->handle = old->handle;
    }
    unlock(map->lock);

    if (replaced)
        close_handle(replaced);
}
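
/* Allocate an empty handle map with `size` descriptor slots. */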
static struct shim_handle_map * get_new_handle_map (FDTYPE size)
{
    struct shim_handle_map * handle_map =
                    calloc(1, sizeof(struct shim_handle_map));

    if (!handle_map)
        return NULL;

    handle_map->map = calloc(size, sizeof(struct shim_fd_handle));

    if (!handle_map->map) {
        free(handle_map);
        return NULL;
    }

    handle_map->fd_top  = FD_NULL;
    handle_map->fd_size = size;
    create_lock(handle_map->lock);

    return handle_map;
}
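
/* Grow the descriptor array of a handle map to at least `size` slots; the
 * caller is expected to hold map->lock. */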
static struct shim_handle_map * __enlarge_handle_map
                (struct shim_handle_map * map, FDTYPE size)
{
    if (size <= map->fd_size)
        return map;

    struct shim_fd_handle ** new_map = calloc(size, sizeof(new_map[0]));

    if (!new_map)
        return NULL;

    memcpy(new_map, map->map, map->fd_size * sizeof(new_map[0]));
    memset(new_map + map->fd_size, 0,
           (size - map->fd_size) * sizeof(new_map[0]));
    free(map->map);
    map->map = new_map;
    map->fd_size = size;
    return map;
}
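
/* Duplicate a handle map for a new thread or process: every allocated
 * descriptor slot is copied and an extra open reference is taken on its
 * handle. */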
int dup_handle_map (struct shim_handle_map ** new,
                    struct shim_handle_map * old_map)
{
    lock(old_map->lock);

    /* allocate a new handle mapping with the same size as the old one */
    struct shim_handle_map * new_map =
                    get_new_handle_map(old_map->fd_size);

    if (!new_map) {
        unlock(old_map->lock);
        return -ENOMEM;
    }

    new_map->fd_top = old_map->fd_top;

    if (old_map->fd_top == FD_NULL)
        goto done;

    for (int i = 0 ; i <= old_map->fd_top ; i++) {
        struct shim_fd_handle * fd_old = old_map->map[i];
        struct shim_fd_handle * fd_new;

        /* now we go through the handle map and reassign each
           of them being allocated */
        if (HANDLE_ALLOCATED(fd_old)) {
            /* first, get the handle to prevent it from being deleted */
            struct shim_handle * hdl = fd_old->handle;
            open_handle(hdl);

            fd_new = malloc(sizeof(struct shim_fd_handle));
            if (!fd_new) {
                /* roll back the reference just taken and the entries
                   already duplicated */
                close_handle(hdl);
                for (int j = 0 ; j < i ; j++)
                    if (new_map->map[j]) {
                        close_handle(new_map->map[j]->handle);
                        free(new_map->map[j]);
                    }
                unlock(old_map->lock);
                *new = NULL;
                return -ENOMEM;
            }

            /* DP: I assume we really need a deep copy of the handle map? */
            new_map->map[i] = fd_new;
            fd_new->vfd    = fd_old->vfd;
            fd_new->handle = hdl;
            fd_new->flags  = fd_old->flags;
        }
    }

done:
    unlock(old_map->lock);
    *new = new_map;
    return 0;
}

void get_handle_map (struct shim_handle_map * map)
{
    REF_INC(map->ref_count);
}

void put_handle_map (struct shim_handle_map * map)
{
    int ref_count = REF_DEC(map->ref_count);

    if (!ref_count) {
        if (map->fd_top == FD_NULL)
            goto done;

        for (int i = 0 ; i <= map->fd_top ; i++) {
            if (!map->map[i])
                continue;

            if (map->map[i]->vfd != FD_NULL) {
                struct shim_handle * handle = map->map[i]->handle;
                if (handle)
                    close_handle(handle);
            }

            free(map->map[i]);
        }
done:
        destroy_lock(map->lock);
        free(map->map);
        free(map);
    }
}

int flush_handle_map (struct shim_handle_map * map)
{
    get_handle_map(map);
    lock(map->lock);

    if (map->fd_top == FD_NULL)
        goto done;

    /* now we go through the handle map and flush each handle */
    for (int i = 0 ; i <= map->fd_top ; i++) {
        if (!HANDLE_ALLOCATED(map->map[i]))
            continue;

        struct shim_handle * handle = map->map[i]->handle;
        if (handle)
            flush_handle(handle);
    }
done:
    unlock(map->lock);
    put_handle_map(map);
    return 0;
}

int walk_handle_map (int (*callback) (struct shim_fd_handle *,
                                      struct shim_handle_map *, void *),
                     struct shim_handle_map * map, void * arg)
{
    int ret = 0;

    lock(map->lock);

    if (map->fd_top == FD_NULL)
        goto done;

    for (int i = 0 ; i <= map->fd_top ; i++) {
        if (!HANDLE_ALLOCATED(map->map[i]))
            continue;

        if ((ret = (*callback) (map->map[i], map, arg)) < 0)
            break;
    }
done:
    unlock(map->lock);
    return ret;
}
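
/* Checkpoint support: serialize a handle into the checkpoint area, skipping
 * per-process state such as locks and reference counts. */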
BEGIN_CP_FUNC(handle)
{
    assert(size == sizeof(struct shim_handle));

    struct shim_handle * hdl = (struct shim_handle *) obj;
    struct shim_handle * new_hdl = NULL;

    ptr_t off = GET_FROM_CP_MAP(obj);

    if (!off) {
        off = ADD_CP_OFFSET(sizeof(struct shim_handle));
        ADD_TO_CP_MAP(obj, off);
        new_hdl = (struct shim_handle *) (base + off);

        lock(hdl->lock);
        struct shim_mount * fs = hdl->fs;
        *new_hdl = *hdl;

        if (fs && fs->fs_ops && fs->fs_ops->checkout)
            fs->fs_ops->checkout(new_hdl);

        new_hdl->dentry = NULL;
        REF_SET(new_hdl->opened, 0);
        REF_SET(new_hdl->ref_count, 0);
        clear_lock(new_hdl->lock);

        DO_CP_IN_MEMBER(qstr, new_hdl, path);
        DO_CP_IN_MEMBER(qstr, new_hdl, uri);

        if (fs && hdl->dentry) {
            DO_CP_MEMBER(mount, hdl, new_hdl, fs);
        } else {
            new_hdl->fs = NULL;
        }

        if (hdl->dentry)
            DO_CP_MEMBER(dentry, hdl, new_hdl, dentry);

        if (new_hdl->pal_handle) {
            struct shim_palhdl_entry * entry;
            DO_CP(palhdl, hdl->pal_handle, &entry);
            entry->uri = &new_hdl->uri;
            entry->phandle = &new_hdl->pal_handle;
        }

        if (hdl->type == TYPE_EPOLL)
            DO_CP(epoll_fd, &hdl->info.epoll.fds, &new_hdl->info.epoll.fds);

        INIT_LISTP(&new_hdl->epolls);

        unlock(hdl->lock);
        ADD_CP_FUNC_ENTRY(off);
    } else {
        new_hdl = (struct shim_handle *) (base + off);
    }

    if (objp)
        *objp = (void *) new_hdl;
}
END_CP_FUNC(handle)
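
/* Restore a handle from a checkpoint: rebase its pointers, recreate the
 * lock, and let the underlying filesystem check the handle back in. */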
BEGIN_RS_FUNC(handle)
{
    struct shim_handle * hdl = (void *) (base + GET_CP_FUNC_ENTRY());

    CP_REBASE(hdl->fs);
    CP_REBASE(hdl->dentry);
    CP_REBASE(hdl->epolls);

    create_lock(hdl->lock);

    if (!hdl->fs) {
        assert(hdl->fs_type);
        search_builtin_fs(hdl->fs_type, &hdl->fs);
        if (!hdl->fs)
            return -EINVAL;
    }

    if (hdl->fs && hdl->fs->fs_ops &&
        hdl->fs->fs_ops->checkin)
        hdl->fs->fs_ops->checkin(hdl);

    DEBUG_RS("path=%s,type=%s,uri=%s,flags=%03o",
             qstrgetstr(&hdl->path), hdl->fs_type, qstrgetstr(&hdl->uri),
             hdl->flags);
}
END_RS_FUNC(handle)

BEGIN_CP_FUNC(fd_handle)
{
    assert(size == sizeof(struct shim_fd_handle));

    struct shim_fd_handle * fdhdl = (struct shim_fd_handle *) obj;
    struct shim_fd_handle * new_fdhdl = NULL;

    ptr_t off = ADD_CP_OFFSET(sizeof(struct shim_fd_handle));
    new_fdhdl = (struct shim_fd_handle *) (base + off);
    memcpy(new_fdhdl, fdhdl, sizeof(struct shim_fd_handle));
    DO_CP(handle, fdhdl->handle, &new_fdhdl->handle);
    ADD_CP_FUNC_ENTRY(off);

    if (objp)
        *objp = (void *) new_fdhdl;
}
END_CP_FUNC_NO_RS(fd_handle)
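
/* Checkpoint a handle map together with every allocated fd slot up to
 * fd_top. */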
BEGIN_CP_FUNC(handle_map)
{
    assert(size >= sizeof(struct shim_handle_map));

    struct shim_handle_map * handle_map = (struct shim_handle_map *) obj;
    struct shim_handle_map * new_handle_map = NULL;
    struct shim_fd_handle ** ptr_array;

    lock(handle_map->lock);

    int fd_size = handle_map->fd_top != FD_NULL ?
                  handle_map->fd_top + 1 : 0;

    size = sizeof(struct shim_handle_map) +
           (sizeof(struct shim_fd_handle *) * fd_size);

    ptr_t off = GET_FROM_CP_MAP(obj);

    if (!off) {
        off = ADD_CP_OFFSET(size);
        new_handle_map = (struct shim_handle_map *) (base + off);

        memcpy(new_handle_map, handle_map,
               sizeof(struct shim_handle_map));

        ptr_array = (void *) new_handle_map + sizeof(struct shim_handle_map);

        new_handle_map->fd_size = fd_size;
        new_handle_map->map = fd_size ? ptr_array : NULL;

        REF_SET(new_handle_map->ref_count, 0);
        clear_lock(new_handle_map->lock);

        for (int i = 0 ; i < fd_size ; i++) {
            if (HANDLE_ALLOCATED(handle_map->map[i]))
                DO_CP(fd_handle, handle_map->map[i], &ptr_array[i]);
            else
                ptr_array[i] = NULL;
        }

        ADD_CP_FUNC_ENTRY(off);
    } else {
        new_handle_map = (struct shim_handle_map *) (base + off);
    }

    unlock(handle_map->lock);

    if (objp)
        *objp = (void *) new_handle_map;
}
END_CP_FUNC(handle_map)

BEGIN_RS_FUNC(handle_map)
{
    struct shim_handle_map * handle_map = (void *) (base + GET_CP_FUNC_ENTRY());

    CP_REBASE(handle_map->map);
    assert(handle_map->map);

    DEBUG_RS("size=%d,top=%d", handle_map->fd_size, handle_map->fd_top);

    create_lock(handle_map->lock);
    lock(handle_map->lock);

    if (handle_map->fd_top != FD_NULL)
        for (int i = 0 ; i <= handle_map->fd_top ; i++) {
            CP_REBASE(handle_map->map[i]);
            if (HANDLE_ALLOCATED(handle_map->map[i])) {
                CP_REBASE(handle_map->map[i]->handle);
                struct shim_handle * hdl = handle_map->map[i]->handle;
                assert(hdl);
                open_handle(hdl);
                DEBUG_RS("[%d]%s", i, qstrempty(&hdl->uri) ? hdl->fs_type :
                         qstrgetstr(&hdl->uri));
            }
        }

    unlock(handle_map->lock);
}
END_RS_FUNC(handle_map)