/* shim_handle.c */
  1. /* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */
  2. /* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */
  3. /* Copyright (C) 2014 OSCAR lab, Stony Brook University
  4. This file is part of Graphene Library OS.
  5. Graphene Library OS is free software: you can redistribute it and/or
  6. modify it under the terms of the GNU General Public License
  7. as published by the Free Software Foundation, either version 3 of the
  8. License, or (at your option) any later version.
  9. Graphene Library OS is distributed in the hope that it will be useful,
  10. but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. GNU General Public License for more details.
  13. You should have received a copy of the GNU General Public License
  14. along with this program. If not, see <http://www.gnu.org/licenses/>. */
  15. /*
  16. * shim_handle.c
  17. *
  18. * This file contains codes to maintain bookkeeping for handles in library OS.
  19. */
  20. #include <shim_internal.h>
  21. #include <shim_thread.h>
  22. #include <shim_handle.h>
  23. #include <shim_checkpoint.h>
  24. #include <shim_fs.h>
  25. #include <pal.h>
  26. #include <pal_error.h>
  27. #include <fcntl.h>
/* Lock protecting the handle memory manager; memmgr.h calls the
 * system_lock()/system_unlock() macros for its internal locking. */
static LOCKTYPE handle_mgr_lock;

#define HANDLE_MGR_ALLOC 32      /* handles per memmgr enlargement step */

#define system_lock()   lock(handle_mgr_lock)
#define system_unlock() unlock(handle_mgr_lock)

/* memmgr.h is an inclusion template: PAGE_SIZE and OBJ_TYPE must be
 * defined first so it generates an allocator for struct shim_handle. */
#define PAGE_SIZE allocsize
#define OBJ_TYPE struct shim_handle
#include <memmgr.h>

/* Global allocator for all shim_handle objects; set up in init_handle(). */
static MEM_MGR handle_mgr = NULL;

#define INIT_HANDLE_MAP_SIZE 32  /* initial fd capacity of a new handle map */

//#define DEBUG_REF
  38. static inline int init_tty_handle (struct shim_handle * hdl, bool write)
  39. {
  40. struct shim_dentry * dent = NULL;
  41. int ret;
  42. if ((ret = path_lookupat(NULL, "/dev/tty", LOOKUP_OPEN, &dent)) < 0)
  43. return ret;
  44. int flags = (write ? O_WRONLY : O_RDONLY)|O_APPEND;
  45. struct shim_mount * fs = dent->fs;
  46. ret = fs->d_ops->open(hdl, dent, flags);
  47. if (ret < 0)
  48. return ret;
  49. set_handle_fs(hdl, fs);
  50. hdl->dentry = dent;
  51. hdl->flags = O_RDWR|O_APPEND|0100000;
  52. int size;
  53. char * path = dentry_get_path(dent, true, &size);
  54. if (path)
  55. qstrsetstr(&hdl->path, path, size);
  56. else
  57. qstrsetstr(&hdl->path, "/dev/tty", 8);
  58. return 0;
  59. }
  60. static inline int init_exec_handle (struct shim_thread * thread)
  61. {
  62. if (!PAL_CB(executable))
  63. return 0;
  64. struct shim_handle * exec = get_new_handle();
  65. if (!exec)
  66. return -ENOMEM;
  67. set_handle_fs(exec, &chroot_builtin_fs);
  68. qstrsetstr(&exec->uri, PAL_CB(executable), strlen(PAL_CB(executable)));
  69. exec->type = TYPE_FILE;
  70. exec->flags = O_RDONLY;
  71. exec->acc_mode = MAY_READ;
  72. lock(thread->lock);
  73. thread->exec = exec;
  74. unlock(thread->lock);
  75. return 0;
  76. }
/* Forward declarations for helpers defined later in this file. */
static struct shim_handle_map * get_new_handle_map (FDTYPE size);

/* NOTE(review): not referenced in this file; presumably used elsewhere. */
PAL_HANDLE shim_stdio = NULL;

static int __set_new_fd_handle(struct shim_fd_handle ** fdhdl, FDTYPE fd,
                               struct shim_handle * hdl, int flags);

static struct shim_handle_map * __enlarge_handle_map
                                    (struct shim_handle_map * map, FDTYPE size);
  83. int init_handle (void)
  84. {
  85. create_lock(handle_mgr_lock);
  86. handle_mgr = create_mem_mgr(init_align_up(HANDLE_MGR_ALLOC));
  87. if (!handle_mgr)
  88. return -ENOMEM;
  89. return 0;
  90. }
  91. int init_important_handles (void)
  92. {
  93. struct shim_thread * thread = get_cur_thread();
  94. if (thread->handle_map)
  95. goto done;
  96. struct shim_handle_map * handle_map = get_cur_handle_map(thread);
  97. if (!handle_map) {
  98. handle_map = get_new_handle_map(INIT_HANDLE_MAP_SIZE);
  99. if (!handle_map)
  100. return -ENOMEM;
  101. set_handle_map(thread, handle_map);
  102. }
  103. lock(handle_map->lock);
  104. if (handle_map->fd_size < 3) {
  105. if (!__enlarge_handle_map(handle_map, INIT_HANDLE_MAP_SIZE)) {
  106. unlock(handle_map->lock);
  107. return -ENOMEM;
  108. }
  109. }
  110. struct shim_handle * hdl = NULL;
  111. int ret;
  112. for (int fd = 0 ; fd < 3 ; fd++)
  113. if (!HANDLE_ALLOCATED(handle_map->map[fd])) {
  114. if (!hdl) {
  115. hdl = get_new_handle();
  116. if (!hdl)
  117. return -ENOMEM;
  118. if ((ret = init_tty_handle(hdl, fd)) < 0) {
  119. put_handle(hdl);
  120. return ret;
  121. }
  122. } else {
  123. get_handle(hdl);
  124. }
  125. __set_new_fd_handle(&handle_map->map[fd], fd, hdl, 0);
  126. put_handle(hdl);
  127. if (fd != 1)
  128. hdl = NULL;
  129. } else {
  130. if (fd == 1)
  131. hdl = handle_map->map[fd]->handle;
  132. }
  133. if (handle_map->fd_top == FD_NULL || handle_map->fd_top < 2)
  134. handle_map->fd_top = 2;
  135. unlock(handle_map->lock);
  136. done:
  137. init_exec_handle(thread);
  138. return 0;
  139. }
  140. struct shim_handle * __get_fd_handle (FDTYPE fd, int * flags,
  141. struct shim_handle_map * map)
  142. {
  143. struct shim_fd_handle * fd_handle = NULL;
  144. if (map->fd_top != FD_NULL &&
  145. fd <= map->fd_top) {
  146. fd_handle = map->map[fd];
  147. if (!HANDLE_ALLOCATED(fd_handle))
  148. return NULL;
  149. if (flags)
  150. *flags = fd_handle->flags;
  151. return fd_handle->handle;
  152. }
  153. return NULL;
  154. }
  155. struct shim_handle * get_fd_handle (FDTYPE fd, int * flags,
  156. struct shim_handle_map * map)
  157. {
  158. if (!map)
  159. map = get_cur_handle_map(NULL);
  160. struct shim_handle * hdl = NULL;
  161. lock(map->lock);
  162. if ((hdl = __get_fd_handle(fd, flags, map)))
  163. get_handle(hdl);
  164. unlock(map->lock);
  165. return hdl;
  166. }
/*
 * Clear FD's slot in MAP and return the handle that occupied it (NULL if
 * the slot was not allocated).  References are NOT dropped here -- the
 * caller owns the detached handle.  Caller must hold map->lock.
 */
struct shim_handle *
__detach_fd_handle (struct shim_fd_handle * fd, int * flags,
                    struct shim_handle_map * map)
{
    struct shim_handle * handle = NULL;

    if (HANDLE_ALLOCATED(fd)) {
        int vfd = fd->vfd;
        handle = fd->handle;
        if (flags)
            *flags = fd->flags;

        fd->vfd    = FD_NULL;
        fd->handle = NULL;
        fd->flags  = 0;

        /* If we removed the highest fd, walk fd_top downward past any
         * unallocated slots (it becomes FD_NULL when the map empties). */
        if (vfd == map->fd_top)
            do {
                map->fd_top = vfd ? vfd - 1 : FD_NULL;
                vfd--;
            } while (vfd >= 0 &&
                     !HANDLE_ALLOCATED(map->map[vfd]));
    }

    return handle;
}
  189. struct shim_handle * detach_fd_handle (FDTYPE fd, int * flags,
  190. struct shim_handle_map * handle_map)
  191. {
  192. struct shim_handle * handle = NULL;
  193. if (!handle_map && !(handle_map = get_cur_handle_map(NULL)))
  194. return NULL;
  195. lock(handle_map->lock);
  196. if (fd < handle_map->fd_size)
  197. handle = __detach_fd_handle(handle_map->map[fd], flags,
  198. handle_map);
  199. unlock(handle_map->lock);
  200. return handle;
  201. }
  202. struct shim_handle * get_new_handle (void)
  203. {
  204. struct shim_handle * new_handle =
  205. get_mem_obj_from_mgr_enlarge(handle_mgr,
  206. size_align_up(HANDLE_MGR_ALLOC));
  207. if (!new_handle)
  208. return NULL;
  209. memset(new_handle, 0, sizeof(struct shim_handle));
  210. REF_SET(new_handle->ref_count, 1);
  211. create_lock(new_handle->lock);
  212. new_handle->owner = cur_process.vmid;
  213. return new_handle;
  214. }
  215. static int __set_new_fd_handle(struct shim_fd_handle ** fdhdl, FDTYPE fd,
  216. struct shim_handle * hdl, int flags)
  217. {
  218. struct shim_fd_handle * new_handle = *fdhdl;
  219. if (!new_handle) {
  220. new_handle = malloc(sizeof(struct shim_fd_handle));
  221. if (!new_handle)
  222. return -ENOMEM;
  223. *fdhdl = new_handle;
  224. }
  225. new_handle->vfd = fd;
  226. new_handle->flags = flags;
  227. open_handle(hdl);
  228. new_handle->handle = hdl;
  229. return 0;
  230. }
/*
 * Install HDL at an available fd in HANDLE_MAP (or the current thread's
 * map when NULL), growing the table as needed.
 * Returns the chosen fd on success or a negative errno value.
 *
 * NOTE(review): the `extend:` label is entered from OUTSIDE the do/while
 * when the map has no array yet, dropping execution into the middle of
 * the loop body.  Fragile but intentional -- preserve this control flow.
 * NOTE(review): the scan pre-increments fd, so when fd_top != FD_NULL
 * slot 0 is never considered even if free -- confirm this is intended.
 */
int set_new_fd_handle (struct shim_handle * hdl, int flags,
                       struct shim_handle_map * handle_map)
{
    FDTYPE fd = 0;
    int new_size = 0;
    int ret = 0;

    if (!handle_map && !(handle_map = get_cur_handle_map(NULL)))
        return -EBADF;

    lock(handle_map->lock);

    if (!handle_map->map ||
        handle_map->fd_size < INIT_HANDLE_MAP_SIZE)
        new_size = INIT_HANDLE_MAP_SIZE;

    if (!handle_map->map)
        goto extend;   /* no fd array yet: allocate before scanning */

    /* scan upward for the first unallocated slot at or below fd_top */
    if (handle_map->fd_top != FD_NULL)
        do {
            ++fd;
            if (fd == handle_map->fd_size) {
                /* ran off the end of the table: double its size */
                new_size = handle_map->fd_size < new_size ? new_size :
                           handle_map->fd_size * 2;
extend:
                if (!__enlarge_handle_map(handle_map, new_size)) {
                    ret = -ENOMEM;
                    goto out;
                }
            }
        } while (handle_map->fd_top != FD_NULL &&
                 fd <= handle_map->fd_top &&
                 HANDLE_ALLOCATED(handle_map->map[fd]));

    if (handle_map->fd_top == FD_NULL ||
        fd > handle_map->fd_top)
        handle_map->fd_top = fd;

    ret = __set_new_fd_handle(&handle_map->map[fd], fd, hdl, flags);

    if (ret < 0) {
        /* roll back fd_top if we claimed it just above */
        if (fd == handle_map->fd_top)
            handle_map->fd_top = fd ? fd - 1 : FD_NULL;
    } else
        ret = fd;

out:
    unlock(handle_map->lock);
    return ret;
}
  273. int set_new_fd_handle_by_fd (FDTYPE fd, struct shim_handle * hdl, int flags,
  274. struct shim_handle_map * handle_map)
  275. {
  276. int new_size = 0;
  277. int ret = 0;
  278. if (!handle_map && !(handle_map = get_cur_handle_map(NULL)))
  279. return -EBADF;
  280. lock(handle_map->lock);
  281. if (!handle_map->map ||
  282. handle_map->fd_size < INIT_HANDLE_MAP_SIZE)
  283. new_size = INIT_HANDLE_MAP_SIZE;
  284. if (!handle_map->map)
  285. goto extend;
  286. if (fd >= handle_map->fd_size) {
  287. new_size = handle_map->fd_size < new_size ? new_size :
  288. handle_map->fd_size;
  289. extend:
  290. while (new_size <= fd)
  291. new_size *= 2;
  292. if (!__enlarge_handle_map(handle_map, new_size)) {
  293. ret = -ENOMEM;
  294. goto out;
  295. }
  296. }
  297. if (handle_map->fd_top != FD_NULL &&
  298. fd <= handle_map->fd_top &&
  299. HANDLE_ALLOCATED(handle_map->map[fd])) {
  300. ret = -EBADF;
  301. goto out;
  302. }
  303. if (handle_map->fd_top == FD_NULL ||
  304. fd > handle_map->fd_top)
  305. handle_map->fd_top = fd;
  306. struct shim_fd_handle * new_handle = handle_map->map[fd];
  307. if (!new_handle) {
  308. new_handle = malloc(sizeof(struct shim_fd_handle));
  309. if (!new_handle) {
  310. ret = -ENOMEM;
  311. goto out;
  312. }
  313. handle_map->map[fd] = new_handle;
  314. }
  315. ret = __set_new_fd_handle(&handle_map->map[fd], fd, hdl, flags);
  316. if (ret < 0) {
  317. if (fd == handle_map->fd_top)
  318. handle_map->fd_top = fd ? fd - 1 : FD_NULL;
  319. } else
  320. ret = fd;
  321. out:
  322. unlock(handle_map->lock);
  323. return fd;
  324. }
  325. void flush_handle (struct shim_handle * hdl)
  326. {
  327. if (hdl->fs && hdl->fs->fs_ops &&
  328. hdl->fs->fs_ops->flush)
  329. hdl->fs->fs_ops->flush(hdl);
  330. }
  331. static inline __attribute__((unused))
  332. const char * __handle_name (struct shim_handle * hdl)
  333. {
  334. if (!qstrempty(&hdl->path))
  335. return qstrgetstr(&hdl->path);
  336. if (!qstrempty(&hdl->uri))
  337. return qstrgetstr(&hdl->uri);
  338. if (hdl->fs_type[0])
  339. return hdl->fs_type;
  340. return "(unknown)";
  341. }
  342. void open_handle (struct shim_handle * hdl)
  343. {
  344. get_handle(hdl);
  345. #ifdef DEBUG_REF
  346. int opened = REF_INC(hdl->opened);
  347. debug("open handle %p(%s) (opened = %d)\n", hdl, __handle_name(hdl),
  348. opened);
  349. #else
  350. REF_INC(hdl->opened);
  351. #endif
  352. }
/*
 * Drop one "opened" reference on HDL.  When the open count hits zero,
 * release the open-state resources: cached dentries for directory
 * handles, or the fs close callback otherwise.  Finally drops the
 * ref_count reference taken by open_handle().
 */
void close_handle (struct shim_handle * hdl)
{
    int opened = REF_DEC(hdl->opened);

#ifdef DEBUG_REF
    debug("close handle %p(%s) (opened = %d)\n", hdl, __handle_name(hdl),
          opened);
#endif

    if (!opened) {
        if (hdl->type == TYPE_DIR) {
            struct shim_dir_handle * dir = &hdl->info.dir;

            if (dir->dot) {
                put_dentry(dir->dot);
                dir->dot = NULL;
            }

            if (dir->dotdot) {
                put_dentry(dir->dotdot);
                dir->dotdot = NULL;
            }

            /* NOTE(review): assumes dir->ptr is non-NULL and the entry
             * array is NULL-terminated -- confirm a dir handle cannot be
             * closed before its listing is populated. */
            while (*dir->ptr) {
                struct shim_dentry * dent = *dir->ptr;
                put_dentry(dent);
                *(dir->ptr++) = NULL;
            }
        } else {
            if (hdl->fs && hdl->fs->fs_ops &&
                hdl->fs->fs_ops->close)
                hdl->fs->fs_ops->close(hdl);
        }
    }

    put_handle(hdl);
}
  384. void get_handle (struct shim_handle * hdl)
  385. {
  386. #ifdef DEBUG_REF
  387. int ref_count = REF_INC(hdl->ref_count);
  388. debug("get handle %p(%s) (ref_count = %d)\n", hdl, __handle_name(hdl),
  389. ref_count);
  390. #else
  391. REF_INC(hdl->ref_count);
  392. #endif
  393. }
  394. static void destroy_handle (struct shim_handle * hdl)
  395. {
  396. destroy_lock(hdl->lock);
  397. if (MEMORY_MIGRATED(hdl))
  398. memset(hdl, 0, sizeof(struct shim_handle));
  399. else
  400. free_mem_obj_to_mgr(handle_mgr, hdl);
  401. }
  402. void put_handle (struct shim_handle * hdl)
  403. {
  404. int ref_count = REF_DEC(hdl->ref_count);
  405. #ifdef DEBUG_REF
  406. debug("put handle %p(%s) (ref_count = %d)\n", hdl, __handle_name(hdl),
  407. ref_count);
  408. #endif
  409. if (!ref_count) {
  410. if (hdl->fs && hdl->fs->fs_ops &&
  411. hdl->fs->fs_ops->hput)
  412. hdl->fs->fs_ops->hput(hdl);
  413. qstrfree(&hdl->path);
  414. qstrfree(&hdl->uri);
  415. if (hdl->pal_handle)
  416. DkObjectClose(hdl->pal_handle);
  417. if (hdl->dentry)
  418. put_dentry(hdl->dentry);
  419. if (hdl->fs)
  420. put_mount(hdl->fs);
  421. destroy_handle(hdl);
  422. }
  423. }
/*
 * Best-effort size query for HDL via the fs poll(FS_POLL_SZ) or hstat
 * callbacks; returns 0 when neither is available.
 *
 * NOTE(review): the return type is size_t but error paths return
 * negative errno values (-EINVAL, ret), which wrap to huge unsigned
 * values -- verify callers interpret the result as signed.
 */
size_t get_file_size (struct shim_handle * hdl)
{
    if (!hdl->fs || !hdl->fs->fs_ops)
        return -EINVAL;

    if (hdl->fs->fs_ops->poll)
        return hdl->fs->fs_ops->poll(hdl, FS_POLL_SZ);

    if (hdl->fs->fs_ops->hstat) {
        struct stat stat;
        int ret = hdl->fs->fs_ops->hstat(hdl, &stat);
        if (ret < 0)
            return ret;
        return stat.st_size;
    }

    return 0;
}
  439. void dup_fd_handle (struct shim_handle_map * map,
  440. const struct shim_fd_handle * old,
  441. struct shim_fd_handle * new)
  442. {
  443. struct shim_handle * replaced = NULL;
  444. lock(map->lock);
  445. if (old->vfd != FD_NULL) {
  446. open_handle(old->handle);
  447. replaced = new->handle;
  448. new->handle = old->handle;
  449. }
  450. unlock(map->lock);
  451. if (replaced)
  452. close_handle(replaced);
  453. }
  454. static struct shim_handle_map * get_new_handle_map (FDTYPE size)
  455. {
  456. struct shim_handle_map * handle_map =
  457. malloc(sizeof(struct shim_handle_map));
  458. if (handle_map == NULL)
  459. return NULL;
  460. memset(handle_map, 0, sizeof(struct shim_handle_map));
  461. handle_map->map = malloc(sizeof(struct shim_fd_handle) * size);
  462. if (handle_map->map == NULL) {
  463. free(handle_map);
  464. return NULL;
  465. }
  466. memset(handle_map->map, 0,
  467. sizeof(struct shim_fd_handle) * size);
  468. handle_map->fd_top = FD_NULL;
  469. handle_map->fd_size = size;
  470. create_lock(handle_map->lock);
  471. return handle_map;
  472. }
  473. static struct shim_handle_map * __enlarge_handle_map
  474. (struct shim_handle_map * map, FDTYPE size)
  475. {
  476. if (size <= map->fd_size)
  477. return NULL;
  478. struct shim_fd_handle ** old_map = map->map;
  479. map->map = malloc(sizeof(struct shim_fd_handle *) * size);
  480. if (map->map == NULL) {
  481. map->map = old_map;
  482. return NULL;
  483. }
  484. size_t copy_size = sizeof(struct shim_fd_handle *) * map->fd_size;
  485. map->fd_size = size;
  486. if (old_map && copy_size)
  487. memcpy(map->map, old_map, copy_size);
  488. memset(&map->map[map->fd_size], 0,
  489. (sizeof(struct shim_fd_handle *) * size) - copy_size);
  490. if (old_map)
  491. free(old_map);
  492. return map;
  493. }
  494. int dup_handle_map (struct shim_handle_map ** new,
  495. struct shim_handle_map * old_map)
  496. {
  497. lock(old_map->lock);
  498. /* allocate a new handle mapping with the same size as
  499. the old one */
  500. struct shim_handle_map * new_map =
  501. get_new_handle_map(old_map->fd_size);
  502. new_map->fd_top = old_map->fd_top;
  503. if (old_map->fd_top == FD_NULL)
  504. goto done;
  505. for (int i = 0 ; i <= old_map->fd_top ; i++) {
  506. struct shim_fd_handle * fd_old = old_map->map[i];
  507. struct shim_fd_handle * fd_new;
  508. /* now we go through the handle map and reassign each
  509. of them being allocated */
  510. if (HANDLE_ALLOCATED(fd_old)) {
  511. /* first, get the handle to prevent it from being deleted */
  512. struct shim_handle * hdl = fd_old->handle;
  513. open_handle(hdl);
  514. /* DP: I assume we really need a deep copy of the handle map? */
  515. fd_new = malloc(sizeof(struct shim_fd_handle));
  516. new_map->map[i] = fd_new;
  517. fd_new->vfd = fd_old->vfd;
  518. fd_new->handle = hdl;
  519. fd_new->flags = fd_old->flags;
  520. }
  521. }
  522. done:
  523. unlock(old_map->lock);
  524. *new = new_map;
  525. return 0;
  526. }
/* Take one reference on MAP; paired with put_handle_map(). */
void get_handle_map (struct shim_handle_map * map)
{
    REF_INC(map->ref_count);
}
/*
 * Drop one reference on MAP; on the last reference, close every handle
 * still installed in the table and free the map itself.
 *
 * NOTE(review): slots are tested with vfd != FD_NULL here while other
 * walkers use HANDLE_ALLOCATED() -- presumably equivalent for a
 * quiescent map at refcount zero; confirm.
 */
void put_handle_map (struct shim_handle_map * map)
{
    int ref_count = REF_DEC(map->ref_count);

    if (!ref_count) {
        if (map->fd_top == FD_NULL)
            goto done;

        for (int i = 0 ; i <= map->fd_top ; i++) {
            if (!map->map[i])
                continue;

            if (map->map[i]->vfd != FD_NULL) {
                struct shim_handle * handle = map->map[i]->handle;
                if (handle)
                    close_handle(handle);
            }

            free(map->map[i]);
        }

done:
        destroy_lock(map->lock);
        free(map->map);
        free(map);
    }
}
  553. int flush_handle_map (struct shim_handle_map * map)
  554. {
  555. get_handle_map(map);
  556. lock(map->lock);
  557. if (map->fd_top == FD_NULL)
  558. goto done;
  559. /* now we go through the handle map and flush each handle */
  560. for (int i = 0 ; i <= map->fd_top ; i++) {
  561. if (!HANDLE_ALLOCATED(map->map[i]))
  562. continue;
  563. struct shim_handle * handle = map->map[i]->handle;
  564. if (handle)
  565. flush_handle(handle);
  566. }
  567. done:
  568. unlock(map->lock);
  569. put_handle_map(map);
  570. return 0;
  571. }
  572. int walk_handle_map (int (*callback) (struct shim_fd_handle *,
  573. struct shim_handle_map *, void *),
  574. struct shim_handle_map * map, void * arg)
  575. {
  576. int ret = 0;
  577. lock(map->lock);
  578. if (map->fd_top == FD_NULL)
  579. goto done;
  580. for (int i = 0 ; i <= map->fd_top ; i++) {
  581. if (!HANDLE_ALLOCATED(map->map[i]))
  582. continue;
  583. if ((ret = (*callback) (map->map[i], map, arg)) < 0)
  584. break;
  585. }
  586. done:
  587. unlock(map->lock);
  588. return ret;
  589. }
DEFINE_MIGRATE_FUNC(handle)

/*
 * Checkpoint (migrate) function for struct shim_handle.  Two-pass
 * protocol: with `dry` set only sizes/offsets are accounted; otherwise
 * the handle is copied into the checkpoint region at base+off, detached
 * from process-local state (dentry, refcounts, lock), and its qstrings
 * are migrated in place.
 */
MIGRATE_FUNC_BODY(handle)
{
    assert(size == sizeof(struct shim_handle));
    struct shim_handle * hdl = (struct shim_handle *) obj;
    struct shim_handle * new_hdl = NULL;

    lock(hdl->lock);

    struct shim_mount * fs = hdl->fs, * new_fs = NULL;

    /* migrate the mount first so the copy can point at its new location */
    if (fs && fs->mount_point)
        __DO_MIGRATE(mount, fs, &new_fs, 0);

    unsigned long off = ADD_TO_MIGRATE_MAP(obj, *offset,
                                           sizeof(struct shim_handle));

    if (ENTRY_JUST_CREATED(off)) {
        off = ADD_OFFSET(sizeof(struct shim_handle));

        if (!dry) {
            new_hdl = (struct shim_handle *) (base + off);
            memcpy(new_hdl, hdl, sizeof(struct shim_handle));

            if (fs && fs->fs_ops && fs->fs_ops->checkout)
                fs->fs_ops->checkout(new_hdl);

            /* dentry is re-resolved on resume; refcounts restart at 0 */
            new_hdl->dentry = NULL;
            new_hdl->fs = new_fs;
            REF_SET(new_hdl->opened, 0);
            REF_SET(new_hdl->ref_count, 0);
            clear_lock(new_hdl->lock);
        }

        DO_MIGRATE_IN_MEMBER(qstr, hdl, new_hdl, path, false);
        DO_MIGRATE_IN_MEMBER(qstr, hdl, new_hdl, uri, false);

        ADD_FUNC_ENTRY(off);
        ADD_ENTRY(SIZE, sizeof(struct shim_handle));
        /* NOTE(review): new_hdl is NULL in the dry pass -- presumably
         * ADD_ENTRY does not evaluate its argument then; confirm. */
        ADD_ENTRY(PALHDL, new_hdl->pal_handle ?
                  *offset + offsetof(struct shim_handle, pal_handle) : 0);
    } else if (!dry) {
        new_hdl = (struct shim_handle *) (base + off);
    }

    if (new_hdl && objp)
        *objp = (void *) new_hdl;

    if (new_hdl)
        assert(new_hdl->uri.len < 1024);

    unlock(hdl->lock);
}
END_MIGRATE_FUNC
DEFINE_PROFILE_CATAGORY(inside_resume_handle, resume_func);
DEFINE_PROFILE_INTERVAL(dentry_lookup_for_handle, inside_resume_handle);

/*
 * Resume function for struct shim_handle: rebuild process-local state
 * (lock, dentry, fs binding) for a handle restored from a checkpoint,
 * then let the filesystem check the handle back in.
 */
RESUME_FUNC_BODY(handle)
{
    unsigned long off = GET_FUNC_ENTRY();
    assert((size_t) GET_ENTRY(SIZE) == sizeof(struct shim_handle));
    GET_ENTRY(PALHDL);   /* consume the PALHDL entry; value unused here */
    BEGIN_PROFILE_INTERVAL();

    struct shim_handle * hdl = (struct shim_handle *) (base + off);

    RESUME_REBASE(hdl->fs);
    create_lock(hdl->lock);

    if (!qstrempty(&hdl->path)) {
        UPDATE_PROFILE_INTERVAL();
        /* re-resolve the dentry in this process's namespace */
        int ret = path_lookupat(NULL, qstrgetstr(&hdl->path), LOOKUP_OPEN,
                                &hdl->dentry);
        if (ret < 0)
            return -EACCES;
        get_dentry(hdl->dentry);
        SAVE_PROFILE_INTERVAL(dentry_lookup_for_handle);
    }

    if (!hdl->fs) {
        if (hdl->dentry) {
            set_handle_fs(hdl, hdl->dentry->fs);
        } else {
            /* fall back to matching a built-in fs by type name */
            struct shim_mount * fs = NULL;
            assert(hdl->fs_type);
            search_builtin_fs(hdl->fs_type, &fs);
            if (fs)
                set_handle_fs(hdl, fs);
        }
    }

    if (hdl->fs && hdl->fs->fs_ops &&
        hdl->fs->fs_ops->checkin)
        hdl->fs->fs_ops->checkin(hdl);

#ifdef DEBUG_RESUME
    debug("handle: path=%s,fs_type=%s,uri=%s,flags=%03o\n",
          qstrgetstr(&hdl->path), hdl->fs_type, qstrgetstr(&hdl->uri),
          hdl->flags);
#endif
}
END_RESUME_FUNC
DEFINE_MIGRATE_FUNC(fd_handle)

/*
 * Checkpoint function for struct shim_fd_handle: copies the fd slot into
 * the checkpoint region, then recursively migrates the shim_handle it
 * points at, fixing up the copied pointer.
 */
MIGRATE_FUNC_BODY(fd_handle)
{
    assert(size == sizeof(struct shim_fd_handle));
    struct shim_fd_handle * fdhdl = (struct shim_fd_handle *) obj;
    struct shim_fd_handle * new_fdhdl = NULL;

    ADD_OFFSET(sizeof(struct shim_fd_handle));
    ADD_FUNC_ENTRY(*offset);
    ADD_ENTRY(SIZE, sizeof(struct shim_fd_handle));

    if (!dry) {
        new_fdhdl = (struct shim_fd_handle *) (base + *offset);
        memcpy(new_fdhdl, fdhdl, sizeof(struct shim_fd_handle));
    }

    if (new_fdhdl && objp)
        *objp = (void *) new_fdhdl;

    /* migrate the referenced handle; phdl is NULL in the dry pass */
    struct shim_handle ** phdl = dry ? NULL : &(new_fdhdl->handle);
    struct shim_handle * hdl = fdhdl->handle;
    DO_MIGRATE_IF_RECURSIVE(handle, hdl, phdl, recursive);
}
END_MIGRATE_FUNC
/* Resume function for struct shim_fd_handle: nothing to rebuild beyond
 * consuming and sanity-checking the checkpoint entries. */
RESUME_FUNC_BODY(fd_handle)
{
    GET_FUNC_ENTRY();
    assert((size_t) GET_ENTRY(SIZE) == sizeof(struct shim_fd_handle));
}
END_RESUME_FUNC
DEFINE_MIGRATE_FUNC(handle_map)

/*
 * Checkpoint function for struct shim_handle_map.  The map struct and
 * its pointer array are laid out contiguously in the checkpoint region
 * (array truncated to fd_top+1 entries); each allocated slot is then
 * migrated via the fd_handle migrate function.
 */
MIGRATE_FUNC_BODY(handle_map)
{
    assert(size >= sizeof(struct shim_handle_map));
    struct shim_handle_map * handle_map = (struct shim_handle_map *) obj;
    struct shim_handle_map * new_handle_map = NULL;
    struct shim_fd_handle ** ptr_array;

    lock(handle_map->lock);

    /* only fds up to fd_top are checkpointed */
    int fd_size = handle_map->fd_top != FD_NULL ?
                  handle_map->fd_top + 1 : 0;

    size = sizeof(struct shim_handle_map) +
           (sizeof(struct shim_fd_handle *) * fd_size);

    unsigned long off = ADD_TO_MIGRATE_MAP(obj, *offset, size);

    if (ENTRY_JUST_CREATED(off)) {
        ADD_OFFSET(size);
        ADD_FUNC_ENTRY(*offset);
        ADD_ENTRY(SIZE, size);

        if (!dry) {
            new_handle_map = (struct shim_handle_map *) (base + *offset);

            memcpy(new_handle_map, handle_map,
                   sizeof(struct shim_handle_map));

            /* pointer array lives immediately after the struct */
            ptr_array = (void *) new_handle_map +
                        sizeof(struct shim_handle_map);

            new_handle_map->fd_size = fd_size;
            new_handle_map->map = fd_size ? ptr_array : NULL;

            REF_SET(new_handle_map->ref_count, 0);
            clear_lock(new_handle_map->lock);
        }

        for (int i = 0 ; i < fd_size ; i++) {
            if (HANDLE_ALLOCATED(handle_map->map[i])) {
                struct shim_fd_handle ** new_hdl = dry ? NULL :
                                                   &(ptr_array[i]);
                __DO_MIGRATE(fd_handle, handle_map->map[i],
                             new_hdl, 1);
            } else if (!dry)
                ptr_array[i] = NULL;
        }
    } else if (!dry)
        new_handle_map = (struct shim_handle_map *) (base + off);

    unlock(handle_map->lock);

    if (new_handle_map && objp)
        *objp = (void *) new_handle_map;
}
END_MIGRATE_FUNC
/*
 * Resume function for struct shim_handle_map: rebase the embedded
 * pointer array and every slot/handle pointer to this process's
 * checkpoint base, recreate the lock, and take an "opened" reference on
 * every restored handle.
 */
RESUME_FUNC_BODY(handle_map)
{
    unsigned long off = GET_FUNC_ENTRY();
    assert((size_t) GET_ENTRY(SIZE) >= sizeof(struct shim_handle_map));

    struct shim_handle_map * handle_map =
        (struct shim_handle_map *) (base + off);

    RESUME_REBASE(handle_map->map);
    assert(handle_map->map);

#ifdef DEBUG_RESUME
    debug("handle_map: size=%d,top=%d\n", handle_map->fd_size,
          handle_map->fd_top);
#endif

    create_lock(handle_map->lock);
    lock(handle_map->lock);

    if (handle_map->fd_top != FD_NULL)
        for (int i = 0 ; i <= handle_map->fd_top ; i++) {
            RESUME_REBASE(handle_map->map[i]);
            if (HANDLE_ALLOCATED(handle_map->map[i])) {
                RESUME_REBASE(handle_map->map[i]->handle);
                struct shim_handle * hdl = handle_map->map[i]->handle;
                assert(hdl);
                open_handle(hdl);
#ifdef DEBUG_RESUME
                debug("handle_map[%d]: %s\n", i,
                      !qstrempty(&hdl->uri) ? qstrgetstr(&hdl->uri) :
                      hdl->fs_type);
#endif
            }
        }

    unlock(handle_map->lock);
}
END_RESUME_FUNC