/* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */
/* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */

/* Copyright (C) 2014 Stony Brook University
   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */

/*
 * shim_handle.c
 *
 * This file contains code to maintain bookkeeping for handles in the
 * library OS.
 */

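/* Added commentary (not part of the original source): a hedged sketch of how
 * a syscall handler typically uses the API below.  `fs', `dent' and `flags'
 * are assumed to come from a prior path lookup; only get_new_handle(),
 * set_new_fd_handle() and put_handle() are defined in this file.
 *
 *     struct shim_handle * hdl = get_new_handle();     // ref_count = 1
 *     if (!hdl)
 *         return -ENOMEM;
 *     int ret = fs->d_ops->open(hdl, dent, flags);     // fill in the handle
 *     if (ret < 0) {
 *         put_handle(hdl);
 *         return ret;
 *     }
 *     int vfd = set_new_fd_handle(hdl, flags, NULL);   // NULL = current map
 *     put_handle(hdl);     // the fd table now holds its own reference
 *     return vfd;          // lowest free fd, or a negative error code
 */
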
#include <shim_internal.h>
#include <shim_thread.h>
#include <shim_handle.h>
#include <shim_checkpoint.h>
#include <shim_fs.h>

#include <pal.h>
#include <pal_error.h>

static LOCKTYPE handle_mgr_lock;

#define HANDLE_MGR_ALLOC    32

#define system_lock()       lock(handle_mgr_lock)
#define system_unlock()     unlock(handle_mgr_lock)
#define PAGE_SIZE           allocsize

#define OBJ_TYPE struct shim_handle
#include <memmgr.h>

static MEM_MGR handle_mgr = NULL;

#define INIT_HANDLE_MAP_SIZE 32

//#define DEBUG_REF

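/* Added commentary: the OBJ_TYPE definition followed by #include <memmgr.h>
 * above instantiates a slab-style allocator for struct shim_handle.  The
 * create_mem_mgr(), get_mem_obj_from_mgr_enlarge() and free_mem_obj_to_mgr()
 * calls used below come from that template, and system_lock()/system_unlock()
 * are the hooks it is assumed to use for serializing allocation, which is why
 * they are mapped to handle_mgr_lock here. */
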
static inline int init_tty_handle (struct shim_handle * hdl, bool write)
{
    struct shim_dentry * dent = NULL;
    int ret;
    struct shim_thread * cur_thread = get_cur_thread();

    /* XXX: Try getting the root FS from current thread? */
    assert(cur_thread);
    assert(cur_thread->root);
    if ((ret = path_lookupat(cur_thread->root, "/dev/tty", LOOKUP_OPEN, &dent,
                             cur_thread->root->fs)) < 0)
        return ret;

    int flags = (write ? O_WRONLY : O_RDONLY)|O_APPEND;
    struct shim_mount * fs = dent->fs;
    ret = fs->d_ops->open(hdl, dent, flags);
    if (ret < 0)
        return ret;

    set_handle_fs(hdl, fs);
    hdl->dentry = dent;
    hdl->flags = O_RDWR|O_APPEND|0100000;

    int size;
    char * path = dentry_get_path(dent, true, &size);
    if (path)
        qstrsetstr(&hdl->path, path, size);
    else
        qstrsetstr(&hdl->path, "/dev/tty", 8);

    return 0;
}

static inline int init_exec_handle (struct shim_thread * thread)
{
    if (!PAL_CB(executable))
        return 0;

    struct shim_handle * exec = get_new_handle();
    if (!exec)
        return -ENOMEM;

    qstrsetstr(&exec->uri, PAL_CB(executable), strlen(PAL_CB(executable)));
    exec->type = TYPE_FILE;
    exec->flags = O_RDONLY;
    exec->acc_mode = MAY_READ;

    struct shim_mount * fs = find_mount_from_uri(PAL_CB(executable));
    if (fs) {
        path_lookupat(fs->root, PAL_CB(executable) + fs->uri.len, 0,
                      &exec->dentry, fs);
        set_handle_fs(exec, fs);
        if (exec->dentry) {
            int len;
            const char * path = dentry_get_path(exec->dentry, true, &len);
            qstrsetstr(&exec->path, path, len);
        }
        put_mount(fs);
    } else {
        set_handle_fs(exec, &chroot_builtin_fs);
    }

    lock(thread->lock);
    thread->exec = exec;
    unlock(thread->lock);

    return 0;
}

static struct shim_handle_map * get_new_handle_map (FDTYPE size);

PAL_HANDLE shim_stdio = NULL;

static int __set_new_fd_handle(struct shim_fd_handle ** fdhdl, FDTYPE fd,
                               struct shim_handle * hdl, int flags);

static struct shim_handle_map * __enlarge_handle_map
                            (struct shim_handle_map * map, FDTYPE size);

int init_handle (void)
{
    create_lock(handle_mgr_lock);
    handle_mgr = create_mem_mgr(init_align_up(HANDLE_MGR_ALLOC));
    if (!handle_mgr)
        return -ENOMEM;
    return 0;
}

int init_important_handles (void)
{
    struct shim_thread * thread = get_cur_thread();

    if (thread->handle_map)
        goto done;

    struct shim_handle_map * handle_map = get_cur_handle_map(thread);

    if (!handle_map) {
        handle_map = get_new_handle_map(INIT_HANDLE_MAP_SIZE);
        if (!handle_map)
            return -ENOMEM;

        set_handle_map(thread, handle_map);
    }

    lock(handle_map->lock);

    if (handle_map->fd_size < 3) {
        if (!__enlarge_handle_map(handle_map, INIT_HANDLE_MAP_SIZE)) {
            unlock(handle_map->lock);
            return -ENOMEM;
        }
    }

    struct shim_handle * hdl = NULL;
    int ret;

    for (int fd = 0 ; fd < 3 ; fd++)
        if (!HANDLE_ALLOCATED(handle_map->map[fd])) {
            if (!hdl) {
                hdl = get_new_handle();
                if (!hdl) {
                    unlock(handle_map->lock);
                    return -ENOMEM;
                }

                if ((ret = init_tty_handle(hdl, fd)) < 0) {
                    unlock(handle_map->lock);
                    put_handle(hdl);
                    return ret;
                }
            } else {
                get_handle(hdl);
            }

            __set_new_fd_handle(&handle_map->map[fd], fd, hdl, 0);
            put_handle(hdl);
            if (fd != 1)
                hdl = NULL;
        } else {
            if (fd == 1)
                hdl = handle_map->map[fd]->handle;
        }

    if (handle_map->fd_top == FD_NULL || handle_map->fd_top < 2)
        handle_map->fd_top = 2;

    unlock(handle_map->lock);

done:
    init_exec_handle(thread);
    return 0;
}

struct shim_handle * __get_fd_handle (FDTYPE fd, int * flags,
                                      struct shim_handle_map * map)
{
    struct shim_fd_handle * fd_handle = NULL;

    if (map->fd_top != FD_NULL &&
        fd <= map->fd_top) {
        fd_handle = map->map[fd];
        if (!HANDLE_ALLOCATED(fd_handle))
            return NULL;
        if (flags)
            *flags = fd_handle->flags;
        return fd_handle->handle;
    }
    return NULL;
}

struct shim_handle * get_fd_handle (FDTYPE fd, int * flags,
                                    struct shim_handle_map * map)
{
    if (!map)
        map = get_cur_handle_map(NULL);

    struct shim_handle * hdl = NULL;
    lock(map->lock);
    if ((hdl = __get_fd_handle(fd, flags, map)))
        get_handle(hdl);
    unlock(map->lock);
    return hdl;
}

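/* Added commentary: get_fd_handle() returns the handle with an extra
 * reference taken under the map lock, so the caller must pair it with
 * put_handle().  A hedged usage sketch (the surrounding code is
 * hypothetical):
 *
 *     int flags;
 *     struct shim_handle * hdl = get_fd_handle(fd, &flags, NULL);
 *     if (!hdl)
 *         return -EBADF;
 *     ... operate on hdl ...
 *     put_handle(hdl);
 */
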
struct shim_handle *
__detach_fd_handle (struct shim_fd_handle * fd, int * flags,
                    struct shim_handle_map * map)
{
    struct shim_handle * handle = NULL;

    if (HANDLE_ALLOCATED(fd)) {
        int vfd = fd->vfd;
        handle = fd->handle;
        if (flags)
            *flags = fd->flags;

        fd->vfd = FD_NULL;
        fd->handle = NULL;
        fd->flags = 0;

        if (vfd == map->fd_top)
            do {
                map->fd_top = vfd ? vfd - 1 : FD_NULL;
                vfd--;
            } while (vfd >= 0 &&
                     !HANDLE_ALLOCATED(map->map[vfd]));
    }

    return handle;
}

struct shim_handle * detach_fd_handle (FDTYPE fd, int * flags,
                                       struct shim_handle_map * handle_map)
{
    struct shim_handle * handle = NULL;

    if (!handle_map && !(handle_map = get_cur_handle_map(NULL)))
        return NULL;

    lock(handle_map->lock);

    if (fd < handle_map->fd_size)
        handle = __detach_fd_handle(handle_map->map[fd], flags,
                                    handle_map);

    unlock(handle_map->lock);
    return handle;
}

struct shim_handle * get_new_handle (void)
{
    struct shim_handle * new_handle =
                get_mem_obj_from_mgr_enlarge(handle_mgr,
                                             size_align_up(HANDLE_MGR_ALLOC));
    if (!new_handle)
        return NULL;

    memset(new_handle, 0, sizeof(struct shim_handle));
    REF_SET(new_handle->ref_count, 1);
    create_lock(new_handle->lock);
    new_handle->owner = cur_process.vmid;
    INIT_LISTP(&new_handle->epolls);
    return new_handle;
}

static int __set_new_fd_handle(struct shim_fd_handle ** fdhdl, FDTYPE fd,
                               struct shim_handle * hdl, int flags)
{
    struct shim_fd_handle * new_handle = *fdhdl;

    if (!new_handle) {
        new_handle = malloc(sizeof(struct shim_fd_handle));
        if (!new_handle)
            return -ENOMEM;
        *fdhdl = new_handle;
    }

    new_handle->vfd = fd;
    new_handle->flags = flags;
    open_handle(hdl);
    new_handle->handle = hdl;
    return 0;
}

int set_new_fd_handle (struct shim_handle * hdl, int flags,
                       struct shim_handle_map * handle_map)
{
    FDTYPE fd = 0;
    int new_size = 0;
    int ret = 0;

    if (!handle_map && !(handle_map = get_cur_handle_map(NULL)))
        return -EBADF;

    lock(handle_map->lock);

    if (!handle_map->map ||
        handle_map->fd_size < INIT_HANDLE_MAP_SIZE)
        new_size = INIT_HANDLE_MAP_SIZE;

    if (!handle_map->map)
        goto extend;

    if (handle_map->fd_top != FD_NULL)
        do {
            ++fd;
            if (fd == handle_map->fd_size) {
                new_size = handle_map->fd_size < new_size ? new_size :
                           handle_map->fd_size * 2;
extend:
                if (!__enlarge_handle_map(handle_map, new_size)) {
                    ret = -ENOMEM;
                    goto out;
                }
            }
        } while (handle_map->fd_top != FD_NULL &&
                 fd <= handle_map->fd_top &&
                 HANDLE_ALLOCATED(handle_map->map[fd]));

    if (handle_map->fd_top == FD_NULL ||
        fd > handle_map->fd_top)
        handle_map->fd_top = fd;

    ret = __set_new_fd_handle(&handle_map->map[fd], fd, hdl, flags);

    if (ret < 0) {
        if (fd == handle_map->fd_top)
            handle_map->fd_top = fd ? fd - 1 : FD_NULL;
    } else
        ret = fd;
out:
    unlock(handle_map->lock);
    return ret;
}

int set_new_fd_handle_by_fd (FDTYPE fd, struct shim_handle * hdl, int flags,
                             struct shim_handle_map * handle_map)
{
    int new_size = 0;
    int ret = 0;

    if (!handle_map && !(handle_map = get_cur_handle_map(NULL)))
        return -EBADF;

    lock(handle_map->lock);

    if (!handle_map->map ||
        handle_map->fd_size < INIT_HANDLE_MAP_SIZE)
        new_size = INIT_HANDLE_MAP_SIZE;

    if (!handle_map->map)
        goto extend;

    if (fd >= handle_map->fd_size) {
        new_size = handle_map->fd_size < new_size ? new_size :
                   handle_map->fd_size;
extend:
        while (new_size <= fd)
            new_size *= 2;

        if (!__enlarge_handle_map(handle_map, new_size)) {
            ret = -ENOMEM;
            goto out;
        }
    }

    if (handle_map->fd_top != FD_NULL &&
        fd <= handle_map->fd_top &&
        HANDLE_ALLOCATED(handle_map->map[fd])) {
        ret = -EBADF;
        goto out;
    }

    if (handle_map->fd_top == FD_NULL ||
        fd > handle_map->fd_top)
        handle_map->fd_top = fd;

    struct shim_fd_handle * new_handle = handle_map->map[fd];

    if (!new_handle) {
        new_handle = malloc(sizeof(struct shim_fd_handle));
        if (!new_handle) {
            ret = -ENOMEM;
            goto out;
        }
        handle_map->map[fd] = new_handle;
    }

    ret = __set_new_fd_handle(&handle_map->map[fd], fd, hdl, flags);
    if (ret < 0) {
        if (fd == handle_map->fd_top)
            handle_map->fd_top = fd ? fd - 1 : FD_NULL;
    } else
        ret = fd;
out:
    unlock(handle_map->lock);
    return ret;
}

void flush_handle (struct shim_handle * hdl)
{
    if (hdl->fs && hdl->fs->fs_ops &&
        hdl->fs->fs_ops->flush)
        hdl->fs->fs_ops->flush(hdl);
}

static inline __attribute__((unused))
const char * __handle_name (struct shim_handle * hdl)
{
    if (!qstrempty(&hdl->path))
        return qstrgetstr(&hdl->path);
    if (!qstrempty(&hdl->uri))
        return qstrgetstr(&hdl->uri);
    if (hdl->fs_type[0])
        return hdl->fs_type;
    return "(unknown)";
}

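/* Added commentary: a shim_handle carries two counters.  ref_count (managed
 * by get_handle()/put_handle()) tracks how many in-memory references exist
 * and controls when the object is destroyed; opened (managed by
 * open_handle()/close_handle()) tracks how many fd-table slots and similar
 * "open" users remain, and reaching zero triggers the filesystem close
 * callback.  close_handle() therefore ends with put_handle(), mirroring the
 * get_handle() taken inside open_handle(). */
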
void open_handle (struct shim_handle * hdl)
{
    get_handle(hdl);

#ifdef DEBUG_REF
    int opened = REF_INC(hdl->opened);

    debug("open handle %p(%s) (opened = %d)\n", hdl, __handle_name(hdl),
          opened);
#else
    REF_INC(hdl->opened);
#endif
}

extern int delete_from_epoll_handles (struct shim_handle * handle);

void close_handle (struct shim_handle * hdl)
{
    int opened = REF_DEC(hdl->opened);

#ifdef DEBUG_REF
    debug("close handle %p(%s) (opened = %d)\n", hdl, __handle_name(hdl),
          opened);
#endif

    if (!opened) {
        if (hdl->type == TYPE_DIR) {
            struct shim_dir_handle * dir = &hdl->info.dir;

            if (dir->dot) {
                put_dentry(dir->dot);
                dir->dot = NULL;
            }

            if (dir->dotdot) {
                put_dentry(dir->dotdot);
                dir->dotdot = NULL;
            }

            if (dir->ptr != (void *) -1) {
                while (dir->ptr && *dir->ptr) {
                    struct shim_dentry * dent = *dir->ptr;
                    put_dentry(dent);
                    *(dir->ptr++) = NULL;
                }
            }
        } else {
            if (hdl->fs && hdl->fs->fs_ops &&
                hdl->fs->fs_ops->close)
                hdl->fs->fs_ops->close(hdl);
        }

        delete_from_epoll_handles(hdl);
    }

    put_handle(hdl);
}

void get_handle (struct shim_handle * hdl)
{
#ifdef DEBUG_REF
    int ref_count = REF_INC(hdl->ref_count);

    debug("get handle %p(%s) (ref_count = %d)\n", hdl, __handle_name(hdl),
          ref_count);
#else
    REF_INC(hdl->ref_count);
#endif
}

static void destroy_handle (struct shim_handle * hdl)
{
    destroy_lock(hdl->lock);

    if (MEMORY_MIGRATED(hdl))
        memset(hdl, 0, sizeof(struct shim_handle));
    else
        free_mem_obj_to_mgr(handle_mgr, hdl);
}

void put_handle (struct shim_handle * hdl)
{
    int ref_count = REF_DEC(hdl->ref_count);

#ifdef DEBUG_REF
    debug("put handle %p(%s) (ref_count = %d)\n", hdl, __handle_name(hdl),
          ref_count);
#endif

    if (!ref_count) {
        if (hdl->fs && hdl->fs->fs_ops &&
            hdl->fs->fs_ops->hput)
            hdl->fs->fs_ops->hput(hdl);

        qstrfree(&hdl->path);
        qstrfree(&hdl->uri);

        if (hdl->pal_handle) {
#ifdef DEBUG_REF
            debug("handle %p closes PAL handle %p\n", hdl, hdl->pal_handle);
#endif
            DkObjectClose(hdl->pal_handle);
            hdl->pal_handle = NULL;
        }

        if (hdl->dentry)
            put_dentry(hdl->dentry);

        if (hdl->fs)
            put_mount(hdl->fs);

        destroy_handle(hdl);
    }
}

size_t get_file_size (struct shim_handle * hdl)
{
    if (!hdl->fs || !hdl->fs->fs_ops)
        return -EINVAL;

    if (hdl->fs->fs_ops->poll)
        return hdl->fs->fs_ops->poll(hdl, FS_POLL_SZ);

    if (hdl->fs->fs_ops->hstat) {
        struct stat stat;
        int ret = hdl->fs->fs_ops->hstat(hdl, &stat);
        if (ret < 0)
            return ret;
        return stat.st_size;
    }

    return 0;
}

void dup_fd_handle (struct shim_handle_map * map,
                    const struct shim_fd_handle * old,
                    struct shim_fd_handle * new)
{
    struct shim_handle * replaced = NULL;

    lock(map->lock);

    if (old->vfd != FD_NULL) {
        open_handle(old->handle);
        replaced = new->handle;
        new->handle = old->handle;
    }

    unlock(map->lock);

    if (replaced)
        close_handle(replaced);
}

static struct shim_handle_map * get_new_handle_map (FDTYPE size)
{
    struct shim_handle_map * handle_map =
                    malloc(sizeof(struct shim_handle_map));

    if (handle_map == NULL)
        return NULL;

    memset(handle_map, 0, sizeof(struct shim_handle_map));

    /* map is an array of pointers to shim_fd_handle entries */
    handle_map->map = malloc(sizeof(struct shim_fd_handle *) * size);

    if (handle_map->map == NULL) {
        free(handle_map);
        return NULL;
    }

    memset(handle_map->map, 0,
           sizeof(struct shim_fd_handle *) * size);

    handle_map->fd_top = FD_NULL;
    handle_map->fd_size = size;
    create_lock(handle_map->lock);

    return handle_map;
}

static struct shim_handle_map * __enlarge_handle_map
                        (struct shim_handle_map * map, FDTYPE size)
{
    if (size <= map->fd_size)
        return NULL;

    struct shim_fd_handle ** old_map = map->map;

    map->map = malloc(sizeof(struct shim_fd_handle *) * size);
    if (map->map == NULL) {
        map->map = old_map;
        return NULL;
    }

    size_t copy_size = sizeof(struct shim_fd_handle *) * map->fd_size;
    map->fd_size = size;
    memset(map->map, 0, sizeof(struct shim_fd_handle *) * size);
    if (old_map) {
        if (copy_size)
            memcpy(map->map, old_map, copy_size);
        free(old_map);
    }

    return map;
}

int dup_handle_map (struct shim_handle_map ** new,
                    struct shim_handle_map * old_map)
{
    lock(old_map->lock);

    /* allocate a new handle mapping with the same size as
       the old one */
    struct shim_handle_map * new_map =
                    get_new_handle_map(old_map->fd_size);

    if (!new_map) {
        unlock(old_map->lock);
        return -ENOMEM;
    }

    new_map->fd_top = old_map->fd_top;

    if (old_map->fd_top == FD_NULL)
        goto done;

    for (int i = 0 ; i <= old_map->fd_top ; i++) {
        struct shim_fd_handle * fd_old = old_map->map[i];
        struct shim_fd_handle * fd_new;

        /* now we go through the handle map and reassign each
           of them being allocated */
        if (HANDLE_ALLOCATED(fd_old)) {
            /* first, get the handle to prevent it from being deleted */
            struct shim_handle * hdl = fd_old->handle;
            open_handle(hdl);

            /* DP: I assume we really need a deep copy of the handle map? */
            fd_new = malloc(sizeof(struct shim_fd_handle));
            new_map->map[i] = fd_new;
            fd_new->vfd = fd_old->vfd;
            fd_new->handle = hdl;
            fd_new->flags = fd_old->flags;
        }
    }

done:
    unlock(old_map->lock);
    *new = new_map;
    return 0;
}

void get_handle_map (struct shim_handle_map * map)
{
    REF_INC(map->ref_count);
}

void put_handle_map (struct shim_handle_map * map)
{
    int ref_count = REF_DEC(map->ref_count);

    if (!ref_count) {
        if (map->fd_top == FD_NULL)
            goto done;

        for (int i = 0 ; i <= map->fd_top ; i++) {
            if (!map->map[i])
                continue;

            if (map->map[i]->vfd != FD_NULL) {
                struct shim_handle * handle = map->map[i]->handle;

                if (handle)
                    close_handle(handle);
            }

            free(map->map[i]);
        }

done:
        destroy_lock(map->lock);
        free(map->map);
        free(map);
    }
}

int flush_handle_map (struct shim_handle_map * map)
{
    get_handle_map(map);
    lock(map->lock);

    if (map->fd_top == FD_NULL)
        goto done;

    /* now we go through the handle map and flush each handle */
    for (int i = 0 ; i <= map->fd_top ; i++) {
        if (!HANDLE_ALLOCATED(map->map[i]))
            continue;

        struct shim_handle * handle = map->map[i]->handle;

        if (handle)
            flush_handle(handle);
    }

done:
    unlock(map->lock);
    put_handle_map(map);
    return 0;
}

int walk_handle_map (int (*callback) (struct shim_fd_handle *,
                                      struct shim_handle_map *, void *),
                     struct shim_handle_map * map, void * arg)
{
    int ret = 0;
    lock(map->lock);

    if (map->fd_top == FD_NULL)
        goto done;

    for (int i = 0 ; i <= map->fd_top ; i++) {
        if (!HANDLE_ALLOCATED(map->map[i]))
            continue;

        if ((ret = (*callback) (map->map[i], map, arg)) < 0)
            break;
    }

done:
    unlock(map->lock);
    return ret;
}

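/* Added commentary: a hedged sketch (the callback below is hypothetical, not
 * part of this file) of a walk_handle_map() callback that counts allocated
 * descriptors.  The walk runs with map->lock held, so the callback must not
 * re-take that lock or block.
 *
 *     static int count_fd_cb (struct shim_fd_handle * fdhdl,
 *                             struct shim_handle_map * map, void * arg)
 *     {
 *         (void) fdhdl; (void) map;
 *         (*(int *) arg)++;
 *         return 0;            // a negative return value stops the walk
 *     }
 *
 *     int nfds = 0;
 *     walk_handle_map(&count_fd_cb, get_cur_handle_map(NULL), &nfds);
 */
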
BEGIN_CP_FUNC(handle)
{
    assert(size == sizeof(struct shim_handle));

    struct shim_handle * hdl = (struct shim_handle *) obj;
    struct shim_handle * new_hdl = NULL;

    ptr_t off = GET_FROM_CP_MAP(obj);

    if (!off) {
        off = ADD_CP_OFFSET(sizeof(struct shim_handle));
        ADD_TO_CP_MAP(obj, off);
        new_hdl = (struct shim_handle *) (base + off);

        lock(hdl->lock);
        struct shim_mount * fs = hdl->fs;
        *new_hdl = *hdl;

        if (fs && fs->fs_ops && fs->fs_ops->checkout)
            fs->fs_ops->checkout(new_hdl);

        new_hdl->dentry = NULL;
        REF_SET(new_hdl->opened, 0);
        REF_SET(new_hdl->ref_count, 0);
        clear_lock(new_hdl->lock);

        DO_CP_IN_MEMBER(qstr, new_hdl, path);
        DO_CP_IN_MEMBER(qstr, new_hdl, uri);

        if (fs && hdl->dentry) {
            DO_CP_MEMBER(mount, hdl, new_hdl, fs);
        } else {
            new_hdl->fs = NULL;
        }

        if (hdl->dentry)
            DO_CP_MEMBER(dentry, hdl, new_hdl, dentry);

        if (new_hdl->pal_handle) {
            struct shim_palhdl_entry * entry;
            DO_CP(palhdl, hdl->pal_handle, &entry);
            entry->uri = &new_hdl->uri;
            entry->phandle = &new_hdl->pal_handle;
        }

        if (hdl->type == TYPE_EPOLL)
            DO_CP(epoll_fd, &hdl->info.epoll.fds, &new_hdl->info.epoll.fds);

        INIT_LISTP(&new_hdl->epolls);

        unlock(hdl->lock);
        ADD_CP_FUNC_ENTRY(off);
    } else {
        new_hdl = (struct shim_handle *) (base + off);
    }

    if (objp)
        *objp = (void *) new_hdl;
}
END_CP_FUNC(handle)

BEGIN_RS_FUNC(handle)
{
    struct shim_handle * hdl = (void *) (base + GET_CP_FUNC_ENTRY());

    CP_REBASE(hdl->fs);
    CP_REBASE(hdl->dentry);
    CP_REBASE(hdl->epolls);

    create_lock(hdl->lock);

    if (!hdl->fs) {
        assert(hdl->fs_type);
        search_builtin_fs(hdl->fs_type, &hdl->fs);
        if (!hdl->fs)
            return -EINVAL;
    }

    if (hdl->fs && hdl->fs->fs_ops &&
        hdl->fs->fs_ops->checkin)
        hdl->fs->fs_ops->checkin(hdl);

    DEBUG_RS("path=%s,type=%s,uri=%s,flags=%03o",
             qstrgetstr(&hdl->path), hdl->fs_type, qstrgetstr(&hdl->uri),
             hdl->flags);
}
END_RS_FUNC(handle)

BEGIN_CP_FUNC(fd_handle)
{
    assert(size == sizeof(struct shim_fd_handle));

    struct shim_fd_handle * fdhdl = (struct shim_fd_handle *) obj;
    struct shim_fd_handle * new_fdhdl = NULL;

    ptr_t off = ADD_CP_OFFSET(sizeof(struct shim_fd_handle));
    new_fdhdl = (struct shim_fd_handle *) (base + off);
    memcpy(new_fdhdl, fdhdl, sizeof(struct shim_fd_handle));
    DO_CP(handle, fdhdl->handle, &new_fdhdl->handle);
    ADD_CP_FUNC_ENTRY(off);

    if (objp)
        *objp = (void *) new_fdhdl;
}
END_CP_FUNC_NO_RS(fd_handle)

BEGIN_CP_FUNC(handle_map)
{
    assert(size >= sizeof(struct shim_handle_map));

    struct shim_handle_map * handle_map = (struct shim_handle_map *) obj;
    struct shim_handle_map * new_handle_map = NULL;
    struct shim_fd_handle ** ptr_array;

    lock(handle_map->lock);

    int fd_size = handle_map->fd_top != FD_NULL ?
                  handle_map->fd_top + 1 : 0;

    size = sizeof(struct shim_handle_map) +
           (sizeof(struct shim_fd_handle *) * fd_size);

    ptr_t off = GET_FROM_CP_MAP(obj);

    if (!off) {
        off = ADD_CP_OFFSET(size);
        new_handle_map = (struct shim_handle_map *) (base + off);

        memcpy(new_handle_map, handle_map,
               sizeof(struct shim_handle_map));

        ptr_array = (void *) new_handle_map + sizeof(struct shim_handle_map);

        new_handle_map->fd_size = fd_size;
        new_handle_map->map = fd_size ? ptr_array : NULL;

        REF_SET(new_handle_map->ref_count, 0);
        clear_lock(new_handle_map->lock);

        for (int i = 0 ; i < fd_size ; i++) {
            if (HANDLE_ALLOCATED(handle_map->map[i]))
                DO_CP(fd_handle, handle_map->map[i], &ptr_array[i]);
            else
                ptr_array[i] = NULL;
        }

        ADD_CP_FUNC_ENTRY(off);
    } else {
        new_handle_map = (struct shim_handle_map *) (base + off);
    }

    unlock(handle_map->lock);

    if (objp)
        *objp = (void *) new_handle_map;
}
END_CP_FUNC(handle_map)

BEGIN_RS_FUNC(handle_map)
{
    struct shim_handle_map * handle_map = (void *) (base + GET_CP_FUNC_ENTRY());

    CP_REBASE(handle_map->map);
    assert(handle_map->map);

    DEBUG_RS("size=%d,top=%d", handle_map->fd_size, handle_map->fd_top);

    create_lock(handle_map->lock);
    lock(handle_map->lock);

    if (handle_map->fd_top != FD_NULL)
        for (int i = 0 ; i <= handle_map->fd_top ; i++) {
            CP_REBASE(handle_map->map[i]);
            if (HANDLE_ALLOCATED(handle_map->map[i])) {
                CP_REBASE(handle_map->map[i]->handle);
                struct shim_handle * hdl = handle_map->map[i]->handle;
                assert(hdl);
                open_handle(hdl);
                DEBUG_RS("[%d]%s", i, qstrempty(&hdl->uri) ? hdl->fs_type :
                         qstrgetstr(&hdl->uri));
            }
        }

    unlock(handle_map->lock);
}
END_RS_FUNC(handle_map)