shim_handle.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874
  1. /* Copyright (C) 2014 Stony Brook University
  2. This file is part of Graphene Library OS.
  3. Graphene Library OS is free software: you can redistribute it and/or
  4. modify it under the terms of the GNU Lesser General Public License
  5. as published by the Free Software Foundation, either version 3 of the
  6. License, or (at your option) any later version.
  7. Graphene Library OS is distributed in the hope that it will be useful,
  8. but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. GNU Lesser General Public License for more details.
  11. You should have received a copy of the GNU Lesser General Public License
  12. along with this program. If not, see <http://www.gnu.org/licenses/>. */
  13. /*
  14. * shim_handle.c
  15. *
  16. * This file contains codes to maintain bookkeeping for handles in library OS.
  17. */
  18. #include <pal.h>
  19. #include <pal_error.h>
  20. #include <shim_checkpoint.h>
  21. #include <shim_fs.h>
  22. #include <shim_handle.h>
  23. #include <shim_internal.h>
  24. #include <shim_thread.h>
  25. static struct shim_lock handle_mgr_lock;
  26. #define HANDLE_MGR_ALLOC 32
  27. #define SYSTEM_LOCK() lock(&handle_mgr_lock)
  28. #define SYSTEM_UNLOCK() unlock(&handle_mgr_lock)
  29. #define PAGE_SIZE allocsize
  30. #define OBJ_TYPE struct shim_handle
  31. #include <memmgr.h>
  32. static MEM_MGR handle_mgr = NULL;
  33. #define INIT_HANDLE_MAP_SIZE 32
  34. //#define DEBUG_REF
  35. static inline int init_tty_handle(struct shim_handle* hdl, bool write) {
  36. struct shim_dentry* dent = NULL;
  37. int ret;
  38. struct shim_thread* cur_thread = get_cur_thread();
  39. /* XXX: Try getting the root FS from current thread? */
  40. assert(cur_thread);
  41. assert(cur_thread->root);
  42. if ((ret = path_lookupat(NULL, "/dev/tty", LOOKUP_OPEN, &dent, NULL)) < 0)
  43. return ret;
  44. int flags = (write ? O_WRONLY : O_RDONLY) | O_APPEND;
  45. struct shim_mount* fs = dent->fs;
  46. ret = fs->d_ops->open(hdl, dent, flags);
  47. if (ret < 0)
  48. return ret;
  49. set_handle_fs(hdl, fs);
  50. hdl->dentry = dent;
  51. hdl->flags = O_RDWR | O_APPEND | 0100000;
  52. size_t size;
  53. char* path = dentry_get_path(dent, true, &size);
  54. if (path)
  55. qstrsetstr(&hdl->path, path, size);
  56. else
  57. qstrsetstr(&hdl->path, "/dev/tty", 8);
  58. return 0;
  59. }
/* Create the handle representing the executable (PAL_CB(executable)) and
 * install it as thread->exec.
 *
 * Returns 0 on success (including when there is no executable),
 * -ENOMEM if handle allocation fails. */
static inline int init_exec_handle(struct shim_thread* thread) {
    if (!PAL_CB(executable))
        return 0;

    struct shim_handle* exec = get_new_handle();
    if (!exec)
        return -ENOMEM;

    qstrsetstr(&exec->uri, PAL_CB(executable), strlen(PAL_CB(executable)));
    exec->type = TYPE_FILE;
    exec->flags = O_RDONLY;
    exec->acc_mode = MAY_READ;

    struct shim_mount* fs = find_mount_from_uri(PAL_CB(executable));
    if (fs) {
        const char* p = PAL_CB(executable) + fs->uri.len;
        /*
         * Lookup for PAL_CB(executable) needs to be done under a given
         * mount point. which requires a relative path name.
         * On the other hand, the one in manifest file can be absolute path.
         */
        while (*p == '/') {
            p++;
        }
        /* best-effort lookup: on failure exec->dentry simply stays NULL */
        path_lookupat(fs->root, p, 0, &exec->dentry, fs);
        set_handle_fs(exec, fs);
        if (exec->dentry) {
            size_t len;
            const char* path = dentry_get_path(exec->dentry, true, &len);
            qstrsetstr(&exec->path, path, len);
        }
        put_mount(fs); /* drop the reference from find_mount_from_uri() */
    } else {
        /* no matching mount; fall back to the built-in chroot fs */
        set_handle_fs(exec, &chroot_builtin_fs);
    }

    lock(&thread->lock);
    thread->exec = exec; /* ownership of the initial handle reference moves here */
    unlock(&thread->lock);
    return 0;
}
  97. static struct shim_handle_map* get_new_handle_map(FDTYPE size);
  98. PAL_HANDLE shim_stdio = NULL;
  99. static int __set_new_fd_handle(struct shim_fd_handle** fdhdl, FDTYPE fd, struct shim_handle* hdl,
  100. int flags);
  101. static struct shim_handle_map* __enlarge_handle_map(struct shim_handle_map* map, FDTYPE size);
/* Initialize the global handle memory manager and its lock. Called once at
 * LibOS startup. Returns 0 on success, -ENOMEM if the memory manager
 * cannot be created. */
int init_handle(void) {
    create_lock(&handle_mgr_lock);
    handle_mgr = create_mem_mgr(init_align_up(HANDLE_MGR_ALLOC));
    if (!handle_mgr)
        return -ENOMEM;
    return 0;
}
  109. int init_important_handles(void) {
  110. struct shim_thread* thread = get_cur_thread();
  111. if (thread->handle_map)
  112. goto done;
  113. struct shim_handle_map* handle_map = get_cur_handle_map(thread);
  114. if (!handle_map) {
  115. handle_map = get_new_handle_map(INIT_HANDLE_MAP_SIZE);
  116. if (!handle_map)
  117. return -ENOMEM;
  118. set_handle_map(thread, handle_map);
  119. }
  120. lock(&handle_map->lock);
  121. if (handle_map->fd_size < 3) {
  122. if (!__enlarge_handle_map(handle_map, INIT_HANDLE_MAP_SIZE)) {
  123. unlock(&handle_map->lock);
  124. return -ENOMEM;
  125. }
  126. }
  127. struct shim_handle* hdl = NULL;
  128. int ret;
  129. for (int fd = 0; fd < 3; fd++)
  130. if (!HANDLE_ALLOCATED(handle_map->map[fd])) {
  131. if (!hdl) {
  132. hdl = get_new_handle();
  133. if (!hdl)
  134. return -ENOMEM;
  135. if ((ret = init_tty_handle(hdl, fd)) < 0) {
  136. put_handle(hdl);
  137. return ret;
  138. }
  139. } else {
  140. get_handle(hdl);
  141. }
  142. __set_new_fd_handle(&handle_map->map[fd], fd, hdl, 0);
  143. put_handle(hdl);
  144. if (fd != 1)
  145. hdl = NULL;
  146. } else {
  147. if (fd == 1)
  148. hdl = handle_map->map[fd]->handle;
  149. }
  150. if (handle_map->fd_top == FD_NULL || handle_map->fd_top < 2)
  151. handle_map->fd_top = 2;
  152. unlock(&handle_map->lock);
  153. done:
  154. init_exec_handle(thread);
  155. return 0;
  156. }
  157. struct shim_handle* __get_fd_handle(FDTYPE fd, int* flags, struct shim_handle_map* map) {
  158. struct shim_fd_handle* fd_handle = NULL;
  159. if (map->fd_top != FD_NULL && fd <= map->fd_top) {
  160. fd_handle = map->map[fd];
  161. if (!HANDLE_ALLOCATED(fd_handle))
  162. return NULL;
  163. if (flags)
  164. *flags = fd_handle->flags;
  165. return fd_handle->handle;
  166. }
  167. return NULL;
  168. }
  169. struct shim_handle* get_fd_handle(FDTYPE fd, int* flags, struct shim_handle_map* map) {
  170. if (!map)
  171. map = get_cur_handle_map(NULL);
  172. struct shim_handle* hdl = NULL;
  173. lock(&map->lock);
  174. if ((hdl = __get_fd_handle(fd, flags, map)))
  175. get_handle(hdl);
  176. unlock(&map->lock);
  177. return hdl;
  178. }
/* Remove the handle stored in fd entry `fd` from `map` (caller must hold
 * map->lock). Returns the detached handle WITHOUT dropping its reference --
 * ownership moves to the caller -- or NULL if the entry was not allocated. */
struct shim_handle* __detach_fd_handle(struct shim_fd_handle* fd, int* flags,
                                       struct shim_handle_map* map) {
    struct shim_handle* handle = NULL;

    if (HANDLE_ALLOCATED(fd)) {
        int vfd = fd->vfd;
        handle = fd->handle;
        if (flags)
            *flags = fd->flags;

        fd->vfd = FD_NULL;
        fd->handle = NULL;
        fd->flags = 0;

        /* if we detached the highest fd, walk downwards to find the new
         * highest allocated fd (or FD_NULL when the map becomes empty) */
        if (vfd == map->fd_top)
            do {
                map->fd_top = vfd ? vfd - 1 : FD_NULL;
                vfd--;
            } while (vfd >= 0 && !HANDLE_ALLOCATED(map->map[vfd]));
    }

    return handle;
}
  198. struct shim_handle* detach_fd_handle(FDTYPE fd, int* flags, struct shim_handle_map* handle_map) {
  199. struct shim_handle* handle = NULL;
  200. if (!handle_map && !(handle_map = get_cur_handle_map(NULL)))
  201. return NULL;
  202. lock(&handle_map->lock);
  203. if (fd < handle_map->fd_size)
  204. handle = __detach_fd_handle(handle_map->map[fd], flags, handle_map);
  205. unlock(&handle_map->lock);
  206. return handle;
  207. }
  208. struct shim_handle* get_new_handle(void) {
  209. struct shim_handle* new_handle =
  210. get_mem_obj_from_mgr_enlarge(handle_mgr, size_align_up(HANDLE_MGR_ALLOC));
  211. if (!new_handle)
  212. return NULL;
  213. memset(new_handle, 0, sizeof(struct shim_handle));
  214. REF_SET(new_handle->ref_count, 1);
  215. create_lock(&new_handle->lock);
  216. new_handle->owner = cur_process.vmid;
  217. INIT_LISTP(&new_handle->epolls);
  218. return new_handle;
  219. }
  220. static int __set_new_fd_handle(struct shim_fd_handle** fdhdl, FDTYPE fd, struct shim_handle* hdl,
  221. int flags) {
  222. struct shim_fd_handle* new_handle = *fdhdl;
  223. if (!new_handle) {
  224. new_handle = malloc(sizeof(struct shim_fd_handle));
  225. if (!new_handle)
  226. return -ENOMEM;
  227. *fdhdl = new_handle;
  228. }
  229. new_handle->vfd = fd;
  230. new_handle->flags = flags;
  231. get_handle(hdl);
  232. new_handle->handle = hdl;
  233. return 0;
  234. }
/* Install `hdl` at the lowest free fd in `handle_map` (current thread's map
 * when NULL). Returns the chosen fd on success, negative errno on failure. */
int set_new_fd_handle(struct shim_handle* hdl, int flags, struct shim_handle_map* handle_map) {
    FDTYPE fd = 0;
    int new_size = 0;
    int ret = 0;

    if (!handle_map && !(handle_map = get_cur_handle_map(NULL)))
        return -EBADF;

    lock(&handle_map->lock);

    if (!handle_map->map || handle_map->fd_size < INIT_HANDLE_MAP_SIZE)
        new_size = INIT_HANDLE_MAP_SIZE;

    if (!handle_map->map)
        goto extend; /* no map yet: jump into the loop body to allocate it */

    /* scan upward from fd 0 for the first unallocated slot, enlarging the
     * map when the scan runs off its end */
    if (handle_map->fd_top != FD_NULL)
        do {
            ++fd;
            if (fd == handle_map->fd_size) {
                new_size = handle_map->fd_size < new_size ? new_size : handle_map->fd_size * 2;
            extend:
                if (!__enlarge_handle_map(handle_map, new_size)) {
                    ret = -ENOMEM;
                    goto out;
                }
            }
        } while (handle_map->fd_top != FD_NULL && fd <= handle_map->fd_top &&
                 HANDLE_ALLOCATED(handle_map->map[fd]));

    if (handle_map->fd_top == FD_NULL || fd > handle_map->fd_top)
        handle_map->fd_top = fd;

    ret = __set_new_fd_handle(&handle_map->map[fd], fd, hdl, flags);

    if (ret < 0) {
        /* installation failed: roll fd_top back if we had just raised it */
        if (fd == handle_map->fd_top)
            handle_map->fd_top = fd ? fd - 1 : FD_NULL;
    } else
        ret = fd;

out:
    unlock(&handle_map->lock);
    return ret;
}
  271. int set_new_fd_handle_by_fd(FDTYPE fd, struct shim_handle* hdl, int flags,
  272. struct shim_handle_map* handle_map) {
  273. int new_size = 0;
  274. int ret = 0;
  275. if (!handle_map && !(handle_map = get_cur_handle_map(NULL)))
  276. return -EBADF;
  277. lock(&handle_map->lock);
  278. if (!handle_map->map || handle_map->fd_size < INIT_HANDLE_MAP_SIZE)
  279. new_size = INIT_HANDLE_MAP_SIZE;
  280. if (!handle_map->map)
  281. goto extend;
  282. if (fd >= handle_map->fd_size) {
  283. new_size = handle_map->fd_size < new_size ? new_size : handle_map->fd_size;
  284. extend:
  285. while (new_size <= fd) new_size *= 2;
  286. if (!__enlarge_handle_map(handle_map, new_size)) {
  287. ret = -ENOMEM;
  288. goto out;
  289. }
  290. }
  291. if (handle_map->fd_top != FD_NULL && fd <= handle_map->fd_top &&
  292. HANDLE_ALLOCATED(handle_map->map[fd])) {
  293. ret = -EBADF;
  294. goto out;
  295. }
  296. if (handle_map->fd_top == FD_NULL || fd > handle_map->fd_top)
  297. handle_map->fd_top = fd;
  298. struct shim_fd_handle* new_handle = handle_map->map[fd];
  299. if (!new_handle) {
  300. new_handle = malloc(sizeof(struct shim_fd_handle));
  301. if (!new_handle) {
  302. ret = -ENOMEM;
  303. goto out;
  304. }
  305. handle_map->map[fd] = new_handle;
  306. }
  307. ret = __set_new_fd_handle(&handle_map->map[fd], fd, hdl, flags);
  308. if (ret < 0) {
  309. if (fd == handle_map->fd_top)
  310. handle_map->fd_top = fd ? fd - 1 : FD_NULL;
  311. } else
  312. ret = fd;
  313. out:
  314. unlock(&handle_map->lock);
  315. return ret;
  316. }
  317. void flush_handle(struct shim_handle* hdl) {
  318. if (hdl->fs && hdl->fs->fs_ops && hdl->fs->fs_ops->flush)
  319. hdl->fs->fs_ops->flush(hdl);
  320. }
  321. static inline __attribute__((unused)) const char* __handle_name(struct shim_handle* hdl) {
  322. if (!qstrempty(&hdl->path))
  323. return qstrgetstr(&hdl->path);
  324. if (!qstrempty(&hdl->uri))
  325. return qstrgetstr(&hdl->uri);
  326. if (hdl->fs_type[0])
  327. return hdl->fs_type;
  328. return "(unknown)";
  329. }
/* Take one reference on `hdl`. */
void get_handle(struct shim_handle* hdl) {
#ifdef DEBUG_REF
    int ref_count = REF_INC(hdl->ref_count);

    debug("get handle %p(%s) (ref_count = %d)\n", hdl, __handle_name(hdl), ref_count);
#else
    REF_INC(hdl->ref_count);
#endif
}
  338. static void destroy_handle(struct shim_handle* hdl) {
  339. destroy_lock(&hdl->lock);
  340. if (memory_migrated(hdl))
  341. memset(hdl, 0, sizeof(struct shim_handle));
  342. else
  343. free_mem_obj_to_mgr(handle_mgr, hdl);
  344. }
  345. extern int delete_from_epoll_handles(struct shim_handle* handle);
/* Drop one reference on `hdl`; when the count reaches zero, tear the handle
 * down: release directory dentries or call the fs close callback, detach
 * from epolls, free the qstrings, close the PAL handle, release the dentry
 * and mount references, and finally free the handle itself. */
void put_handle(struct shim_handle* hdl) {
    int ref_count = REF_DEC(hdl->ref_count);

#ifdef DEBUG_REF
    debug("put handle %p(%s) (ref_count = %d)\n", hdl, __handle_name(hdl), ref_count);
#endif

    if (!ref_count) {
        if (hdl->type == TYPE_DIR) {
            /* directory handles hold dentry references for ".", ".." and
             * every cached child entry; release them all */
            struct shim_dir_handle* dir = &hdl->dir_info;

            if (dir->dot) {
                put_dentry(dir->dot);
                dir->dot = NULL;
            }

            if (dir->dotdot) {
                put_dentry(dir->dotdot);
                dir->dotdot = NULL;
            }

            /* (void*)-1 marks an uninitialized listing -- nothing to free */
            if (dir->ptr != (void*)-1) {
                while (dir->ptr && *dir->ptr) {
                    struct shim_dentry* dent = *dir->ptr;
                    put_dentry(dent);
                    *(dir->ptr++) = NULL;
                }
            }
        } else {
            if (hdl->fs && hdl->fs->fs_ops && hdl->fs->fs_ops->close)
                hdl->fs->fs_ops->close(hdl);
        }

        delete_from_epoll_handles(hdl);

        if (hdl->fs && hdl->fs->fs_ops && hdl->fs->fs_ops->hput)
            hdl->fs->fs_ops->hput(hdl);

        qstrfree(&hdl->path);
        qstrfree(&hdl->uri);

        if (hdl->pal_handle) {
#ifdef DEBUG_REF
            debug("handle %p closes PAL handle %p\n", hdl, hdl->pal_handle);
#endif
            DkObjectClose(hdl->pal_handle);
            hdl->pal_handle = NULL;
        }

        if (hdl->dentry)
            put_dentry(hdl->dentry);

        if (hdl->fs)
            put_mount(hdl->fs);

        destroy_handle(hdl);
    }
}
  392. off_t get_file_size(struct shim_handle* hdl) {
  393. if (!hdl->fs || !hdl->fs->fs_ops)
  394. return -EINVAL;
  395. if (hdl->fs->fs_ops->poll)
  396. return hdl->fs->fs_ops->poll(hdl, FS_POLL_SZ);
  397. if (hdl->fs->fs_ops->hstat) {
  398. struct stat stat;
  399. int ret = hdl->fs->fs_ops->hstat(hdl, &stat);
  400. if (ret < 0)
  401. return ret;
  402. return stat.st_size;
  403. }
  404. return 0;
  405. }
  406. void dup_fd_handle(struct shim_handle_map* map, const struct shim_fd_handle* old,
  407. struct shim_fd_handle* new) {
  408. struct shim_handle* replaced = NULL;
  409. lock(&map->lock);
  410. if (old->vfd != FD_NULL) {
  411. get_handle(old->handle);
  412. replaced = new->handle;
  413. new->handle = old->handle;
  414. }
  415. unlock(&map->lock);
  416. if (replaced)
  417. put_handle(replaced);
  418. }
  419. static struct shim_handle_map* get_new_handle_map(FDTYPE size) {
  420. struct shim_handle_map* handle_map = calloc(1, sizeof(struct shim_handle_map));
  421. if (!handle_map)
  422. return NULL;
  423. handle_map->map = calloc(size, sizeof(struct shim_fd_handle));
  424. if (!handle_map->map) {
  425. free(handle_map);
  426. return NULL;
  427. }
  428. handle_map->fd_top = FD_NULL;
  429. handle_map->fd_size = size;
  430. create_lock(&handle_map->lock);
  431. return handle_map;
  432. }
  433. static struct shim_handle_map* __enlarge_handle_map(struct shim_handle_map* map, FDTYPE size) {
  434. if (size <= map->fd_size)
  435. return map;
  436. struct shim_fd_handle** new_map = calloc(size, sizeof(new_map[0]));
  437. if (!new_map)
  438. return NULL;
  439. memcpy(new_map, map->map, map->fd_size * sizeof(new_map[0]));
  440. memset(new_map + map->fd_size, 0, (size - map->fd_size) * sizeof(new_map[0]));
  441. free(map->map);
  442. map->map = new_map;
  443. map->fd_size = size;
  444. return map;
  445. }
  446. int dup_handle_map(struct shim_handle_map** new, struct shim_handle_map* old_map) {
  447. lock(&old_map->lock);
  448. /* allocate a new handle mapping with the same size as
  449. the old one */
  450. struct shim_handle_map* new_map = get_new_handle_map(old_map->fd_size);
  451. if (!new_map)
  452. return -ENOMEM;
  453. new_map->fd_top = old_map->fd_top;
  454. if (old_map->fd_top == FD_NULL)
  455. goto done;
  456. for (int i = 0; i <= old_map->fd_top; i++) {
  457. struct shim_fd_handle* fd_old = old_map->map[i];
  458. struct shim_fd_handle* fd_new;
  459. /* now we go through the handle map and reassign each
  460. of them being allocated */
  461. if (HANDLE_ALLOCATED(fd_old)) {
  462. /* first, get the handle to prevent it from being deleted */
  463. struct shim_handle* hdl = fd_old->handle;
  464. get_handle(hdl);
  465. fd_new = malloc(sizeof(struct shim_fd_handle));
  466. if (!fd_new) {
  467. for (int j = 0; j < i; j++) {
  468. put_handle(new_map->map[j]->handle);
  469. free(new_map->map[j]);
  470. }
  471. unlock(&old_map->lock);
  472. *new = NULL;
  473. free(new_map);
  474. return -ENOMEM;
  475. }
  476. /* DP: I assume we really need a deep copy of the handle map? */
  477. new_map->map[i] = fd_new;
  478. fd_new->vfd = fd_old->vfd;
  479. fd_new->handle = hdl;
  480. fd_new->flags = fd_old->flags;
  481. }
  482. }
  483. done:
  484. unlock(&old_map->lock);
  485. *new = new_map;
  486. return 0;
  487. }
/* Take one reference on `map`. */
void get_handle_map(struct shim_handle_map* map) {
    REF_INC(map->ref_count);
}
  491. void put_handle_map(struct shim_handle_map* map) {
  492. int ref_count = REF_DEC(map->ref_count);
  493. if (!ref_count) {
  494. if (map->fd_top == FD_NULL)
  495. goto done;
  496. for (int i = 0; i <= map->fd_top; i++) {
  497. if (!map->map[i])
  498. continue;
  499. if (map->map[i]->vfd != FD_NULL) {
  500. struct shim_handle* handle = map->map[i]->handle;
  501. if (handle)
  502. put_handle(handle);
  503. }
  504. free(map->map[i]);
  505. }
  506. done:
  507. destroy_lock(&map->lock);
  508. free(map->map);
  509. free(map);
  510. }
  511. }
  512. int flush_handle_map(struct shim_handle_map* map) {
  513. get_handle_map(map);
  514. lock(&map->lock);
  515. if (map->fd_top == FD_NULL)
  516. goto done;
  517. /* now we go through the handle map and flush each handle */
  518. for (int i = 0; i <= map->fd_top; i++) {
  519. if (!HANDLE_ALLOCATED(map->map[i]))
  520. continue;
  521. struct shim_handle* handle = map->map[i]->handle;
  522. if (handle)
  523. flush_handle(handle);
  524. }
  525. done:
  526. unlock(&map->lock);
  527. put_handle_map(map);
  528. return 0;
  529. }
  530. int walk_handle_map(int (*callback)(struct shim_fd_handle*, struct shim_handle_map*),
  531. struct shim_handle_map* map) {
  532. int ret = 0;
  533. lock(&map->lock);
  534. if (map->fd_top == FD_NULL)
  535. goto done;
  536. for (int i = 0; i <= map->fd_top; i++) {
  537. if (!HANDLE_ALLOCATED(map->map[i]))
  538. continue;
  539. if ((ret = (*callback)(map->map[i], map)) < 0)
  540. break;
  541. }
  542. done:
  543. unlock(&map->lock);
  544. return ret;
  545. }
/* Checkpoint function for a shim_handle: copies the handle into the
 * checkpoint region and recursively checkpoints everything it references
 * (path/uri qstrings, mount, dentry, PAL handle, epoll fds). */
BEGIN_CP_FUNC(handle) {
    assert(size == sizeof(struct shim_handle));

    struct shim_handle* hdl = (struct shim_handle*)obj;
    struct shim_handle* new_hdl = NULL;

    ptr_t off = GET_FROM_CP_MAP(obj);

    if (!off) {
        /* first time this handle is checkpointed */
        off = ADD_CP_OFFSET(sizeof(struct shim_handle));
        ADD_TO_CP_MAP(obj, off);
        new_hdl = (struct shim_handle*)(base + off);

        lock(&hdl->lock);
        struct shim_mount* fs = hdl->fs;
        *new_hdl = *hdl;

        /* let the filesystem adjust the copy before migration */
        if (fs && fs->fs_ops && fs->fs_ops->checkout)
            fs->fs_ops->checkout(new_hdl);

        new_hdl->dentry = NULL;
        REF_SET(new_hdl->ref_count, 0);
        clear_lock(&new_hdl->lock);

        DO_CP_IN_MEMBER(qstr, new_hdl, path);
        DO_CP_IN_MEMBER(qstr, new_hdl, uri);

        /* only checkpoint the mount when a dentry references it; otherwise
         * the restore side re-finds the fs by fs_type */
        if (fs && hdl->dentry) {
            DO_CP_MEMBER(mount, hdl, new_hdl, fs);
        } else {
            new_hdl->fs = NULL;
        }

        if (hdl->dentry)
            DO_CP_MEMBER(dentry, hdl, new_hdl, dentry);

        if (new_hdl->pal_handle) {
            /* register the PAL handle so it can be reconnected on restore */
            struct shim_palhdl_entry* entry;
            DO_CP(palhdl, hdl->pal_handle, &entry);
            entry->uri = &new_hdl->uri;
            entry->phandle = &new_hdl->pal_handle;
        }

        if (hdl->type == TYPE_EPOLL)
            DO_CP(epoll_fd, &hdl->info.epoll.fds, &new_hdl->info.epoll.fds);

        INIT_LISTP(&new_hdl->epolls);
        unlock(&hdl->lock);

        ADD_CP_FUNC_ENTRY(off);
    } else {
        /* already checkpointed; reuse the existing copy */
        new_hdl = (struct shim_handle*)(base + off);
    }

    if (objp)
        *objp = (void*)new_hdl;
}
END_CP_FUNC(handle)
/* Restore function for a shim_handle: rebases checkpointed pointers,
 * recreates the lock, and reattaches the handle to its filesystem. */
BEGIN_RS_FUNC(handle) {
    struct shim_handle* hdl = (void*)(base + GET_CP_FUNC_ENTRY());
    __UNUSED(offset);

    CP_REBASE(hdl->fs);
    CP_REBASE(hdl->dentry);
    CP_REBASE(hdl->epolls);

    create_lock(&hdl->lock);

    if (!hdl->fs) {
        /* no mount was checkpointed; look up the built-in fs by type name */
        assert(hdl->fs_type);
        search_builtin_fs(hdl->fs_type, &hdl->fs);
        if (!hdl->fs)
            return -EINVAL;
    }

    if (hdl->fs && hdl->fs->fs_ops && hdl->fs->fs_ops->checkin)
        hdl->fs->fs_ops->checkin(hdl);

    DEBUG_RS("path=%s,type=%s,uri=%s,flags=%03o", qstrgetstr(&hdl->path), hdl->fs_type,
             qstrgetstr(&hdl->uri), hdl->flags);
}
END_RS_FUNC(handle)
/* Checkpoint function for a shim_fd_handle: always emits a fresh copy (fd
 * entries are not deduplicated via the CP map) and checkpoints the handle
 * it points to. No restore function is needed -- handle_map's restore
 * rebases the entries in place. */
BEGIN_CP_FUNC(fd_handle) {
    assert(size == sizeof(struct shim_fd_handle));

    struct shim_fd_handle* fdhdl = (struct shim_fd_handle*)obj;
    struct shim_fd_handle* new_fdhdl = NULL;

    ptr_t off = ADD_CP_OFFSET(sizeof(struct shim_fd_handle));
    new_fdhdl = (struct shim_fd_handle*)(base + off);
    memcpy(new_fdhdl, fdhdl, sizeof(struct shim_fd_handle));
    DO_CP(handle, fdhdl->handle, &new_fdhdl->handle);
    ADD_CP_FUNC_ENTRY(off);

    if (objp)
        *objp = (void*)new_fdhdl;
}
END_CP_FUNC_NO_RS(fd_handle)
/* Checkpoint function for a handle map: copies the map header plus a
 * pointer array sized to the highest used fd, then checkpoints every
 * allocated entry. */
BEGIN_CP_FUNC(handle_map) {
    assert(size >= sizeof(struct shim_handle_map));

    struct shim_handle_map* handle_map = (struct shim_handle_map*)obj;
    struct shim_handle_map* new_handle_map = NULL;
    struct shim_fd_handle** ptr_array;

    lock(&handle_map->lock);

    /* only fds up to fd_top need to be migrated */
    int fd_size = handle_map->fd_top != FD_NULL ? handle_map->fd_top + 1 : 0;

    size = sizeof(struct shim_handle_map) + (sizeof(struct shim_fd_handle*) * fd_size);

    ptr_t off = GET_FROM_CP_MAP(obj);

    if (!off) {
        off = ADD_CP_OFFSET(size);
        new_handle_map = (struct shim_handle_map*)(base + off);

        memcpy(new_handle_map, handle_map, sizeof(struct shim_handle_map));

        /* the fd pointer array is placed directly after the map header */
        ptr_array = (void*)new_handle_map + sizeof(struct shim_handle_map);

        new_handle_map->fd_size = fd_size;
        new_handle_map->map = fd_size ? ptr_array : NULL;

        REF_SET(new_handle_map->ref_count, 0);
        clear_lock(&new_handle_map->lock);

        for (int i = 0; i < fd_size; i++) {
            if (HANDLE_ALLOCATED(handle_map->map[i]))
                DO_CP(fd_handle, handle_map->map[i], &ptr_array[i]);
            else
                ptr_array[i] = NULL;
        }

        ADD_CP_FUNC_ENTRY(off);
    } else {
        /* already checkpointed; reuse the existing copy */
        new_handle_map = (struct shim_handle_map*)(base + off);
    }

    unlock(&handle_map->lock);

    if (objp)
        *objp = (void*)new_handle_map;
}
END_CP_FUNC(handle_map)
/* Restore function for a handle map: rebases the slot array and every
 * entry, and takes one reference on each restored handle on behalf of the
 * map. */
BEGIN_RS_FUNC(handle_map) {
    struct shim_handle_map* handle_map = (void*)(base + GET_CP_FUNC_ENTRY());
    __UNUSED(offset);

    CP_REBASE(handle_map->map);
    assert(handle_map->map);

    DEBUG_RS("size=%d,top=%d", handle_map->fd_size, handle_map->fd_top);

    create_lock(&handle_map->lock);
    lock(&handle_map->lock);

    if (handle_map->fd_top != FD_NULL)
        for (int i = 0; i <= handle_map->fd_top; i++) {
            CP_REBASE(handle_map->map[i]);
            if (HANDLE_ALLOCATED(handle_map->map[i])) {
                CP_REBASE(handle_map->map[i]->handle);
                struct shim_handle* hdl = handle_map->map[i]->handle;
                assert(hdl);
                get_handle(hdl); /* the restored map owns one reference */
                DEBUG_RS("[%d]%s", i, qstrempty(&hdl->uri) ? hdl->fs_type : qstrgetstr(&hdl->uri));
            }
        }

    unlock(&handle_map->lock);
}
END_RS_FUNC(handle_map)