shim_handle.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894
  1. /* Copyright (C) 2014 Stony Brook University
  2. This file is part of Graphene Library OS.
  3. Graphene Library OS is free software: you can redistribute it and/or
  4. modify it under the terms of the GNU Lesser General Public License
  5. as published by the Free Software Foundation, either version 3 of the
  6. License, or (at your option) any later version.
  7. Graphene Library OS is distributed in the hope that it will be useful,
  8. but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. GNU Lesser General Public License for more details.
  11. You should have received a copy of the GNU Lesser General Public License
  12. along with this program. If not, see <http://www.gnu.org/licenses/>. */
  13. /*
  14. * shim_handle.c
  15. *
  16. * This file contains codes to maintain bookkeeping for handles in library OS.
  17. */
  18. #include <pal.h>
  19. #include <pal_error.h>
  20. #include <shim_checkpoint.h>
  21. #include <shim_fs.h>
  22. #include <shim_handle.h>
  23. #include <shim_internal.h>
  24. #include <shim_thread.h>
  25. static struct shim_lock handle_mgr_lock;
  26. #define HANDLE_MGR_ALLOC 32
  27. #define SYSTEM_LOCK() lock(&handle_mgr_lock)
  28. #define SYSTEM_UNLOCK() unlock(&handle_mgr_lock)
  29. #define SYSTEM_LOCKED() locked(&handle_mgr_lock)
  30. #define OBJ_TYPE struct shim_handle
  31. #include <memmgr.h>
  32. static MEM_MGR handle_mgr = NULL;
  33. #define INIT_HANDLE_MAP_SIZE 32
  34. //#define DEBUG_REF
  35. static inline int init_tty_handle(struct shim_handle* hdl, bool write) {
  36. struct shim_dentry* dent = NULL;
  37. int ret;
  38. struct shim_thread* cur_thread = get_cur_thread();
  39. __UNUSED(cur_thread);
  40. /* XXX: Try getting the root FS from current thread? */
  41. assert(cur_thread);
  42. assert(cur_thread->root);
  43. if ((ret = path_lookupat(NULL, "/dev/tty", LOOKUP_OPEN, &dent, NULL)) < 0)
  44. return ret;
  45. int flags = (write ? O_WRONLY : O_RDONLY) | O_APPEND;
  46. struct shim_mount* fs = dent->fs;
  47. ret = fs->d_ops->open(hdl, dent, flags);
  48. if (ret < 0)
  49. return ret;
  50. set_handle_fs(hdl, fs);
  51. hdl->dentry = dent;
  52. hdl->flags = O_RDWR | O_APPEND | 0100000;
  53. size_t size;
  54. char* path = dentry_get_path(dent, true, &size);
  55. if (path)
  56. qstrsetstr(&hdl->path, path, size);
  57. else
  58. qstrsetstr(&hdl->path, "/dev/tty", 8);
  59. return 0;
  60. }
/* Create a TYPE_FILE handle describing the executable named by
 * PAL_CB(executable) and install it as thread->exec (under thread->lock).
 * Returns 0 (also when there is no executable) or -ENOMEM. */
static inline int init_exec_handle(struct shim_thread* thread) {
    if (!PAL_CB(executable))
        return 0;

    struct shim_handle* exec = get_new_handle();
    if (!exec)
        return -ENOMEM;

    qstrsetstr(&exec->uri, PAL_CB(executable), strlen(PAL_CB(executable)));
    exec->type = TYPE_FILE;
    exec->flags = O_RDONLY;
    exec->acc_mode = MAY_READ;

    struct shim_mount* fs = find_mount_from_uri(PAL_CB(executable));
    if (fs) {
        /* skip the mount-point prefix of the URI */
        const char* p = PAL_CB(executable) + fs->uri.len;
        /*
         * Lookup for PAL_CB(executable) needs to be done under a given
         * mount point, which requires a relative path name.
         * On the other hand, the one in manifest file can be absolute path.
         */
        while (*p == '/') {
            p++;
        }
        /* best-effort lookup: a failed lookup simply leaves exec->dentry NULL */
        path_lookupat(fs->root, p, 0, &exec->dentry, fs);
        set_handle_fs(exec, fs);
        if (exec->dentry) {
            size_t len;
            const char* path = dentry_get_path(exec->dentry, true, &len);
            qstrsetstr(&exec->path, path, len);
        }
        put_mount(fs); /* drop the ref taken by find_mount_from_uri() */
    } else {
        set_handle_fs(exec, &chroot_builtin_fs);
    }

    lock(&thread->lock);
    thread->exec = exec; /* thread->exec takes over the handle reference */
    unlock(&thread->lock);

    return 0;
}
/* Forward declarations for helpers defined later in this file. */
static struct shim_handle_map* get_new_handle_map(FDTYPE size);

/* NOTE(review): appears unused within this file — presumably referenced
 * elsewhere; confirm before removing. */
PAL_HANDLE shim_stdio = NULL;

static int __set_new_fd_handle(struct shim_fd_handle** fdhdl, FDTYPE fd, struct shim_handle* hdl,
                               int flags);
static struct shim_handle_map* __enlarge_handle_map(struct shim_handle_map* map, FDTYPE size);
  103. int init_handle(void) {
  104. create_lock(&handle_mgr_lock);
  105. handle_mgr = create_mem_mgr(init_align_up(HANDLE_MGR_ALLOC));
  106. if (!handle_mgr)
  107. return -ENOMEM;
  108. return 0;
  109. }
  110. int init_important_handles(void) {
  111. struct shim_thread* thread = get_cur_thread();
  112. if (thread->handle_map)
  113. goto done;
  114. struct shim_handle_map* handle_map = get_cur_handle_map(thread);
  115. if (!handle_map) {
  116. handle_map = get_new_handle_map(INIT_HANDLE_MAP_SIZE);
  117. if (!handle_map)
  118. return -ENOMEM;
  119. set_handle_map(thread, handle_map);
  120. }
  121. lock(&handle_map->lock);
  122. if (handle_map->fd_size < 3) {
  123. if (!__enlarge_handle_map(handle_map, INIT_HANDLE_MAP_SIZE)) {
  124. unlock(&handle_map->lock);
  125. return -ENOMEM;
  126. }
  127. }
  128. struct shim_handle* hdl = NULL;
  129. int ret;
  130. for (int fd = 0; fd < 3; fd++)
  131. if (!HANDLE_ALLOCATED(handle_map->map[fd])) {
  132. if (!hdl) {
  133. hdl = get_new_handle();
  134. if (!hdl)
  135. return -ENOMEM;
  136. if ((ret = init_tty_handle(hdl, fd)) < 0) {
  137. put_handle(hdl);
  138. return ret;
  139. }
  140. } else {
  141. get_handle(hdl);
  142. }
  143. __set_new_fd_handle(&handle_map->map[fd], fd, hdl, 0);
  144. put_handle(hdl);
  145. if (fd != 1)
  146. hdl = NULL;
  147. } else {
  148. if (fd == 1)
  149. hdl = handle_map->map[fd]->handle;
  150. }
  151. if (handle_map->fd_top == FD_NULL || handle_map->fd_top < 2)
  152. handle_map->fd_top = 2;
  153. unlock(&handle_map->lock);
  154. done:
  155. init_exec_handle(thread);
  156. return 0;
  157. }
  158. struct shim_handle* __get_fd_handle(FDTYPE fd, int* flags, struct shim_handle_map* map) {
  159. assert(locked(&map->lock));
  160. struct shim_fd_handle* fd_handle = NULL;
  161. if (map->fd_top != FD_NULL && fd <= map->fd_top) {
  162. fd_handle = map->map[fd];
  163. if (!HANDLE_ALLOCATED(fd_handle))
  164. return NULL;
  165. if (flags)
  166. *flags = fd_handle->flags;
  167. return fd_handle->handle;
  168. }
  169. return NULL;
  170. }
  171. struct shim_handle* get_fd_handle(FDTYPE fd, int* flags, struct shim_handle_map* map) {
  172. if (!map)
  173. map = get_cur_handle_map(NULL);
  174. struct shim_handle* hdl = NULL;
  175. lock(&map->lock);
  176. if ((hdl = __get_fd_handle(fd, flags, map)))
  177. get_handle(hdl);
  178. unlock(&map->lock);
  179. return hdl;
  180. }
/* Clear fd slot 'fd' in 'map' (caller holds map->lock) and return the handle
 * it held WITHOUT dropping a reference — the caller takes over the reference
 * the map owned. Optionally returns the fd flags. Recomputes map->fd_top when
 * the highest allocated fd was removed. */
struct shim_handle* __detach_fd_handle(struct shim_fd_handle* fd, int* flags,
                                       struct shim_handle_map* map) {
    assert(locked(&map->lock));

    struct shim_handle* handle = NULL;

    if (HANDLE_ALLOCATED(fd)) {
        int vfd = fd->vfd;
        handle = fd->handle;
        if (flags)
            *flags = fd->flags;

        fd->vfd = FD_NULL;
        fd->handle = NULL;
        fd->flags = 0;

        /* removed the top fd: scan downward for the next allocated slot
         * (fd_top becomes FD_NULL if none remain) */
        if (vfd == map->fd_top)
            do {
                map->fd_top = vfd ? vfd - 1 : FD_NULL;
                vfd--;
            } while (vfd >= 0 && !HANDLE_ALLOCATED(map->map[vfd]));
    }

    return handle;
}
  201. struct shim_handle* detach_fd_handle(FDTYPE fd, int* flags, struct shim_handle_map* handle_map) {
  202. struct shim_handle* handle = NULL;
  203. if (!handle_map && !(handle_map = get_cur_handle_map(NULL)))
  204. return NULL;
  205. lock(&handle_map->lock);
  206. if (fd < handle_map->fd_size)
  207. handle = __detach_fd_handle(handle_map->map[fd], flags, handle_map);
  208. unlock(&handle_map->lock);
  209. return handle;
  210. }
  211. struct shim_handle* get_new_handle(void) {
  212. struct shim_handle* new_handle =
  213. get_mem_obj_from_mgr_enlarge(handle_mgr, size_align_up(HANDLE_MGR_ALLOC));
  214. if (!new_handle)
  215. return NULL;
  216. memset(new_handle, 0, sizeof(struct shim_handle));
  217. REF_SET(new_handle->ref_count, 1);
  218. create_lock(&new_handle->lock);
  219. new_handle->owner = cur_process.vmid;
  220. INIT_LISTP(&new_handle->epolls);
  221. return new_handle;
  222. }
  223. static int __set_new_fd_handle(struct shim_fd_handle** fdhdl, FDTYPE fd, struct shim_handle* hdl,
  224. int flags) {
  225. struct shim_fd_handle* new_handle = *fdhdl;
  226. if (!new_handle) {
  227. new_handle = malloc(sizeof(struct shim_fd_handle));
  228. if (!new_handle)
  229. return -ENOMEM;
  230. *fdhdl = new_handle;
  231. }
  232. new_handle->vfd = fd;
  233. new_handle->flags = flags;
  234. get_handle(hdl);
  235. new_handle->handle = hdl;
  236. return 0;
  237. }
/* Install 'hdl' at the lowest free fd of 'handle_map' (current thread's map
 * when NULL), enlarging the map if needed. Takes a reference on 'hdl'.
 * Returns the chosen fd or a negative error code. */
int set_new_fd_handle(struct shim_handle* hdl, int flags, struct shim_handle_map* handle_map) {
    int ret = -EMFILE;

    if (!handle_map && !(handle_map = get_cur_handle_map(NULL)))
        return -EBADF;

    lock(&handle_map->lock);

    FDTYPE fd = 0;
    if (handle_map->fd_top != FD_NULL) {
        // find first free fd
        while (fd <= handle_map->fd_top && HANDLE_ALLOCATED(handle_map->map[fd])) {
            fd++;
        }

        if (fd > handle_map->fd_top) {
            // no free fd found (fd == handle_map->fd_top + 1)

            if (fd >= handle_map->fd_size) {
                // no space left, need to enlarge handle_map->map
                if (!__enlarge_handle_map(handle_map, handle_map->fd_size * 2)) {
                    ret = -ENOMEM;
                    goto out;
                }
            }
        }
    } else {
        /* empty map: fd 0 is free (assumes fd_size > 0, i.e. map was
         * created via get_new_handle_map) */
        fd = 0;
    }

    if ((ret = __set_new_fd_handle(&handle_map->map[fd], fd, hdl, flags)) < 0) {
        goto out;
    }

    ret = fd;

    if (handle_map->fd_top == FD_NULL || fd > handle_map->fd_top) {
        handle_map->fd_top = fd;
    }

out:
    unlock(&handle_map->lock);
    return ret;
}
  273. int set_new_fd_handle_by_fd(FDTYPE fd, struct shim_handle* hdl, int flags,
  274. struct shim_handle_map* handle_map) {
  275. int new_size = 0;
  276. int ret = 0;
  277. if (!handle_map && !(handle_map = get_cur_handle_map(NULL)))
  278. return -EBADF;
  279. lock(&handle_map->lock);
  280. if (!handle_map->map || handle_map->fd_size < INIT_HANDLE_MAP_SIZE)
  281. new_size = INIT_HANDLE_MAP_SIZE;
  282. if (!handle_map->map)
  283. goto extend;
  284. if (fd >= handle_map->fd_size) {
  285. new_size = handle_map->fd_size < new_size ? new_size : handle_map->fd_size;
  286. extend:
  287. while (new_size <= fd) new_size *= 2;
  288. if (!__enlarge_handle_map(handle_map, new_size)) {
  289. ret = -ENOMEM;
  290. goto out;
  291. }
  292. }
  293. if (handle_map->fd_top != FD_NULL && fd <= handle_map->fd_top &&
  294. HANDLE_ALLOCATED(handle_map->map[fd])) {
  295. ret = -EBADF;
  296. goto out;
  297. }
  298. if (handle_map->fd_top == FD_NULL || fd > handle_map->fd_top)
  299. handle_map->fd_top = fd;
  300. struct shim_fd_handle* new_handle = handle_map->map[fd];
  301. if (!new_handle) {
  302. new_handle = malloc(sizeof(struct shim_fd_handle));
  303. if (!new_handle) {
  304. ret = -ENOMEM;
  305. goto out;
  306. }
  307. handle_map->map[fd] = new_handle;
  308. }
  309. ret = __set_new_fd_handle(&handle_map->map[fd], fd, hdl, flags);
  310. if (ret < 0) {
  311. if (fd == handle_map->fd_top)
  312. handle_map->fd_top = fd ? fd - 1 : FD_NULL;
  313. } else {
  314. ret = fd;
  315. }
  316. out:
  317. unlock(&handle_map->lock);
  318. return ret;
  319. }
  320. void flush_handle(struct shim_handle* hdl) {
  321. if (hdl->fs && hdl->fs->fs_ops && hdl->fs->fs_ops->flush)
  322. hdl->fs->fs_ops->flush(hdl);
  323. }
  324. static inline __attribute__((unused)) const char* __handle_name(struct shim_handle* hdl) {
  325. if (!qstrempty(&hdl->path))
  326. return qstrgetstr(&hdl->path);
  327. if (!qstrempty(&hdl->uri))
  328. return qstrgetstr(&hdl->uri);
  329. if (hdl->fs_type[0])
  330. return hdl->fs_type;
  331. return "(unknown)";
  332. }
/* Take one reference on 'hdl'. */
void get_handle(struct shim_handle* hdl) {
#ifdef DEBUG_REF
    int ref_count = REF_INC(hdl->ref_count);

    debug("get handle %p(%s) (ref_count = %d)\n", hdl, __handle_name(hdl), ref_count);
#else
    REF_INC(hdl->ref_count);
#endif
}
  341. static void destroy_handle(struct shim_handle* hdl) {
  342. destroy_lock(&hdl->lock);
  343. if (memory_migrated(hdl))
  344. memset(hdl, 0, sizeof(struct shim_handle));
  345. else
  346. free_mem_obj_to_mgr(handle_mgr, hdl);
  347. }
/* Drop one reference on 'hdl'; on the last reference, tear the handle down:
 * release dentries held by directory handles, call the fs close/hput hooks,
 * free qstrings, close the PAL handle, drop dentry/mount refs, and free the
 * handle itself. */
void put_handle(struct shim_handle* hdl) {
    int ref_count = REF_DEC(hdl->ref_count);

#ifdef DEBUG_REF
    debug("put handle %p(%s) (ref_count = %d)\n", hdl, __handle_name(hdl), ref_count);
#endif

    if (!ref_count) {
        if (hdl->type == TYPE_DIR) {
            struct shim_dir_handle* dir = &hdl->dir_info;

            /* drop the cached "." and ".." dentries */
            if (dir->dot) {
                put_dentry(dir->dot);
                dir->dot = NULL;
            }

            if (dir->dotdot) {
                put_dentry(dir->dotdot);
                dir->dotdot = NULL;
            }

            /* (void*)-1 marks an invalid/unset listing; otherwise drop every
             * remaining dentry in the NULL-terminated listing array */
            if (dir->ptr != (void*)-1) {
                while (dir->ptr && *dir->ptr) {
                    struct shim_dentry* dent = *dir->ptr;
                    put_dentry(dent);
                    *(dir->ptr++) = NULL;
                }
            }
        } else {
            if (hdl->fs && hdl->fs->fs_ops && hdl->fs->fs_ops->close)
                hdl->fs->fs_ops->close(hdl);

            if (hdl->type == TYPE_SOCK && hdl->info.sock.peek_buffer) {
                free(hdl->info.sock.peek_buffer);
                hdl->info.sock.peek_buffer = NULL;
            }
        }

        delete_from_epoll_handles(hdl);

        if (hdl->fs && hdl->fs->fs_ops && hdl->fs->fs_ops->hput)
            hdl->fs->fs_ops->hput(hdl);

        qstrfree(&hdl->path);
        qstrfree(&hdl->uri);

        if (hdl->pal_handle) {
#ifdef DEBUG_REF
            debug("handle %p closes PAL handle %p\n", hdl, hdl->pal_handle);
#endif
            DkObjectClose(hdl->pal_handle);
            hdl->pal_handle = NULL;
        }

        if (hdl->dentry)
            put_dentry(hdl->dentry);

        if (hdl->fs)
            put_mount(hdl->fs);

        destroy_handle(hdl);
    }
}
  398. off_t get_file_size(struct shim_handle* hdl) {
  399. if (!hdl->fs || !hdl->fs->fs_ops)
  400. return -EINVAL;
  401. if (hdl->fs->fs_ops->poll)
  402. return hdl->fs->fs_ops->poll(hdl, FS_POLL_SZ);
  403. if (hdl->fs->fs_ops->hstat) {
  404. struct stat stat;
  405. int ret = hdl->fs->fs_ops->hstat(hdl, &stat);
  406. if (ret < 0)
  407. return ret;
  408. return stat.st_size;
  409. }
  410. return 0;
  411. }
  412. void dup_fd_handle(struct shim_handle_map* map, const struct shim_fd_handle* old,
  413. struct shim_fd_handle* new) {
  414. struct shim_handle* replaced = NULL;
  415. lock(&map->lock);
  416. if (old->vfd != FD_NULL) {
  417. get_handle(old->handle);
  418. replaced = new->handle;
  419. new->handle = old->handle;
  420. }
  421. unlock(&map->lock);
  422. if (replaced)
  423. put_handle(replaced);
  424. }
  425. static struct shim_handle_map* get_new_handle_map(FDTYPE size) {
  426. struct shim_handle_map* handle_map = calloc(1, sizeof(struct shim_handle_map));
  427. if (!handle_map)
  428. return NULL;
  429. handle_map->map = calloc(size, sizeof(struct shim_fd_handle));
  430. if (!handle_map->map) {
  431. free(handle_map);
  432. return NULL;
  433. }
  434. handle_map->fd_top = FD_NULL;
  435. handle_map->fd_size = size;
  436. create_lock(&handle_map->lock);
  437. return handle_map;
  438. }
  439. static struct shim_handle_map* __enlarge_handle_map(struct shim_handle_map* map, FDTYPE size) {
  440. assert(locked(&map->lock));
  441. if (size <= map->fd_size)
  442. return map;
  443. struct shim_fd_handle** new_map = calloc(size, sizeof(new_map[0]));
  444. if (!new_map)
  445. return NULL;
  446. memcpy(new_map, map->map, map->fd_size * sizeof(new_map[0]));
  447. free(map->map);
  448. map->map = new_map;
  449. map->fd_size = size;
  450. return map;
  451. }
  452. int dup_handle_map(struct shim_handle_map** new, struct shim_handle_map* old_map) {
  453. lock(&old_map->lock);
  454. /* allocate a new handle mapping with the same size as
  455. the old one */
  456. struct shim_handle_map* new_map = get_new_handle_map(old_map->fd_size);
  457. if (!new_map)
  458. return -ENOMEM;
  459. new_map->fd_top = old_map->fd_top;
  460. if (old_map->fd_top == FD_NULL)
  461. goto done;
  462. for (int i = 0; i <= old_map->fd_top; i++) {
  463. struct shim_fd_handle* fd_old = old_map->map[i];
  464. struct shim_fd_handle* fd_new;
  465. /* now we go through the handle map and reassign each
  466. of them being allocated */
  467. if (HANDLE_ALLOCATED(fd_old)) {
  468. /* first, get the handle to prevent it from being deleted */
  469. struct shim_handle* hdl = fd_old->handle;
  470. get_handle(hdl);
  471. fd_new = malloc(sizeof(struct shim_fd_handle));
  472. if (!fd_new) {
  473. for (int j = 0; j < i; j++) {
  474. put_handle(new_map->map[j]->handle);
  475. free(new_map->map[j]);
  476. }
  477. unlock(&old_map->lock);
  478. *new = NULL;
  479. free(new_map);
  480. return -ENOMEM;
  481. }
  482. /* DP: I assume we really need a deep copy of the handle map? */
  483. new_map->map[i] = fd_new;
  484. fd_new->vfd = fd_old->vfd;
  485. fd_new->handle = hdl;
  486. fd_new->flags = fd_old->flags;
  487. }
  488. }
  489. done:
  490. unlock(&old_map->lock);
  491. *new = new_map;
  492. return 0;
  493. }
/* Take one reference on 'map'. */
void get_handle_map(struct shim_handle_map* map) {
    REF_INC(map->ref_count);
}
/* Drop one reference on 'map'; on the last reference, release every installed
 * handle and free all map storage. No lock is taken here — with the count at
 * zero there can be no concurrent users. */
void put_handle_map(struct shim_handle_map* map) {
    int ref_count = REF_DEC(map->ref_count);

    if (!ref_count) {
        if (map->fd_top == FD_NULL)
            goto done;

        for (int i = 0; i <= map->fd_top; i++) {
            if (!map->map[i])
                continue;

            if (map->map[i]->vfd != FD_NULL) {
                struct shim_handle* handle = map->map[i]->handle;

                if (handle)
                    put_handle(handle);
            }

            free(map->map[i]);
        }

    done:
        destroy_lock(&map->lock);
        free(map->map);
        free(map);
    }
}
  518. int flush_handle_map(struct shim_handle_map* map) {
  519. get_handle_map(map);
  520. lock(&map->lock);
  521. if (map->fd_top == FD_NULL)
  522. goto done;
  523. /* now we go through the handle map and flush each handle */
  524. for (int i = 0; i <= map->fd_top; i++) {
  525. if (!HANDLE_ALLOCATED(map->map[i]))
  526. continue;
  527. struct shim_handle* handle = map->map[i]->handle;
  528. if (handle)
  529. flush_handle(handle);
  530. }
  531. done:
  532. unlock(&map->lock);
  533. put_handle_map(map);
  534. return 0;
  535. }
  536. int walk_handle_map(int (*callback)(struct shim_fd_handle*, struct shim_handle_map*),
  537. struct shim_handle_map* map) {
  538. int ret = 0;
  539. lock(&map->lock);
  540. if (map->fd_top == FD_NULL)
  541. goto done;
  542. for (int i = 0; i <= map->fd_top; i++) {
  543. if (!HANDLE_ALLOCATED(map->map[i]))
  544. continue;
  545. if ((ret = (*callback)(map->map[i], map)) < 0)
  546. break;
  547. }
  548. done:
  549. unlock(&map->lock);
  550. return ret;
  551. }
/* Checkpoint function for struct shim_handle: copies the handle into the
 * checkpoint region at most once (deduplicated via the CP map) and recursively
 * checkpoints its qstrings, mount, dentry, PAL handle and epoll items. */
BEGIN_CP_FUNC(handle) {
    __UNUSED(size);
    assert(size == sizeof(struct shim_handle));

    struct shim_handle* hdl = (struct shim_handle*)obj;
    struct shim_handle* new_hdl = NULL;

    /* already checkpointed? reuse the existing offset */
    ptr_t off = GET_FROM_CP_MAP(obj);

    if (!off) {
        off = ADD_CP_OFFSET(sizeof(struct shim_handle));
        ADD_TO_CP_MAP(obj, off);
        new_hdl = (struct shim_handle*)(base + off);

        lock(&hdl->lock);
        struct shim_mount* fs = hdl->fs;
        *new_hdl = *hdl;

        /* let the filesystem adjust the copy for migration */
        if (fs && fs->fs_ops && fs->fs_ops->checkout)
            fs->fs_ops->checkout(new_hdl);

        /* pointers into this process are invalid in the checkpoint */
        new_hdl->dentry = NULL;
        REF_SET(new_hdl->ref_count, 0);
        clear_lock(&new_hdl->lock);

        DO_CP_IN_MEMBER(qstr, new_hdl, path);
        DO_CP_IN_MEMBER(qstr, new_hdl, uri);

        if (fs && hdl->dentry) {
            DO_CP_MEMBER(mount, hdl, new_hdl, fs);
        } else {
            new_hdl->fs = NULL;
        }

        if (hdl->dentry)
            DO_CP_MEMBER(dentry, hdl, new_hdl, dentry);

        if (new_hdl->pal_handle) {
            /* record the PAL handle so the restore side can reopen it */
            struct shim_palhdl_entry* entry;
            DO_CP(palhdl, hdl->pal_handle, &entry);
            entry->uri = &new_hdl->uri;
            entry->phandle = &new_hdl->pal_handle;
        }

        if (hdl->type == TYPE_EPOLL)
            DO_CP(epoll_item, &hdl->info.epoll.fds, &new_hdl->info.epoll.fds);

        if (hdl->type == TYPE_SOCK) {
            /* no support for multiple processes sharing options/peek buffer of the socket */
            new_hdl->info.sock.pending_options = NULL;
            new_hdl->info.sock.peek_buffer = NULL;
        }

        INIT_LISTP(&new_hdl->epolls);

        unlock(&hdl->lock);
        ADD_CP_FUNC_ENTRY(off);
    } else {
        new_hdl = (struct shim_handle*)(base + off);
    }

    if (objp)
        *objp = (void*)new_hdl;
}
END_CP_FUNC(handle)
/* Restore function for struct shim_handle: rebases the checkpointed pointers,
 * recreates the lock, and rebinds the handle to a built-in filesystem if it
 * was checkpointed without a mount. */
BEGIN_RS_FUNC(handle) {
    struct shim_handle* hdl = (void*)(base + GET_CP_FUNC_ENTRY());
    __UNUSED(offset);

    CP_REBASE(hdl->fs);
    CP_REBASE(hdl->dentry);
    CP_REBASE(hdl->epolls);

    create_lock(&hdl->lock);

    if (!hdl->fs) {
        /* no mount was checkpointed: look up a built-in fs by its type name */
        assert(hdl->fs_type);
        search_builtin_fs(hdl->fs_type, &hdl->fs);
        if (!hdl->fs)
            return -EINVAL;
    }

    /* give the filesystem a chance to fix up the restored handle */
    if (hdl->fs && hdl->fs->fs_ops && hdl->fs->fs_ops->checkin)
        hdl->fs->fs_ops->checkin(hdl);

    DEBUG_RS("path=%s,type=%s,uri=%s,flags=%03o", qstrgetstr(&hdl->path), hdl->fs_type,
             qstrgetstr(&hdl->uri), hdl->flags);
}
END_RS_FUNC(handle)
/* Checkpoint function for a single fd slot: copies the slot and recursively
 * checkpoints the handle it points at (no restore counterpart; the slot is
 * fixed up by the handle_map restore function). */
BEGIN_CP_FUNC(fd_handle) {
    __UNUSED(size);
    assert(size == sizeof(struct shim_fd_handle));

    struct shim_fd_handle* fdhdl = (struct shim_fd_handle*)obj;
    struct shim_fd_handle* new_fdhdl = NULL;

    ptr_t off = ADD_CP_OFFSET(sizeof(struct shim_fd_handle));
    new_fdhdl = (struct shim_fd_handle*)(base + off);
    memcpy(new_fdhdl, fdhdl, sizeof(struct shim_fd_handle));

    /* checkpoint the underlying handle and point the copy at it */
    DO_CP(handle, fdhdl->handle, &new_fdhdl->handle);
    ADD_CP_FUNC_ENTRY(off);

    if (objp)
        *objp = (void*)new_fdhdl;
}
END_CP_FUNC_NO_RS(fd_handle)
/* Checkpoint function for a handle map: stores the map header followed
 * immediately by its fd-slot pointer array (only the live prefix up to
 * fd_top), checkpointing each allocated slot. Deduplicated via the CP map. */
BEGIN_CP_FUNC(handle_map) {
    __UNUSED(size);
    assert(size >= sizeof(struct shim_handle_map));

    struct shim_handle_map* handle_map = (struct shim_handle_map*)obj;
    struct shim_handle_map* new_handle_map = NULL;
    struct shim_fd_handle** ptr_array;

    lock(&handle_map->lock);

    /* only fds up to fd_top can be live; checkpoint just that prefix */
    int fd_size = handle_map->fd_top != FD_NULL ? handle_map->fd_top + 1 : 0;

    size = sizeof(struct shim_handle_map) + (sizeof(struct shim_fd_handle*) * fd_size);

    ptr_t off = GET_FROM_CP_MAP(obj);

    if (!off) {
        off = ADD_CP_OFFSET(size);
        new_handle_map = (struct shim_handle_map*)(base + off);

        memcpy(new_handle_map, handle_map, sizeof(struct shim_handle_map));

        /* pointer array lives right after the map header in the checkpoint */
        ptr_array = (void*)new_handle_map + sizeof(struct shim_handle_map);

        new_handle_map->fd_size = fd_size;
        new_handle_map->map = fd_size ? ptr_array : NULL;
        REF_SET(new_handle_map->ref_count, 0);
        clear_lock(&new_handle_map->lock);

        for (int i = 0; i < fd_size; i++) {
            if (HANDLE_ALLOCATED(handle_map->map[i]))
                DO_CP(fd_handle, handle_map->map[i], &ptr_array[i]);
            else
                ptr_array[i] = NULL;
        }

        ADD_CP_FUNC_ENTRY(off);
    } else {
        new_handle_map = (struct shim_handle_map*)(base + off);
    }

    unlock(&handle_map->lock);

    if (objp)
        *objp = (void*)new_handle_map;
}
END_CP_FUNC(handle_map)
/* Restore function for a handle map: rebases the slot array and every slot's
 * handle pointer, recreates the lock, and takes a reference on each restored
 * handle on behalf of its fd slot. */
BEGIN_RS_FUNC(handle_map) {
    struct shim_handle_map* handle_map = (void*)(base + GET_CP_FUNC_ENTRY());
    __UNUSED(offset);

    CP_REBASE(handle_map->map);
    assert(handle_map->map);

    DEBUG_RS("size=%d,top=%d", handle_map->fd_size, handle_map->fd_top);

    create_lock(&handle_map->lock);
    lock(&handle_map->lock);

    if (handle_map->fd_top != FD_NULL)
        for (int i = 0; i <= handle_map->fd_top; i++) {
            CP_REBASE(handle_map->map[i]);
            if (HANDLE_ALLOCATED(handle_map->map[i])) {
                CP_REBASE(handle_map->map[i]->handle);
                struct shim_handle* hdl = handle_map->map[i]->handle;
                assert(hdl);
                /* the restored fd slot owns its own reference */
                get_handle(hdl);
                DEBUG_RS("[%d]%s", i, qstrempty(&hdl->uri) ? hdl->fs_type : qstrgetstr(&hdl->uri));
            }
        }

    unlock(&handle_map->lock);
}
END_RS_FUNC(handle_map)