shim_handle.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876
  1. /* Copyright (C) 2014 Stony Brook University
  2. This file is part of Graphene Library OS.
  3. Graphene Library OS is free software: you can redistribute it and/or
  4. modify it under the terms of the GNU Lesser General Public License
  5. as published by the Free Software Foundation, either version 3 of the
  6. License, or (at your option) any later version.
  7. Graphene Library OS is distributed in the hope that it will be useful,
  8. but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. GNU Lesser General Public License for more details.
  11. You should have received a copy of the GNU Lesser General Public License
  12. along with this program. If not, see <http://www.gnu.org/licenses/>. */
  13. /*
  14. * shim_handle.c
  15. *
  16. * This file contains codes to maintain bookkeeping for handles in library OS.
  17. */
  18. #include <pal.h>
  19. #include <pal_error.h>
  20. #include <shim_checkpoint.h>
  21. #include <shim_fs.h>
  22. #include <shim_handle.h>
  23. #include <shim_internal.h>
  24. #include <shim_thread.h>
  25. static struct shim_lock handle_mgr_lock;
  26. #define HANDLE_MGR_ALLOC 32
  27. #define SYSTEM_LOCK() lock(&handle_mgr_lock)
  28. #define SYSTEM_UNLOCK() unlock(&handle_mgr_lock)
  29. #define OBJ_TYPE struct shim_handle
  30. #include <memmgr.h>
  31. static MEM_MGR handle_mgr = NULL;
  32. #define INIT_HANDLE_MAP_SIZE 32
  33. //#define DEBUG_REF
/* Open "/dev/tty" and initialize `hdl` as a handle to it. `write` selects
 * whether the filesystem open is performed for writing (stdout/stderr) or
 * reading (stdin). Returns 0 on success or a negative errno-style code. */
static inline int init_tty_handle(struct shim_handle* hdl, bool write) {
    struct shim_dentry* dent = NULL;
    int ret;
    struct shim_thread* cur_thread = get_cur_thread();
    __UNUSED(cur_thread);
    /* XXX: Try getting the root FS from current thread? */
    assert(cur_thread);
    assert(cur_thread->root);
    if ((ret = path_lookupat(NULL, "/dev/tty", LOOKUP_OPEN, &dent, NULL)) < 0)
        return ret;
    int flags = (write ? O_WRONLY : O_RDONLY) | O_APPEND;
    struct shim_mount* fs = dent->fs;
    ret = fs->d_ops->open(hdl, dent, flags);
    if (ret < 0)
        /* NOTE(review): `dent` obtained from path_lookupat is not released on
         * this error path — possible dentry refcount leak; confirm ownership */
        return ret;
    set_handle_fs(hdl, fs);
    hdl->dentry = dent;
    /* The recorded flags deliberately differ from the open flags: the handle
     * is advertised as read/write + append. 0100000 is presumably O_LARGEFILE
     * (octal) — TODO confirm and replace the magic number with the macro. */
    hdl->flags = O_RDWR | O_APPEND | 0100000;
    size_t size;
    char* path = dentry_get_path(dent, true, &size);
    if (path)
        qstrsetstr(&hdl->path, path, size);
    else
        qstrsetstr(&hdl->path, "/dev/tty", 8);
    return 0;
}
/* Create a handle describing the executable reported by PAL
 * (PAL_CB(executable)) and store it as `thread->exec`.
 * Returns 0 (also when PAL reports no executable) or -ENOMEM. */
static inline int init_exec_handle(struct shim_thread* thread) {
    if (!PAL_CB(executable))
        return 0;
    struct shim_handle* exec = get_new_handle();
    if (!exec)
        return -ENOMEM;
    qstrsetstr(&exec->uri, PAL_CB(executable), strlen(PAL_CB(executable)));
    exec->type = TYPE_FILE;
    exec->flags = O_RDONLY;
    exec->acc_mode = MAY_READ;
    struct shim_mount* fs = find_mount_from_uri(PAL_CB(executable));
    if (fs) {
        const char* p = PAL_CB(executable) + fs->uri.len;
        /*
         * Lookup for PAL_CB(executable) needs to be done under a given
         * mount point, which requires a relative path name.
         * On the other hand, the one in manifest file can be absolute path.
         */
        while (*p == '/') {
            p++;
        }
        /* NOTE(review): lookup failure is ignored and exec->dentry simply
         * stays NULL — confirm this best-effort behavior is intended. */
        path_lookupat(fs->root, p, 0, &exec->dentry, fs);
        set_handle_fs(exec, fs);
        if (exec->dentry) {
            size_t len;
            const char* path = dentry_get_path(exec->dentry, true, &len);
            qstrsetstr(&exec->path, path, len);
        }
        put_mount(fs);
    } else {
        /* no mount matches the URI: fall back to the built-in chroot fs */
        set_handle_fs(exec, &chroot_builtin_fs);
    }
    lock(&thread->lock);
    thread->exec = exec;
    unlock(&thread->lock);
    return 0;
}
  97. static struct shim_handle_map* get_new_handle_map(FDTYPE size);
  98. PAL_HANDLE shim_stdio = NULL;
  99. static int __set_new_fd_handle(struct shim_fd_handle** fdhdl, FDTYPE fd, struct shim_handle* hdl,
  100. int flags);
  101. static struct shim_handle_map* __enlarge_handle_map(struct shim_handle_map* map, FDTYPE size);
  102. int init_handle(void) {
  103. create_lock(&handle_mgr_lock);
  104. handle_mgr = create_mem_mgr(init_align_up(HANDLE_MGR_ALLOC));
  105. if (!handle_mgr)
  106. return -ENOMEM;
  107. return 0;
  108. }
  109. int init_important_handles(void) {
  110. struct shim_thread* thread = get_cur_thread();
  111. if (thread->handle_map)
  112. goto done;
  113. struct shim_handle_map* handle_map = get_cur_handle_map(thread);
  114. if (!handle_map) {
  115. handle_map = get_new_handle_map(INIT_HANDLE_MAP_SIZE);
  116. if (!handle_map)
  117. return -ENOMEM;
  118. set_handle_map(thread, handle_map);
  119. }
  120. lock(&handle_map->lock);
  121. if (handle_map->fd_size < 3) {
  122. if (!__enlarge_handle_map(handle_map, INIT_HANDLE_MAP_SIZE)) {
  123. unlock(&handle_map->lock);
  124. return -ENOMEM;
  125. }
  126. }
  127. struct shim_handle* hdl = NULL;
  128. int ret;
  129. for (int fd = 0; fd < 3; fd++)
  130. if (!HANDLE_ALLOCATED(handle_map->map[fd])) {
  131. if (!hdl) {
  132. hdl = get_new_handle();
  133. if (!hdl)
  134. return -ENOMEM;
  135. if ((ret = init_tty_handle(hdl, fd)) < 0) {
  136. put_handle(hdl);
  137. return ret;
  138. }
  139. } else {
  140. get_handle(hdl);
  141. }
  142. __set_new_fd_handle(&handle_map->map[fd], fd, hdl, 0);
  143. put_handle(hdl);
  144. if (fd != 1)
  145. hdl = NULL;
  146. } else {
  147. if (fd == 1)
  148. hdl = handle_map->map[fd]->handle;
  149. }
  150. if (handle_map->fd_top == FD_NULL || handle_map->fd_top < 2)
  151. handle_map->fd_top = 2;
  152. unlock(&handle_map->lock);
  153. done:
  154. init_exec_handle(thread);
  155. return 0;
  156. }
  157. struct shim_handle* __get_fd_handle(FDTYPE fd, int* flags, struct shim_handle_map* map) {
  158. struct shim_fd_handle* fd_handle = NULL;
  159. if (map->fd_top != FD_NULL && fd <= map->fd_top) {
  160. fd_handle = map->map[fd];
  161. if (!HANDLE_ALLOCATED(fd_handle))
  162. return NULL;
  163. if (flags)
  164. *flags = fd_handle->flags;
  165. return fd_handle->handle;
  166. }
  167. return NULL;
  168. }
  169. struct shim_handle* get_fd_handle(FDTYPE fd, int* flags, struct shim_handle_map* map) {
  170. if (!map)
  171. map = get_cur_handle_map(NULL);
  172. struct shim_handle* hdl = NULL;
  173. lock(&map->lock);
  174. if ((hdl = __get_fd_handle(fd, flags, map)))
  175. get_handle(hdl);
  176. unlock(&map->lock);
  177. return hdl;
  178. }
  179. struct shim_handle* __detach_fd_handle(struct shim_fd_handle* fd, int* flags,
  180. struct shim_handle_map* map) {
  181. struct shim_handle* handle = NULL;
  182. if (HANDLE_ALLOCATED(fd)) {
  183. int vfd = fd->vfd;
  184. handle = fd->handle;
  185. if (flags)
  186. *flags = fd->flags;
  187. fd->vfd = FD_NULL;
  188. fd->handle = NULL;
  189. fd->flags = 0;
  190. if (vfd == map->fd_top)
  191. do {
  192. map->fd_top = vfd ? vfd - 1 : FD_NULL;
  193. vfd--;
  194. } while (vfd >= 0 && !HANDLE_ALLOCATED(map->map[vfd]));
  195. }
  196. return handle;
  197. }
  198. struct shim_handle* detach_fd_handle(FDTYPE fd, int* flags, struct shim_handle_map* handle_map) {
  199. struct shim_handle* handle = NULL;
  200. if (!handle_map && !(handle_map = get_cur_handle_map(NULL)))
  201. return NULL;
  202. lock(&handle_map->lock);
  203. if (fd < handle_map->fd_size)
  204. handle = __detach_fd_handle(handle_map->map[fd], flags, handle_map);
  205. unlock(&handle_map->lock);
  206. return handle;
  207. }
  208. struct shim_handle* get_new_handle(void) {
  209. struct shim_handle* new_handle =
  210. get_mem_obj_from_mgr_enlarge(handle_mgr, size_align_up(HANDLE_MGR_ALLOC));
  211. if (!new_handle)
  212. return NULL;
  213. memset(new_handle, 0, sizeof(struct shim_handle));
  214. REF_SET(new_handle->ref_count, 1);
  215. create_lock(&new_handle->lock);
  216. new_handle->owner = cur_process.vmid;
  217. INIT_LISTP(&new_handle->epolls);
  218. return new_handle;
  219. }
  220. static int __set_new_fd_handle(struct shim_fd_handle** fdhdl, FDTYPE fd, struct shim_handle* hdl,
  221. int flags) {
  222. struct shim_fd_handle* new_handle = *fdhdl;
  223. if (!new_handle) {
  224. new_handle = malloc(sizeof(struct shim_fd_handle));
  225. if (!new_handle)
  226. return -ENOMEM;
  227. *fdhdl = new_handle;
  228. }
  229. new_handle->vfd = fd;
  230. new_handle->flags = flags;
  231. get_handle(hdl);
  232. new_handle->handle = hdl;
  233. return 0;
  234. }
  235. int set_new_fd_handle(struct shim_handle* hdl, int flags, struct shim_handle_map* handle_map) {
  236. int ret = -EMFILE;
  237. if (!handle_map && !(handle_map = get_cur_handle_map(NULL)))
  238. return -EBADF;
  239. lock(&handle_map->lock);
  240. FDTYPE fd = 0;
  241. if (handle_map->fd_top != FD_NULL) {
  242. // find first free fd
  243. while (fd <= handle_map->fd_top && HANDLE_ALLOCATED(handle_map->map[fd])) {
  244. fd++;
  245. }
  246. if (fd > handle_map->fd_top) {
  247. // no free fd found (fd == handle_map->fd_top + 1)
  248. if (fd >= handle_map->fd_size) {
  249. // no space left, need to enlarge handle_map->map
  250. if (!__enlarge_handle_map(handle_map, handle_map->fd_size * 2)) {
  251. ret = -ENOMEM;
  252. goto out;
  253. }
  254. }
  255. }
  256. } else {
  257. fd = 0;
  258. }
  259. if ((ret = __set_new_fd_handle(&handle_map->map[fd], fd, hdl, flags)) < 0) {
  260. goto out;
  261. }
  262. ret = fd;
  263. if (handle_map->fd_top == FD_NULL || fd > handle_map->fd_top) {
  264. handle_map->fd_top = fd;
  265. }
  266. out:
  267. unlock(&handle_map->lock);
  268. return ret;
  269. }
  270. int set_new_fd_handle_by_fd(FDTYPE fd, struct shim_handle* hdl, int flags,
  271. struct shim_handle_map* handle_map) {
  272. int new_size = 0;
  273. int ret = 0;
  274. if (!handle_map && !(handle_map = get_cur_handle_map(NULL)))
  275. return -EBADF;
  276. lock(&handle_map->lock);
  277. if (!handle_map->map || handle_map->fd_size < INIT_HANDLE_MAP_SIZE)
  278. new_size = INIT_HANDLE_MAP_SIZE;
  279. if (!handle_map->map)
  280. goto extend;
  281. if (fd >= handle_map->fd_size) {
  282. new_size = handle_map->fd_size < new_size ? new_size : handle_map->fd_size;
  283. extend:
  284. while (new_size <= fd) new_size *= 2;
  285. if (!__enlarge_handle_map(handle_map, new_size)) {
  286. ret = -ENOMEM;
  287. goto out;
  288. }
  289. }
  290. if (handle_map->fd_top != FD_NULL && fd <= handle_map->fd_top &&
  291. HANDLE_ALLOCATED(handle_map->map[fd])) {
  292. ret = -EBADF;
  293. goto out;
  294. }
  295. if (handle_map->fd_top == FD_NULL || fd > handle_map->fd_top)
  296. handle_map->fd_top = fd;
  297. struct shim_fd_handle* new_handle = handle_map->map[fd];
  298. if (!new_handle) {
  299. new_handle = malloc(sizeof(struct shim_fd_handle));
  300. if (!new_handle) {
  301. ret = -ENOMEM;
  302. goto out;
  303. }
  304. handle_map->map[fd] = new_handle;
  305. }
  306. ret = __set_new_fd_handle(&handle_map->map[fd], fd, hdl, flags);
  307. if (ret < 0) {
  308. if (fd == handle_map->fd_top)
  309. handle_map->fd_top = fd ? fd - 1 : FD_NULL;
  310. } else {
  311. ret = fd;
  312. }
  313. out:
  314. unlock(&handle_map->lock);
  315. return ret;
  316. }
  317. void flush_handle(struct shim_handle* hdl) {
  318. if (hdl->fs && hdl->fs->fs_ops && hdl->fs->fs_ops->flush)
  319. hdl->fs->fs_ops->flush(hdl);
  320. }
  321. static inline __attribute__((unused)) const char* __handle_name(struct shim_handle* hdl) {
  322. if (!qstrempty(&hdl->path))
  323. return qstrgetstr(&hdl->path);
  324. if (!qstrempty(&hdl->uri))
  325. return qstrgetstr(&hdl->uri);
  326. if (hdl->fs_type[0])
  327. return hdl->fs_type;
  328. return "(unknown)";
  329. }
/* Take one reference on `hdl` (with optional DEBUG_REF tracing). */
void get_handle(struct shim_handle* hdl) {
#ifdef DEBUG_REF
    int ref_count = REF_INC(hdl->ref_count);
    debug("get handle %p(%s) (ref_count = %d)\n", hdl, __handle_name(hdl), ref_count);
#else
    REF_INC(hdl->ref_count);
#endif
}
/* Free the storage of a handle whose last reference was dropped. Handles that
 * arrived via checkpoint migration do not belong to this process's memory
 * manager, so they are only wiped, not returned to `handle_mgr`. */
static void destroy_handle(struct shim_handle* hdl) {
    destroy_lock(&hdl->lock);
    if (memory_migrated(hdl))
        memset(hdl, 0, sizeof(struct shim_handle));
    else
        free_mem_obj_to_mgr(handle_mgr, hdl);
}
/* Drop one reference to `hdl`; on the last reference, tear the handle down:
 * release directory-iterator dentries (TYPE_DIR) or call the fs close op,
 * detach from epoll sets, free the path/uri strings, close the PAL handle,
 * and drop the dentry/mount references before freeing the object itself. */
void put_handle(struct shim_handle* hdl) {
    int ref_count = REF_DEC(hdl->ref_count);
#ifdef DEBUG_REF
    debug("put handle %p(%s) (ref_count = %d)\n", hdl, __handle_name(hdl), ref_count);
#endif
    if (!ref_count) {
        if (hdl->type == TYPE_DIR) {
            /* a directory handle caches dentry references from listing;
             * each must be released individually */
            struct shim_dir_handle* dir = &hdl->dir_info;
            if (dir->dot) {
                put_dentry(dir->dot);
                dir->dot = NULL;
            }
            if (dir->dotdot) {
                put_dentry(dir->dotdot);
                dir->dotdot = NULL;
            }
            /* (void*)-1 appears to be a sentinel meaning "listing not built";
             * TODO confirm against the directory fs implementation */
            if (dir->ptr != (void*)-1) {
                while (dir->ptr && *dir->ptr) {
                    struct shim_dentry* dent = *dir->ptr;
                    put_dentry(dent);
                    *(dir->ptr++) = NULL;
                }
            }
        } else {
            if (hdl->fs && hdl->fs->fs_ops && hdl->fs->fs_ops->close)
                hdl->fs->fs_ops->close(hdl);
        }
        delete_from_epoll_handles(hdl);
        if (hdl->fs && hdl->fs->fs_ops && hdl->fs->fs_ops->hput)
            hdl->fs->fs_ops->hput(hdl);
        qstrfree(&hdl->path);
        qstrfree(&hdl->uri);
        if (hdl->pal_handle) {
#ifdef DEBUG_REF
            debug("handle %p closes PAL handle %p\n", hdl, hdl->pal_handle);
#endif
            DkObjectClose(hdl->pal_handle);
            hdl->pal_handle = NULL;
        }
        if (hdl->dentry)
            put_dentry(hdl->dentry);
        if (hdl->fs)
            put_mount(hdl->fs);
        destroy_handle(hdl);
    }
}
  391. off_t get_file_size(struct shim_handle* hdl) {
  392. if (!hdl->fs || !hdl->fs->fs_ops)
  393. return -EINVAL;
  394. if (hdl->fs->fs_ops->poll)
  395. return hdl->fs->fs_ops->poll(hdl, FS_POLL_SZ);
  396. if (hdl->fs->fs_ops->hstat) {
  397. struct stat stat;
  398. int ret = hdl->fs->fs_ops->hstat(hdl, &stat);
  399. if (ret < 0)
  400. return ret;
  401. return stat.st_size;
  402. }
  403. return 0;
  404. }
/* Make slot `new` share the handle referenced by slot `old` (taking a
 * reference on it); the handle previously held by `new`, if any, is released
 * after the map lock is dropped.
 * NOTE(review): only the handle pointer is copied — new->vfd and new->flags
 * are left untouched; confirm callers set those fields themselves. */
void dup_fd_handle(struct shim_handle_map* map, const struct shim_fd_handle* old,
                   struct shim_fd_handle* new) {
    struct shim_handle* replaced = NULL;
    lock(&map->lock);
    if (old->vfd != FD_NULL) {
        get_handle(old->handle);
        replaced = new->handle;
        new->handle = old->handle;
    }
    unlock(&map->lock);
    /* put_handle() may do heavy teardown; keep it outside map->lock */
    if (replaced)
        put_handle(replaced);
}
  418. static struct shim_handle_map* get_new_handle_map(FDTYPE size) {
  419. struct shim_handle_map* handle_map = calloc(1, sizeof(struct shim_handle_map));
  420. if (!handle_map)
  421. return NULL;
  422. handle_map->map = calloc(size, sizeof(struct shim_fd_handle));
  423. if (!handle_map->map) {
  424. free(handle_map);
  425. return NULL;
  426. }
  427. handle_map->fd_top = FD_NULL;
  428. handle_map->fd_size = size;
  429. create_lock(&handle_map->lock);
  430. return handle_map;
  431. }
  432. static struct shim_handle_map* __enlarge_handle_map(struct shim_handle_map* map, FDTYPE size) {
  433. if (size <= map->fd_size)
  434. return map;
  435. struct shim_fd_handle** new_map = calloc(size, sizeof(new_map[0]));
  436. if (!new_map)
  437. return NULL;
  438. memcpy(new_map, map->map, map->fd_size * sizeof(new_map[0]));
  439. free(map->map);
  440. map->map = new_map;
  441. map->fd_size = size;
  442. return map;
  443. }
  444. int dup_handle_map(struct shim_handle_map** new, struct shim_handle_map* old_map) {
  445. lock(&old_map->lock);
  446. /* allocate a new handle mapping with the same size as
  447. the old one */
  448. struct shim_handle_map* new_map = get_new_handle_map(old_map->fd_size);
  449. if (!new_map)
  450. return -ENOMEM;
  451. new_map->fd_top = old_map->fd_top;
  452. if (old_map->fd_top == FD_NULL)
  453. goto done;
  454. for (int i = 0; i <= old_map->fd_top; i++) {
  455. struct shim_fd_handle* fd_old = old_map->map[i];
  456. struct shim_fd_handle* fd_new;
  457. /* now we go through the handle map and reassign each
  458. of them being allocated */
  459. if (HANDLE_ALLOCATED(fd_old)) {
  460. /* first, get the handle to prevent it from being deleted */
  461. struct shim_handle* hdl = fd_old->handle;
  462. get_handle(hdl);
  463. fd_new = malloc(sizeof(struct shim_fd_handle));
  464. if (!fd_new) {
  465. for (int j = 0; j < i; j++) {
  466. put_handle(new_map->map[j]->handle);
  467. free(new_map->map[j]);
  468. }
  469. unlock(&old_map->lock);
  470. *new = NULL;
  471. free(new_map);
  472. return -ENOMEM;
  473. }
  474. /* DP: I assume we really need a deep copy of the handle map? */
  475. new_map->map[i] = fd_new;
  476. fd_new->vfd = fd_old->vfd;
  477. fd_new->handle = hdl;
  478. fd_new->flags = fd_old->flags;
  479. }
  480. }
  481. done:
  482. unlock(&old_map->lock);
  483. *new = new_map;
  484. return 0;
  485. }
/* Take one reference on a handle map. */
void get_handle_map(struct shim_handle_map* map) {
    REF_INC(map->ref_count);
}
/* Drop one reference to `map`; on the last reference, release every handle
 * still installed in the table, then free the slot array and the map itself.
 * No locking is needed here: nobody else can reach the map at refcount 0. */
void put_handle_map(struct shim_handle_map* map) {
    int ref_count = REF_DEC(map->ref_count);
    if (!ref_count) {
        if (map->fd_top == FD_NULL)
            goto done;  /* empty map: nothing installed, skip straight to free */
        for (int i = 0; i <= map->fd_top; i++) {
            if (!map->map[i])
                continue;
            if (map->map[i]->vfd != FD_NULL) {
                struct shim_handle* handle = map->map[i]->handle;
                if (handle)
                    put_handle(handle);
            }
            free(map->map[i]);
        }
    done:
        destroy_lock(&map->lock);
        free(map->map);
        free(map);
    }
}
  510. int flush_handle_map(struct shim_handle_map* map) {
  511. get_handle_map(map);
  512. lock(&map->lock);
  513. if (map->fd_top == FD_NULL)
  514. goto done;
  515. /* now we go through the handle map and flush each handle */
  516. for (int i = 0; i <= map->fd_top; i++) {
  517. if (!HANDLE_ALLOCATED(map->map[i]))
  518. continue;
  519. struct shim_handle* handle = map->map[i]->handle;
  520. if (handle)
  521. flush_handle(handle);
  522. }
  523. done:
  524. unlock(&map->lock);
  525. put_handle_map(map);
  526. return 0;
  527. }
  528. int walk_handle_map(int (*callback)(struct shim_fd_handle*, struct shim_handle_map*),
  529. struct shim_handle_map* map) {
  530. int ret = 0;
  531. lock(&map->lock);
  532. if (map->fd_top == FD_NULL)
  533. goto done;
  534. for (int i = 0; i <= map->fd_top; i++) {
  535. if (!HANDLE_ALLOCATED(map->map[i]))
  536. continue;
  537. if ((ret = (*callback)(map->map[i], map)) < 0)
  538. break;
  539. }
  540. done:
  541. unlock(&map->lock);
  542. return ret;
  543. }
/* Checkpoint function for a shim_handle: copy the handle into the checkpoint
 * region and recursively checkpoint its qstrs, mount, dentry, PAL-handle
 * entry and (for epoll handles) the epoll fd list. A repeated visit of the
 * same object reuses the offset recorded in the CP map. */
BEGIN_CP_FUNC(handle) {
    __UNUSED(size);
    assert(size == sizeof(struct shim_handle));
    struct shim_handle* hdl = (struct shim_handle*)obj;
    struct shim_handle* new_hdl = NULL;
    ptr_t off = GET_FROM_CP_MAP(obj);
    if (!off) {
        off = ADD_CP_OFFSET(sizeof(struct shim_handle));
        ADD_TO_CP_MAP(obj, off);
        new_hdl = (struct shim_handle*)(base + off);
        lock(&hdl->lock);
        struct shim_mount* fs = hdl->fs;
        *new_hdl = *hdl;
        /* let the filesystem scrub fields that must not cross processes */
        if (fs && fs->fs_ops && fs->fs_ops->checkout)
            fs->fs_ops->checkout(new_hdl);
        new_hdl->dentry = NULL;
        /* the restore side re-establishes references from scratch */
        REF_SET(new_hdl->ref_count, 0);
        clear_lock(&new_hdl->lock);
        DO_CP_IN_MEMBER(qstr, new_hdl, path);
        DO_CP_IN_MEMBER(qstr, new_hdl, uri);
        /* NOTE(review): fs is checkpointed only when a dentry exists; in the
         * other case the restore side re-resolves it from fs_type — confirm */
        if (fs && hdl->dentry) {
            DO_CP_MEMBER(mount, hdl, new_hdl, fs);
        } else {
            new_hdl->fs = NULL;
        }
        if (hdl->dentry)
            DO_CP_MEMBER(dentry, hdl, new_hdl, dentry);
        if (new_hdl->pal_handle) {
            /* record where the restored process must patch the PAL handle */
            struct shim_palhdl_entry* entry;
            DO_CP(palhdl, hdl->pal_handle, &entry);
            entry->uri = &new_hdl->uri;
            entry->phandle = &new_hdl->pal_handle;
        }
        if (hdl->type == TYPE_EPOLL)
            DO_CP(epoll_item, &hdl->info.epoll.fds, &new_hdl->info.epoll.fds);
        INIT_LISTP(&new_hdl->epolls);
        unlock(&hdl->lock);
        ADD_CP_FUNC_ENTRY(off);
    } else {
        new_hdl = (struct shim_handle*)(base + off);
    }
    if (objp)
        *objp = (void*)new_hdl;
}
END_CP_FUNC(handle)
/* Restore function for a shim_handle: rebase its pointers into the new
 * address space, recreate its lock, re-resolve the filesystem from fs_type
 * when it was not checkpointed, and let the fs re-attach via checkin. */
BEGIN_RS_FUNC(handle) {
    struct shim_handle* hdl = (void*)(base + GET_CP_FUNC_ENTRY());
    __UNUSED(offset);
    CP_REBASE(hdl->fs);
    CP_REBASE(hdl->dentry);
    CP_REBASE(hdl->epolls);
    create_lock(&hdl->lock);
    if (!hdl->fs) {
        /* fs was not checkpointed: look it up among built-in filesystems */
        assert(hdl->fs_type);
        search_builtin_fs(hdl->fs_type, &hdl->fs);
        if (!hdl->fs)
            return -EINVAL;
    }
    if (hdl->fs && hdl->fs->fs_ops && hdl->fs->fs_ops->checkin)
        hdl->fs->fs_ops->checkin(hdl);
    DEBUG_RS("path=%s,type=%s,uri=%s,flags=%03o", qstrgetstr(&hdl->path), hdl->fs_type,
             qstrgetstr(&hdl->uri), hdl->flags);
}
END_RS_FUNC(handle)
/* Checkpoint function for a shim_fd_handle: copy the slot verbatim and
 * checkpoint the handle it points to. Slots are not deduplicated via the CP
 * map, and no restore function exists — pointer fixups are done by the
 * handle_map restore function instead. */
BEGIN_CP_FUNC(fd_handle) {
    __UNUSED(size);
    assert(size == sizeof(struct shim_fd_handle));
    struct shim_fd_handle* fdhdl = (struct shim_fd_handle*)obj;
    struct shim_fd_handle* new_fdhdl = NULL;
    ptr_t off = ADD_CP_OFFSET(sizeof(struct shim_fd_handle));
    new_fdhdl = (struct shim_fd_handle*)(base + off);
    memcpy(new_fdhdl, fdhdl, sizeof(struct shim_fd_handle));
    DO_CP(handle, fdhdl->handle, &new_fdhdl->handle);
    ADD_CP_FUNC_ENTRY(off);
    if (objp)
        *objp = (void*)new_fdhdl;
}
END_CP_FUNC_NO_RS(fd_handle)
/* Checkpoint function for a handle map: the map struct and its slot-pointer
 * array are stored contiguously in the checkpoint region, with the capacity
 * trimmed down to fd_top + 1 entries (only in-use fds survive migration). */
BEGIN_CP_FUNC(handle_map) {
    __UNUSED(size);
    assert(size >= sizeof(struct shim_handle_map));
    struct shim_handle_map* handle_map = (struct shim_handle_map*)obj;
    struct shim_handle_map* new_handle_map = NULL;
    struct shim_fd_handle** ptr_array;
    lock(&handle_map->lock);
    /* number of slots worth checkpointing: 0 when no fd is in use */
    int fd_size = handle_map->fd_top != FD_NULL ? handle_map->fd_top + 1 : 0;
    size = sizeof(struct shim_handle_map) + (sizeof(struct shim_fd_handle*) * fd_size);
    ptr_t off = GET_FROM_CP_MAP(obj);
    if (!off) {
        off = ADD_CP_OFFSET(size);
        new_handle_map = (struct shim_handle_map*)(base + off);
        memcpy(new_handle_map, handle_map, sizeof(struct shim_handle_map));
        /* the slot-pointer array lives immediately after the struct */
        ptr_array = (void*)new_handle_map + sizeof(struct shim_handle_map);
        new_handle_map->fd_size = fd_size;
        new_handle_map->map = fd_size ? ptr_array : NULL;
        REF_SET(new_handle_map->ref_count, 0);
        clear_lock(&new_handle_map->lock);
        for (int i = 0; i < fd_size; i++) {
            if (HANDLE_ALLOCATED(handle_map->map[i]))
                DO_CP(fd_handle, handle_map->map[i], &ptr_array[i]);
            else
                ptr_array[i] = NULL;
        }
        ADD_CP_FUNC_ENTRY(off);
    } else {
        new_handle_map = (struct shim_handle_map*)(base + off);
    }
    unlock(&handle_map->lock);
    if (objp)
        *objp = (void*)new_handle_map;
}
END_CP_FUNC(handle_map)
/* Restore function for a handle map: rebase the slot array and every slot /
 * handle pointer, recreate the lock, and take one reference per installed
 * handle (the checkpoint stored them with ref_count 0). */
BEGIN_RS_FUNC(handle_map) {
    struct shim_handle_map* handle_map = (void*)(base + GET_CP_FUNC_ENTRY());
    __UNUSED(offset);
    CP_REBASE(handle_map->map);
    assert(handle_map->map);
    DEBUG_RS("size=%d,top=%d", handle_map->fd_size, handle_map->fd_top);
    create_lock(&handle_map->lock);
    lock(&handle_map->lock);
    if (handle_map->fd_top != FD_NULL)
        for (int i = 0; i <= handle_map->fd_top; i++) {
            CP_REBASE(handle_map->map[i]);
            if (HANDLE_ALLOCATED(handle_map->map[i])) {
                CP_REBASE(handle_map->map[i]->handle);
                struct shim_handle* hdl = handle_map->map[i]->handle;
                assert(hdl);
                /* account for this map's reference on the restored handle */
                get_handle(hdl);
                DEBUG_RS("[%d]%s", i, qstrempty(&hdl->uri) ? hdl->fs_type : qstrgetstr(&hdl->uri));
            }
        }
    unlock(&handle_map->lock);
}
END_RS_FUNC(handle_map)