shim_ipc.c 19 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617
  1. /* Copyright (C) 2014 Stony Brook University
  2. This file is part of Graphene Library OS.
  3. Graphene Library OS is free software: you can redistribute it and/or
  4. modify it under the terms of the GNU Lesser General Public License
  5. as published by the Free Software Foundation, either version 3 of the
  6. License, or (at your option) any later version.
  7. Graphene Library OS is distributed in the hope that it will be useful,
  8. but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. GNU Lesser General Public License for more details.
  11. You should have received a copy of the GNU Lesser General Public License
  12. along with this program. If not, see <http://www.gnu.org/licenses/>. */
  13. /*
  14. * shim_ipc.c
  15. *
  16. * This file contains code to maintain generic bookkeeping of IPC: operations
  17. * on shim_ipc_msg (one-way IPC messages), shim_ipc_msg_duplex (IPC messages
  18. * with acknowledgement), shim_ipc_info (IPC ports of process), shim_process.
  19. */
  20. #include <list.h>
  21. #include <pal.h>
  22. #include <pal_error.h>
  23. #include <shim_checkpoint.h>
  24. #include <shim_handle.h>
  25. #include <shim_internal.h>
  26. #include <shim_ipc.h>
  27. #include <shim_profile.h>
  28. #include <shim_thread.h>
  29. #include <shim_unistd.h>
  30. #include <shim_utils.h>
/* Lock protecting the IPC-info object pool; memmgr.h consumes the
 * SYSTEM_LOCK()/SYSTEM_UNLOCK()/SYSTEM_LOCKED() macros defined just before
 * it is included. */
static struct shim_lock ipc_info_mgr_lock;
#define SYSTEM_LOCK() lock(&ipc_info_mgr_lock)
#define SYSTEM_UNLOCK() unlock(&ipc_info_mgr_lock)
#define SYSTEM_LOCKED() locked(&ipc_info_mgr_lock)

/* Pool allocator for struct shim_ipc_info objects (grown in chunks of 32). */
#define IPC_INFO_MGR_ALLOC 32
#define OBJ_TYPE struct shim_ipc_info
#include "memmgr.h"
static MEM_MGR ipc_info_mgr;

/* Protects info_hlist below and the free/ref-count transitions of infos. */
struct shim_lock ipc_info_lock;

/* IPC identity of this process: vmid plus self/parent/namespace-leader infos. */
struct shim_process cur_process;

/* Hash table of known IPC infos, bucketed by the low bits of vmid. */
#define CLIENT_HASH_BITLEN 6
#define CLIENT_HASH_NUM (1 << CLIENT_HASH_BITLEN)
#define CLIENT_HASH_MASK (CLIENT_HASH_NUM - 1)
#define CLIENT_HASH(vmid) ((vmid)&CLIENT_HASH_MASK)
DEFINE_LISTP(shim_ipc_info);
static LISTP_TYPE(shim_ipc_info) info_hlist[CLIENT_HASH_NUM];

DEFINE_PROFILE_CATEGORY(ipc, );
DEFINE_PROFILE_OCCURENCE(syscall_use_ipc, ipc);

/* Implemented in the per-namespace IPC modules. */
int init_ipc_ports(void);
int init_ns_pid(void);
int init_ns_sysv(void);
  52. int init_ipc(void) {
  53. int ret = 0;
  54. if (!create_lock(&ipc_info_lock)
  55. || !create_lock(&cur_process.lock)
  56. || !create_lock(&ipc_info_mgr_lock)) {
  57. return -ENOMEM;
  58. }
  59. if (!(ipc_info_mgr = create_mem_mgr(init_align_up(IPC_INFO_MGR_ALLOC))))
  60. return -ENOMEM;
  61. if ((ret = init_ipc_ports()) < 0)
  62. return ret;
  63. if ((ret = init_ns_pid()) < 0)
  64. return ret;
  65. if ((ret = init_ns_sysv()) < 0)
  66. return ret;
  67. return 0;
  68. }
  69. int prepare_ns_leaders(void) {
  70. int ret = 0;
  71. if ((ret = prepare_pid_leader()) < 0)
  72. return ret;
  73. if ((ret = prepare_sysv_leader()) < 0)
  74. return ret;
  75. return 0;
  76. }
  77. static struct shim_ipc_info* __create_ipc_info(IDTYPE vmid, const char* uri, size_t len) {
  78. assert(locked(&ipc_info_lock));
  79. struct shim_ipc_info* info =
  80. get_mem_obj_from_mgr_enlarge(ipc_info_mgr, size_align_up(IPC_INFO_MGR_ALLOC));
  81. if (!info)
  82. return NULL;
  83. memset(info, 0, sizeof(struct shim_ipc_info));
  84. info->vmid = vmid;
  85. if (uri)
  86. qstrsetstr(&info->uri, uri, len);
  87. REF_SET(info->ref_count, 1);
  88. INIT_LIST_HEAD(info, hlist);
  89. return info;
  90. }
  91. static void __free_ipc_info(struct shim_ipc_info* info) {
  92. assert(locked(&ipc_info_lock));
  93. if (info->pal_handle) {
  94. DkObjectClose(info->pal_handle);
  95. info->pal_handle = NULL;
  96. }
  97. if (info->port)
  98. put_ipc_port(info->port);
  99. qstrfree(&info->uri);
  100. free_mem_obj_to_mgr(ipc_info_mgr, info);
  101. }
  102. static void __get_ipc_info(struct shim_ipc_info* info) {
  103. REF_INC(info->ref_count);
  104. }
  105. static void __put_ipc_info(struct shim_ipc_info* info) {
  106. assert(locked(&ipc_info_lock));
  107. int ref_count = REF_DEC(info->ref_count);
  108. if (!ref_count)
  109. __free_ipc_info(info);
  110. }
  111. void get_ipc_info(struct shim_ipc_info* info) {
  112. /* no need to grab ipc_info_lock because __get_ipc_info() does not touch global state */
  113. __get_ipc_info(info);
  114. }
/* Release a reference on info; if this was the last one, grab ipc_info_lock
 * and free the object.
 * NOTE(review): the lock is taken only after REF_DEC reports zero — this
 * assumes no other thread can resurrect (re-get) an info whose count has
 * reached zero; confirm callers never share a dying info. */
void put_ipc_info(struct shim_ipc_info* info) {
    /* this is atomic so we don't grab lock in common case of ref_count > 0 */
    int ref_count = REF_DEC(info->ref_count);
    if (!ref_count) {
        lock(&ipc_info_lock);
        __free_ipc_info(info);
        unlock(&ipc_info_lock);
    }
}
  124. struct shim_ipc_info* create_ipc_info(IDTYPE vmid, const char* uri, size_t len) {
  125. lock(&ipc_info_lock);
  126. struct shim_ipc_info* info = __create_ipc_info(vmid, uri, len);
  127. unlock(&ipc_info_lock);
  128. return info;
  129. }
  130. struct shim_ipc_info* create_ipc_info_in_list(IDTYPE vmid, const char* uri, size_t len) {
  131. assert(vmid);
  132. struct shim_ipc_info* info;
  133. lock(&ipc_info_lock);
  134. /* check if info with this vmid & uri already exists and return it */
  135. LISTP_TYPE(shim_ipc_info)* info_bucket = &info_hlist[CLIENT_HASH(vmid)];
  136. LISTP_FOR_EACH_ENTRY(info, info_bucket, hlist) {
  137. if (info->vmid == vmid && !qstrcmpstr(&info->uri, uri, len)) {
  138. get_ipc_info(info);
  139. unlock(&ipc_info_lock);
  140. return info;
  141. }
  142. }
  143. /* otherwise create new info and return it */
  144. info = __create_ipc_info(vmid, uri, len);
  145. if (info) {
  146. LISTP_ADD(info, info_bucket, hlist);
  147. get_ipc_info(info);
  148. }
  149. unlock(&ipc_info_lock);
  150. return info;
  151. }
/* Release a reference obtained via create_ipc_info_in_list(). Drops the
 * caller's reference; if only the hash table's own reference remains
 * (count == 1 after the first put), the entry is unlinked from its bucket
 * and the final reference is dropped, freeing the object.
 * NOTE(review): if the first __put_ipc_info() dropped the LAST reference,
 * info is already freed and the REF_GET below reads freed memory — verify
 * callers always hold at least two references when calling this. */
void put_ipc_info_in_list(struct shim_ipc_info* info) {
    LISTP_TYPE(shim_ipc_info)* info_bucket = &info_hlist[CLIENT_HASH(info->vmid)];
    lock(&ipc_info_lock);
    __put_ipc_info(info);
    /* only the list's reference left: retire the entry entirely */
    if (REF_GET(info->ref_count) == 1) {
        LISTP_DEL_INIT(info, info_bucket, hlist);
        __put_ipc_info(info);
    }
    unlock(&ipc_info_lock);
}
  162. struct shim_ipc_info* lookup_ipc_info(IDTYPE vmid) {
  163. assert(vmid);
  164. lock(&ipc_info_lock);
  165. struct shim_ipc_info* info;
  166. LISTP_TYPE(shim_ipc_info)* info_bucket = &info_hlist[CLIENT_HASH(vmid)];
  167. LISTP_FOR_EACH_ENTRY(info, info_bucket, hlist) {
  168. if (info->vmid == vmid && !qstrempty(&info->uri)) {
  169. __get_ipc_info(info);
  170. unlock(&ipc_info_lock);
  171. return info;
  172. }
  173. }
  174. unlock(&ipc_info_lock);
  175. return NULL;
  176. }
  177. struct shim_process* create_process(bool dup_cur_process) {
  178. struct shim_process* new_process = calloc(1, sizeof(struct shim_process));
  179. if (!new_process)
  180. return NULL;
  181. lock(&cur_process.lock);
  182. /* current process must have been initialized with info on its own IPC info */
  183. assert(cur_process.self);
  184. assert(cur_process.self->pal_handle && !qstrempty(&cur_process.self->uri));
  185. if (dup_cur_process) {
  186. /* execve case, new process assumes identity of current process and thus has
  187. * - same vmid as current process
  188. * - same self IPC info as current process
  189. * - same parent IPC info as current process
  190. */
  191. new_process->vmid = cur_process.vmid;
  192. new_process->self = create_ipc_info(
  193. cur_process.self->vmid, qstrgetstr(&cur_process.self->uri), cur_process.self->uri.len);
  194. new_process->self->pal_handle = cur_process.self->pal_handle;
  195. if (!new_process->self) {
  196. unlock(&cur_process.lock);
  197. return NULL;
  198. }
  199. /* there is a corner case of execve in very first process; such process does
  200. * not have parent process, so cannot copy parent IPC info */
  201. if (cur_process.parent) {
  202. new_process->parent =
  203. create_ipc_info(cur_process.parent->vmid, qstrgetstr(&cur_process.parent->uri),
  204. cur_process.parent->uri.len);
  205. new_process->parent->pal_handle = cur_process.parent->pal_handle;
  206. }
  207. } else {
  208. /* fork/clone case, new process has new identity but inherits parent */
  209. new_process->vmid = 0;
  210. new_process->self = NULL;
  211. new_process->parent = create_ipc_info(
  212. cur_process.self->vmid, qstrgetstr(&cur_process.self->uri), cur_process.self->uri.len);
  213. }
  214. if (cur_process.parent && !new_process->parent) {
  215. if (new_process->self)
  216. put_ipc_info(new_process->self);
  217. unlock(&cur_process.lock);
  218. return NULL;
  219. }
  220. /* new process inherits the same namespace leaders */
  221. for (int i = 0; i < TOTAL_NS; i++) {
  222. if (cur_process.ns[i]) {
  223. new_process->ns[i] =
  224. create_ipc_info(cur_process.ns[i]->vmid, qstrgetstr(&cur_process.ns[i]->uri),
  225. cur_process.ns[i]->uri.len);
  226. if (!new_process->ns[i]) {
  227. if (new_process->self)
  228. put_ipc_info(new_process->self);
  229. if (new_process->parent)
  230. put_ipc_info(new_process->parent);
  231. for (int j = 0; j < i; j++) {
  232. put_ipc_info(new_process->ns[j]);
  233. }
  234. unlock(&cur_process.lock);
  235. return NULL;
  236. }
  237. }
  238. }
  239. unlock(&cur_process.lock);
  240. return new_process;
  241. }
  242. void free_process(struct shim_process* process) {
  243. if (process->self)
  244. put_ipc_info(process->self);
  245. if (process->parent)
  246. put_ipc_info(process->parent);
  247. for (int i = 0; i < TOTAL_NS; i++)
  248. if (process->ns[i])
  249. put_ipc_info(process->ns[i]);
  250. free(process);
  251. }
  252. void init_ipc_msg(struct shim_ipc_msg* msg, int code, size_t size, IDTYPE dest) {
  253. msg->code = code;
  254. msg->size = get_ipc_msg_size(size);
  255. msg->src = cur_process.vmid;
  256. msg->dst = dest;
  257. msg->seq = 0;
  258. }
  259. void init_ipc_msg_duplex(struct shim_ipc_msg_duplex* msg, int code, size_t size, IDTYPE dest) {
  260. init_ipc_msg(&msg->msg, code, size, dest);
  261. msg->thread = NULL;
  262. INIT_LIST_HEAD(msg, list);
  263. msg->retval = 0;
  264. msg->private = NULL;
  265. }
/* Synchronously write a complete one-way IPC message to the given port.
 * Transient failures (EINTR/EAGAIN/EWOULDBLOCK) and short writes are
 * retried until all msg->size bytes are sent. Any other PAL error tears the
 * port down via del_ipc_port_fini() and returns -PAL_ERRNO.
 * Returns 0 on success. */
int send_ipc_message(struct shim_ipc_msg* msg, struct shim_ipc_port* port) {
    assert(msg->size >= IPC_MSG_MINIMAL_SIZE);
    /* stamp sender's vmid so the receiver can identify us */
    msg->src = cur_process.vmid;
    debug("Sending ipc message to port %p (handle %p)\n", port, port->pal_handle);
    size_t total_bytes = msg->size;
    size_t bytes = 0;
    do {
        /* NOTE: (void*)msg + bytes relies on GCC's arithmetic-on-void*
         * extension (void* treated as byte-sized) */
        PAL_NUM ret =
            DkStreamWrite(port->pal_handle, 0, total_bytes - bytes, (void*)msg + bytes, NULL);
        if (ret == PAL_STREAM_ERROR) {
            if (PAL_ERRNO == EINTR || PAL_ERRNO == EAGAIN || PAL_ERRNO == EWOULDBLOCK)
                continue; /* transient error: retry the write */
            debug("Port %p (handle %p) was removed during sending\n", port, port->pal_handle);
            del_ipc_port_fini(port, -ECHILD);
            return -PAL_ERRNO;
        }
        /* short write: advance and keep sending the remainder */
        bytes += ret;
    } while (bytes < total_bytes);
    return 0;
}
  286. struct shim_ipc_msg_duplex* pop_ipc_msg_duplex(struct shim_ipc_port* port, unsigned long seq) {
  287. struct shim_ipc_msg_duplex* found = NULL;
  288. lock(&port->msgs_lock);
  289. struct shim_ipc_msg_duplex* tmp;
  290. LISTP_FOR_EACH_ENTRY(tmp, &port->msgs, list) {
  291. if (tmp->msg.seq == seq) {
  292. found = tmp;
  293. LISTP_DEL_INIT(tmp, &port->msgs, list);
  294. break;
  295. }
  296. }
  297. unlock(&port->msgs_lock);
  298. return found;
  299. }
/* Send an IPC message and block the calling thread until the response with
 * the same sequence number arrives (the response handler pops the message
 * via pop_ipc_msg_duplex(), fills msg->retval, and wakes the thread).
 * Optionally reports the assigned sequence number through seq and attaches
 * private_data to the queued message.
 * Returns msg->retval on success or a negative error code. */
int send_ipc_message_duplex(struct shim_ipc_msg_duplex* msg, struct shim_ipc_port* port,
                            unsigned long* seq, void* private_data) {
    int ret = 0;
    struct shim_thread* thread = get_cur_thread();
    assert(thread);
    /* prepare thread which will send the message for waiting for response
     * (this also acquires reference to the thread) */
    if (!msg->thread)
        thread_setwait(&msg->thread, thread);
    /* process-wide counter producing unique sequence numbers */
    static struct atomic_int ipc_seq_counter;
    msg->msg.seq = atomic_inc_return(&ipc_seq_counter);
    /* save the message to list of port msgs together with its private data */
    lock(&port->msgs_lock);
    msg->private = private_data;
    LISTP_ADD_TAIL(msg, &port->msgs, list);
    unlock(&port->msgs_lock);
    ret = send_ipc_message(&msg->msg, port);
    if (ret < 0)
        goto out;
    if (seq)
        *seq = msg->msg.seq;
    debug("Waiting for response (seq = %lu)\n", msg->msg.seq);
    /* force thread which will send the message to wait for response;
     * ignore unrelated interrupts but fail on actual errors */
    do {
        ret = thread_sleep(NO_TIMEOUT);
        if (ret < 0 && ret != -EINTR && ret != -EAGAIN)
            goto out;
    } while (ret != 0);
    debug("Finished waiting for response (seq = %lu, ret = %d)\n", msg->msg.seq, msg->retval);
    ret = msg->retval;
out:
    /* unlink the message if the response handler has not already popped it
     * (e.g. on send failure or sleep error) */
    lock(&port->msgs_lock);
    if (!LIST_EMPTY(msg, list))
        LISTP_DEL_INIT(msg, &port->msgs, list);
    unlock(&port->msgs_lock);
    if (msg->thread) {
        /* put reference to the thread acquired earlier */
        put_thread(msg->thread);
        msg->thread = NULL;
    }
    return ret;
}
/* Create an IPC info describing the current process, backed by a freshly
 * created named pipe and registered as a server IPC port.
 * Must be called with cur_process.lock taken.
 * Returns NULL on allocation or pipe-creation failure. */
struct shim_ipc_info* create_ipc_info_cur_process(bool is_self_ipc_info) {
    struct shim_ipc_info* info = create_ipc_info(cur_process.vmid, NULL, 0);
    if (!info)
        return NULL;
    /* pipe for cur_process.self is of format "pipe:<cur_process.vmid>", others with random name */
    char uri[PIPE_URI_SIZE];
    if (create_pipe(NULL, uri, PIPE_URI_SIZE, &info->pal_handle, &info->uri, is_self_ipc_info) <
        0) {
        put_ipc_info(info);
        return NULL;
    }
    /* NOTE(review): the result of add_ipc_port_by_id() is ignored — if port
     * registration can fail, info->port may remain NULL here; confirm that
     * callers tolerate an info without a port. */
    add_ipc_port_by_id(cur_process.vmid, info->pal_handle, IPC_PORT_SERVER, NULL, &info->port);
    return info;
}
  358. int get_ipc_info_cur_process(struct shim_ipc_info** info) {
  359. lock(&cur_process.lock);
  360. if (!cur_process.self) {
  361. cur_process.self = create_ipc_info_cur_process(true);
  362. if (!cur_process.self) {
  363. unlock(&cur_process.lock);
  364. return -EACCES;
  365. }
  366. }
  367. get_ipc_info(cur_process.self);
  368. *info = cur_process.self;
  369. unlock(&cur_process.lock);
  370. return 0;
  371. }
DEFINE_PROFILE_INTERVAL(ipc_checkpoint_send, ipc);
DEFINE_PROFILE_INTERVAL(ipc_checkpoint_callback, ipc);

/* Graphene's checkpoint() syscall broadcasts a msg to all processes
 * asking to checkpoint their state and save in process-unique file in
 * directory cpdir under session cpsession. */
int ipc_checkpoint_send(const char* cpdir, IDTYPE cpsession) {
    BEGIN_PROFILE_INTERVAL();
    int ret;
    size_t len = strlen(cpdir);
    /* payload: shim_ipc_checkpoint struct plus the NUL-terminated directory */
    size_t total_msg_size = get_ipc_msg_size(sizeof(struct shim_ipc_checkpoint) + len + 1);
    struct shim_ipc_msg* msg = __alloca(total_msg_size);
    /* NOTE(review): total_msg_size already went through get_ipc_msg_size(),
     * and init_ipc_msg() applies get_ipc_msg_size() to its size argument
     * again — verify against its definition that the header is not counted
     * twice (msg->size larger than the __alloca'd buffer would over-read). */
    init_ipc_msg(msg, IPC_CHECKPOINT, total_msg_size, 0);
    struct shim_ipc_checkpoint* msgin = (struct shim_ipc_checkpoint*)&msg->msg;
    msgin->cpsession = cpsession;
    memcpy(&msgin->cpdir, cpdir, len + 1);
    debug("IPC broadcast to all: IPC_CHECKPOINT(%u, %s)\n", cpsession, cpdir);
    /* broadcast to all including myself (so I can also checkpoint) */
    ret = broadcast_ipc(msg, IPC_PORT_DIRCLD | IPC_PORT_DIRPRT,
                        /*exclude_port=*/NULL);
    SAVE_PROFILE_INTERVAL(ipc_checkpoint_send);
    return ret;
}
/* This process is asked to create a checkpoint, so it:
 * - creates its own checkpoint under msgin->cpdir,
 * - sends a Graphene-specific SIGCP signal to all its threads (for
 *   all to stop and join the checkpoint for consistent state),
 * - broadcasts checkpoint msg further to other processes (excluding the
 *   port the message arrived on, to avoid echoing it back). */
int ipc_checkpoint_callback(struct shim_ipc_msg* msg, struct shim_ipc_port* port) {
    BEGIN_PROFILE_INTERVAL();
    int ret = 0;
    struct shim_ipc_checkpoint* msgin = (struct shim_ipc_checkpoint*)msg->msg;
    debug("IPC callback from %u: IPC_CHECKPOINT(%u, %s)\n", msg->src, msgin->cpsession,
          msgin->cpdir);
    ret = create_checkpoint(msgin->cpdir, &msgin->cpsession);
    if (ret < 0)
        goto out;
    /* stop all local threads so they join a consistent checkpoint */
    kill_all_threads(NULL, msgin->cpsession, SIGCP);
    /* forward to neighbor processes; best-effort (result not checked) */
    broadcast_ipc(msg, IPC_PORT_DIRCLD | IPC_PORT_DIRPRT, port);
out:
    SAVE_PROFILE_INTERVAL(ipc_checkpoint_callback);
    return ret;
}
/* Checkpoint routine for struct shim_ipc_info: copies the object into the
 * checkpoint region once (subsequent calls for the same object reuse the
 * mapped offset), resets its refcount, and registers uri/pal_handle for
 * re-creation on restore. It has no restore counterpart (END_CP_FUNC_NO_RS):
 * infos are restored only as members of shim_process. */
BEGIN_CP_FUNC(ipc_info) {
    __UNUSED(size);
    assert(size == sizeof(struct shim_ipc_info));
    struct shim_ipc_info* info = (struct shim_ipc_info*)obj;
    struct shim_ipc_info* new_info = NULL;
    ptr_t off = GET_FROM_CP_MAP(obj);
    if (!off) {
        off = ADD_CP_OFFSET(sizeof(struct shim_ipc_info));
        ADD_TO_CP_MAP(obj, off);
        new_info = (struct shim_ipc_info*)(base + off);
        memcpy(new_info, info, sizeof(struct shim_ipc_info));
        /* refcount starts at 0 in the restored process; restore code of
         * shim_process re-acquires references as needed */
        REF_SET(new_info->ref_count, 0);
        /* call qstr-specific checkpointing function for new_info->uri */
        DO_CP_IN_MEMBER(qstr, new_info, uri);
        if (info->pal_handle) {
            struct shim_palhdl_entry* entry;
            /* call palhdl-specific checkpointing function to checkpoint
             * info->pal_handle and return created object in entry */
            DO_CP(palhdl, info->pal_handle, &entry);
            /* info's PAL handle will be re-opened with new URI during
             * palhdl restore (see checkpoint.c) */
            entry->uri = &new_info->uri;
            entry->phandle = &new_info->pal_handle;
        }
    } else {
        /* already checkpointed */
        new_info = (struct shim_ipc_info*)(base + off);
    }
    if (new_info && objp)
        *objp = (void*)new_info;
}
END_CP_FUNC_NO_RS(ipc_info)
/* Checkpoint routine for struct shim_process: copies the process object into
 * the checkpoint region (once per object) and recursively checkpoints its
 * self, parent, and namespace-leader IPC infos as members. */
BEGIN_CP_FUNC(process) {
    __UNUSED(size);
    assert(size == sizeof(struct shim_process));
    struct shim_process* process = (struct shim_process*)obj;
    struct shim_process* new_process = NULL;
    ptr_t off = GET_FROM_CP_MAP(obj);
    if (!off) {
        off = ADD_CP_OFFSET(sizeof(struct shim_process));
        ADD_TO_CP_MAP(obj, off);
        new_process = (struct shim_process*)(base + off);
        memcpy(new_process, process, sizeof(struct shim_process));
        /* call ipc_info-specific checkpointing functions
         * for new_process's self, parent, and ns infos */
        if (process->self)
            DO_CP_MEMBER(ipc_info, process, new_process, self);
        if (process->parent)
            DO_CP_MEMBER(ipc_info, process, new_process, parent);
        for (int i = 0; i < TOTAL_NS; i++)
            if (process->ns[i])
                DO_CP_MEMBER(ipc_info, process, new_process, ns[i]);
        /* record this entry so the restore function (below) can find it */
        ADD_CP_FUNC_ENTRY(off);
    } else {
        /* already checkpointed */
        new_process = (struct shim_process*)(base + off);
    }
    if (objp)
        *objp = (void*)new_process;
}
END_CP_FUNC(process)
/* Restore routine for struct shim_process: rebases the checkpointed pointers
 * into this address space, re-acquires references on the restored IPC infos,
 * and adopts the restored state as cur_process. */
BEGIN_RS_FUNC(process) {
    __UNUSED(offset);
    struct shim_process* process = (void*)(base + GET_CP_FUNC_ENTRY());
    /* process vmid = 0: fork/clone case, forces to pick up new host-OS vmid
     * process vmid != 0: execve case, forces to re-use vmid of parent */
    if (!process->vmid)
        process->vmid = cur_process.vmid;
    /* fix up checkpoint-relative pointers for this address space */
    CP_REBASE(process->self);
    CP_REBASE(process->parent);
    CP_REBASE(process->ns);
    if (process->self) {
        /* self info must carry the (possibly newly assigned) vmid */
        process->self->vmid = process->vmid;
        get_ipc_info(process->self);
    }
    if (process->parent)
        get_ipc_info(process->parent);
    for (int i = 0; i < TOTAL_NS; i++)
        if (process->ns[i])
            get_ipc_info(process->ns[i]);
    /* adopt the restored process state as our own */
    memcpy(&cur_process, process, sizeof(struct shim_process));
    // this lock will be created in init_ipc
    clear_lock(&cur_process.lock);
    DEBUG_RS("vmid=%u,uri=%s,parent=%u(%s)", process->vmid,
             process->self ? qstrgetstr(&process->self->uri) : "",
             process->parent ? process->parent->vmid : 0,
             process->parent ? qstrgetstr(&process->parent->uri) : "");
}
END_RS_FUNC(process)