shim_ipc_nsimpl.h
  1. /* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */
  2. /* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */
  3. /* Copyright (C) 2014 Stony Brook University
  4. This file is part of Graphene Library OS.
  5. Graphene Library OS is free software: you can redistribute it and/or
  6. modify it under the terms of the GNU Lesser General Public License
  7. as published by the Free Software Foundation, either version 3 of the
  8. License, or (at your option) any later version.
  9. Graphene Library OS is distributed in the hope that it will be useful,
  10. but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. GNU Lesser General Public License for more details.
  13. You should have received a copy of the GNU Lesser General Public License
  14. along with this program. If not, see <http://www.gnu.org/licenses/>. */
  15. /*
  16. * shim_ipc_nsimpl.h
  17. *
  18. * This file contains a template for generic functions and callbacks to
  19. * implement a namespace.
  20. */
  21. #include <shim_internal.h>
  22. #include <shim_ipc.h>
  23. #include <shim_utils.h>
  24. #include <shim_profile.h>
  25. #include <errno.h>
  26. #ifndef INCLUDE_IPC_NSIMPL
  27. # warning "Be sure before including \"shim_ipc_nsimpl.h\"."
  28. #endif
  29. #ifdef __SHIM_IPC_NSIMPL__
  30. # error "Include \"shim_ipc_nsimpl.h\" only once."
  31. #endif
  32. #define __SHIM_IPC_NSIMPL__
  33. #if !defined(NS) || !defined(NS_CAP)
  34. # error "NS or NS_CAP is not defined"
  35. #endif
  36. #define NS_STR XSTRINGIFY(NS)
  37. #define NS_CAP_STR XSTRINGIFY(NS_CAP)
  38. #define RANGE_SIZE CONCAT2(NS_CAP, RANGE_SIZE)
  39. #define BITS (sizeof(unsigned char) * 8)
/* One bit per ID inside a range: tracks which IDs of an owned range have
 * been handed out by CONCAT2(allocate, NS)(). */
struct idx_bitmap {
    unsigned char map[RANGE_SIZE / BITS];
};

/* A single ID delegated (sub-leased) to another process. */
struct subrange {
    struct shim_ipc_info * owner;
    LEASETYPE lease;
};

/* Per-range table of subranges, one slot per ID in the range. */
struct sub_map {
    struct subrange * map[RANGE_SIZE];
};

DEFINE_LIST(range);
struct range {
    LIST_TYPE(range) hlist;        /* link in a range_table hash bucket */
    LIST_TYPE(range) list;         /* link in owned_ranges or offered_ranges */
    int offset;                    /* range index; covers IDs
                                      [offset*RANGE_SIZE+1 .. +RANGE_SIZE] */
    struct shim_ipc_info * owner;  /* owning process; may be NULL for
                                      placeholder ranges */
    LEASETYPE lease;               /* lease expiration of the whole range */
    struct idx_bitmap * used;      /* lazily allocated allocation bitmap */
    struct sub_map * subranges;    /* lazily allocated subrange table */
};

/* Presence bitmap: bit N set means a struct range exists for offset N.
 * map_size counts BITS (range offsets), not bytes; the flexible array
 * holds map_size / BITS bytes. */
struct range_bitmap {
    int map_size;
    unsigned char map[];
};

/* Helper functions __*_range_*() must be called with range_map_lock held */
static struct range_bitmap * range_map;
static struct shim_lock range_map_lock;

#define RANGE_HASH_LEN 6
#define RANGE_HASH_NUM (1 << RANGE_HASH_LEN)
#define RANGE_HASH_MASK (RANGE_HASH_NUM - 1)
/* NOTE(review): this hashes (off - 1) / RANGE_SIZE although callers pass a
 * range offset directly, so runs of RANGE_SIZE consecutive offsets share a
 * bucket -- confirm this distribution is intended. */
#define RANGE_HASH(off) (((off - 1) / RANGE_SIZE) & RANGE_HASH_MASK)

/* This hash table organizes range structs by hlist */
DEFINE_LISTP(range);
static LISTP_TYPE(range) range_table [RANGE_HASH_NUM];

/* These lists organize range structs by list, sorted by offset */
static LISTP_TYPE(range) owned_ranges;
static LISTP_TYPE(range) offered_ranges;

/* Counters of owned ranges, offered ranges, and live subranges. */
static int nowned = 0;
static int noffered = 0;
static int nsubed = 0;

/* A pending FINDNS query to be answered once the namespace leader is known. */
DEFINE_LIST(ns_query);
struct ns_query {
    IDTYPE dest;                  /* vmid to answer */
    unsigned long seq;            /* sequence number of the original message */
    struct shim_ipc_port * port;  /* port to answer on (holds a reference) */
    LIST_TYPE(ns_query) list;
};
DEFINE_LISTP(ns_query);
static LISTP_TYPE(ns_query) ns_queries;
/* Compute a fresh lease expiration: current time plus the namespace's
 * configured lease duration <NS_CAP>_LEASE_TIME. */
static inline LEASETYPE get_lease (void)
{
    return DkSystemTimeQuery() + CONCAT2(NS_CAP, LEASE_TIME);
}
  94. void CONCAT3(debug_print, NS, ranges) (void)
  95. {
  96. lock(&range_map_lock);
  97. SYS_PRINTF(NS_STR " ranges in process %010u:\n", cur_process.vmid);
  98. if (!range_map) {
  99. unlock(&range_map_lock);
  100. return;
  101. }
  102. for (int i = 0 ; i < range_map->map_size ; i++) {
  103. unsigned char map = range_map->map[i];
  104. if (!map)
  105. continue;
  106. for (int j = 0 ; j < BITS ; map >>= 1, j++) {
  107. if (!(map & 1))
  108. continue;
  109. int off = i * BITS + j;
  110. LISTP_TYPE(range) * head = range_table + RANGE_HASH(off);
  111. struct range * tmp, * r = NULL;
  112. listp_for_each_entry(tmp, head, hlist)
  113. if (tmp->offset == off) {
  114. r = tmp;
  115. break;
  116. }
  117. assert(r);
  118. IDTYPE base = RANGE_SIZE * off + 1;
  119. struct shim_ipc_info * p = r->owner;
  120. SYS_PRINTF("%04u - %04u: owner %010u, port \"%s\" lease %lu\n",
  121. base, base + RANGE_SIZE - 1,
  122. p->vmid, qstrgetstr(&p->uri), r->lease);
  123. if (!r->subranges)
  124. continue;
  125. for (int k = 0 ; k < RANGE_SIZE ; k++) {
  126. struct subrange * s = r->subranges->map[j];
  127. if (!s)
  128. continue;
  129. p = s->owner;
  130. SYS_PRINTF(" %04u: owner %010u, port \"%s\" lease %lu\n",
  131. base + k, p->vmid,
  132. qstrgetstr(&p->uri), s->lease);
  133. }
  134. }
  135. }
  136. unlock(&range_map_lock);
  137. }
  138. #define INIT_RANGE_MAP_SIZE 32
  139. static int __extend_range_bitmap (int expected)
  140. {
  141. int size = INIT_RANGE_MAP_SIZE;
  142. if (range_map)
  143. size = range_map->map_size;
  144. while (size <= expected)
  145. size *= 2;
  146. struct range_bitmap * new_map = malloc(sizeof(struct range_bitmap) +
  147. size / BITS);
  148. if (!new_map)
  149. return -ENOMEM;
  150. if (range_map) {
  151. memcpy(new_map->map, range_map->map, range_map->map_size / BITS);
  152. memset(new_map->map + range_map->map_size / BITS, 0,
  153. (size - range_map->map_size) / BITS);
  154. free(range_map);
  155. } else {
  156. memset(new_map->map, 0, size / BITS);
  157. }
  158. new_map->map_size = size;
  159. range_map = new_map;
  160. return 0;
  161. }
  162. static int __set_range_bitmap (int off, bool unset)
  163. {
  164. int i = off / BITS;
  165. int j = off - i * BITS;
  166. unsigned char * m = range_map->map + i;
  167. unsigned char f = 1U << j;
  168. if (unset) {
  169. if (!((*m) & f))
  170. return -ENOENT;
  171. (*m) &= ~f;
  172. } else {
  173. if ((*m) & f)
  174. return -EEXIST;
  175. (*m) |= f;
  176. }
  177. return 0;
  178. }
  179. static bool __check_range_bitmap (int off)
  180. {
  181. int i = off / BITS;
  182. int j = off - i * BITS;
  183. unsigned char * m = range_map->map + i;
  184. unsigned char f = 1U << j;
  185. return (*m) && ((*m) & f);
  186. }
  187. static struct range * __get_range (int off)
  188. {
  189. LISTP_TYPE(range) * head = range_table + RANGE_HASH(off);
  190. if (!range_map || off >= range_map->map_size)
  191. return NULL;
  192. if (!__check_range_bitmap(off))
  193. return NULL;
  194. struct range * r;
  195. listp_for_each_entry(r, head, hlist)
  196. if (r->offset == off)
  197. return r;
  198. return NULL;
  199. }
/* Register range R at offset OFF, owned by vmid OWNER reachable at URI,
 * with lease LEASE.  If a range already exists at OFF it is unlinked and
 * freed, but its used-bitmap and subrange map are carried over into R.
 * R is then inserted into the hash table and the (offset-sorted) owned or
 * offered list.  Returns 0 or a negative errno.  Caller must hold
 * range_map_lock. */
static int __add_range (struct range * r, int off, IDTYPE owner,
                        const char * uri, LEASETYPE lease)
{
    LISTP_TYPE(range) * head = range_table + RANGE_HASH(off);
    int ret = 0;

    /* Grow the presence bitmap if OFF does not fit yet. */
    if (!range_map || range_map->map_size <= off) {
        ret = __extend_range_bitmap(off);
        if (ret < 0)
            return ret;
    }

    r->owner = NULL;
    r->offset = off;
    r->lease = lease;
    r->used = NULL;
    r->subranges = NULL;

    if (owner) {
        r->owner = lookup_and_alloc_client(owner, uri);
        if (!r->owner)
            return -ENOMEM;
    }

    ret = __set_range_bitmap(off, false);
    if (ret == -EEXIST) {
        /* An older range occupies OFF: unlink it from the hash table and
         * owned/offered lists, keeping its used/subrange state in R. */
        struct range * tmp;

        listp_for_each_entry(tmp, head, hlist)
            if (tmp->offset == off) {
                listp_del(tmp, head, hlist);

                /* Chia-Che Tsai 10/17/17: only when tmp->owner is non-NULL,
                 * and tmp->owner->vmid == cur_process.vmid, tmp is on the
                 * owned list, otherwise it is an offered. */
                if (tmp->owner && tmp->owner->vmid == cur_process.vmid) {
                    listp_del(tmp, &owned_ranges, list);
                    nowned--;
                } else {
                    listp_del(tmp, &offered_ranges, list);
                    noffered--;
                }

                if (tmp->owner)
                    put_ipc_info(tmp->owner);

                r->used = tmp->used;
                r->subranges = tmp->subranges;
                free(tmp);
                break;
            }
    }

    INIT_LIST_HEAD(r, hlist);
    listp_add(r, head, hlist);
    INIT_LIST_HEAD(r, list);

    /* Insert into owned_ranges/offered_ranges keeping the list sorted by
     * offset: find the last entry whose offset is smaller than OFF. */
    LISTP_TYPE(range)* list = (owner == cur_process.vmid) ? &owned_ranges
                              : &offered_ranges;
    struct range * prev = listp_first_entry(list, range, list);
    struct range * tmp;

    listp_for_each_entry(tmp, list, list) {
        if (tmp->offset >= off)
            break;
        prev = tmp;
    }

    listp_add_after(r, prev, list, list);

    if (owner == cur_process.vmid)
        nowned++;
    else
        noffered++;

    return 0;
}
  263. int CONCAT3(add, NS, range) (IDTYPE base, IDTYPE owner,
  264. const char * uri, LEASETYPE lease)
  265. {
  266. int off = (base - 1) / RANGE_SIZE;
  267. int ret;
  268. struct range * r = malloc(sizeof(struct range));
  269. if (!r)
  270. return -ENOMEM;
  271. lock(&range_map_lock);
  272. r->owner = NULL;
  273. ret = __add_range(r, off, owner, uri, lease);
  274. if (ret < 0)
  275. free(r);
  276. unlock(&range_map_lock);
  277. return ret;
  278. }
  279. static void CONCAT3(__del, NS, subrange) (struct subrange ** ptr)
  280. {
  281. struct subrange * s = *ptr;
  282. *ptr = NULL;
  283. put_ipc_info(s->owner);
  284. free(s);
  285. nsubed--;
  286. }
/* Register a subrange for the single ID IDX, delegated to OWNER at URI.
 * If LEASE is NULL or *LEASE is zero, a fresh lease is generated; the
 * effective lease is written back through LEASE on success.  Creates the
 * enclosing (ownerless) range and the sub_map on demand.  Replaces any
 * existing subrange at IDX.  Returns 0 or a negative errno. */
int CONCAT3(add, NS, subrange) (IDTYPE idx, IDTYPE owner,
                                const char * uri, LEASETYPE * lease)
{
    int off = (idx - 1) / RANGE_SIZE, err = 0;
    IDTYPE base = off * RANGE_SIZE + 1;
    struct subrange * s = malloc(sizeof(struct subrange));
    if (!s)
        return -ENOMEM;

    assert(owner);
    lock(&range_map_lock);

    s->owner = lookup_and_alloc_client(owner, uri);
    if (!s->owner) {
        err = -ENOMEM;
        goto failed;
    }

    s->lease = (lease && (*lease)) ? (*lease) : get_lease();

    /* Ensure an enclosing range exists; create an ownerless one if not.
     * NOTE(review): if a later step fails, a freshly created range is left
     * registered rather than rolled back -- harmless but asymmetric. */
    struct range * r = __get_range(off);
    if (!r) {
        r = malloc(sizeof(struct range));
        if (!r) {
            err = -ENOMEM;
            goto failed;
        }

        if ((err = __add_range(r, off, 0, NULL, 0)) < 0) {
            free(r);
            goto failed;
        }
    }

    if (!r->subranges) {
        r->subranges = calloc(1, sizeof(struct sub_map));
        if (!r->subranges) {
            err = -ENOMEM;
            goto failed;
        }
    }

    /* Replace any existing subrange at this index. */
    struct subrange ** m = &r->subranges->map[idx - base];

    if (*m)
        CONCAT3(__del, NS, subrange)(m);

    (*m) = s;
    nsubed++;

    if (lease)
        *lease = s->lease;

    unlock(&range_map_lock);
    return 0;

failed:
    /* s->owner is NULL when lookup_and_alloc_client() itself failed. */
    if (s->owner)
        put_ipc_info(s->owner);

    unlock(&range_map_lock);
    free(s);
    return err;
}
  338. int CONCAT3(alloc, NS, range) (IDTYPE owner, const char * uri,
  339. IDTYPE * base, LEASETYPE * lease)
  340. {
  341. struct range * r = malloc(sizeof(struct range));
  342. if (!r)
  343. return -ENOMEM;
  344. int ret = 0;
  345. lock(&range_map_lock);
  346. r->owner = NULL;
  347. int i = 0, j = 0;
  348. if (range_map)
  349. for (i = 0 ; i < range_map->map_size ; i++) {
  350. unsigned char map = range_map->map[i];
  351. if (map < 255U) {
  352. for (j = 0 ; j < BITS ; map >>= 1, j++)
  353. if (!(map & 1U))
  354. break;
  355. assert(j < BITS);
  356. break;
  357. }
  358. }
  359. LEASETYPE l = get_lease();
  360. ret = __add_range(r, i * BITS + j, owner, uri, l);
  361. if (ret < 0) {
  362. if (r->owner)
  363. put_ipc_info(r->owner);
  364. free(r);
  365. goto out;
  366. }
  367. if (base)
  368. *base = (i * BITS + j) * RANGE_SIZE + 1;
  369. if (lease)
  370. *lease = l;
  371. out:
  372. unlock(&range_map_lock);
  373. return ret;
  374. }
/* Look up the range (or subrange) covering ID IDX.  Fills RANGE with the
 * base/size/lease/owner/uri/port of the covering unit (size == 1 when IDX
 * is covered by a subrange).  If INFO is non-NULL, also returns the owner's
 * shim_ipc_info with an extra reference.  Returns 0 or -ESRCH. */
int CONCAT3(get, NS, range) (IDTYPE idx,
                             struct CONCAT2(NS, range) * range,
                             struct shim_ipc_info ** info)
{
    int off = (idx - 1) / RANGE_SIZE;

    lock(&range_map_lock);

    struct range * r = __get_range(off);
    if (!r) {
        unlock(&range_map_lock);
        return -ESRCH;
    }

    IDTYPE base = r->offset * RANGE_SIZE + 1;
    IDTYPE sz = RANGE_SIZE;
    LEASETYPE l = r->lease;
    struct shim_ipc_info * p = r->owner;

    /* A subrange registered for exactly IDX overrides the enclosing range. */
    if (r->subranges && r->subranges->map[idx - base]) {
        struct subrange * s = r->subranges->map[idx - base];
        base = idx;
        sz = 1;
        l = s->lease;
        p = s->owner;
    }

    /* Ownerless placeholder ranges do not answer queries. */
    if (!p) {
        unlock(&range_map_lock);
        return -ESRCH;
    }

    if (p->port)
        get_ipc_port(p->port);

    range->base = base;
    range->size = sz;
    range->lease = l;
    range->owner = p->vmid;
    qstrcopy(&range->uri, &p->uri);
    range->port = p->port;

    if (info) {
        get_ipc_info(p);
        *info = p;
    }

    unlock(&range_map_lock);
    return 0;
}
  416. int CONCAT3(del, NS, range) (IDTYPE idx)
  417. {
  418. int off = (idx - 1) / RANGE_SIZE;
  419. int ret = -ESRCH;
  420. lock(&range_map_lock);
  421. struct range * r = __get_range(off);
  422. if (!r)
  423. goto failed;
  424. if (r->subranges) {
  425. for (int i = 0 ; i < RANGE_SIZE ; i++)
  426. if (r->subranges->map[i]) {
  427. ret = -EBUSY;
  428. goto failed;
  429. }
  430. }
  431. ret = __set_range_bitmap(off, true);
  432. if (ret < 0)
  433. goto failed;
  434. if (r->owner->vmid == cur_process.vmid)
  435. nowned--;
  436. else
  437. noffered--;
  438. if (r->subranges)
  439. free(r->subranges);
  440. if (r->used)
  441. free(r->used);
  442. // Re-acquire the head; kind of ugly
  443. LISTP_TYPE(range) * head = range_table + RANGE_HASH(off);
  444. listp_del(r, head, hlist);
  445. /* Chia-Che Tsai 10/17/17: only when r->owner is non-NULL,
  446. * and r->owner->vmid == cur_process.vmid, r is on the
  447. * owned list, otherwise it is an offered. */
  448. if (r->owner && r->owner->vmid == cur_process.vmid)
  449. listp_del(r, &owned_ranges, list);
  450. else
  451. listp_del(r, &offered_ranges, list);
  452. put_ipc_info(r->owner);
  453. free(r);
  454. ret = 0;
  455. failed:
  456. unlock(&range_map_lock);
  457. return ret;
  458. }
  459. int CONCAT3(del, NS, subrange) (IDTYPE idx)
  460. {
  461. int off = (idx - 1) / RANGE_SIZE;
  462. IDTYPE base = off * RANGE_SIZE + 1;
  463. int ret = -ESRCH;
  464. lock(&range_map_lock);
  465. struct range * r = __get_range(off);
  466. if (!r)
  467. goto failed;
  468. if (!r->subranges || !r->subranges->map[idx - base])
  469. goto failed;
  470. CONCAT3(__del, NS, subrange) (&r->subranges->map[idx - base]);
  471. ret = 0;
  472. failed:
  473. unlock(&range_map_lock);
  474. return ret;
  475. }
  476. int CONCAT3(renew, NS, range) (IDTYPE idx, LEASETYPE * lease)
  477. {
  478. int off = (idx - 1) / RANGE_SIZE;
  479. lock(&range_map_lock);
  480. struct range * r = __get_range(off);
  481. if (!r) {
  482. unlock(&range_map_lock);
  483. return -ESRCH;
  484. }
  485. r->lease = get_lease();
  486. if (lease)
  487. *lease = r->lease;
  488. unlock(&range_map_lock);
  489. return 0;
  490. }
  491. int CONCAT3(renew, NS, subrange) (IDTYPE idx, LEASETYPE * lease)
  492. {
  493. int off = (idx - 1) / RANGE_SIZE;
  494. IDTYPE base = off * RANGE_SIZE + 1;
  495. lock(&range_map_lock);
  496. struct range * r = __get_range(off);
  497. if (!r) {
  498. unlock(&range_map_lock);
  499. return -ESRCH;
  500. }
  501. if (!r->subranges || !r->subranges->map[idx - base]) {
  502. unlock(&range_map_lock);
  503. return -ESRCH;
  504. }
  505. struct subrange * s = r->subranges->map[idx - base];
  506. s->lease = get_lease();
  507. if (lease)
  508. *lease = s->lease;
  509. unlock(&range_map_lock);
  510. return 0;
  511. }
/* Allocate one free ID in [MIN, MAX) from the ranges this process owns
 * (MAX == 0 means no upper bound).  Marks the ID as used in the range's
 * idx_bitmap and returns it, or 0 if nothing is available. */
IDTYPE CONCAT2(allocate, NS) (IDTYPE min, IDTYPE max)
{
    IDTYPE idx = min;
    struct range * r;
    lock(&range_map_lock);

    /* owned_ranges is sorted by offset, so idx only moves forward. */
    listp_for_each_entry (r, &owned_ranges, list) {
        if (max && idx >= max)
            break;

        IDTYPE base = r->offset * RANGE_SIZE + 1;
        if (idx >= base + RANGE_SIZE)
            continue;
        if (idx < base)
            idx = base;

        /* Lazily create the per-range allocation bitmap. */
        if (!r->used) {
            r->used = calloc(1, sizeof(struct idx_bitmap));
            if (!r->used)
                continue;
        }

        int i = (idx - base) / BITS;
        int j = (idx - base) - i * BITS;
        unsigned char * m = r->used->map + i;
        unsigned char f = 1U << j;

        for ( ; i < RANGE_SIZE / BITS ; i++, j = 0, f = 1U, m++) {
            /* XOR with (f - 1) pretends all bits below j are taken so the
             * full-byte test also skips positions before the start index. */
            unsigned char map = (*m) ^ (f - 1);

            if (map < 255U) {
                for ( ; j < BITS ; f <<= 1, j++)
                    if (!(map & f)) {
                        (*m) |= f;
                        idx = base + i * BITS + j;
                        debug("allocated " NS_STR ": %u\n", idx);
                        goto out;
                    }
            }
        }
    }

    /* No free ID found in any owned range. */
    idx = 0;
out:
    unlock(&range_map_lock);
    return idx;
}
/* Release ID IDX: drop any subrange registered for it and clear its bit in
 * the owning range's allocation bitmap.  Unknown IDs are ignored. */
void CONCAT2(release, NS) (IDTYPE idx)
{
    int off = (idx - 1) / RANGE_SIZE;
    IDTYPE base = off * RANGE_SIZE + 1;

    lock(&range_map_lock);
    struct range * r = __get_range(off);
    if (!r)
        goto out;

    if (r->subranges && r->subranges->map[idx - base])
        CONCAT3(__del, NS, subrange) (&r->subranges->map[idx - base]);

    if (!r->used)
        goto out;

    /* NOTE(review): base is derived from idx, so base <= idx < base +
     * RANGE_SIZE always holds and this guard cannot fire; kept for safety. */
    if (idx < base || idx >= base + RANGE_SIZE)
        goto out;

    int i = (idx - base) / BITS;
    int j = (idx - base) - i * BITS;
    unsigned char * m = r->used->map + i;
    unsigned char f = 1U << j;
    if ((*m) & f) {
        debug("released " NS_STR ": %u\n", idx);
        (*m) &= ~f;
    }
out:
    unlock(&range_map_lock);
}
/* One-time setup of this namespace template: initialize the lock that
 * protects range_map and all range bookkeeping lists. */
static inline void init_namespace (void)
{
    create_lock(&range_map_lock);
}
/* NS_ID: index of this namespace in cur_process.ns[]; the double expansion
 * makes sure NS_CAP itself is macro-expanded before token pasting. */
#define _NS_ID(ns) __NS_ID(ns)
#define __NS_ID(ns) ns##_NS
#define NS_ID _NS_ID(NS_CAP)
/* Shorthand for the leader info of this namespace in the current process. */
#define NS_LEADER cur_process.ns[NS_ID]
/* Name builders for the send/callback functions, message codes, and message
 * types of this namespace, e.g. ipc_<NS>_findns_send / IPC_<NS_CAP>_FINDNS. */
#define NS_SEND(t) CONCAT3(ipc, NS, t##_send)
#define NS_CALLBACK(t) CONCAT3(ipc, NS, t##_callback)
#define NS_CODE(t) CONCAT3(IPC, NS_CAP, t)
#define NS_CODE_STR(t) "IPC_" NS_CAP_STR "_" #t
#define NS_MSG_TYPE(t) struct CONCAT3(shim_ipc, NS, t)
/* Namespace-specific port type constants (client/leader/connection/owner). */
#define PORT(ns, t) __PORT(ns, t)
#define __PORT(ns, t) IPC_PORT_##ns##t
#define IPC_PORT_CLT PORT(NS_CAP, CLT)
#define IPC_PORT_LDR PORT(NS_CAP, LDR)
#define IPC_PORT_CON PORT(NS_CAP, CON)
#define IPC_PORT_OWN PORT(NS_CAP, OWN)
  596. static void ipc_leader_exit (struct shim_ipc_port * port, IDTYPE vmid,
  597. unsigned int exitcode)
  598. {
  599. lock(&cur_process.lock);
  600. if (!NS_LEADER || NS_LEADER->port != port) {
  601. unlock(&cur_process.lock);
  602. return;
  603. }
  604. struct shim_ipc_info * info = NS_LEADER;
  605. NS_LEADER = NULL;
  606. unlock(&cur_process.lock);
  607. debug("ipc port %p of process %u closed suggests " NS_STR " leader exits\n",
  608. port, vmid);
  609. put_ipc_info(info);
  610. }
/*
 * __discover_ns(): Discover the leader of this namespace.
 * @block: Whether to block for discovery.
 * @need_locate: Need the location information of the leader.
 *
 * Must be called WITHOUT cur_process.lock held; the lock is taken here and
 * temporarily dropped around the (possibly blocking) IPC exchange.
 */
static void __discover_ns (bool block, bool need_locate)
{
    bool ipc_pending = false;
    lock(&cur_process.lock);

    if (NS_LEADER) {
        if (NS_LEADER->vmid == cur_process.vmid) {
            /* This process is the leader; create a listening port if the
             * caller needs a locatable URI and none exists yet. */
            if (need_locate && qstrempty(&NS_LEADER->uri)) {
                struct shim_ipc_info * info = create_ipc_port(cur_process.vmid,
                                                              true);
                if (info) {
                    put_ipc_info(NS_LEADER);
                    NS_LEADER = info;
                    add_ipc_port(info->port, 0, IPC_PORT_CLT,
                                 &ipc_leader_exit);
                }
            }
            goto out;
        }

        /* Leader already known with a URI: nothing to do. */
        if (!qstrempty(&NS_LEADER->uri))
            goto out;
    }

    /*
     * Now we need to discover the leader through IPC. Because IPC calls can be blocking,
     * we need to temporarily release cur_process.lock to prevent deadlocks. If the discovery
     * succeeds, NS_LEADER will contain the IPC information of the namespace leader.
     */
    unlock(&cur_process.lock);

    // Send out an IPC message to find out the namespace information.
    // If the call is non-blocking, can't expect the answer when the function finishes.
    if (!NS_SEND(findns)(block)) {
        ipc_pending = !block; // There is still some unfinished business with IPC
        lock(&cur_process.lock);
        assert(NS_LEADER);
        goto out;
    }

    lock(&cur_process.lock);

    /* Another thread may have finished discovery while the lock was
     * dropped. */
    if (NS_LEADER && (!need_locate || !qstrempty(&NS_LEADER->uri)))
        goto out;

    // If all other ways failed, the current process becomes the leader
    if (!need_locate) {
        NS_LEADER = get_new_ipc_info(cur_process.vmid, NULL, 0);
        goto out;
    }

    if (NS_LEADER)
        put_ipc_info(NS_LEADER);

    if (!(NS_LEADER = create_ipc_port(cur_process.vmid, true)))
        goto out;

    // Finally, set the IPC port as a leadership port
    add_ipc_port(NS_LEADER->port, 0, IPC_PORT_CLT, &ipc_leader_exit);

out:
    if (NS_LEADER && !ipc_pending) {
        // Assertions for checking the correctness of __discover_ns()
        assert(NS_LEADER->vmid == cur_process.vmid  // The current process is the leader;
               || NS_LEADER->port                   // Or there is a connected port
               || !qstrempty(&NS_LEADER->uri));     // Or there is a known URI

        if (need_locate)
            assert(!qstrempty(&NS_LEADER->uri));    // A known URI is needed
    }

    unlock(&cur_process.lock);
}
/* Ensure a connection to the namespace leader, discovering it (blocking)
 * if unknown and opening an IPC port to it if needed.  On success returns
 * 0 and optionally yields the leader's vmid and a referenced port (the
 * port stays NULL when this process is itself the leader).  Must not be
 * called with cur_process.lock held. */
static int connect_ns (IDTYPE * vmid, struct shim_ipc_port ** portptr)
{
    __discover_ns(true, false); // This function cannot be called with cur_process.lock held
    lock(&cur_process.lock);

    if (!NS_LEADER) {
        unlock(&cur_process.lock);
        return -ESRCH;
    }

    /* This process is the leader: no port needed. */
    if (NS_LEADER->vmid == cur_process.vmid) {
        if (vmid)
            *vmid = NS_LEADER->vmid;
        unlock(&cur_process.lock);
        return 0;
    }

    if (!NS_LEADER->port) {
        if (qstrempty(&NS_LEADER->uri)) {
            unlock(&cur_process.lock);
            return -ESRCH;
        }

        PAL_HANDLE pal_handle = DkStreamOpen(qstrgetstr(&NS_LEADER->uri),
                                             0, 0, 0, 0);

        if (!pal_handle) {
            unlock(&cur_process.lock);
            return -PAL_ERRNO;
        }

        /* Registers the handle and stores the resulting port in
         * NS_LEADER->port. */
        add_ipc_port_by_id(NS_LEADER->vmid, pal_handle,
                           IPC_PORT_LDR|IPC_PORT_LISTEN, &ipc_leader_exit,
                           &NS_LEADER->port);
    }

    if (vmid)
        *vmid = NS_LEADER->vmid;
    if (portptr) {
        if (NS_LEADER->port)
            get_ipc_port(NS_LEADER->port);
        *portptr = NS_LEADER->port;
    }

    unlock(&cur_process.lock);
    return 0;
}
// Turn off this function as it is not used
// Keep the code for future use
#if 0
/* Drop the connection to the leader on PORT: clear NS_LEADER->port, release
 * the reference it held, and deregister the port's leader role. */
static int disconnect_ns(struct shim_ipc_port * port)
{
    lock(&cur_process.lock);
    if (NS_LEADER && NS_LEADER->port == port) {
        NS_LEADER->port = NULL;
        put_ipc_port(port);
    }
    unlock(&cur_process.lock);
    del_ipc_port(port, IPC_PORT_LDR);
    return 0;
}
#endif
  730. int CONCAT3(prepare, NS, leader) (void)
  731. {
  732. lock(&cur_process.lock);
  733. bool need_discover = (!NS_LEADER || qstrempty(&NS_LEADER->uri));
  734. unlock(&cur_process.lock);
  735. if (need_discover)
  736. __discover_ns(true, true); // This function cannot be called with cur_process.lock held
  737. return 0;
  738. }
/* Find and connect to the owner of ID IDX.  Falls back to querying the
 * namespace leader when the owner is not known locally.  On success yields
 * a referenced port through PORTPTR (the reference is dropped if PORTPTR is
 * NULL) and the owner's vmid through OWNER.  Fails with -ESRCH when the
 * owner cannot be determined or is this process itself. */
static int connect_owner (IDTYPE idx, struct shim_ipc_port ** portptr,
                          IDTYPE * owner)
{
    struct shim_ipc_info * info = NULL;
    struct CONCAT2(NS, range) range;
    memset(&range, 0, sizeof(struct CONCAT2(NS, range)));

    int ret = CONCAT3(get, NS, range) (idx, &range, &info);
    if (ret == -ESRCH) {
        /* Unknown locally: ask the leader, then retry the local lookup. */
        if ((ret = NS_SEND(query)(idx)) < 0)
            return -ESRCH;

        ret = CONCAT3(get, NS, range) (idx, &range, &info);
    }

    if (ret < 0)
        goto out;

    if (range.owner == cur_process.vmid) {
        ret = -ESRCH;
        assert(!range.port);
        goto out;
    }

    if (range.port)
        goto success;

    int type = IPC_PORT_OWN|IPC_PORT_LISTEN;

    if (!range.port) {
        PAL_HANDLE pal_handle = DkStreamOpen(qstrgetstr(&range.uri),
                                             0, 0, 0, 0);

        if (!pal_handle) {
            ret = -PAL_ERRNO ? : -EACCES;
            goto out;
        }

        add_ipc_port_by_id(range.owner, pal_handle, type, NULL, &range.port);
        assert(range.port);
    }

    /* Cache the freshly opened port in the owner's ipc_info so future
     * lookups find it. */
    lock(&range_map_lock);
    if (info->port)
        put_ipc_port(info->port);
    get_ipc_port(range.port);
    info->port = range.port;
    unlock(&range_map_lock);

success:
    if (portptr)
        *portptr = range.port;
    else
        put_ipc_port(range.port);

    if (owner)
        *owner = range.owner;
out:
    if (info)
        put_ipc_info(info);

    /* On success a port must have been produced. */
    assert(ret || range.port);
    return ret;
}
  790. DEFINE_PROFILE_INTERVAL(NS_SEND(findns), ipc);
  791. DEFINE_PROFILE_INTERVAL(NS_CALLBACK(findns), ipc);
/* Send a FINDNS request to the parent process to locate the namespace
 * leader.  When BLOCK is true, a duplex message is used and the call waits
 * for the reply; otherwise a one-way message is fired and the answer (if
 * any) arrives later via the tellns callback.  Returns 0 or a negative
 * errno (-ESRCH when there is no connected parent). */
int NS_SEND(findns) (bool block)
{
    BEGIN_PROFILE_INTERVAL();
    int ret = -ESRCH;
    lock(&cur_process.lock);

    if (!cur_process.parent || !cur_process.parent->port) {
        unlock(&cur_process.lock);
        goto out;
    }

    IDTYPE dest = cur_process.parent->vmid;
    struct shim_ipc_port * port = cur_process.parent->port;
    get_ipc_port(port);  /* keep the port alive after dropping the lock */
    unlock(&cur_process.lock);

    if (block) {
        struct shim_ipc_msg_obj * msg =
            create_ipc_msg_duplex_on_stack(NS_CODE(FINDNS), 0, dest);

        debug("ipc send to %u: " NS_CODE_STR(FINDNS) "\n", dest);

        ret = do_ipc_duplex(msg, port, NULL, NULL);
        goto out_port;
    }

    struct shim_ipc_msg * msg =
        create_ipc_msg_on_stack(NS_CODE(FINDNS), 0, dest);

    debug("ipc send to %u: " NS_CODE_STR(FINDNS) "\n", dest);

    ret = send_ipc_message(msg, port);

out_port:
    put_ipc_port(port);
out:
    SAVE_PROFILE_INTERVAL(NS_SEND(findns));
    return ret;
}
/* Handle a FINDNS request from a child: reply with the leader's identity if
 * it is already known, otherwise queue the request on ns_queries so it can
 * be answered once a TELLNS arrives. */
int NS_CALLBACK(findns) (IPC_CALLBACK_ARGS)
{
    BEGIN_PROFILE_INTERVAL();
    debug("ipc callback from %u: " NS_CODE_STR(FINDNS) "\n",
          msg->src);
    int ret = 0;
    __discover_ns(false, true); // This function cannot be called with cur_process.lock held
    lock(&cur_process.lock);

    if (NS_LEADER && !qstrempty(&NS_LEADER->uri)) {
        // Got the answer! Send back the discovery now.
        ret = NS_SEND(tellns)(port, msg->src, NS_LEADER, msg->seq);
    } else {
        // Don't know the answer yet, set up a callback for sending the discovery later.
        struct ns_query * query = malloc(sizeof(struct ns_query));
        if (query) {
            /* Remember who asked (and take a port reference) so the TELLNS
             * handler can reply to them later. */
            query->dest = msg->src;
            query->seq = msg->seq;
            get_ipc_port(port);
            query->port = port;
            INIT_LIST_HEAD(query, list);
            listp_add_tail(query, &ns_queries, list);
        } else {
            ret = -ENOMEM;
        }
    }

    unlock(&cur_process.lock);
    SAVE_PROFILE_INTERVAL(NS_CALLBACK(findns));
    return ret;
}
  851. DEFINE_PROFILE_INTERVAL(NS_SEND(tellns), ipc);
  852. DEFINE_PROFILE_INTERVAL(NS_CALLBACK(tellns), ipc);
  853. int NS_SEND(tellns) (struct shim_ipc_port * port, IDTYPE dest,
  854. struct shim_ipc_info * leader, unsigned long seq)
  855. {
  856. BEGIN_PROFILE_INTERVAL();
  857. struct shim_ipc_msg * msg =
  858. create_ipc_msg_on_stack(NS_CODE(TELLNS),
  859. leader->uri.len + sizeof(NS_MSG_TYPE(tellns)),
  860. dest);
  861. NS_MSG_TYPE(tellns) * msgin = (void *) &msg->msg;
  862. msgin->vmid = leader->vmid;
  863. memcpy(msgin->uri, qstrgetstr(&leader->uri), leader->uri.len + 1);
  864. msg->seq = seq;
  865. debug("ipc send to %u: " NS_CODE_STR(TELLNS) "(%u, %s)\n", dest,
  866. leader->vmid, msgin->uri);
  867. int ret = send_ipc_message(msg, port);
  868. SAVE_PROFILE_INTERVAL(NS_SEND(tellns));
  869. return ret;
  870. }
/* Record the leader identity delivered by TELLNS, flush any FINDNS requests
 * that were queued while the leader was unknown, and wake a local thread
 * blocked in a duplex findns. */
int NS_CALLBACK(tellns) (IPC_CALLBACK_ARGS)
{
    BEGIN_PROFILE_INTERVAL();
    NS_MSG_TYPE(tellns) * msgin = (void *) &msg->msg;
    int ret = 0;

    debug("ipc callback from %u: " NS_CODE_STR(TELLNS) "(%u, %s)\n",
          msg->src, msgin->vmid, msgin->uri);

    lock(&cur_process.lock);

    /* Update (or create) the cached leader info. */
    if (NS_LEADER) {
        NS_LEADER->vmid = msgin->vmid;
        qstrsetstr(&NS_LEADER->uri, msgin->uri, strlen(msgin->uri));
    } else {
        NS_LEADER = get_new_ipc_info(msgin->vmid, msgin->uri,
                                     strlen(msgin->uri));
        if (!NS_LEADER) {
            ret = -ENOMEM;
            goto out;
        }
    }

    assert(NS_LEADER->vmid != 0);
    assert(!qstrempty(&NS_LEADER->uri));

    /* Forward the discovery to everyone whose FINDNS we had to defer,
     * dropping the port references taken when they were queued. */
    struct ns_query * query, * pos;
    listp_for_each_entry_safe(query, pos, &ns_queries, list) {
        listp_del(query, &ns_queries, list);
        NS_SEND(tellns)(query->port, query->dest, NS_LEADER, query->seq);
        put_ipc_port(query->port);
        free(query);
    }

    /* Wake the local thread (if any) waiting for this answer. */
    struct shim_ipc_msg_obj * obj = find_ipc_msg_duplex(port, msg->seq);
    if (obj && obj->thread)
        thread_wakeup(obj->thread);

out:
    unlock(&cur_process.lock);
    SAVE_PROFILE_INTERVAL(NS_CALLBACK(tellns));
    return ret;
}
  907. DEFINE_PROFILE_INTERVAL(NS_SEND(lease), ipc);
  908. DEFINE_PROFILE_INTERVAL(NS_CALLBACK(lease), ipc);
/* Request a lease on a fresh range of IDs from the namespace leader.  If we
 * are the leader ourselves, allocate the range locally instead.  On success
 * *lease (if non-NULL) receives the granted lease via the OFFER reply. */
int NS_SEND(lease) (LEASETYPE * lease)
{
    BEGIN_PROFILE_INTERVAL();
    IDTYPE leader;
    struct shim_ipc_port * port = NULL;
    struct shim_ipc_info * self = NULL;
    int ret = 0;

    if ((ret = connect_ns(&leader, &port)) < 0)
        goto out;

    if ((ret = create_ipc_location(&self)) < 0)
        goto out;

    /* Leader is us: allocate directly, no IPC round-trip needed. */
    if (leader == cur_process.vmid) {
        ret = CONCAT3(alloc, NS, range)(cur_process.vmid,
                                        qstrgetstr(&self->uri),
                                        NULL, NULL);
        put_ipc_info(self);
        goto out;
    }

    int len = self->uri.len;
    struct shim_ipc_msg_obj * msg = create_ipc_msg_duplex_on_stack(
                                        NS_CODE(LEASE),
                                        len + sizeof(NS_MSG_TYPE(lease)),
                                        leader);
    NS_MSG_TYPE(lease) * msgin = (void *) &msg->msg.msg;
    assert(!qstrempty(&self->uri));
    memcpy(msgin->uri, qstrgetstr(&self->uri), len + 1);
    put_ipc_info(self);

    debug("ipc send to %u: " NS_CODE_STR(LEASE) "(%s)\n", leader,
          msgin->uri);

    /* The OFFER callback stores the granted lease through `lease`
     * (passed as the duplex message's private pointer). */
    ret = do_ipc_duplex(msg, port, NULL, lease);
out:
    if (port)
        put_ipc_port(port);
    SAVE_PROFILE_INTERVAL(NS_SEND(lease));
    return ret;
}
  945. int NS_CALLBACK(lease) (IPC_CALLBACK_ARGS)
  946. {
  947. BEGIN_PROFILE_INTERVAL();
  948. NS_MSG_TYPE(lease) * msgin = (void *) &msg->msg;
  949. debug("ipc callback from %u: " NS_CODE_STR(LEASE) "(%s)\n",
  950. msg->src, msgin->uri);
  951. IDTYPE base = 0;
  952. LEASETYPE lease = 0;
  953. int ret = CONCAT3(alloc, NS, range)(msg->src, msgin->uri, &base, &lease);
  954. if (ret < 0)
  955. goto out;
  956. ret = NS_SEND(offer)(port, msg->src, base, RANGE_SIZE, lease, msg->seq);
  957. out:
  958. SAVE_PROFILE_INTERVAL(NS_CALLBACK(lease));
  959. return ret;
  960. }
  961. DEFINE_PROFILE_INTERVAL(NS_SEND(offer), ipc);
  962. DEFINE_PROFILE_INTERVAL(NS_CALLBACK(offer), ipc);
  963. int NS_SEND(offer) (struct shim_ipc_port * port, IDTYPE dest, IDTYPE base,
  964. IDTYPE size, LEASETYPE lease, unsigned long seq)
  965. {
  966. BEGIN_PROFILE_INTERVAL();
  967. int ret = 0;
  968. struct shim_ipc_msg * msg = create_ipc_msg_on_stack(NS_CODE(OFFER),
  969. sizeof(NS_MSG_TYPE(offer)), dest);
  970. NS_MSG_TYPE(offer) * msgin = (void *) &msg->msg;
  971. msgin->base = base;
  972. msgin->size = size;
  973. msgin->lease = lease;
  974. msg->seq = seq;
  975. debug("ipc send to %u: " NS_CODE_STR(OFFER) "(%u, %u, %lu)\n",
  976. port->info.vmid, base, size, lease);
  977. ret = send_ipc_message(msg, port);
  978. SAVE_PROFILE_INTERVAL(NS_SEND(offer));
  979. return ret;
  980. }
/* Handle an OFFER reply.  An offer of RANGE_SIZE IDs answers a LEASE
 * request; an offer of size 1 answers a SUBLEASE request (the original
 * request parameters are recovered from the in-flight duplex message).
 * The granted lease is stored through the waiter's private pointer, then
 * the waiting thread is woken. */
int NS_CALLBACK(offer) (IPC_CALLBACK_ARGS)
{
    BEGIN_PROFILE_INTERVAL();
    NS_MSG_TYPE(offer) * msgin = (void *) &msg->msg;

    debug("ipc callback from %u: " NS_CODE_STR(OFFER) "(%u, %u, %lu)\n",
          msg->src, msgin->base, msgin->size, msgin->lease);

    struct shim_ipc_msg_obj * obj = find_ipc_msg_duplex(port, msg->seq);

    switch (msgin->size) {
        case RANGE_SIZE:
            /* Whole range granted: record ourselves as its owner. */
            CONCAT3(add, NS, range)(msgin->base, cur_process.vmid,
                                    qstrgetstr(&cur_process.self->uri),
                                    msgin->lease);
            LEASETYPE * priv = obj ? obj->private : NULL;
            if (priv)
                *priv = msgin->lease;
            break;
        case 1:
            if (obj) {
                /* Single-ID sublease: idx/tenant/uri come from the SUBLEASE
                 * request we originally sent. */
                NS_MSG_TYPE(sublease) * s = (void *) &obj->msg.msg;
                CONCAT3(add, NS, subrange)(s->idx, s->tenant, s->uri,
                                           &msgin->lease);
                LEASETYPE * priv = obj->private;
                if (priv)
                    *priv = msgin->lease;
            }
            break;
        default:
            goto out;
    }

    if (obj && obj->thread)
        thread_wakeup(obj->thread);
out:
    SAVE_PROFILE_INTERVAL(NS_CALLBACK(offer));
    return 0;
}
  1016. DEFINE_PROFILE_INTERVAL(NS_SEND(renew), ipc);
  1017. DEFINE_PROFILE_INTERVAL(NS_CALLBACK(renew), ipc);
  1018. int NS_SEND(renew) (IDTYPE base, IDTYPE size)
  1019. {
  1020. BEGIN_PROFILE_INTERVAL();
  1021. IDTYPE leader;
  1022. struct shim_ipc_port * port = NULL;
  1023. int ret = 0;
  1024. if ((ret = connect_ns(&leader, &port)) < 0)
  1025. goto out;
  1026. struct shim_ipc_msg * msg =
  1027. create_ipc_msg_on_stack(NS_CODE(RENEW),
  1028. sizeof(NS_MSG_TYPE(renew)), leader);
  1029. NS_MSG_TYPE(renew) * msgin = (void *) &msg->msg;
  1030. msgin->base = base;
  1031. msgin->size = size;
  1032. debug("ipc send to : " NS_CODE_STR(RENEW) "(%u, %u)\n", base, size);
  1033. ret = send_ipc_message(msg, port);
  1034. put_ipc_port(port);
  1035. out:
  1036. SAVE_PROFILE_INTERVAL(NS_SEND(renew));
  1037. return ret;
  1038. }
  1039. int NS_CALLBACK(renew) (IPC_CALLBACK_ARGS)
  1040. {
  1041. BEGIN_PROFILE_INTERVAL();
  1042. NS_MSG_TYPE(renew) * msgin = (void *) &msg->msg;
  1043. int ret = 0;
  1044. debug("ipc callback from %u: " NS_CODE_STR(RENEW) "(%u, %u)\n",
  1045. msg->src, msgin->base, msgin->size);
  1046. if (msgin->size != 1 && msgin->size != RANGE_SIZE) {
  1047. ret = -EINVAL;
  1048. goto out;
  1049. }
  1050. LEASETYPE lease = 0;
  1051. switch (msgin->size) {
  1052. case RANGE_SIZE:
  1053. ret = CONCAT3(renew, NS, range) (msgin->base, &lease);
  1054. break;
  1055. case 1:
  1056. ret = CONCAT3(renew, NS, subrange) (msgin->size, &lease);
  1057. break;
  1058. default:
  1059. ret = -EINVAL;
  1060. break;
  1061. }
  1062. if (ret < 0)
  1063. goto out;
  1064. ret = NS_SEND(offer)(port, msg->src, msgin->base, msgin->size, lease,
  1065. msg->seq);
  1066. out:
  1067. SAVE_PROFILE_INTERVAL(NS_CALLBACK(renew));
  1068. return ret;
  1069. }
  1070. DEFINE_PROFILE_INTERVAL(NS_SEND(revoke), ipc);
  1071. DEFINE_PROFILE_INTERVAL(NS_CALLBACK(revoke), ipc);
  1072. int NS_SEND(revoke) (IDTYPE base, IDTYPE size)
  1073. {
  1074. BEGIN_PROFILE_INTERVAL();
  1075. IDTYPE leader;
  1076. struct shim_ipc_port * port = NULL;
  1077. int ret = 0;
  1078. if ((ret = connect_ns(&leader, &port)) < 0)
  1079. goto out;
  1080. struct shim_ipc_msg * msg =
  1081. create_ipc_msg_on_stack(NS_CODE(REVOKE),
  1082. sizeof(NS_MSG_TYPE(revoke)), leader);
  1083. NS_MSG_TYPE(revoke) * msgin = (void *) &msg->msg;
  1084. msgin->base = base;
  1085. msgin->size = size;
  1086. debug("ipc send to %u: " NS_CODE_STR(REVOKE) "(%u, %u)\n",
  1087. leader, base, size);
  1088. ret = send_ipc_message(msg, port);
  1089. put_ipc_port(port);
  1090. out:
  1091. SAVE_PROFILE_INTERVAL(NS_SEND(revoke));
  1092. return ret;
  1093. }
  1094. int NS_CALLBACK(revoke) (IPC_CALLBACK_ARGS)
  1095. {
  1096. BEGIN_PROFILE_INTERVAL();
  1097. NS_MSG_TYPE(revoke) * msgin = (void *) &msg->msg;
  1098. int ret = 0;
  1099. debug("ipc callback from %u: " NS_CODE_STR(REVOKE) "(%u, %u)\n",
  1100. msg->src, msgin->base, msgin->size);
  1101. switch (msgin->size) {
  1102. case RANGE_SIZE:
  1103. ret = CONCAT3(del, NS, range)(msgin->base);
  1104. break;
  1105. case 1:
  1106. ret = CONCAT3(del, NS, subrange)(msgin->size);
  1107. break;
  1108. default:
  1109. ret = -EINVAL;
  1110. break;
  1111. }
  1112. SAVE_PROFILE_INTERVAL(NS_CALLBACK(revoke));
  1113. return ret;
  1114. }
  1115. DEFINE_PROFILE_INTERVAL(NS_SEND(sublease), ipc);
  1116. DEFINE_PROFILE_INTERVAL(NS_CALLBACK(sublease), ipc);
/* Ask the leader to sublease the single ID `idx` to process `tenant`
 * (reachable at `uri`).  If we are the leader, register the subrange
 * locally instead.  On success *lease (if non-NULL) receives the granted
 * lease via the OFFER reply. */
int NS_SEND(sublease) (IDTYPE tenant, IDTYPE idx, const char * uri,
                       LEASETYPE * lease)
{
    BEGIN_PROFILE_INTERVAL();
    IDTYPE leader;
    struct shim_ipc_port * port = NULL;
    int ret = 0;

    if ((ret = connect_ns(&leader, &port)) < 0)
        goto out;

    /* Leader is us: register the subrange directly, no IPC needed. */
    if (leader == cur_process.vmid) {
        ret = CONCAT3(add, NS, subrange)(idx, tenant, uri, NULL);
        goto out;
    }

    int len = strlen(uri);
    struct shim_ipc_msg_obj * msg = create_ipc_msg_duplex_on_stack(
                                        NS_CODE(SUBLEASE),
                                        len + sizeof(NS_MSG_TYPE(sublease)),
                                        leader);
    NS_MSG_TYPE(sublease) * msgin = (void *) &msg->msg.msg;
    msgin->tenant = tenant;
    msgin->idx = idx;
    memcpy(msgin->uri, uri, len + 1);

    debug("ipc send to %u: " NS_CODE_STR(SUBLEASE) "(%u, %u, %s)\n",
          leader, tenant, idx, msgin->uri);

    /* The OFFER callback stores the granted lease through `lease`. */
    ret = do_ipc_duplex(msg, port, NULL, lease);
out:
    if (port)
        put_ipc_port(port);
    SAVE_PROFILE_INTERVAL(NS_SEND(sublease));
    return ret;
}
  1148. int NS_CALLBACK(sublease) (IPC_CALLBACK_ARGS)
  1149. {
  1150. BEGIN_PROFILE_INTERVAL();
  1151. NS_MSG_TYPE(sublease) * msgin = (void *) &msg->msg;
  1152. debug("ipc callback from %u: " NS_CODE_STR(SUBLEASE) "(%u, %u, %s)\n",
  1153. msg->src, msgin->idx, msgin->tenant, msgin->uri);
  1154. LEASETYPE lease = 0;
  1155. int ret = CONCAT3(add, NS, subrange)(msgin->idx, msgin->tenant, msgin->uri,
  1156. &lease);
  1157. ret = NS_SEND(offer)(port, msg->src, msgin->idx, 1, lease, msg->seq);
  1158. SAVE_PROFILE_INTERVAL(NS_CALLBACK(sublease));
  1159. return ret;
  1160. }
  1161. DEFINE_PROFILE_INTERVAL(NS_SEND(query), ipc);
  1162. DEFINE_PROFILE_INTERVAL(NS_CALLBACK(query), ipc);
  1163. int NS_SEND(query) (IDTYPE idx)
  1164. {
  1165. BEGIN_PROFILE_INTERVAL();
  1166. struct CONCAT2(NS, range) range;
  1167. IDTYPE leader;
  1168. struct shim_ipc_port * port = NULL;
  1169. int ret = 0;
  1170. memset(&range, 0, sizeof(struct CONCAT2(NS, range)));
  1171. if (!CONCAT3(get, NS, range)(idx, &range, NULL))
  1172. goto out;
  1173. if ((ret = connect_ns(&leader, &port)) < 0)
  1174. goto out;
  1175. if (cur_process.vmid == leader) {
  1176. ret = -ESRCH;
  1177. goto out;
  1178. }
  1179. struct shim_ipc_msg_obj * msg = create_ipc_msg_duplex_on_stack(
  1180. NS_CODE(QUERY),
  1181. sizeof(NS_MSG_TYPE(query)),
  1182. leader);
  1183. NS_MSG_TYPE(query) * msgin = (void *) &msg->msg.msg;
  1184. msgin->idx = idx;
  1185. debug("ipc send to %u: " NS_CODE_STR(QUERY) "(%u)\n", leader, idx);
  1186. ret = do_ipc_duplex(msg, port, NULL, NULL);
  1187. out:
  1188. if (port)
  1189. put_ipc_port(port);
  1190. SAVE_PROFILE_INTERVAL(NS_SEND(query));
  1191. return ret;
  1192. }
/* Answer a QUERY for a single index: look up the covering range and reply
 * with a one-entry ANSWER carrying the range plus its owner's contact
 * record. */
int NS_CALLBACK(query) (IPC_CALLBACK_ARGS)
{
    BEGIN_PROFILE_INTERVAL();
    NS_MSG_TYPE(query) * msgin = (void *) &msg->msg;

    debug("ipc callback from %u: " NS_CODE_STR(QUERY) "(%u)\n",
          msg->src, msgin->idx);

    struct CONCAT2(NS, range) range;
    int ret = 0;
    memset(&range, 0, sizeof(struct CONCAT2(NS, range)));

    ret = CONCAT3(get, NS, range)(msgin->idx, &range, NULL);
    if (ret < 0)
        goto out;

    assert(msgin->idx >= range.base && msgin->idx < range.base + range.size);
    assert(range.owner);
    assert(!qstrempty(&range.uri));

    /* Build one answer; the owner record is appended right after it in the
     * ANSWER payload (owner_offset rebased by NS_SEND(answer)). */
    struct ipc_ns_offered ans;
    ans.base = range.base;
    ans.size = range.size;
    ans.lease = range.lease;
    ans.owner_offset = 0;

    int ownerdatasz = sizeof(struct ipc_ns_client) + range.uri.len;
    struct ipc_ns_client * owner = __alloca(ownerdatasz);
    owner->vmid = range.owner;
    assert(!qstrempty(&range.uri));
    memcpy(owner->uri, qstrgetstr(&range.uri), range.uri.len + 1);

    ret = NS_SEND(answer)(port, msg->src, 1, &ans, 1, &owner, &ownerdatasz,
                          msg->seq);
out:
    SAVE_PROFILE_INTERVAL(NS_CALLBACK(query));
    return ret;
}
  1224. DEFINE_PROFILE_INTERVAL(NS_SEND(queryall), ipc);
  1225. DEFINE_PROFILE_INTERVAL(NS_CALLBACK(queryall), ipc);
  1226. int NS_SEND(queryall) (void)
  1227. {
  1228. BEGIN_PROFILE_INTERVAL();
  1229. IDTYPE leader;
  1230. struct shim_ipc_port * port = NULL;
  1231. int ret = 0;
  1232. if ((ret = connect_ns(&leader, &port)) < 0)
  1233. goto out;
  1234. if (cur_process.vmid == leader)
  1235. goto out;
  1236. struct shim_ipc_msg_obj * msg = create_ipc_msg_duplex_on_stack(
  1237. NS_CODE(QUERYALL), 0, leader);
  1238. debug("ipc send to %u: " NS_CODE_STR(QUERYALL) "\n", leader);
  1239. ret = do_ipc_duplex(msg, port, NULL, NULL);
  1240. put_ipc_port(port);
  1241. out:
  1242. SAVE_PROFILE_INTERVAL(NS_SEND(queryall));
  1243. return ret;
  1244. }
/* Answer a QUERYALL: walk both the offered and owned range lists (and their
 * subranges) under range_map_lock and report every known allocation back to
 * the requester in a single ANSWER. */
int NS_CALLBACK(queryall) (IPC_CALLBACK_ARGS)
{
    BEGIN_PROFILE_INTERVAL();
    debug("ipc callback from %u: " NS_CODE_STR(QUERYALL) "\n", msg->src);

    LISTP_TYPE(range) * list = &offered_ranges;
    struct range * r;
    int ret;

    lock(&range_map_lock);

    /* Upper bound on how many answers both passes can produce. */
    int maxanswers = nowned + noffered + nsubed;
    int nanswers = 0, nowners = 0, i;
    struct ipc_ns_offered * answers =
        __alloca(sizeof(struct ipc_ns_offered) * maxanswers);
    struct ipc_ns_client ** ownerdata =
        __alloca(sizeof(struct ipc_ns_client *) * maxanswers);
    int * ownerdatasz = __alloca(sizeof(int) * maxanswers);
    int owner_offset = 0;

    /* NOTE(review): the __alloca calls in the loop below keep every owner
     * record on this stack frame until return; with many ranges this could
     * use substantial stack — confirm the bound is acceptable. */
retry:
    listp_for_each_entry (r, list, list) {
        struct shim_ipc_info * p = r->owner;
        int datasz = sizeof(struct ipc_ns_client) + p->uri.len;
        struct ipc_ns_client * owner = __alloca(datasz);
        assert(!qstrempty(&p->uri));
        owner->vmid = p->vmid;
        memcpy(owner->uri, qstrgetstr(&p->uri), p->uri.len + 1);

        /* One answer for the whole range... */
        IDTYPE base = r->offset * RANGE_SIZE + 1;
        answers[nanswers].base = base;
        answers[nanswers].size = RANGE_SIZE;
        answers[nanswers].lease = r->lease;
        answers[nanswers].owner_offset = owner_offset;
        nanswers++;
        ownerdata[nowners] = owner;
        ownerdatasz[nowners] = datasz;
        nowners++;
        owner_offset += datasz;

        if (!r->subranges)
            continue;

        /* ...plus one answer per occupied subrange slot. */
        for (i = 0 ; i < RANGE_SIZE ; i++) {
            if (!r->subranges->map[i])
                continue;

            struct subrange * s = r->subranges->map[i];
            p = s->owner;
            datasz = sizeof(struct ipc_ns_client) + p->uri.len;
            owner = __alloca(datasz);
            assert(!qstrempty(&p->uri));
            owner->vmid = p->vmid;
            memcpy(owner->uri, qstrgetstr(&p->uri), p->uri.len + 1);

            answers[nanswers].base = base + i;
            answers[nanswers].size = 1;
            answers[nanswers].lease = s->lease;
            answers[nanswers].owner_offset = owner_offset;
            nanswers++;
            ownerdata[nowners] = owner;
            ownerdatasz[nowners] = datasz;
            nowners++;
            owner_offset += datasz;
        }
    }

    /* First pass covered offered_ranges; repeat once for owned_ranges. */
    if (list == &offered_ranges) {
        list = &owned_ranges;
        goto retry;
    }

    unlock(&range_map_lock);

    ret = NS_SEND(answer)(port, msg->src, nanswers, answers, nowners,
                          ownerdata, ownerdatasz, msg->seq);

    SAVE_PROFILE_INTERVAL(NS_CALLBACK(queryall));
    return ret;
}
  1312. DEFINE_PROFILE_INTERVAL(NS_SEND(answer), ipc);
  1313. DEFINE_PROFILE_INTERVAL(NS_CALLBACK(answer), ipc);
/* Send an ANSWER message: `nanswers` range descriptors followed by the
 * variable-size owner records they reference.  Each answer's owner_offset
 * is rebased from "offset within the owner blob" to "offset from the start
 * of the message payload" so the receiver can index directly. */
int NS_SEND(answer) (struct shim_ipc_port * port, IDTYPE dest,
                     int nanswers, struct ipc_ns_offered * answers,
                     int nowners, struct ipc_ns_client ** ownerdata,
                     int * ownerdatasz, unsigned long seq)
{
    BEGIN_PROFILE_INTERVAL();

    /* Owner records start right after the fixed header + answer array. */
    int owner_offset = sizeof(NS_MSG_TYPE(answer)) +
                       sizeof(struct ipc_ns_offered) * nanswers;
    int total_ownerdatasz = 0;
    for (int i = 0 ; i < nowners ; i++)
        total_ownerdatasz += ownerdatasz[i];

    struct shim_ipc_msg * msg =
        create_ipc_msg_on_stack(NS_CODE(ANSWER),
                                owner_offset + total_ownerdatasz, dest);
    NS_MSG_TYPE(answer) * msgin = (void *) &msg->msg;
    msgin->nanswers = nanswers;
    for (int i = 0 ; i < nanswers ; i++) {
        msgin->answers[i] = answers[i];
        msgin->answers[i].owner_offset += owner_offset;
    }

    /* Append the owner records; owner_offset doubles as the write cursor. */
    for (int i = 0 ; i < nowners ; i++) {
        memcpy((void *) msgin + owner_offset, ownerdata[i], ownerdatasz[i]);
        owner_offset += ownerdatasz[i];
    }
    msg->seq = seq;

    if (nanswers == 1)
        debug("ipc send to %u: " NS_CODE_STR(ANSWER) "([%u, %u])\n", dest,
              answers[0].base, answers[0].size);
    else if (nanswers)
        debug("ipc send to %u: " NS_CODE_STR(ANSWER) "([%u, %u], ...)\n", dest,
              answers[0].base, answers[0].size);

    int ret = send_ipc_message(msg, port);
    SAVE_PROFILE_INTERVAL(NS_SEND(answer));
    return ret;
}
  1349. int NS_CALLBACK(answer) (IPC_CALLBACK_ARGS)
  1350. {
  1351. BEGIN_PROFILE_INTERVAL();
  1352. NS_MSG_TYPE(answer) * msgin = (void *) &msg->msg;
  1353. if (msgin->nanswers == 1)
  1354. debug("ipc callback from %u: " NS_CODE_STR(ANSWER) "([%u, %u])\n",
  1355. msg->src, msgin->answers[0].base, msgin->answers[0].size);
  1356. else if (msgin->nanswers)
  1357. debug("ipc callback from %u: " NS_CODE_STR(ANSWER) "([%u, %u], ...)\n",
  1358. msg->src, msgin->answers[0].base, msgin->answers[0].size);
  1359. for (int i = 0 ; i < msgin->nanswers ; i++) {
  1360. struct ipc_ns_offered * ans = &msgin->answers[i];
  1361. struct ipc_ns_client * owner = (void *) msgin + ans->owner_offset;
  1362. switch (ans->size) {
  1363. case RANGE_SIZE:
  1364. CONCAT3(add, NS, range)(ans->base, owner->vmid, owner->uri,
  1365. ans->lease);
  1366. break;
  1367. case 1:
  1368. CONCAT3(add, NS, subrange)(ans->base, owner->vmid, owner->uri,
  1369. &ans->lease);
  1370. break;
  1371. default:
  1372. break;
  1373. }
  1374. }
  1375. struct shim_ipc_msg_obj * obj = find_ipc_msg_duplex(port, msg->seq);
  1376. if (obj && obj->thread)
  1377. thread_wakeup(obj->thread);
  1378. SAVE_PROFILE_INTERVAL(NS_CALLBACK(answer));
  1379. return 0;
  1380. }
#ifdef NS_KEY
/* Optional key-to-id map (e.g. SysV IPC keys): a small fixed-size hash
 * table of (NS_KEY, IDTYPE) pairs, protected by range_map_lock. */
#define KEY_HASH_LEN  8
#define KEY_HASH_NUM  (1 << KEY_HASH_LEN)
#define KEY_HASH_MASK (KEY_HASH_NUM - 1)

DEFINE_LIST(key);
struct key {
    NS_KEY         key;    /* namespace-specific key value */
    IDTYPE         id;     /* id this key maps to */
    LIST_TYPE(key) hlist;  /* chain within one hash bucket */
};
DEFINE_LISTP(key);
static LISTP_TYPE(key) key_map [KEY_HASH_NUM];
  1393. int CONCAT2(NS, add_key) (NS_KEY * key, IDTYPE id)
  1394. {
  1395. LISTP_TYPE(key) * head = &key_map[KEY_HASH(key) & KEY_HASH_MASK];
  1396. struct key * k;
  1397. int ret = -EEXIST;
  1398. lock(&range_map_lock);
  1399. listp_for_each_entry(k, head, hlist)
  1400. if (!KEY_COMP(&k->key, key))
  1401. goto out;
  1402. k = malloc(sizeof(struct key));
  1403. if (!k) {
  1404. ret = -ENOMEM;
  1405. goto out;
  1406. }
  1407. KEY_COPY(&k->key, key);
  1408. k->id = id;
  1409. INIT_LIST_HEAD(k, hlist);
  1410. listp_add(k, head, hlist);
  1411. debug("add key/id pair (%lu, %u) to hash list: %p\n",
  1412. KEY_HASH(key), id, head);
  1413. ret = 0;
  1414. out:
  1415. unlock(&range_map_lock);
  1416. return ret;
  1417. }
  1418. int CONCAT2(NS, get_key) (NS_KEY * key, bool delete)
  1419. {
  1420. LISTP_TYPE(key) * head = &key_map[KEY_HASH(key) & KEY_HASH_MASK];
  1421. struct key * k;
  1422. int id = -ENOENT;
  1423. lock(&range_map_lock);
  1424. listp_for_each_entry(k, head, hlist)
  1425. if (!KEY_COMP(&k->key, key)) {
  1426. id = k->id;
  1427. if (delete) {
  1428. listp_del(k, head, hlist);
  1429. free(k);
  1430. }
  1431. break;
  1432. }
  1433. unlock(&range_map_lock);
  1434. return id;
  1435. }
  1436. DEFINE_PROFILE_INTERVAL(NS_SEND(findkey), ipc);
  1437. DEFINE_PROFILE_INTERVAL(NS_CALLBACK(findkey), ipc);
  1438. int NS_SEND(findkey) (NS_KEY * key)
  1439. {
  1440. BEGIN_PROFILE_INTERVAL();
  1441. int ret = 0;
  1442. ret = CONCAT2(NS, get_key) (key, false);
  1443. if (!ret)
  1444. goto out;
  1445. IDTYPE dest;
  1446. struct shim_ipc_port * port = NULL;
  1447. if ((ret = connect_ns(&dest, &port)) < 0)
  1448. goto out;
  1449. if (dest == cur_process.vmid) {
  1450. ret = -ENOENT;
  1451. goto out;
  1452. }
  1453. struct shim_ipc_msg_obj * msg = create_ipc_msg_duplex_on_stack(
  1454. NS_CODE(FINDKEY),
  1455. sizeof(NS_MSG_TYPE(findkey)),
  1456. dest);
  1457. NS_MSG_TYPE(findkey) * msgin = (void *) &msg->msg.msg;
  1458. KEY_COPY(&msgin->key, key);
  1459. debug("ipc send to %u: " NS_CODE_STR(FINDKEY) "(%lu)\n",
  1460. dest, KEY_HASH(key));
  1461. ret = do_ipc_duplex(msg, port, NULL, NULL);
  1462. put_ipc_port(port);
  1463. if (!ret)
  1464. ret = CONCAT2(NS, get_key) (key, false);
  1465. out:
  1466. SAVE_PROFILE_INTERVAL(NS_SEND(findkey));
  1467. return ret;
  1468. }
  1469. int NS_CALLBACK(findkey) (IPC_CALLBACK_ARGS)
  1470. {
  1471. BEGIN_PROFILE_INTERVAL();
  1472. int ret = 0;
  1473. NS_MSG_TYPE(findkey) * msgin = (void *) &msg->msg;
  1474. debug("ipc callback from %u: " NS_CODE_STR(FINDKEY) "(%lu)\n",
  1475. msg->src, KEY_HASH(&msgin->key));
  1476. ret = CONCAT2(NS, get_key)(&msgin->key, false);
  1477. if (ret < 0)
  1478. goto out;
  1479. ret = NS_SEND(tellkey)(port, msg->src, &msgin->key, ret, msg->seq);
  1480. out:
  1481. SAVE_PROFILE_INTERVAL(NS_CALLBACK(findkey));
  1482. return ret;
  1483. }
  1484. DEFINE_PROFILE_INTERVAL(NS_SEND(tellkey), ipc);
  1485. DEFINE_PROFILE_INTERVAL(NS_CALLBACK(tellkey), ipc);
  1486. int NS_SEND(tellkey) (struct shim_ipc_port * port, IDTYPE dest, NS_KEY * key,
  1487. IDTYPE id, unsigned long seq)
  1488. {
  1489. BEGIN_PROFILE_INTERVAL();
  1490. bool owned = true;
  1491. int ret = 0;
  1492. if (!dest) {
  1493. if ((ret = CONCAT2(NS, add_key)(key, id)) < 0)
  1494. goto out;
  1495. if ((ret = connect_ns(&dest, &port)) < 0)
  1496. goto out;
  1497. if (dest == cur_process.vmid)
  1498. goto out;
  1499. owned = false;
  1500. }
  1501. if (owned) {
  1502. struct shim_ipc_msg * msg = create_ipc_msg_on_stack(
  1503. NS_CODE(TELLKEY),
  1504. sizeof(NS_MSG_TYPE(tellkey)),
  1505. dest);
  1506. NS_MSG_TYPE(tellkey) * msgin = (void *) &msg->msg;
  1507. KEY_COPY(&msgin->key, key);
  1508. msgin->id = id;
  1509. msg->seq = seq;
  1510. debug("ipc send to %u: IPC_SYSV_TELLKEY(%lu, %u)\n", dest,
  1511. KEY_HASH(key), id);
  1512. ret = send_ipc_message(msg, port);
  1513. goto out;
  1514. }
  1515. struct shim_ipc_msg_obj * msg = create_ipc_msg_duplex_on_stack(
  1516. NS_CODE(TELLKEY),
  1517. sizeof(NS_MSG_TYPE(tellkey)),
  1518. dest);
  1519. NS_MSG_TYPE(tellkey) * msgin = (void *) &msg->msg.msg;
  1520. KEY_COPY(&msgin->key, key);
  1521. msgin->id = id;
  1522. debug("ipc send to %u: IPC_SYSV_TELLKEY(%lu, %u)\n", dest,
  1523. KEY_HASH(key), id);
  1524. ret = do_ipc_duplex(msg, port, NULL, NULL);
  1525. put_ipc_port(port);
  1526. out:
  1527. SAVE_PROFILE_INTERVAL(NS_SEND(tellkey));
  1528. return ret;
  1529. }
  1530. int NS_CALLBACK(tellkey) (IPC_CALLBACK_ARGS)
  1531. {
  1532. BEGIN_PROFILE_INTERVAL();
  1533. int ret = 0;
  1534. NS_MSG_TYPE(tellkey) * msgin = (void *) &msg->msg;
  1535. debug("ipc callback from %u: " NS_CODE_STR(TELLKEY) "(%lu, %u)\n",
  1536. msg->src, KEY_HASH(&msgin->key), msgin->id);
  1537. ret = CONCAT2(NS, add_key)(&msgin->key, msgin->id);
  1538. struct shim_ipc_msg_obj * obj = find_ipc_msg_duplex(port, msg->seq);
  1539. if (!obj) {
  1540. ret = RESPONSE_CALLBACK;
  1541. goto out;
  1542. }
  1543. if (obj->thread)
  1544. thread_wakeup(obj->thread);
  1545. out:
  1546. SAVE_PROFILE_INTERVAL(ipc_sysv_tellkey_callback);
  1547. return ret;
  1548. }
  1549. #endif /* NS_KEY */