/* Copyright (C) 2014 Stony Brook University
   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */

/*!
 * \file shim_init.c
 *
 * This file contains the entry and exit functions of the library OS.
 */
#include <shim_internal.h>
#include <shim_table.h>
#include <shim_tls.h>
#include <shim_thread.h>
#include <shim_handle.h>
#include <shim_vma.h>
#include <shim_checkpoint.h>
#include <shim_fs.h>
#include <shim_ipc.h>
#include <shim_profile.h>
#include <shim_vdso.h>

#include <pal.h>
#include <pal_debug.h>
#include <pal_error.h>
#include <sys/mman.h>
#include <asm/unistd.h>
#include <asm/fcntl.h>

size_t g_pal_alloc_align;

/* The following constants help match the glibc version with compatible
 * SHIM libraries. */
#include "glibc-version.h"
const unsigned int glibc_version = GLIBC_VERSION;
static void handle_failure (PAL_PTR event, PAL_NUM arg, PAL_CONTEXT * context)
{
    __UNUSED(event);
    __UNUSED(context);
    if ((arg <= PAL_ERROR_NATIVE_COUNT) || (arg >= PAL_ERROR_CRYPTO_START &&
                                            arg <= PAL_ERROR_CRYPTO_END))
        shim_get_tls()->pal_errno = arg;
    else
        shim_get_tls()->pal_errno = PAL_ERROR_DENIED;
}
noreturn void __abort(void) {
    PAUSE();
    shim_terminate(-ENOTRECOVERABLE);
}

void warn (const char *format, ...)
{
    va_list args;
    va_start(args, format);
    __SYS_VPRINTF(format, args);
    va_end(args);
}

void __stack_chk_fail (void)
{
}
static int pal_errno_to_unix_errno [PAL_ERROR_NATIVE_COUNT + 1] = {
    /* reserved                  */  0,
    /* PAL_ERROR_NOTIMPLEMENTED  */  ENOSYS,
    /* PAL_ERROR_NOTDEFINED      */  ENOSYS,
    /* PAL_ERROR_NOTSUPPORT      */  EACCES,
    /* PAL_ERROR_INVAL           */  EINVAL,
    /* PAL_ERROR_TOOLONG         */  ENAMETOOLONG,
    /* PAL_ERROR_DENIED          */  EACCES,
    /* PAL_ERROR_BADHANDLE       */  EFAULT,
    /* PAL_ERROR_STREAMEXIST     */  EEXIST,
    /* PAL_ERROR_STREAMNOTEXIST  */  ENOENT,
    /* PAL_ERROR_STREAMISFILE    */  ENOTDIR,
    /* PAL_ERROR_STREAMISDIR     */  EISDIR,
    /* PAL_ERROR_STREAMISDEVICE  */  ESPIPE,
    /* PAL_ERROR_INTERRUPTED     */  EINTR,
    /* PAL_ERROR_OVERFLOW        */  EFAULT,
    /* PAL_ERROR_BADADDR         */  EFAULT,
    /* PAL_ERROR_NOMEM           */  ENOMEM,
    /* PAL_ERROR_NOTKILLABLE     */  EACCES,
    /* PAL_ERROR_INCONSIST       */  EFAULT,
    /* PAL_ERROR_TRYAGAIN        */  EAGAIN,
    /* PAL_ERROR_ENDOFSTREAM     */  0,
    /* PAL_ERROR_NOTSERVER       */  EINVAL,
    /* PAL_ERROR_NOTCONNECTION   */  ENOTCONN,
    /* PAL_ERROR_ZEROSIZE        */  0,
    /* PAL_ERROR_CONNFAILED      */  ECONNRESET,
    /* PAL_ERROR_ADDRNOTEXIST    */  EADDRNOTAVAIL,
};

long convert_pal_errno (long err)
{
    return (err >= 0 && err <= PAL_ERROR_NATIVE_COUNT) ?
           pal_errno_to_unix_errno[err] : EACCES;
}
/*!
 * \brief Parse a number into an unsigned long.
 *
 * \param str A string containing a non-negative number.
 *
 * By default the number is parsed as decimal, but if it starts with 0x it is
 * parsed as hexadecimal, and if it otherwise starts with 0, it is parsed as
 * octal. A trailing K/M/G suffix (upper or lower case) multiplies the result
 * by 2^10, 2^20, or 2^30 respectively.
 */
unsigned long parse_int (const char * str)
{
    unsigned long num = 0;
    int radix = 10;
    char c;

    if (str[0] == '0') {
        str++;
        radix = 8;
        if (str[0] == 'x') {
            str++;
            radix = 16;
        }
    }

    while ((c = *(str++))) {
        int val;
        if (c >= 'A' && c <= 'F')
            val = c - 'A' + 10;
        else if (c >= 'a' && c <= 'f')
            val = c - 'a' + 10;
        else if (c >= '0' && c <= '9')
            val = c - '0';
        else
            break;
        if (val >= radix)
            break;
        num = num * radix + val;
    }

    if (c == 'G' || c == 'g')
        num *= 1024 * 1024 * 1024;
    else if (c == 'M' || c == 'm')
        num *= 1024 * 1024;
    else if (c == 'K' || c == 'k')
        num *= 1024;

    return num;
}
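/* Illustrative usage (not part of the original source; values follow directly
 * from the parsing logic above):
 *
 *   unsigned long a = parse_int("0x2000");  // 8192 (hexadecimal)
 *   unsigned long b = parse_int("4K");      // 4 * 1024 = 4096
 *   unsigned long c = parse_int("64M");     // 64 * 1024 * 1024
 *
 * Parsing stops at the first character that is not a valid digit for the
 * detected radix. */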
long int glibc_option (const char * opt)
{
    char cfg[CONFIG_MAX];

    if (!strcmp_static(opt, "heap_size")) {
        ssize_t ret = get_config(root_config, "glibc.heap_size", cfg, CONFIG_MAX);
        if (ret <= 0) {
            debug("no glibc option: %s (err=%ld)\n", opt, ret);
            return -ENOENT;
        }

        long int heap_size = parse_int(cfg);
        debug("glibc option: heap_size = %ld\n", heap_size);
        return (long int) heap_size;
    }

    return -EINVAL;
}
void * migrated_memory_start;
void * migrated_memory_end;

const char ** initial_envp __attribute_migratable;

/* library_paths is populated with LD_LIBRARY_PATH entries once during LibOS
 * initialization and is used in __load_interp_object() to search for the ELF
 * program interpreter in specific paths. Once allocated, its memory is
 * never freed or updated. */
char ** library_paths = NULL;

struct shim_lock __master_lock;
bool lock_enabled;

void init_tcb (shim_tcb_t * tcb)
{
    tcb->canary = SHIM_TLS_CANARY;
    tcb->self = tcb;
}
void copy_tcb (shim_tcb_t * new_tcb, const shim_tcb_t * old_tcb)
{
    memset(new_tcb, 0, sizeof(shim_tcb_t));
    new_tcb->canary = SHIM_TLS_CANARY;
    new_tcb->self = new_tcb;
    new_tcb->tp   = old_tcb->tp;
    memcpy(&new_tcb->context, &old_tcb->context, sizeof(struct shim_context));
    new_tcb->tid  = old_tcb->tid;
    new_tcb->debug_buf = old_tcb->debug_buf;
}
/* This function is used to allocate the TLS before the interpreter starts
 * running. */
void allocate_tls (__libc_tcb_t * tcb, bool user, struct shim_thread * thread)
{
    assert(tcb);
    tcb->tcb = tcb;
    init_tcb(&tcb->shim_tcb);

    if (thread) {
        thread->tcb = tcb;
        thread->user_tcb = user;
        tcb->shim_tcb.tp  = thread;
        tcb->shim_tcb.tid = thread->tid;
    } else {
        tcb->shim_tcb.tp  = NULL;
        tcb->shim_tcb.tid = 0;
    }

    DkSegmentRegister(PAL_SEGMENT_FS, tcb);
    assert(shim_tls_check_canary());
}
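/* populate_tls installs an already-initialized TCB for the current thread: it
 * copies the currently active shim TCB into the new __libc_tcb_t, points the
 * owning thread (if any) at it, and switches the FS segment register to it. */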
void populate_tls (__libc_tcb_t * tcb, bool user)
{
    assert(tcb);
    tcb->tcb = tcb;
    copy_tcb(&tcb->shim_tcb, shim_get_tls());

    struct shim_thread * thread = (struct shim_thread *) tcb->shim_tcb.tp;
    if (thread) {
        thread->tcb = tcb;
        thread->user_tcb = user;
    }

    DkSegmentRegister(PAL_SEGMENT_FS, tcb);
    assert(shim_tls_check_canary());
}
DEFINE_PROFILE_OCCURENCE(alloc_stack, memory);
DEFINE_PROFILE_OCCURENCE(alloc_stack_count, memory);

#define STACK_FLAGS     (MAP_PRIVATE|MAP_ANONYMOUS)

void * allocate_stack (size_t size, size_t protect_size, bool user)
{
    size = PAGE_ALIGN_UP(size);
    protect_size = PAGE_ALIGN_UP(protect_size);

    /* preserve a non-readable, non-writable page below the user stack to
     * keep the user program from clobbering other VMAs */
    void * stack = NULL;
    int flags = STACK_FLAGS|(user ? 0 : VMA_INTERNAL);

    if (user) {
        stack = bkeep_unmapped_heap(size + protect_size, PROT_NONE,
                                    flags, NULL, 0, "stack");
        if (!stack)
            return NULL;

        stack = (void *)
            DkVirtualMemoryAlloc(stack, size + protect_size,
                                 0, PAL_PROT_NONE);
    } else {
        stack = system_malloc(size + protect_size);
    }

    if (!stack)
        return NULL;

    ADD_PROFILE_OCCURENCE(alloc_stack, size + protect_size);
    INC_PROFILE_OCCURENCE(alloc_stack_count);

    stack += protect_size;
    // Ensure proper alignment for the process' initial stack pointer value.
    stack = ALIGN_UP_PTR(stack, 16);
    DkVirtualMemoryProtect(stack, size, PAL_PROT_READ|PAL_PROT_WRITE);

    if (bkeep_mprotect(stack, size, PROT_READ|PROT_WRITE, flags) < 0)
        return NULL;

    debug("allocated stack at %p (size = %ld)\n", stack, size);
    return stack;
}
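/* populate_user_stack lays out the initial user stack following the System V
 * AMD64 convention: argc at the (16-byte aligned) final stack pointer,
 * followed by the argv pointer array, the envp pointer array, and reserved
 * space for the ELF auxiliary vector, with the pointed-to strings copied to
 * the top of the stack region. On success, *argcpp, *argvp, *envpp, and
 * *auxpp are updated to their final locations inside the stack. */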
static int populate_user_stack (void * stack, size_t stack_size,
                                elf_auxv_t ** auxpp, int ** argcpp,
                                const char *** argvp, const char *** envpp)
{
    const int argc = **argcpp;
    const char ** argv = *argvp, ** envp = *envpp;
    const char ** new_argv = NULL, ** new_envp = NULL;
    elf_auxv_t * new_auxp = NULL;
    void * stack_bottom = stack;
    void * stack_top = stack + stack_size;

#define ALLOCATE_TOP(size)                                              \
    ({ if ((stack_top -= (size)) < stack_bottom) return -ENOMEM;       \
       stack_top; })

#define ALLOCATE_BOTTOM(size)                                           \
    ({ if ((stack_bottom += (size)) > stack_top) return -ENOMEM;       \
       stack_bottom - (size); })

    /* ld.so expects argc as long on stack, not int. */
    long * argcp = ALLOCATE_BOTTOM(sizeof(long));
    *argcp = **argcpp;

    if (!argv) {
        *(const char **) ALLOCATE_BOTTOM(sizeof(const char *)) = NULL;
        goto copy_envp;
    }

    new_argv = stack_bottom;
    while (argv) {
        for (const char ** a = argv ; *a ; a++) {
            const char ** t = ALLOCATE_BOTTOM(sizeof(const char *));
            int len = strlen(*a) + 1;
            char * abuf = ALLOCATE_TOP(len);
            memcpy(abuf, *a, len);
            *t = abuf;
        }

        *((const char **) ALLOCATE_BOTTOM(sizeof(const char *))) = NULL;
copy_envp:
        if (!envp)
            break;
        new_envp = stack_bottom;
        argv = envp;
        envp = NULL;
    }

    if (!new_envp)
        *(const char **) ALLOCATE_BOTTOM(sizeof(const char *)) = NULL;

    /* reserve space for ELF aux vectors, populated later by LibOS */
    new_auxp = ALLOCATE_BOTTOM(REQUIRED_ELF_AUXV * sizeof(elf_auxv_t) +
                               REQUIRED_ELF_AUXV_SPACE);

    /* The x86_64 ABI requires 16-byte stack alignment at every function
     * call. */
    size_t move_size = stack_bottom - stack;
    *argcpp = stack_top - move_size;
    *argcpp = ALIGN_DOWN_PTR(*argcpp, 16UL);
    **argcpp = argc;
    size_t shift = (void*)(*argcpp) - stack;

    memmove(*argcpp, stack, move_size);
    *argvp = new_argv ? (void *) new_argv + shift : NULL;
    *envpp = new_envp ? (void *) new_envp + shift : NULL;
    *auxpp = new_auxp ? (void *) new_auxp + shift : NULL;

    /* clear the working area at the bottom */
    memset(stack, 0, shift);
    return 0;
}
int init_stack (const char ** argv, const char ** envp,
                int ** argcpp, const char *** argpp,
                elf_auxv_t ** auxpp, size_t reserve)
{
    uint64_t stack_size = get_rlimit_cur(RLIMIT_STACK);

    if (root_config) {
        char stack_cfg[CONFIG_MAX];
        if (get_config(root_config, "sys.stack.size", stack_cfg, CONFIG_MAX) > 0) {
            stack_size = PAGE_ALIGN_UP(parse_int(stack_cfg));
            set_rlimit_cur(RLIMIT_STACK, stack_size);
        }
    }

    struct shim_thread * cur_thread = get_cur_thread();
    if (!cur_thread || cur_thread->stack)
        return 0;

    void * stack = allocate_stack(stack_size, g_pal_alloc_align, true);
    if (!stack)
        return -ENOMEM;

    if (initial_envp)
        envp = initial_envp;

    int ret = populate_user_stack(stack, stack_size - reserve,
                                  auxpp, argcpp, &argv, &envp);
    if (ret < 0)
        return ret;

    *argpp = argv;
    initial_envp = envp;

    cur_thread->stack_top = stack + stack_size;
    cur_thread->stack     = stack;
    cur_thread->stack_red = stack - g_pal_alloc_align;
    return 0;
}
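/* read_environs scans the initial environment once; currently it only looks
 * for LD_LIBRARY_PATH and splits its value on ':' into the NULL-terminated
 * library_paths array used when searching for the ELF program interpreter. */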
int read_environs (const char ** envp)
{
    for (const char ** e = envp ; *e ; e++) {
        if (strstartswith_static(*e, "LD_LIBRARY_PATH=")) {
            /* populate library_paths with entries from the LD_LIBRARY_PATH
             * environment variable */
            const char * s = *e + static_strlen("LD_LIBRARY_PATH=");
            size_t npaths = 2; // one for the first entry, one for the
                               // terminating NULL
            for (const char * tmp = s ; *tmp ; tmp++)
                if (*tmp == ':')
                    npaths++;
            char ** paths = malloc(sizeof(const char *) * npaths);
            if (!paths)
                return -ENOMEM;

            size_t cnt = 0;
            while (*s) {
                const char * next;
                for (next = s ; *next && *next != ':' ; next++);
                size_t len = next - s;
                char * str = malloc(len + 1);
                if (!str) {
                    for (size_t i = 0; i < cnt; i++)
                        free(paths[i]);
                    free(paths);
                    return -ENOMEM;
                }
                memcpy(str, s, len);
                str[len] = 0;
                paths[cnt++] = str;
                s = *next ? next + 1 : next;
            }

            paths[cnt] = NULL;

            assert(!library_paths);
            library_paths = paths;
            return 0;
        }
    }

    return 0;
}
struct config_store * root_config = NULL;

static void * __malloc (size_t size)
{
    return malloc(size);
}

static void __free (void * mem)
{
    free(mem);
}
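/* init_manifest makes the manifest contents available to the LibOS: it either
 * reuses the manifest region preloaded by the PAL (manifest_preload) or maps
 * the manifest handle read-only, then parses it into a config_store and
 * publishes it as root_config. If parsing fails, the mapping is torn down. */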
int init_manifest (PAL_HANDLE manifest_handle)
{
    int ret = 0;
    void * addr = NULL;
    size_t size = 0, map_size = 0;

#define MAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS|VMA_INTERNAL)

    if (PAL_CB(manifest_preload.start)) {
        addr = PAL_CB(manifest_preload.start);
        size = PAL_CB(manifest_preload.end) - PAL_CB(manifest_preload.start);
    } else {
        PAL_STREAM_ATTR attr;
        if (!DkStreamAttributesQueryByHandle(manifest_handle, &attr))
            return -PAL_ERRNO;

        size = attr.pending_size;
        map_size = PAGE_ALIGN_UP(size);
        addr = bkeep_unmapped_any(map_size, PROT_READ, MAP_FLAGS,
                                  0, "manifest");
        if (!addr)
            return -ENOMEM;

        void * ret_addr = DkStreamMap(manifest_handle, addr, PAL_PROT_READ, 0,
                                      PAGE_ALIGN_UP(size));

        if (!ret_addr) {
            bkeep_munmap(addr, map_size, MAP_FLAGS);
            return -ENOMEM;
        } else {
            assert(addr == ret_addr);
        }
    }

    struct config_store * new_root_config = malloc(sizeof(struct config_store));
    if (!new_root_config) {
        ret = -ENOMEM;
        goto fail;
    }

    new_root_config->raw_data = addr;
    new_root_config->raw_size = size;
    new_root_config->malloc   = __malloc;
    new_root_config->free     = __free;

    const char * errstring = "Unexpected error";

    if ((ret = read_config(new_root_config, NULL, &errstring)) < 0) {
        SYS_PRINTF("Unable to read manifest file: %s\n", errstring);
        goto fail;
    }

    root_config = new_root_config;
    return 0;

fail:
    if (map_size) {
        DkStreamUnmap(addr, map_size);
        if (bkeep_munmap(addr, map_size, MAP_FLAGS) < 0)
            BUG();
    }
    free(new_root_config);
    return ret;
}
#ifdef PROFILE
struct shim_profile profile_root;
#endif
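/* FIND_ARG_COMPONENTS decodes the argument block handed over by the PAL:
 * starting at `cookie`, it expects argc pointer-sized slots for argv plus a
 * NULL terminator, then the envp array up to its NULL terminator, and the
 * ELF auxiliary vector immediately after that. */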
# define FIND_ARG_COMPONENTS(cookie, argc, argv, envp, auxp)       \
    do {                                                            \
        void *_tmp = (cookie);                                      \
        (argv) = _tmp;                                              \
        _tmp += sizeof(char *) * ((argc) + 1);                      \
        (envp) = _tmp;                                              \
        for ( ; *(char **) _tmp; _tmp += sizeof(char *));           \
        (auxp) = _tmp + sizeof(char *);                             \
    } while (0)
#ifdef PROFILE
static void set_profile_enabled (const char ** envp)
{
    const char ** p;
    for (p = envp ; (*p) ; p++)
        if (strstartswith_static(*p, "PROFILE_ENABLED="))
            break;
    if (!(*p))
        return;

    for (size_t i = 0 ; i < N_PROFILE ; i++)
        PROFILES[i].disabled = true;

    const char * str = (*p) + 16;
    bool enabled = false;
    while (*str) {
        const char * next = str;
        for ( ; (*next) && (*next) != ',' ; next++);
        if (next > str) {
            size_t len = next - str;
            for (size_t i = 0 ; i < N_PROFILE ; i++) {
                struct shim_profile * profile = &PROFILES[i];
                if (!memcmp(profile->name, str, len) && !profile->name[len]) {
                    profile->disabled = false;
                    if (profile->type == CATEGORY)
                        enabled = true;
                }
            }
        }
        str = (*next) ? next + 1 : next;
    }

    while (enabled) {
        enabled = false;
        for (size_t i = 0 ; i < N_PROFILE ; i++) {
            struct shim_profile * profile = &PROFILES[i];
            if (!profile->disabled || profile->root == &profile_)
                continue;
            if (!profile->root->disabled) {
                profile->disabled = false;
                if (profile->type == CATEGORY)
                    enabled = true;
            }
        }
    }

    for (size_t i = 0 ; i < N_PROFILE ; i++) {
        struct shim_profile * profile = &PROFILES[i];
        if (profile->type == CATEGORY || profile->disabled)
            continue;
        for (profile = profile->root ;
             profile != &profile_ && profile->disabled ;
             profile = profile->root)
            profile->disabled = false;
    }
}
#endif
static int init_newproc (struct newproc_header * hdr)
{
    BEGIN_PROFILE_INTERVAL();

    int bytes = DkStreamRead(PAL_CB(parent_process), 0,
                             sizeof(struct newproc_header), hdr,
                             NULL, 0);
    if (!bytes)
        return -PAL_ERRNO;

    SAVE_PROFILE_INTERVAL(child_wait_header);
    SAVE_PROFILE_INTERVAL_SINCE(child_receive_header, hdr->write_proc_time);
    return hdr->failure;
}
DEFINE_PROFILE_CATEGORY(pal, );
DEFINE_PROFILE_INTERVAL(pal_startup_time,               pal);
DEFINE_PROFILE_INTERVAL(pal_host_specific_startup_time, pal);
DEFINE_PROFILE_INTERVAL(pal_relocation_time,            pal);
DEFINE_PROFILE_INTERVAL(pal_linking_time,               pal);
DEFINE_PROFILE_INTERVAL(pal_manifest_loading_time,      pal);
DEFINE_PROFILE_INTERVAL(pal_allocation_time,            pal);
DEFINE_PROFILE_INTERVAL(pal_tail_startup_time,          pal);
DEFINE_PROFILE_INTERVAL(pal_child_creation_time,        pal);

DEFINE_PROFILE_CATEGORY(init, );
DEFINE_PROFILE_INTERVAL(init_vma,                  init);
DEFINE_PROFILE_INTERVAL(init_slab,                 init);
DEFINE_PROFILE_INTERVAL(init_str_mgr,              init);
DEFINE_PROFILE_INTERVAL(init_internal_map,         init);
DEFINE_PROFILE_INTERVAL(init_rlimit,               init);
DEFINE_PROFILE_INTERVAL(init_fs,                   init);
DEFINE_PROFILE_INTERVAL(init_dcache,               init);
DEFINE_PROFILE_INTERVAL(init_handle,               init);
DEFINE_PROFILE_INTERVAL(read_from_checkpoint,      init);
DEFINE_PROFILE_INTERVAL(read_from_file,            init);
DEFINE_PROFILE_INTERVAL(init_newproc,              init);
DEFINE_PROFILE_INTERVAL(init_mount_root,           init);
DEFINE_PROFILE_INTERVAL(init_from_checkpoint_file, init);
DEFINE_PROFILE_INTERVAL(restore_from_file,         init);
DEFINE_PROFILE_INTERVAL(init_manifest,             init);
DEFINE_PROFILE_INTERVAL(init_ipc,                  init);
DEFINE_PROFILE_INTERVAL(init_thread,               init);
DEFINE_PROFILE_INTERVAL(init_important_handles,    init);
DEFINE_PROFILE_INTERVAL(init_mount,                init);
DEFINE_PROFILE_INTERVAL(init_async,                init);
DEFINE_PROFILE_INTERVAL(init_stack,                init);
DEFINE_PROFILE_INTERVAL(read_environs,             init);
DEFINE_PROFILE_INTERVAL(init_loader,               init);
DEFINE_PROFILE_INTERVAL(init_ipc_helper,           init);
DEFINE_PROFILE_INTERVAL(init_signal,               init);
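/* RUN_INIT wraps each initialization step in shim_init(): if a step returns a
 * negative error code, the whole process terminates immediately; otherwise
 * the elapsed time is recorded under the step's profile interval. */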
#define CALL_INIT(func, args ...)   func(args)

#define RUN_INIT(func, ...)                                             \
    do {                                                                \
        int _err = CALL_INIT(func, ##__VA_ARGS__);                      \
        if (_err < 0) {                                                 \
            SYS_PRINTF("shim_init() in " #func " (%d)\n", _err);        \
            shim_terminate(_err);                                       \
        }                                                               \
        SAVE_PROFILE_INTERVAL(func);                                    \
    } while (0)
extern PAL_HANDLE thread_start_event;

noreturn void* shim_init (int argc, void * args)
{
    debug_handle = PAL_CB(debug_stream);
    cur_process.vmid = (IDTYPE) PAL_CB(process_id);

    /* create the initial TCB; the shim cannot run without a TCB */
    __libc_tcb_t tcb;
    memset(&tcb, 0, sizeof(__libc_tcb_t));
    allocate_tls(&tcb, false, NULL);
    __disable_preempt(&tcb.shim_tcb); // Temporarily disable preemption to delay
                                      // any signal that arrives during initialization
    debug_setbuf(&tcb.shim_tcb, true);
    debug("set tcb to %p\n", &tcb);

#ifdef PROFILE
    unsigned long begin_time = GET_PROFILE_INTERVAL();
#endif

    debug("host: %s\n", PAL_CB(host_type));

    DkSetExceptionHandler(&handle_failure, PAL_EVENT_FAILURE);

    g_pal_alloc_align = PAL_CB(alloc_align);
    if (!IS_POWER_OF_2(g_pal_alloc_align)) {
        SYS_PRINTF("shim_init(): error: PAL allocation alignment not a power of 2\n");
        shim_terminate(-EINVAL);
    }

    create_lock(&__master_lock);

    int * argcp = &argc;
    const char ** argv, ** envp, ** argp = NULL;
    elf_auxv_t * auxp;

    /* call to figure out where the arguments are */
    FIND_ARG_COMPONENTS(args, argc, argv, envp, auxp);

#ifdef PROFILE
    set_profile_enabled(envp);
#endif

    struct newproc_header hdr;
    void * cpaddr = NULL;
#ifdef PROFILE
    unsigned long begin_create_time = 0;
#endif

    BEGIN_PROFILE_INTERVAL();
    RUN_INIT(init_vma);
    RUN_INIT(init_slab);
    RUN_INIT(read_environs, envp);
    RUN_INIT(init_str_mgr);
    RUN_INIT(init_internal_map);
    RUN_INIT(init_rlimit);
    RUN_INIT(init_fs);
    RUN_INIT(init_dcache);
    RUN_INIT(init_handle);

    debug("shim loaded at %p, ready to initialize\n", &__load_address);
    if (argc && argv[0][0] == '-') {
        if (!strcmp_static(argv[0], "-resume") && argc >= 2) {
            const char * filename = *(argv + 1);
            argc -= 2;
            argv += 2;
            RUN_INIT(init_mount_root);
            RUN_INIT(init_from_checkpoint_file, filename, &hdr.checkpoint,
                     &cpaddr);
        }
    }

    if (!cpaddr && PAL_CB(parent_process)) {
        RUN_INIT(init_newproc, &hdr);
        SAVE_PROFILE_INTERVAL_SET(child_created_in_new_process,
                                  hdr.create_time, begin_time);
#ifdef PROFILE
        begin_create_time = hdr.begin_create_time;
#endif

        if (hdr.checkpoint.hdr.size)
            RUN_INIT(do_migration, &hdr.checkpoint, &cpaddr);
    }

    if (cpaddr) {
        thread_start_event = DkNotificationEventCreate(PAL_FALSE);

        RUN_INIT(restore_checkpoint,
                 &hdr.checkpoint.hdr, &hdr.checkpoint.mem,
                 (ptr_t) cpaddr, 0);
    }

    if (PAL_CB(manifest_handle))
        RUN_INIT(init_manifest, PAL_CB(manifest_handle));

    RUN_INIT(init_mount_root);
    RUN_INIT(init_ipc);
    RUN_INIT(init_thread);
    RUN_INIT(init_mount);
    RUN_INIT(init_important_handles);
    RUN_INIT(init_async);
    RUN_INIT(init_stack, argv, envp, &argcp, &argp, &auxp, 0);
    RUN_INIT(init_loader);
    RUN_INIT(init_ipc_helper);
    RUN_INIT(init_signal);
    if (PAL_CB(parent_process)) {
        /* Notify the parent process */
        struct newproc_response res;
        res.child_vmid = cur_process.vmid;
        res.failure = 0;
        if (!DkStreamWrite(PAL_CB(parent_process), 0,
                           sizeof(struct newproc_response),
                           &res, NULL))
            shim_do_exit(-PAL_ERRNO);
    }

    debug("shim process initialized\n");

#ifdef PROFILE
    if (begin_create_time)
        SAVE_PROFILE_INTERVAL_SINCE(child_total_migration_time,
                                    begin_create_time);
#endif

    SAVE_PROFILE_INTERVAL_SET(pal_startup_time, 0, pal_control.startup_time);
    SAVE_PROFILE_INTERVAL_SET(pal_host_specific_startup_time, 0,
                              pal_control.host_specific_startup_time);
    SAVE_PROFILE_INTERVAL_SET(pal_relocation_time, 0,
                              pal_control.relocation_time);
    SAVE_PROFILE_INTERVAL_SET(pal_linking_time, 0, pal_control.linking_time);
    SAVE_PROFILE_INTERVAL_SET(pal_manifest_loading_time, 0,
                              pal_control.manifest_loading_time);
    SAVE_PROFILE_INTERVAL_SET(pal_allocation_time, 0,
                              pal_control.allocation_time);
    SAVE_PROFILE_INTERVAL_SET(pal_tail_startup_time, 0,
                              pal_control.tail_startup_time);
    SAVE_PROFILE_INTERVAL_SET(pal_child_creation_time, 0,
                              pal_control.child_creation_time);

    if (thread_start_event)
        DkEventSet(thread_start_event);

    shim_tcb_t * cur_tcb = shim_get_tls();
    struct shim_thread * cur_thread = (struct shim_thread *) cur_tcb->tp;

    if (cur_tcb->context.regs && cur_tcb->context.regs->rsp) {
        vdso_map_migrate();
        restore_context(&cur_tcb->context);
    }

    if (cur_thread->exec)
        execute_elf_object(cur_thread->exec, argcp, argp, auxp);
    shim_do_exit(0);
}
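/* create_unique repeatedly generates a candidate name with mkname() and tries
 * to create the corresponding object with create(): a positive return from
 * create() means the name is already taken and another candidate is tried, a
 * negative return aborts with that error. On success the final name is
 * emitted through output() (or recorded in qstr) and its length is returned. */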
static int create_unique (int (*mkname) (char *, size_t, void *),
                          int (*create) (const char *, void *),
                          int (*output) (char *, size_t, const void *,
                                         struct shim_qstr *),
                          char * name, size_t size, void * id, void * obj,
                          struct shim_qstr * qstr)
{
    int ret, len;
    while (1) {
        len = mkname(name, size, id);
        if (len < 0)
            return len;

        if ((ret = create(name, obj)) < 0)
            return ret;

        if (ret)
            continue;

        if (output)
            return output(name, size, id, qstr);

        if (qstr)
            qstrsetstr(qstr, name, len);
        return len;
    }
}
static int name_pipe_rand (char * uri, size_t size, void * id)
{
    IDTYPE pipeid;
    size_t len;
    int ret = DkRandomBitsRead(&pipeid, sizeof(pipeid));
    if (ret < 0)
        return -convert_pal_errno(-ret);
    debug("creating pipe: pipe.srv:%u\n", pipeid);
    if ((len = snprintf(uri, size, "pipe.srv:%u", pipeid)) >= size)
        return -ERANGE;
    *((IDTYPE *)id) = pipeid;
    return len;
}

static int name_pipe_vmid (char * uri, size_t size, void * id)
{
    IDTYPE pipeid = cur_process.vmid;
    size_t len;
    debug("creating pipe: pipe.srv:%u\n", pipeid);
    if ((len = snprintf(uri, size, "pipe.srv:%u", pipeid)) >= size)
        return -ERANGE;
    *((IDTYPE *)id) = pipeid;
    return len;
}
static int open_pipe (const char * uri, void * obj)
{
    PAL_HANDLE pipe = DkStreamOpen(uri, 0, 0, 0, 0);
    if (!pipe)
        return PAL_NATIVE_ERRNO == PAL_ERROR_STREAMEXIST ? 1 :
               -PAL_ERRNO;

    if (obj)
        *((PAL_HANDLE *) obj) = pipe;
    else
        DkObjectClose(pipe);
    return 0;
}

static int pipe_addr (char * uri, size_t size, const void * id,
                      struct shim_qstr * qstr)
{
    IDTYPE pipeid = *((IDTYPE *) id);
    size_t len;
    if ((len = snprintf(uri, size, "pipe:%u", pipeid)) >= size)
        return -ERANGE;
    if (qstr)
        qstrsetstr(qstr, uri, len);
    return len;
}
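/* create_pipe picks a pipe ID (either the process VMID or a random value),
 * opens the corresponding "pipe.srv:<id>" listening stream via create_unique,
 * and finally writes the matching "pipe:<id>" client URI into `uri`. */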
int create_pipe (IDTYPE * id, char * uri, size_t size, PAL_HANDLE * hdl,
                 struct shim_qstr * qstr, bool use_vmid_for_name)
{
    IDTYPE pipeid;
    int ret;

    if (use_vmid_for_name)
        ret = create_unique(&name_pipe_vmid, &open_pipe, &pipe_addr,
                            uri, size, &pipeid, hdl, qstr);
    else
        ret = create_unique(&name_pipe_rand, &open_pipe, &pipe_addr,
                            uri, size, &pipeid, hdl, qstr);
    if (ret > 0 && id)
        *id = pipeid;
    return ret;
}
static int name_path (char * path, size_t size, void * id)
{
    unsigned int suffix;
    int prefix_len = strlen(path);
    size_t len;
    int ret = DkRandomBitsRead(&suffix, sizeof(suffix));
    if (ret < 0)
        return -convert_pal_errno(-ret);
    len = snprintf(path + prefix_len, size - prefix_len, "%08x", suffix);
    if (len >= size - prefix_len)
        return -ERANGE;
    *((unsigned int *) id) = suffix;
    return prefix_len + len;
}
static int open_dir (const char * path, void * obj)
{
    struct shim_handle * dir = NULL;

    if (obj) {
        dir = get_new_handle();
        if (!dir)
            return -ENOMEM;
    }

    int ret = open_namei(dir, NULL, path, O_CREAT|O_EXCL|O_DIRECTORY, 0700,
                         NULL);
    if (ret < 0)
        return ret == -EEXIST ? 1 : ret;
    if (obj)
        *((struct shim_handle **) obj) = dir;
    return 0;
}

static int open_file (const char * path, void * obj)
{
    struct shim_handle * file = NULL;

    if (obj) {
        file = get_new_handle();
        if (!file)
            return -ENOMEM;
    }

    int ret = open_namei(file, NULL, path, O_CREAT|O_EXCL|O_RDWR, 0600,
                         NULL);
    if (ret < 0)
        return ret == -EEXIST ? 1 : ret;
    if (obj)
        *((struct shim_handle **) obj) = file;
    return 0;
}
static int open_pal_handle (const char * uri, void * obj)
{
    PAL_HANDLE hdl;

    if (strstartswith_static(uri, "dev:"))
        hdl = DkStreamOpen(uri, 0,
                           PAL_SHARE_OWNER_X|PAL_SHARE_OWNER_W|
                           PAL_SHARE_OWNER_R,
                           PAL_CREATE_TRY|PAL_CREATE_ALWAYS,
                           0);
    else
        hdl = DkStreamOpen(uri, PAL_ACCESS_RDWR,
                           PAL_SHARE_OWNER_W|PAL_SHARE_OWNER_R,
                           PAL_CREATE_TRY|PAL_CREATE_ALWAYS,
                           0);

    if (!hdl) {
        if (PAL_NATIVE_ERRNO == PAL_ERROR_STREAMEXIST)
            return 0;
        else
            return -PAL_ERRNO;
    }

    if (obj) {
        *((PAL_HANDLE *) obj) = hdl;
    } else {
        DkObjectClose(hdl);
    }

    return 0;
}
static int output_path (char * path, size_t size, const void * id,
                        struct shim_qstr * qstr)
{
    size_t len = strlen(path);
    // API compatibility
    __UNUSED(size);
    __UNUSED(id);

    if (qstr)
        qstrsetstr(qstr, path, len);
    return len;
}
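/* create_dir, create_file, and create_handle share the same pattern: copy the
 * optional prefix into the output buffer, then let create_unique append a
 * random 8-hex-digit suffix and retry until the directory, file, or PAL
 * stream can be created exclusively. */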
int create_dir (const char * prefix, char * path, size_t size,
                struct shim_handle ** hdl)
{
    unsigned int suffix;

    if (prefix) {
        size_t len = strlen(prefix);
        if (len >= size)
            return -ERANGE;
        memcpy(path, prefix, len + 1);
    }

    return create_unique(&name_path, &open_dir, &output_path, path, size,
                         &suffix, hdl, NULL);
}

int create_file (const char * prefix, char * path, size_t size,
                 struct shim_handle ** hdl)
{
    unsigned int suffix;

    if (prefix) {
        size_t len = strlen(prefix);
        if (len >= size)
            return -ERANGE;
        memcpy(path, prefix, len + 1);
    }

    return create_unique(&name_path, &open_file, &output_path, path, size,
                         &suffix, hdl, NULL);
}

int create_handle (const char * prefix, char * uri, size_t size,
                   PAL_HANDLE * hdl, unsigned int * id)
{
    unsigned int suffix;

    if (prefix) {
        size_t len = strlen(prefix);
        if (len >= size)
            return -ERANGE;
        memcpy(uri, prefix, len + 1);
    }

    return create_unique(&name_path, &open_pal_handle, &output_path, uri, size,
                         id ? : &suffix, hdl, NULL);
}
void check_stack_hook (void)
{
    struct shim_thread * cur_thread = get_cur_thread();

    void * rsp;
    __asm__ volatile ("movq %%rsp, %0" : "=r"(rsp) :: "memory");

    if (rsp <= cur_thread->stack_top && rsp > cur_thread->stack) {
        if ((uintptr_t) rsp - (uintptr_t) cur_thread->stack < PAL_CB(pagesize))
            SYS_PRINTF("*** stack is almost drained (RSP = %p, stack = %p-%p) ***\n",
                       rsp, cur_thread->stack, cur_thread->stack_top);
    } else {
        SYS_PRINTF("*** RSP does not match the thread stack (RSP = %p, stack = %p-%p) ***\n",
                   rsp, cur_thread->stack, cur_thread->stack_top);
    }
}
#ifdef PROFILE
static void print_profile_result (PAL_HANDLE hdl, struct shim_profile * root,
                                  int level)
{
    unsigned long total_interval_time  = 0;
    unsigned long total_interval_count = 0;
    for (size_t i = 0 ; i < N_PROFILE ; i++) {
        struct shim_profile * profile = &PROFILES[i];
        if (profile->root != root || profile->disabled)
            continue;
        switch (profile->type) {
            case OCCURENCE: {
                unsigned int count =
                    atomic_read(&profile->val.occurence.count);
                if (count) {
                    for (int j = 0 ; j < level ; j++)
                        __SYS_FPRINTF(hdl, " ");
                    __SYS_FPRINTF(hdl, "- %s: %u times\n", profile->name, count);
                }
                break;
            }
            case INTERVAL: {
                unsigned int count =
                    atomic_read(&profile->val.interval.count);
                if (count) {
                    unsigned long time =
                        atomic_read(&profile->val.interval.time);
                    unsigned long ind_time = time / count;
                    total_interval_time  += time;
                    total_interval_count += count;
                    for (int j = 0 ; j < level ; j++)
                        __SYS_FPRINTF(hdl, " ");
                    __SYS_FPRINTF(hdl, "- (%11.11lu) %s: %u times, %lu msec\n",
                                  time, profile->name, count, ind_time);
                }
                break;
            }
            case CATEGORY:
                for (int j = 0 ; j < level ; j++)
                    __SYS_FPRINTF(hdl, " ");
                __SYS_FPRINTF(hdl, "- %s:\n", profile->name);
                print_profile_result(hdl, profile, level + 1);
                break;
        }
    }
    if (total_interval_count) {
        __SYS_FPRINTF(hdl, " - (%11.11lu) total: %lu times, %lu msec\n",
                      total_interval_time, total_interval_count,
                      total_interval_time / total_interval_count);
    }
}
#endif /* PROFILE */
static struct atomic_int in_terminate = { .counter = 0, };

noreturn void shim_terminate (int err)
{
    debug("terminating the whole process (%d)\n", err);

    /* do last clean-up of the process */
    shim_clean(err);
    DkProcessExit(err);
}
/* clean up and terminate the process; preserve the exit code if err == 0 */
int shim_clean (int err)
{
    /* prevent multiple cleanups; re-entry here is mostly caused by an
     * assertion failure inside shim_clean itself */
    if (atomic_inc_return(&in_terminate) > 1)
        return 0;

    if (err != 0)
        cur_process.exit_code = err;
    store_all_msg_persist();

#ifdef PROFILE
    if (ENTER_TIME) {
        switch (shim_get_tls()->context.orig_rax) {
            case __NR_exit_group:
                SAVE_PROFILE_INTERVAL_SINCE(syscall_exit_group, ENTER_TIME);
                break;
            case __NR_exit:
                SAVE_PROFILE_INTERVAL_SINCE(syscall_exit, ENTER_TIME);
                break;
        }
    }

    if (ipc_cld_profile_send()) {
        MASTER_LOCK();

        PAL_HANDLE hdl = __open_shim_stdio();

        if (hdl) {
            __SYS_FPRINTF(hdl, "******************************\n");
            __SYS_FPRINTF(hdl, "profiling:\n");
            print_profile_result(hdl, &profile_root, 0);
            __SYS_FPRINTF(hdl, "******************************\n");
        }

        MASTER_UNLOCK();
        DkObjectClose(hdl);
    }
#endif

    del_all_ipc_ports();

    if (shim_stdio && shim_stdio != (PAL_HANDLE) -1)
        DkObjectClose(shim_stdio);

    shim_stdio = NULL;
    debug("process %u exited with status %d\n", cur_process.vmid & 0xFFFF, cur_process.exit_code);
    MASTER_LOCK();
    DkProcessExit(cur_process.exit_code);
    return 0;
}
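/* message_confirm prints `message` followed by a prompt of the form
 * " [y/n/...] " built from `options` on the shim's stdio stream, reads one
 * byte back, and returns that byte (or a negative error code on failure). */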
int message_confirm (const char * message, const char * options)
{
    char answer;
    int noptions = strlen(options);
    char * option_str = __alloca(noptions * 2 + 3), * str = option_str;
    int ret = 0;

    *(str++) = ' ';
    *(str++) = '[';
    for (int i = 0 ; i < noptions ; i++) {
        *(str++) = options[i];
        *(str++) = '/';
    }
    str--;
    *(str++) = ']';
    *(str++) = ' ';

    MASTER_LOCK();

    PAL_HANDLE hdl = __open_shim_stdio();
    if (!hdl) {
        MASTER_UNLOCK();
        return -EACCES;
    }

#define WRITE(buf, len)                                                 \
    ({  int _ret = DkStreamWrite(hdl, 0, len, (void*)(buf), NULL);      \
        _ret ? : -PAL_ERRNO; })

#define READ(buf, len)                                                  \
    ({  int _ret = DkStreamRead(hdl, 0, len, buf, NULL, 0);             \
        _ret ? : -PAL_ERRNO; })

    if ((ret = WRITE(message, strlen(message))) < 0)
        goto out;
    if ((ret = WRITE(option_str, noptions * 2 + 3)) < 0)
        goto out;
    if ((ret = READ(&answer, 1)) < 0)
        goto out;

out:
    DkObjectClose(hdl);
    MASTER_UNLOCK();
    return (ret < 0) ? ret : answer;
}