/* Copyright (C) 2014 Stony Brook University
   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program. If not, see <http://www.gnu.org/licenses/>. */
/*
 * shim_sigaction.c
 *
 * Implementation of the system calls "sigaction", "sigreturn", "sigprocmask",
 * "kill", "tkill" and "tgkill".
 */
#include <errno.h>
#include <stddef.h> // FIXME(mkow): Without this we get:
                    // asm/signal.h:126:2: error: unknown type name ‘size_t’
                    // It definitely shouldn't behave like this...
#include <linux/signal.h>
#include <pal.h>
#include <pal_error.h>
#include <shim_internal.h>
#include <shim_ipc.h>
#include <shim_profile.h>
#include <shim_table.h>
#include <shim_thread.h>
#include <shim_utils.h>
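
/*
 * rt_sigaction(2) emulation: installs or queries the per-thread handler slot
 * for `signum` under cur->lock; SIGKILL, SIGSTOP and out-of-range signal
 * numbers are rejected with -EINVAL. A minimal sketch of how application code
 * would reach this path through the usual libc wrapper (illustrative only,
 * not part of the shim; `my_handler` is hypothetical):
 *
 *     struct sigaction sa = {0};
 *     sa.sa_handler = my_handler;
 *     sigemptyset(&sa.sa_mask);
 *     sigaction(SIGUSR1, &sa, NULL);   // ends up in shim_do_sigaction()
 */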
int shim_do_sigaction(int signum, const struct __kernel_sigaction* act,
                      struct __kernel_sigaction* oldact, size_t sigsetsize) {
    /* SIGKILL and SIGSTOP cannot be caught or ignored */
    if (signum == SIGKILL || signum == SIGSTOP || signum <= 0 || signum > NUM_SIGS ||
        sigsetsize != sizeof(__sigset_t))
        return -EINVAL;

    if (act && test_user_memory((void*)act, sizeof(*act), false))
        return -EFAULT;

    if (oldact && test_user_memory(oldact, sizeof(*oldact), false))
        return -EFAULT;

    struct shim_thread* cur = get_cur_thread();
    int err = 0;

    assert(!act || (void*)act->k_sa_handler != (void*)0x11);

    struct shim_signal_handle* sighdl = &cur->signal_handles[signum - 1];

    lock(&cur->lock);

    if (oldact) {
        if (sighdl->action) {
            memcpy(oldact, sighdl->action, sizeof(struct __kernel_sigaction));
        } else {
            memset(oldact, 0, sizeof(struct __kernel_sigaction));
            oldact->k_sa_handler = SIG_DFL;
        }
    }

    if (act) {
        if (!(sighdl->action))
            sighdl->action = malloc(sizeof(struct __kernel_sigaction));

        if (!(sighdl->action)) {
            err = -ENOMEM;
            goto out;
        }

        memcpy(sighdl->action, act, sizeof(struct __kernel_sigaction));
    }

    err = 0;
out:
    unlock(&cur->lock);
    return err;
}

int shim_do_sigreturn(int __unused) {
    __UNUSED(__unused);
    /* do nothing */
    return 0;
}
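
/*
 * rt_sigprocmask(2) emulation: SIG_BLOCK ORs `set` into the current mask,
 * SIG_UNBLOCK clears those bits, and SIG_SETMASK replaces the mask wholesale;
 * the previous mask is copied into `oldset` when provided. A minimal
 * caller-side sketch (illustrative only):
 *
 *     sigset_t block, saved;
 *     sigemptyset(&block);
 *     sigaddset(&block, SIGINT);
 *     sigprocmask(SIG_BLOCK, &block, &saved);    // handled here
 *     // ... critical section ...
 *     sigprocmask(SIG_SETMASK, &saved, NULL);    // restore the old mask
 */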
int shim_do_sigprocmask(int how, const __sigset_t* set, __sigset_t* oldset) {
    __sigset_t* old;
    __sigset_t tmp;
    __sigset_t set_tmp;

    if (how != SIG_BLOCK && how != SIG_UNBLOCK && how != SIG_SETMASK)
        return -EINVAL;

    if (set && test_user_memory((void*)set, sizeof(*set), false))
        return -EFAULT;

    if (oldset && test_user_memory(oldset, sizeof(*oldset), false))
        return -EFAULT;

    struct shim_thread* cur = get_cur_thread();
    int err = 0;

    lock(&cur->lock);

    old = get_sig_mask(cur);
    if (oldset) {
        memcpy(&tmp, old, sizeof(__sigset_t));
        old = &tmp;
    }

    /* if set is NULL, then the signal mask is unchanged, but the current
       value of the signal mask is nevertheless returned in oldset */
    if (!set)
        goto out;

    memcpy(&set_tmp, old, sizeof(__sigset_t));

    switch (how) {
        case SIG_BLOCK:
            __sigorset(&set_tmp, &set_tmp, set);
            break;
        case SIG_UNBLOCK:
            __signotset(&set_tmp, &set_tmp, set);
            break;
        case SIG_SETMASK:
            memcpy(&set_tmp, set, sizeof(__sigset_t));
            break;
    }

    set_sig_mask(cur, &set_tmp);

out:
    unlock(&cur->lock);

    if (!err && oldset)
        memcpy(oldset, old, sizeof(__sigset_t));

    return err;
}
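
/*
 * sigaltstack(2) emulation: queries and/or replaces the per-thread alternate
 * signal stack. Replacing the stack is refused with -EPERM while the thread's
 * current rsp lies inside the active altstack, and undersized stacks
 * (smaller than MINSIGSTKSZ) are rejected with -ENOMEM. Illustrative
 * caller-side sketch:
 *
 *     static char altstack[SIGSTKSZ];
 *     stack_t ss = { .ss_sp = altstack, .ss_size = sizeof(altstack), .ss_flags = 0 };
 *     sigaltstack(&ss, NULL);   // serviced by shim_do_sigaltstack()
 */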
int shim_do_sigaltstack(const stack_t* ss, stack_t* oss) {
    if (ss && (ss->ss_flags & ~SS_DISABLE))
        return -EINVAL;

    struct shim_thread* cur = get_cur_thread();
    lock(&cur->lock);

    stack_t* cur_ss = &cur->signal_altstack;

    if (oss)
        *oss = *cur_ss;

    void* sp = (void*)shim_get_tcb()->context.regs->rsp;
    /* check if thread is currently executing on an active altstack */
    if (!(cur_ss->ss_flags & SS_DISABLE) && sp && cur_ss->ss_sp <= sp &&
        sp < cur_ss->ss_sp + cur_ss->ss_size) {
        if (oss)
            oss->ss_flags |= SS_ONSTACK;
        if (ss) {
            unlock(&cur->lock);
            return -EPERM;
        }
    }

    if (ss) {
        if (ss->ss_flags & SS_DISABLE) {
            memset(cur_ss, 0, sizeof(*cur_ss));
            cur_ss->ss_flags = SS_DISABLE;
        } else {
            if (ss->ss_size < MINSIGSTKSZ) {
                unlock(&cur->lock);
                return -ENOMEM;
            }

            *cur_ss = *ss;
        }
    }

    unlock(&cur->lock);
    return 0;
}
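
/*
 * rt_sigsuspend(2) emulation: atomically swaps in the caller-supplied mask,
 * sleeps until a signal wakes the thread, then restores the original mask and
 * returns -EINTR, matching the real syscall's behavior. If an unblocked signal
 * is already pending, it returns -EINTR immediately without sleeping.
 * Typical caller-side pattern (illustrative only):
 *
 *     sigset_t wait_mask;
 *     sigemptyset(&wait_mask);     // block nothing while suspended
 *     sigsuspend(&wait_mask);      // returns -1 with errno EINTR after a handler runs
 */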
int shim_do_sigsuspend(const __sigset_t* mask) {
    if (!mask || test_user_memory((void*)mask, sizeof(*mask), false))
        return -EFAULT;

    __sigset_t* old;
    __sigset_t tmp;
    struct shim_thread* cur = get_cur_thread();

    lock(&cur->lock);

    /* return immediately on some pending unblocked signal */
    for (int sig = 1; sig <= NUM_SIGS; sig++) {
        if (signal_logs_pending(cur->signal_logs, sig)) {
            /* at least one signal of type sig... */
            if (!__sigismember(mask, sig)) {
                /* ...and this type is not blocked in supplied mask */
                unlock(&cur->lock);
                return -EINTR;
            }
        }
    }

    old = get_sig_mask(cur);
    memcpy(&tmp, old, sizeof(__sigset_t));
    old = &tmp;

    set_sig_mask(cur, mask);
    cur->suspend_on_signal = true;
    unlock(&cur->lock);

    thread_setwait(NULL, NULL);
    thread_sleep(NO_TIMEOUT);

    lock(&cur->lock);
    set_sig_mask(cur, old);
    unlock(&cur->lock);
    return -EINTR;
}
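
/*
 * rt_sigpending(2) emulation: reports, per thread, which signal numbers have
 * at least one entry queued in cur->signal_logs. Illustrative caller-side
 * check (`handle_shutdown` is hypothetical):
 *
 *     sigset_t pending;
 *     sigpending(&pending);
 *     if (sigismember(&pending, SIGTERM))
 *         handle_shutdown();
 */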
int shim_do_sigpending(__sigset_t* set, size_t sigsetsize) {
    if (sigsetsize != sizeof(*set))
        return -EINVAL;

    if (!set || test_user_memory(set, sigsetsize, false))
        return -EFAULT;

    struct shim_thread* cur = get_cur_thread();

    __sigemptyset(set);

    if (!cur->signal_logs)
        return 0;

    for (int sig = 1; sig <= NUM_SIGS; sig++) {
        if (signal_logs_pending(cur->signal_logs, sig))
            __sigaddset(set, sig);
    }

    return 0;
}
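
/*
 * The kill/tkill/tgkill paths below share one pattern: a `struct walk_arg`
 * carrying the sender, the target id and the signal is handed to
 * walk_thread_list()/walk_simple_thread_list() together with a per-target
 * callback (__kill_proc, __kill_pgroup, ...). Each callback appends the signal
 * locally for threads living in this Graphene instance (in_vm) and falls back
 * to ipc_pid_kill_send() for threads known only through IPC; a nonzero return
 * from the callback signals that a matching target was found.
 */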
struct walk_arg {
    struct shim_thread* current;
    IDTYPE sender;
    IDTYPE id;
    int sig;
    bool use_ipc;
};

// Need to hold thread->lock
static inline void __append_signal(struct shim_thread* thread, int sig, IDTYPE sender) {
    assert(locked(&thread->lock));
    debug("Thread %d killed by signal %d\n", thread->tid, sig);
    siginfo_t info;
    memset(&info, 0, sizeof(siginfo_t));
    info.si_signo = sig;
    info.si_pid = sender;
    append_signal(thread, sig, &info, true);
}

static int __kill_proc(struct shim_thread* thread, void* arg, bool* unlocked) {
    struct walk_arg* warg = (struct walk_arg*)arg;
    int srched = 0;

    if (!warg->use_ipc && !thread->in_vm)
        return 0;

    if (thread->tgid != warg->id)
        return 0;

    if (warg->current == thread)
        return 1;

    /* DEP: Let's do a racy read of is_alive and in_vm.
     * If either of these are zero it is a stable condition,
     * and we can elide the lock acquire (which helps perf).
     */
    if (!thread->is_alive)
        goto out;

    if (!thread->in_vm) {
        unlock(&thread_list_lock);
        *unlocked = true;
        return (!ipc_pid_kill_send(warg->sender, warg->id, KILL_PROCESS, warg->sig)) ? 1 : 0;
    } else {
        lock(&thread->lock);

        if (!thread->is_alive)
            goto out_locked;

        if (thread->in_vm) {
            if (warg->sig > 0)
                __append_signal(thread, warg->sig, warg->sender);
            srched = 1;
        } else {
            /* This double-check case is probably unnecessary, but keep it for now */
            unlock(&thread->lock);
            unlock(&thread_list_lock);
            *unlocked = true;
            return (!ipc_pid_kill_send(warg->sender, warg->id, KILL_PROCESS, warg->sig)) ? 1 : 0;
        }
    }

out_locked:
    unlock(&thread->lock);
out:
    return srched;
}
static int __kill_proc_simple(struct shim_simple_thread* sthread, void* arg, bool* unlocked) {
    struct walk_arg* warg = (struct walk_arg*)arg;
    int srched = 0;

    if (sthread->tgid != warg->id)
        return 0;

    lock(&sthread->lock);

    if (sthread->is_alive) {
        unlock(&sthread->lock);
        unlock(&thread_list_lock);
        *unlocked = true;
        return (!ipc_pid_kill_send(warg->sender, warg->id, KILL_PROCESS, warg->sig)) ? 1 : 0;
    }

    unlock(&sthread->lock);
    return srched;
}

int do_kill_proc(IDTYPE sender, IDTYPE tgid, int sig, bool use_ipc) {
    struct shim_thread* cur = get_cur_thread();

    if (!tgid) {
        /* DEP: cur->tgid never changes. No lock needed */
        tgid = cur->tgid;
    }

    struct walk_arg arg;
    arg.current = cur;
    arg.sender = sender;
    arg.id = tgid;
    arg.sig = sig;
    arg.use_ipc = use_ipc;

    bool srched = false;

    if (!walk_thread_list(__kill_proc, &arg))
        srched = true;

    if (!use_ipc || srched)
        goto out;

    if (!walk_simple_thread_list(__kill_proc_simple, &arg))
        srched = true;

    if (!srched && !ipc_pid_kill_send(sender, tgid, KILL_PROCESS, sig))
        srched = true;

out:
    return srched ? 0 : -ESRCH;
}
static int __kill_pgroup(struct shim_thread* thread, void* arg, bool* unlocked) {
    struct walk_arg* warg = (struct walk_arg*)arg;
    int srched = 0;

    if (!warg->use_ipc && !thread->in_vm)
        return 0;

    if (thread->pgid != warg->id)
        return 0;

    if (warg->current == thread)
        return 1;

    lock(&thread->lock);

    if (!thread->is_alive)
        goto out;

    if (thread->in_vm) {
        if (warg->sig > 0)
            __append_signal(thread, warg->sig, warg->sender);
        srched = 1;
    } else {
        unlock(&thread->lock);
        unlock(&thread_list_lock);
        *unlocked = true;
        return (!ipc_pid_kill_send(warg->sender, warg->id, KILL_PGROUP, warg->sig)) ? 1 : 0;
    }

out:
    unlock(&thread->lock);
    return srched;
}

static int __kill_pgroup_simple(struct shim_simple_thread* sthread, void* arg, bool* unlocked) {
    struct walk_arg* warg = (struct walk_arg*)arg;
    int srched = 0;

    if (sthread->pgid != warg->id)
        return 0;

    lock(&sthread->lock);

    if (sthread->is_alive) {
        unlock(&sthread->lock);
        unlock(&thread_list_lock);
        *unlocked = true;
        return (!ipc_pid_kill_send(warg->sender, warg->id, KILL_PGROUP, warg->sig)) ? 1 : 0;
    }

    unlock(&sthread->lock);
    return srched;
}
int do_kill_pgroup(IDTYPE sender, IDTYPE pgid, int sig, bool use_ipc) {
    struct shim_thread* cur = get_cur_thread();

    if (!pgid) {
        pgid = cur->pgid;
    }

    struct walk_arg arg;
    arg.current = cur;
    arg.sender = sender;
    arg.id = pgid;
    arg.sig = sig;
    arg.use_ipc = use_ipc;

    bool srched = false;

    if (!walk_thread_list(__kill_pgroup, &arg))
        srched = true;

    if (!use_ipc || srched)
        goto out;

    if (!walk_simple_thread_list(__kill_pgroup_simple, &arg))
        srched = true;

    if (!srched && !ipc_pid_kill_send(sender, pgid, KILL_PGROUP, sig))
        srched = true;

out:
    return srched ? 0 : -ESRCH;
}
static int __kill_all_threads(struct shim_thread* thread, void* arg, bool* unlocked) {
    __UNUSED(unlocked); // Retained for API compatibility
    int srched = 0;
    struct walk_arg* warg = (struct walk_arg*)arg;

    if (thread->tgid != thread->tid)
        return 0;

    if (warg->current == thread)
        return 1;

    lock(&thread->lock);

    if (thread->in_vm) {
        __append_signal(thread, warg->sig, warg->sender);
        srched = 1;
    }

    unlock(&thread->lock);
    return srched;
}

int kill_all_threads(struct shim_thread* cur, IDTYPE sender, int sig) {
    struct walk_arg arg;
    arg.current = cur;
    arg.sender = sender;
    arg.id = 0;
    arg.sig = sig;
    arg.use_ipc = false;
    walk_thread_list(__kill_all_threads, &arg);
    return 0;
}
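
/*
 * kill(2) emulation. The pid argument selects the scope exactly as on Linux:
 * pid > 0 targets one process, pid == 0 the caller's process group, pid == -1
 * every process the caller may signal, and pid < -1 the process group -pid.
 * Illustrative caller-side sketch:
 *
 *     kill(getpid(), SIGTERM);   // self-directed, delivered via deliver_signal()
 *     kill(0, SIGHUP);           // fans out to the whole process group
 */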
int shim_do_kill(pid_t pid, int sig) {
    INC_PROFILE_OCCURENCE(syscall_use_ipc);

    if (sig < 0 || sig > NUM_SIGS)
        return -EINVAL;

    struct shim_thread* cur = get_cur_thread();
    int ret = 0;
    bool send_to_self = false;

    /* If pid equals 0, then sig is sent to every process in the process group
       of the calling process. */
    if (pid == 0) {
        ret = do_kill_pgroup(cur->tgid, 0, sig, true);
        send_to_self = true;
    }
    /* If pid equals -1, then sig is sent to every process for which the
       calling process has permission to send */
    else if (pid == -1) {
        ipc_pid_kill_send(cur->tid, /*target=*/0, KILL_ALL, sig);
        kill_all_threads(cur, cur->tid, sig);
        send_to_self = true;
    }
    /* If pid is positive, then signal sig is sent to the process with the ID
       specified by pid. */
    else if (pid > 0) {
        ret = do_kill_proc(cur->tid, pid, sig, true);
        send_to_self = (IDTYPE)pid == cur->tgid;
    }
    /* If pid is less than -1, then sig is sent to every process in the
       process group whose id is -pid */
    else {
        ret = do_kill_pgroup(cur->tid, -pid, sig, true);
        send_to_self = (IDTYPE)-pid == cur->pgid;
    }

    if (send_to_self) {
        if (ret == -ESRCH)
            ret = 0;
        if (sig) {
            siginfo_t info;
            memset(&info, 0, sizeof(siginfo_t));
            info.si_signo = sig;
            info.si_pid = cur->tid;
            deliver_signal(&info, NULL);
        }
    }

    return ret < 0 ? ret : 0;
}
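
/*
 * Common backend for tkill/tgkill: if the target tid lives in this Graphene
 * instance (thread->in_vm), the signal is appended to its queue directly;
 * otherwise the request is forwarded over IPC with ipc_pid_kill_send().
 * When tgid is nonzero, the target must also belong to that thread group.
 */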
int do_kill_thread(IDTYPE sender, IDTYPE tgid, IDTYPE tid, int sig, bool use_ipc) {
    if (sig < 0 || sig > NUM_SIGS)
        return -EINVAL;

    struct shim_thread* thread = lookup_thread(tid);
    int ret = -ESRCH;

    if (thread) {
        lock(&thread->lock);

        if (thread->in_vm) {
            if (!tgid || thread->tgid == tgid) {
                __append_signal(thread, sig, sender);
                ret = 0;
            }
            use_ipc = false;
        } else {
            use_ipc = true;
        }

        unlock(&thread->lock);
        put_thread(thread);
    }

    if (!use_ipc) {
        return ret;
    }

    return ipc_pid_kill_send(sender, tid, KILL_THREAD, sig);
}
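
/*
 * tkill(2)/tgkill(2) emulation: a signal aimed at the calling thread itself is
 * delivered synchronously via deliver_signal(); anything else goes through
 * do_kill_thread(). Illustrative caller-side sketch, commonly invoked via raw
 * syscall() (`target_tid` is hypothetical):
 *
 *     #include <sys/syscall.h>
 *     syscall(SYS_tgkill, getpid(), target_tid, SIGUSR1);
 */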
int shim_do_tkill(pid_t tid, int sig) {
    INC_PROFILE_OCCURENCE(syscall_use_ipc);

    if (tid <= 0)
        return -EINVAL;

    struct shim_thread* cur = get_cur_thread();

    if ((IDTYPE)tid == cur->tid) {
        if (sig) {
            siginfo_t info;
            memset(&info, 0, sizeof(siginfo_t));
            info.si_signo = sig;
            info.si_pid = cur->tid;
            deliver_signal(&info, NULL);
        }
        return 0;
    }

    return do_kill_thread(cur->tgid, 0, tid, sig, true);
}

int shim_do_tgkill(pid_t tgid, pid_t tid, int sig) {
    INC_PROFILE_OCCURENCE(syscall_use_ipc);

    if (tgid < -1 || tgid == 0 || tid <= 0)
        return -EINVAL;

    if (tgid == -1)
        tgid = 0;

    struct shim_thread* cur = get_cur_thread();

    if ((IDTYPE)tid == cur->tid) {
        if (sig) {
            siginfo_t info;
            memset(&info, 0, sizeof(siginfo_t));
            info.si_signo = sig;
            info.si_pid = cur->tid;
            deliver_signal(&info, NULL);
        }
        return 0;
    }

    return do_kill_thread(cur->tgid, tgid, tid, sig, true);
}