/* shim_sigaction.c */
  1. /* Copyright (C) 2014 Stony Brook University
  2. This file is part of Graphene Library OS.
  3. Graphene Library OS is free software: you can redistribute it and/or
  4. modify it under the terms of the GNU Lesser General Public License
  5. as published by the Free Software Foundation, either version 3 of the
  6. License, or (at your option) any later version.
  7. Graphene Library OS is distributed in the hope that it will be useful,
  8. but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. GNU Lesser General Public License for more details.
  11. You should have received a copy of the GNU Lesser General Public License
  12. along with this program. If not, see <http://www.gnu.org/licenses/>. */
  13. /*
  14. * shim_sigaction.c
  15. *
  16. * Implementation of system call "sigaction", "sigreturn", "sigprocmask",
  17. * "kill", "tkill" and "tgkill".
  18. */
  19. #include <shim_internal.h>
  20. #include <shim_utils.h>
  21. #include <shim_table.h>
  22. #include <shim_thread.h>
  23. #include <shim_ipc.h>
  24. #include <shim_profile.h>
  25. #include <pal.h>
  26. #include <pal_error.h>
  27. #include <errno.h>
  28. #include <linux/signal.h>
/*
 * sigaction(2): install and/or query the disposition of signal 'signum'
 * for the calling thread.
 *
 * act:        new disposition to install, or NULL to only query.
 * oldact:     if non-NULL, receives the previous disposition (SIG_DFL is
 *             reported when no handler was ever installed).
 * sigsetsize: must equal sizeof(__sigset_t) (Linux ABI sanity check).
 *
 * Returns 0 on success, -EINVAL / -EFAULT / -ENOMEM on failure.
 */
int shim_do_sigaction (int signum, const struct __kernel_sigaction * act,
                       struct __kernel_sigaction * oldact, size_t sigsetsize)
{
    /* SIGKILL and SIGSTOP cannot be caught or ignored */
    if (signum == SIGKILL || signum == SIGSTOP ||
        signum <= 0 || signum > NUM_SIGS ||
        sigsetsize != sizeof(__sigset_t))
        return -EINVAL;

    if (act && test_user_memory((void *) act, sizeof(*act), false))
        return -EFAULT;

    if (oldact && test_user_memory(oldact, sizeof(*oldact), false))
        return -EFAULT;

    struct shim_thread * cur = get_cur_thread();
    int err = 0;

    /* NOTE(review): 0x11 looks like a leftover debugging canary value;
     * confirm this assert can be removed. */
    assert(!act || (void *) act->k_sa_handler != (void *) 0x11);

    /* signal numbers are 1-based; the handle table is 0-based */
    struct shim_signal_handle * sighdl = &cur->signal_handles[signum - 1];

    lock(&cur->lock);

    if (oldact) {
        if (sighdl->action) {
            memcpy(oldact, sighdl->action, sizeof(struct __kernel_sigaction));
        } else {
            /* no handler installed yet: report the default disposition */
            memset(oldact, 0, sizeof(struct __kernel_sigaction));
            oldact->k_sa_handler = SIG_DFL;
        }
    }

    if (act) {
        /* storage for the disposition is allocated lazily on first install */
        if (!(sighdl->action))
            sighdl->action = malloc(sizeof(struct __kernel_sigaction));

        if (!(sighdl->action)) {
            err = -ENOMEM;
            goto out;
        }

        memcpy(sighdl->action, act, sizeof(struct __kernel_sigaction));
    }

    err = 0;
out:
    unlock(&cur->lock);
    return err;
}
/*
 * sigreturn(2) stub.  Signal-frame teardown is handled elsewhere in the
 * shim, so this syscall is intentionally a no-op that reports success.
 */
int shim_do_sigreturn (int __unused)
{
    __UNUSED(__unused);
    /* do nothing */
    return 0;
}
  74. int shim_do_sigprocmask (int how, const __sigset_t * set, __sigset_t * oldset)
  75. {
  76. __sigset_t * old, tmp, set_tmp;
  77. if (how != SIG_BLOCK && how != SIG_UNBLOCK &&
  78. how != SIG_SETMASK)
  79. return -EINVAL;
  80. if (set && test_user_memory((void *) set, sizeof(*set), false))
  81. return -EFAULT;
  82. if (oldset && test_user_memory(oldset, sizeof(*oldset), false))
  83. return -EFAULT;
  84. struct shim_thread * cur = get_cur_thread();
  85. int err = 0;
  86. lock(&cur->lock);
  87. old = get_sig_mask(cur);
  88. if (oldset) {
  89. memcpy(&tmp, old, sizeof(__sigset_t));
  90. old = &tmp;
  91. }
  92. /* if set is NULL, then the signal mask is unchanged, but the current
  93. value of the signal mask is nevertheless returned in oldset */
  94. if (!set)
  95. goto out;
  96. memcpy(&set_tmp, old, sizeof(__sigset_t));
  97. switch (how) {
  98. case SIG_BLOCK:
  99. __sigorset(&set_tmp, &set_tmp, set);
  100. break;
  101. case SIG_UNBLOCK:
  102. __signotset(&set_tmp, &set_tmp, set);
  103. break;
  104. case SIG_SETMASK:
  105. memcpy(&set_tmp, set, sizeof(__sigset_t));
  106. break;
  107. }
  108. set_sig_mask(cur, &set_tmp);
  109. out:
  110. unlock(&cur->lock);
  111. if (!err && oldset)
  112. memcpy(oldset, old, sizeof(__sigset_t));
  113. return err;
  114. }
/*
 * sigaltstack(2): install and/or query the calling thread's alternate
 * signal stack.
 *
 * ss:  new alternate stack, or NULL to only query.  The only flag accepted
 *      in ss->ss_flags is SS_DISABLE.
 * oss: if non-NULL, receives the previous settings; SS_ONSTACK is OR-ed
 *      into oss->ss_flags when the thread is currently running on the
 *      alternate stack.
 *
 * Returns 0 on success; -EINVAL on bad flags; -EPERM when trying to change
 * the stack while executing on it; -ENOMEM when the new stack is smaller
 * than MINSIGSTKSZ.
 */
int shim_do_sigaltstack (const stack_t * ss, stack_t * oss)
{
    if (ss && (ss->ss_flags & ~SS_DISABLE))
        return -EINVAL;

    struct shim_thread * cur = get_cur_thread();
    lock(&cur->lock);

    stack_t * cur_ss = &cur->signal_altstack;

    if (oss)
        *oss = *cur_ss;

    /* user-level stack pointer, taken from the saved syscall context */
    void * sp = (void *)shim_get_tls()->context.regs->rsp;
    /* check if thread is currently executing on an active altstack */
    if (!(cur_ss->ss_flags & SS_DISABLE) &&
        sp &&
        cur_ss->ss_sp <= sp &&
        sp < cur_ss->ss_sp + cur_ss->ss_size) {
        if (oss)
            oss->ss_flags |= SS_ONSTACK;
        if (ss) {
            /* replacing an alternate stack while on it is forbidden */
            unlock(&cur->lock);
            return -EPERM;
        }
    }

    if (ss) {
        if (ss->ss_flags & SS_DISABLE) {
            memset(cur_ss, 0, sizeof(*cur_ss));
            cur_ss->ss_flags = SS_DISABLE;
        } else {
            if (ss->ss_size < MINSIGSTKSZ) {
                unlock(&cur->lock);
                return -ENOMEM;
            }

            *cur_ss = *ss;
        }
    }

    unlock(&cur->lock);
    return 0;
}
/*
 * sigsuspend(2): temporarily replace the signal mask with 'mask' and block
 * until a signal wakes the thread, then restore the previous mask.
 * Always returns -EINTR on wakeup, per the syscall's contract.
 *
 * NOTE(review): thread_sleep() is invoked while cur->lock is still held;
 * verify that the wakeup path does not need this lock, otherwise this can
 * deadlock.
 */
int shim_do_sigsuspend (const __sigset_t * mask)
{
    if (!mask || test_user_memory((void *) mask, sizeof(*mask), false))
        return -EFAULT;

    __sigset_t * old, tmp;
    struct shim_thread * cur = get_cur_thread();

    lock(&cur->lock);

    /* return immediately on some pending unblocked signal */
    for (int sig = 1 ; sig <= NUM_SIGS ; sig++) {
        if (atomic_read(&cur->signal_logs[sig - 1].head) !=
            atomic_read(&cur->signal_logs[sig - 1].tail)) {
            /* at least one signal of type sig... */
            if (!__sigismember(mask, sig)) {
                /* ...and this type is not blocked in supplied mask */
                unlock(&cur->lock);
                return -EINTR;
            }
        }
    }

    /* stash the current mask so it can be restored after wakeup */
    old = get_sig_mask(cur);
    memcpy(&tmp, old, sizeof(__sigset_t));
    old = &tmp;

    set_sig_mask(cur, mask);
    cur->suspend_on_signal = true;

    thread_setwait(NULL, NULL);
    thread_sleep(NO_TIMEOUT);

    unlock(&cur->lock);
    set_sig_mask(cur, old);
    return -EINTR;
}
  182. int shim_do_sigpending (__sigset_t * set, size_t sigsetsize)
  183. {
  184. if (sigsetsize != sizeof(*set))
  185. return -EINVAL;
  186. if (!set || test_user_memory(set, sigsetsize, false))
  187. return -EFAULT;
  188. struct shim_thread * cur = get_cur_thread();
  189. __sigemptyset(set);
  190. if (!cur->signal_logs)
  191. return 0;
  192. for (int sig = 1 ; sig <= NUM_SIGS ; sig++) {
  193. if (atomic_read(&cur->signal_logs[sig - 1].head) !=
  194. atomic_read(&cur->signal_logs[sig - 1].tail))
  195. __sigaddset(set, sig);
  196. }
  197. return 0;
  198. }
/* Argument bundle threaded through walk_thread_list() /
 * walk_simple_thread_list() to the per-thread kill callbacks below. */
struct walk_arg {
    struct shim_thread * current;  /* the calling thread (skipped by callbacks) */
    IDTYPE sender;                 /* id reported as the signal's sender */
    IDTYPE id;                     /* target tgid or pgid, depending on callback */
    int sig;                       /* signal to deliver; 0 probes existence only */
    bool use_ipc;                  /* forward to out-of-process targets via IPC */
};
  206. static inline void __append_signal (struct shim_thread * thread, int sig,
  207. IDTYPE sender)
  208. {
  209. debug("Thread %d killed by signal %d\n", thread->tid, sig);
  210. siginfo_t info;
  211. memset(&info, 0, sizeof(siginfo_t));
  212. info.si_signo = sig;
  213. info.si_pid = sender;
  214. append_signal(thread, sig, &info, true);
  215. }
/*
 * walk_thread_list() callback for do_kill_proc(): deliver warg->sig to a
 * thread whose tgid equals warg->id.
 *
 * Returns 1 when a target was found/handled (stops the walk), 0 to keep
 * walking.  When the target lives in another process the kill is forwarded
 * over IPC; in that case thread_list_lock is released first and *unlocked
 * is set so the walker knows not to unlock it again.
 */
static int __kill_proc (struct shim_thread * thread, void * arg,
                        bool * unlocked)
{
    struct walk_arg * warg = (struct walk_arg *) arg;
    int srched = 0;

    if (!warg->use_ipc && !thread->in_vm)
        return 0;

    if (thread->tgid != warg->id)
        return 0;

    /* the caller delivers to itself separately; just report "found" */
    if (warg->current == thread)
        return 1;

    /* DEP: Let's do a racy read of is_alive and in_vm.
     * If either of these are zero it is a stable condition,
     * and we can elide the lock acquire (which helps perf).
     */
    if (!thread->is_alive)
        goto out;

    if (!thread->in_vm) {
        unlock(&thread_list_lock);
        *unlocked = true;
        /* ipc_pid_kill_send() returns 0 on success */
        return (!ipc_pid_kill_send(warg->sender, warg->id, KILL_PROCESS,
                                   warg->sig)) ? 1 : 0;
    } else {
        lock(&thread->lock);

        /* re-check under the lock; liveness may have changed */
        if (!thread->is_alive)
            goto out_locked;

        if (thread->in_vm) {
            /* sig == 0 probes for existence without delivering anything */
            if (warg->sig > 0)
                __append_signal(thread, warg->sig, warg->sender);
            srched = 1;
        } else {
            /* This double-check case is probably unnecessary, but keep it for now */
            unlock(&thread->lock);
            unlock(&thread_list_lock);
            *unlocked = true;
            return (!ipc_pid_kill_send(warg->sender, warg->id, KILL_PROCESS,
                                       warg->sig)) ? 1 : 0;
        }
    }

out_locked:
    unlock(&thread->lock);
out:
    return srched;
}
/*
 * walk_simple_thread_list() callback for do_kill_proc().  A "simple thread"
 * records a thread living in another process, so a live match is always
 * forwarded over IPC.  Releases thread_list_lock (and sets *unlocked)
 * before sending.  Returns 1 on a handled match, 0 otherwise.
 */
static int __kill_proc_simple (struct shim_simple_thread * sthread,
                               void * arg, bool * unlocked)
{
    struct walk_arg * warg = (struct walk_arg *) arg;
    int srched = 0;

    if (sthread->tgid != warg->id)
        return 0;

    lock(&sthread->lock);

    if (sthread->is_alive) {
        unlock(&sthread->lock);
        unlock(&thread_list_lock);
        *unlocked = true;
        /* ipc_pid_kill_send() returns 0 on success */
        return (!ipc_pid_kill_send(warg->sender, warg->id, KILL_PROCESS,
                                   warg->sig)) ? 1 : 0;
    }

    unlock(&sthread->lock);
    return srched;
}
  278. int do_kill_proc (IDTYPE sender, IDTYPE tgid, int sig, bool use_ipc)
  279. {
  280. struct shim_thread * cur = get_cur_thread();
  281. if (!tgid) {
  282. /* DEP: cur->tgid never changes. No lock needed */
  283. tgid = cur->tgid;
  284. }
  285. struct walk_arg arg;
  286. arg.current = cur;
  287. arg.sender = sender;
  288. arg.id = tgid;
  289. arg.sig = sig;
  290. arg.use_ipc = use_ipc;
  291. bool srched = false;
  292. if (!walk_thread_list(__kill_proc, &arg))
  293. srched = true;
  294. if (!use_ipc || srched)
  295. goto out;
  296. if (!walk_simple_thread_list(__kill_proc_simple, &arg))
  297. srched = true;
  298. if (!srched && !ipc_pid_kill_send(sender, tgid, KILL_PROCESS, sig))
  299. srched = true;
  300. out:
  301. return srched ? 0 : -ESRCH;
  302. }
/*
 * walk_thread_list() callback for do_kill_pgroup(): deliver warg->sig to a
 * thread whose pgid equals warg->id.
 *
 * Returns 1 when a target was found/handled, 0 to keep walking.  When the
 * target lives in another process, thread_list_lock is released (*unlocked
 * set) and the kill is forwarded over IPC.
 */
static int __kill_pgroup (struct shim_thread * thread, void * arg,
                          bool * unlocked)
{
    struct walk_arg * warg = (struct walk_arg *) arg;
    int srched = 0;

    if (!warg->use_ipc && !thread->in_vm)
        return 0;

    if (thread->pgid != warg->id)
        return 0;

    /* the caller delivers to itself separately; just report "found" */
    if (warg->current == thread)
        return 1;

    lock(&thread->lock);

    if (!thread->is_alive)
        goto out;

    if (thread->in_vm) {
        /* sig == 0 probes for existence without delivering anything */
        if (warg->sig > 0)
            __append_signal(thread, warg->sig, warg->sender);
        srched = 1;
    } else {
        unlock(&thread->lock);
        unlock(&thread_list_lock);
        *unlocked = true;
        /* ipc_pid_kill_send() returns 0 on success */
        return (!ipc_pid_kill_send(warg->sender, warg->id, KILL_PGROUP,
                                   warg->sig)) ? 1 : 0;
    }

out:
    unlock(&thread->lock);
    return srched;
}
/*
 * walk_simple_thread_list() callback for do_kill_pgroup(): forwards the
 * kill over IPC for any live out-of-process thread whose pgid matches.
 * Releases thread_list_lock (and sets *unlocked) before sending.
 * Returns 1 on a handled match, 0 otherwise.
 */
static int __kill_pgroup_simple (struct shim_simple_thread * sthread,
                                 void * arg, bool * unlocked)
{
    struct walk_arg * warg = (struct walk_arg *) arg;
    int srched = 0;

    if (sthread->pgid != warg->id)
        return 0;

    lock(&sthread->lock);

    if (sthread->is_alive) {
        unlock(&sthread->lock);
        unlock(&thread_list_lock);
        *unlocked = true;
        /* ipc_pid_kill_send() returns 0 on success */
        return (!ipc_pid_kill_send(warg->sender, warg->id, KILL_PGROUP,
                                   warg->sig)) ? 1 : 0;
    }

    unlock(&sthread->lock);
    return srched;
}
  350. int do_kill_pgroup (IDTYPE sender, IDTYPE pgid, int sig, bool use_ipc)
  351. {
  352. struct shim_thread * cur = get_cur_thread();
  353. if (!pgid) {
  354. pgid = cur->pgid;
  355. }
  356. struct walk_arg arg;
  357. arg.current = cur;
  358. arg.sender = sender;
  359. arg.id = pgid;
  360. arg.sig = sig;
  361. arg.use_ipc = use_ipc;
  362. bool srched = false;
  363. if (!walk_thread_list(__kill_pgroup, &arg))
  364. srched = true;
  365. if (!use_ipc || srched)
  366. goto out;
  367. if (!walk_simple_thread_list(__kill_pgroup_simple, &arg))
  368. srched = true;
  369. if (!srched && !ipc_pid_kill_send(sender, pgid, KILL_PGROUP, sig))
  370. srched = true;
  371. out:
  372. return srched ? 0 : -ESRCH;
  373. }
  374. static int __kill_all_threads (struct shim_thread * thread, void * arg,
  375. bool * unlocked)
  376. {
  377. __UNUSED(unlocked); // Retained for API compatibility
  378. int srched = 0;
  379. struct walk_arg * warg = (struct walk_arg *) arg;
  380. if (thread->tgid != thread->tid)
  381. return 0;
  382. if (warg->current == thread)
  383. return 1;
  384. lock(&thread->lock);
  385. if (thread->in_vm) {
  386. __append_signal(thread, warg->sig, warg->sender);
  387. srched = 1;
  388. }
  389. unlock(&thread->lock);
  390. return srched;
  391. }
  392. int kill_all_threads (struct shim_thread * cur, IDTYPE sender, int sig)
  393. {
  394. struct walk_arg arg;
  395. arg.current = cur;
  396. arg.sender = sender;
  397. arg.id = 0;
  398. arg.sig = sig;
  399. arg.use_ipc = false;
  400. walk_thread_list(__kill_all_threads, &arg);
  401. return 0;
  402. }
/*
 * kill(2): route 'sig' according to the sign of 'pid':
 *   pid == 0  -> every process in the caller's process group
 *   pid == -1 -> every process the caller can reach (broadcast)
 *   pid >  0  -> the process with that pid
 *   pid < -1  -> every process in process group -pid
 * When the caller itself is among the targets, the signal is delivered to
 * it synchronously at the end.  Returns 0 or a negative errno.
 */
int shim_do_kill (pid_t pid, int sig)
{
    INC_PROFILE_OCCURENCE(syscall_use_ipc);

    if (sig < 0 || sig > NUM_SIGS)
        return -EINVAL;

    struct shim_thread * cur = get_cur_thread();
    int ret = 0;
    bool send_to_self = false;

    /* If pid equals 0, then sig is sent to every process in the process group
       of the calling process. */
    if (pid == 0) {
        ret = do_kill_pgroup(cur->tgid, 0, sig, true);
        send_to_self = true;
    }

    /* If pid equals -1, then sig is sent to every process for which the
       calling process has permission to send */
    else if (pid == -1) {
        /* NOTE(review): the return value of this broadcast is ignored */
        ipc_pid_kill_send(cur->tid, /*target=*/0, KILL_ALL, sig);
        kill_all_threads(cur, cur->tid, sig);
        send_to_self = true;
    }

    /* If pid is positive, then signal sig is sent to the process with the ID
       specified by pid. */
    else if (pid > 0) {
        ret = do_kill_proc(cur->tid, pid, sig, true);
        send_to_self = ((IDTYPE) pid == cur->tgid);
    }

    /* If pid is less than -1, then sig is sent to every process in the
       process group whose id is -pid */
    else {
        ret = do_kill_pgroup(cur->tid, -pid, sig, true);
        send_to_self = ((IDTYPE) -pid == cur->pgid);
    }

    if (send_to_self) {
        /* finding no other target is fine when we signal ourselves */
        if (ret == -ESRCH)
            ret = 0;

        if (sig) {
            siginfo_t info;
            memset(&info, 0, sizeof(siginfo_t));
            info.si_signo = sig;
            info.si_pid = cur->tid;
            deliver_signal(&info, NULL);
        }
    }

    return ret < 0 ? ret : 0;
}
/*
 * Deliver 'sig' to the single thread 'tid'.  'tgid' restricts the target's
 * thread group (0 = no restriction).  A thread found locally is signaled
 * directly; one known to live elsewhere — or unknown, when use_ipc is set —
 * has the request forwarded over IPC.  Returns 0, -EINVAL, or -ESRCH
 * (or the IPC send's result).
 *
 * NOTE(review): if lookup_thread() returns a counted reference, it is never
 * released on any path here — check for a reference leak.
 */
int do_kill_thread (IDTYPE sender, IDTYPE tgid, IDTYPE tid, int sig,
                    bool use_ipc)
{
    if (sig < 0 || sig > NUM_SIGS)
        return -EINVAL;

    struct shim_thread * thread = lookup_thread(tid);
    int ret = 0;

    if (thread) {
        lock(&thread->lock);

        if (thread->in_vm) {
            if (!tgid || thread->tgid == tgid)
                __append_signal(thread, sig, sender);
            else
                ret = -ESRCH;
        } else {
            /* thread lives in another process: hand off over IPC */
            unlock(&thread->lock);
            return ipc_pid_kill_send(sender, tid, KILL_THREAD, sig);
        }

        unlock(&thread->lock);
        return ret;
    }

    if (!use_ipc)
        return -ESRCH;

    return ipc_pid_kill_send(sender, tid, KILL_THREAD, sig);
}
  474. int shim_do_tkill (pid_t tid, int sig)
  475. {
  476. INC_PROFILE_OCCURENCE(syscall_use_ipc);
  477. if (tid <= 0)
  478. return -EINVAL;
  479. struct shim_thread * cur = get_cur_thread();
  480. if ((IDTYPE) tid == cur->tid) {
  481. if (sig) {
  482. siginfo_t info;
  483. memset(&info, 0, sizeof(siginfo_t));
  484. info.si_signo = sig;
  485. info.si_pid = cur->tid;
  486. deliver_signal(&info, NULL);
  487. }
  488. return 0;
  489. }
  490. return do_kill_thread(cur->tgid, 0, tid, sig, true);
  491. }
  492. int shim_do_tgkill (pid_t tgid, pid_t tid, int sig)
  493. {
  494. INC_PROFILE_OCCURENCE(syscall_use_ipc);
  495. if (tgid < -1 || tgid == 0 || tid <= 0)
  496. return -EINVAL;
  497. if (tgid == -1)
  498. tgid = 0;
  499. struct shim_thread * cur = get_cur_thread();
  500. if ((IDTYPE) tid == cur->tid) {
  501. if (sig) {
  502. siginfo_t info;
  503. memset(&info, 0, sizeof(siginfo_t));
  504. info.si_signo = sig;
  505. info.si_pid = cur->tid;
  506. deliver_signal(&info, NULL);
  507. }
  508. return 0;
  509. }
  510. return do_kill_thread(cur->tgid, tgid, tid, sig, true);
  511. }