shim_sigaction.c 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625
  1. /* Copyright (C) 2014 Stony Brook University
  2. This file is part of Graphene Library OS.
  3. Graphene Library OS is free software: you can redistribute it and/or
  4. modify it under the terms of the GNU Lesser General Public License
  5. as published by the Free Software Foundation, either version 3 of the
  6. License, or (at your option) any later version.
  7. Graphene Library OS is distributed in the hope that it will be useful,
  8. but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. GNU Lesser General Public License for more details.
  11. You should have received a copy of the GNU Lesser General Public License
  12. along with this program. If not, see <http://www.gnu.org/licenses/>. */
  13. /*
  14. * shim_sigaction.c
  15. *
  16. * Implementation of system call "sigaction", "sigreturn", "sigprocmask",
  17. * "kill", "tkill" and "tgkill".
  18. */
  19. #include <errno.h>
  20. #include <stddef.h> // FIXME(mkow): Without this we get:
  21. // asm/signal.h:126:2: error: unknown type name ‘size_t’
  22. // It definitely shouldn't behave like this...
  23. #include <linux/signal.h>
  24. #include <pal.h>
  25. #include <pal_error.h>
  26. #include <shim_internal.h>
  27. #include <shim_ipc.h>
  28. #include <shim_profile.h>
  29. #include <shim_table.h>
  30. #include <shim_thread.h>
  31. #include <shim_utils.h>
  32. int shim_do_sigaction(int signum, const struct __kernel_sigaction* act,
  33. struct __kernel_sigaction* oldact, size_t sigsetsize) {
  34. /* SIGKILL and SIGSTOP cannot be caught or ignored */
  35. if (signum == SIGKILL || signum == SIGSTOP || signum <= 0 || signum > NUM_SIGS ||
  36. sigsetsize != sizeof(__sigset_t))
  37. return -EINVAL;
  38. if (act && test_user_memory((void*)act, sizeof(*act), false))
  39. return -EFAULT;
  40. if (oldact && test_user_memory(oldact, sizeof(*oldact), false))
  41. return -EFAULT;
  42. struct shim_thread* cur = get_cur_thread();
  43. int err = 0;
  44. assert(!act || (void*)act->k_sa_handler != (void*)0x11);
  45. struct shim_signal_handle* sighdl = &cur->signal_handles[signum - 1];
  46. lock(&cur->lock);
  47. if (oldact) {
  48. if (sighdl->action) {
  49. memcpy(oldact, sighdl->action, sizeof(struct __kernel_sigaction));
  50. } else {
  51. memset(oldact, 0, sizeof(struct __kernel_sigaction));
  52. oldact->k_sa_handler = SIG_DFL;
  53. }
  54. }
  55. if (act) {
  56. if (!(sighdl->action))
  57. sighdl->action = malloc(sizeof(struct __kernel_sigaction));
  58. if (!(sighdl->action)) {
  59. err = -ENOMEM;
  60. goto out;
  61. }
  62. memcpy(sighdl->action, act, sizeof(struct __kernel_sigaction));
  63. }
  64. err = 0;
  65. out:
  66. unlock(&cur->lock);
  67. return err;
  68. }
  69. int shim_do_sigreturn(int __unused) {
  70. __UNUSED(__unused);
  71. /* do nothing */
  72. return 0;
  73. }
  74. int shim_do_sigprocmask(int how, const __sigset_t* set, __sigset_t* oldset) {
  75. __sigset_t* old;
  76. __sigset_t tmp;
  77. __sigset_t set_tmp;
  78. if (how != SIG_BLOCK && how != SIG_UNBLOCK && how != SIG_SETMASK)
  79. return -EINVAL;
  80. if (set && test_user_memory((void*)set, sizeof(*set), false))
  81. return -EFAULT;
  82. if (oldset && test_user_memory(oldset, sizeof(*oldset), false))
  83. return -EFAULT;
  84. struct shim_thread* cur = get_cur_thread();
  85. int err = 0;
  86. lock(&cur->lock);
  87. old = get_sig_mask(cur);
  88. if (oldset) {
  89. memcpy(&tmp, old, sizeof(__sigset_t));
  90. old = &tmp;
  91. }
  92. /* if set is NULL, then the signal mask is unchanged, but the current
  93. value of the signal mask is nevertheless returned in oldset */
  94. if (!set)
  95. goto out;
  96. memcpy(&set_tmp, old, sizeof(__sigset_t));
  97. switch (how) {
  98. case SIG_BLOCK:
  99. __sigorset(&set_tmp, &set_tmp, set);
  100. break;
  101. case SIG_UNBLOCK:
  102. __signotset(&set_tmp, &set_tmp, set);
  103. break;
  104. case SIG_SETMASK:
  105. memcpy(&set_tmp, set, sizeof(__sigset_t));
  106. break;
  107. }
  108. set_sig_mask(cur, &set_tmp);
  109. out:
  110. unlock(&cur->lock);
  111. if (!err && oldset)
  112. memcpy(oldset, old, sizeof(__sigset_t));
  113. return err;
  114. }
  115. int shim_do_sigaltstack(const stack_t* ss, stack_t* oss) {
  116. if (ss && (ss->ss_flags & ~SS_DISABLE))
  117. return -EINVAL;
  118. struct shim_thread* cur = get_cur_thread();
  119. lock(&cur->lock);
  120. stack_t* cur_ss = &cur->signal_altstack;
  121. if (oss)
  122. *oss = *cur_ss;
  123. void* sp = (void*)shim_get_tcb()->context.regs->rsp;
  124. /* check if thread is currently executing on an active altstack */
  125. if (!(cur_ss->ss_flags & SS_DISABLE) && sp && cur_ss->ss_sp <= sp &&
  126. sp < cur_ss->ss_sp + cur_ss->ss_size) {
  127. if (oss)
  128. oss->ss_flags |= SS_ONSTACK;
  129. if (ss) {
  130. unlock(&cur->lock);
  131. return -EPERM;
  132. }
  133. }
  134. if (ss) {
  135. if (ss->ss_flags & SS_DISABLE) {
  136. memset(cur_ss, 0, sizeof(*cur_ss));
  137. cur_ss->ss_flags = SS_DISABLE;
  138. } else {
  139. if (ss->ss_size < MINSIGSTKSZ) {
  140. unlock(&cur->lock);
  141. return -ENOMEM;
  142. }
  143. *cur_ss = *ss;
  144. }
  145. }
  146. unlock(&cur->lock);
  147. return 0;
  148. }
/*
 * sigsuspend(2): temporarily install `mask` as the signal mask and suspend
 * until a signal wakes us, then restore the original mask.
 * Returns -EINTR on wakeup (per POSIX), or -EFAULT for a bad `mask` pointer.
 */
int shim_do_sigsuspend(const __sigset_t* mask) {
    if (!mask || test_user_memory((void*)mask, sizeof(*mask), false))
        return -EFAULT;

    __sigset_t* old;
    __sigset_t tmp;
    struct shim_thread* cur = get_cur_thread();

    lock(&cur->lock);

    /* return immediately on some pending unblocked signal */
    for (int sig = 1; sig <= NUM_SIGS; sig++) {
        /* head != tail in the per-signal ring buffer means it has entries. */
        if (atomic_read(&cur->signal_logs[sig - 1].head) !=
            atomic_read(&cur->signal_logs[sig - 1].tail)) {
            /* at least one signal of type sig... */
            if (!__sigismember(mask, sig)) {
                /* ...and this type is not blocked in supplied mask */
                unlock(&cur->lock);
                return -EINTR;
            }
        }
    }

    /* Copy the current mask into a stack-local buffer before replacing it:
     * get_sig_mask() returns a pointer into the thread, which the
     * set_sig_mask() below would overwrite. */
    old = get_sig_mask(cur);
    memcpy(&tmp, old, sizeof(__sigset_t));
    old = &tmp;

    set_sig_mask(cur, mask);
    cur->suspend_on_signal = true;

    unlock(&cur->lock);

    /* Block until a signal delivery wakes this thread. */
    thread_setwait(NULL, NULL);
    thread_sleep(NO_TIMEOUT);

    /* Woken up: restore the pre-suspend mask and report interruption. */
    lock(&cur->lock);
    set_sig_mask(cur, old);
    unlock(&cur->lock);

    return -EINTR;
}
  181. int shim_do_sigpending(__sigset_t* set, size_t sigsetsize) {
  182. if (sigsetsize != sizeof(*set))
  183. return -EINVAL;
  184. if (!set || test_user_memory(set, sigsetsize, false))
  185. return -EFAULT;
  186. struct shim_thread* cur = get_cur_thread();
  187. __sigemptyset(set);
  188. if (!cur->signal_logs)
  189. return 0;
  190. for (int sig = 1; sig <= NUM_SIGS; sig++) {
  191. if (atomic_read(&cur->signal_logs[sig - 1].head) !=
  192. atomic_read(&cur->signal_logs[sig - 1].tail))
  193. __sigaddset(set, sig);
  194. }
  195. return 0;
  196. }
/* Argument bundle threaded through the walk_thread_list() /
 * walk_simple_thread_list() callbacks used by the kill family below. */
struct walk_arg {
    struct shim_thread* current; /* calling thread; walkers skip it (it signals itself) */
    IDTYPE sender;               /* id reported as the signal sender (si_pid) */
    IDTYPE id;                   /* target tgid or pgid, depending on the walker */
    int sig;                     /* signal to deliver; sig == 0 only probes existence */
    bool use_ipc;                /* also reach threads outside this VM over IPC */
};
  204. // Need to hold thread->lock
  205. static inline void __append_signal(struct shim_thread* thread, int sig, IDTYPE sender) {
  206. debug("Thread %d killed by signal %d\n", thread->tid, sig);
  207. siginfo_t info;
  208. memset(&info, 0, sizeof(siginfo_t));
  209. info.si_signo = sig;
  210. info.si_pid = sender;
  211. append_signal(thread, sig, &info, true);
  212. }
/* walk_thread_list() callback for do_kill_proc(): deliver warg->sig to the
 * process whose tgid matches warg->id.
 * Returns 1 once a matching, signalable thread is found, 0 otherwise.
 * Runs with thread_list_lock held; before forwarding over IPC it drops that
 * lock and reports this via *unlocked. */
static int __kill_proc(struct shim_thread* thread, void* arg, bool* unlocked) {
    struct walk_arg* warg = (struct walk_arg*)arg;
    int srched = 0;

    /* Without IPC we can only reach threads inside this VM. */
    if (!warg->use_ipc && !thread->in_vm)
        return 0;

    if (thread->tgid != warg->id)
        return 0;

    /* The caller counts as found, but delivers to itself elsewhere. */
    if (warg->current == thread)
        return 1;

    /* DEP: Let's do a racy read of is_alive and in_vm.
     * If either of these are zero it is a stable condition,
     * and we can elide the lock acquire (which helps perf).
     */
    if (!thread->is_alive)
        goto out;

    if (!thread->in_vm) {
        /* Target lives in another process: forward the signal over IPC. */
        unlock(&thread_list_lock);
        *unlocked = true;
        return (!ipc_pid_kill_send(warg->sender, warg->id, KILL_PROCESS, warg->sig)) ? 1 : 0;
    } else {
        lock(&thread->lock);

        /* Re-check under the lock; the racy read above may be stale. */
        if (!thread->is_alive)
            goto out_locked;

        if (thread->in_vm) {
            /* sig == 0 only probes for existence (kill(pid, 0) semantics). */
            if (warg->sig > 0)
                __append_signal(thread, warg->sig, warg->sender);

            srched = 1;
        } else {
            /* This double-check case is probably unnecessary, but keep it for now */
            unlock(&thread->lock);
            unlock(&thread_list_lock);
            *unlocked = true;
            return (!ipc_pid_kill_send(warg->sender, warg->id, KILL_PROCESS, warg->sig)) ? 1 : 0;
        }
    }

out_locked:
    unlock(&thread->lock);
out:
    return srched;
}
/* walk_simple_thread_list() callback for do_kill_proc(): simple threads are
 * only reachable over IPC, so a live tgid match is forwarded there.
 * Drops thread_list_lock (reporting it via *unlocked) before sending. */
static int __kill_proc_simple(struct shim_simple_thread* sthread, void* arg, bool* unlocked) {
    struct walk_arg* warg = (struct walk_arg*)arg;
    int srched = 0;

    if (sthread->tgid != warg->id)
        return 0;

    lock(&sthread->lock);

    if (sthread->is_alive) {
        unlock(&sthread->lock);
        unlock(&thread_list_lock);
        *unlocked = true;
        return (!ipc_pid_kill_send(warg->sender, warg->id, KILL_PROCESS, warg->sig)) ? 1 : 0;
    }

    unlock(&sthread->lock);
    return srched;
}
  268. int do_kill_proc(IDTYPE sender, IDTYPE tgid, int sig, bool use_ipc) {
  269. struct shim_thread* cur = get_cur_thread();
  270. if (!tgid) {
  271. /* DEP: cur->tgid never changes. No lock needed */
  272. tgid = cur->tgid;
  273. }
  274. struct walk_arg arg;
  275. arg.current = cur;
  276. arg.sender = sender;
  277. arg.id = tgid;
  278. arg.sig = sig;
  279. arg.use_ipc = use_ipc;
  280. bool srched = false;
  281. if (!walk_thread_list(__kill_proc, &arg))
  282. srched = true;
  283. if (!use_ipc || srched)
  284. goto out;
  285. if (!walk_simple_thread_list(__kill_proc_simple, &arg))
  286. srched = true;
  287. if (!srched && !ipc_pid_kill_send(sender, tgid, KILL_PROCESS, sig))
  288. srched = true;
  289. out:
  290. return srched ? 0 : -ESRCH;
  291. }
/* walk_thread_list() callback for do_kill_pgroup(): deliver warg->sig to
 * threads whose pgid matches warg->id. Returns 1 when a target was found.
 * Runs with thread_list_lock held; drops it (reporting via *unlocked)
 * before forwarding over IPC. */
static int __kill_pgroup(struct shim_thread* thread, void* arg, bool* unlocked) {
    struct walk_arg* warg = (struct walk_arg*)arg;
    int srched = 0;

    /* Without IPC we can only reach threads inside this VM. */
    if (!warg->use_ipc && !thread->in_vm)
        return 0;

    if (thread->pgid != warg->id)
        return 0;

    /* The caller counts as found, but delivers to itself elsewhere. */
    if (warg->current == thread)
        return 1;

    lock(&thread->lock);

    if (!thread->is_alive)
        goto out;

    if (thread->in_vm) {
        /* sig == 0 only probes for existence. */
        if (warg->sig > 0)
            __append_signal(thread, warg->sig, warg->sender);

        srched = 1;
    } else {
        /* Target lives in another process: forward the signal over IPC. */
        unlock(&thread->lock);
        unlock(&thread_list_lock);
        *unlocked = true;
        return (!ipc_pid_kill_send(warg->sender, warg->id, KILL_PGROUP, warg->sig)) ? 1 : 0;
    }

out:
    unlock(&thread->lock);
    return srched;
}
/* walk_simple_thread_list() callback for do_kill_pgroup(): simple threads
 * are only reachable over IPC, so a live pgid match is forwarded there.
 * Drops thread_list_lock (reporting it via *unlocked) before sending. */
static int __kill_pgroup_simple(struct shim_simple_thread* sthread, void* arg, bool* unlocked) {
    struct walk_arg* warg = (struct walk_arg*)arg;
    int srched = 0;

    if (sthread->pgid != warg->id)
        return 0;

    lock(&sthread->lock);

    if (sthread->is_alive) {
        unlock(&sthread->lock);
        unlock(&thread_list_lock);
        *unlocked = true;
        return (!ipc_pid_kill_send(warg->sender, warg->id, KILL_PGROUP, warg->sig)) ? 1 : 0;
    }

    unlock(&sthread->lock);
    return srched;
}
  333. int do_kill_pgroup(IDTYPE sender, IDTYPE pgid, int sig, bool use_ipc) {
  334. struct shim_thread* cur = get_cur_thread();
  335. if (!pgid) {
  336. pgid = cur->pgid;
  337. }
  338. struct walk_arg arg;
  339. arg.current = cur;
  340. arg.sender = sender;
  341. arg.id = pgid;
  342. arg.sig = sig;
  343. arg.use_ipc = use_ipc;
  344. bool srched = false;
  345. if (!walk_thread_list(__kill_pgroup, &arg))
  346. srched = true;
  347. if (!use_ipc || srched)
  348. goto out;
  349. if (!walk_simple_thread_list(__kill_pgroup_simple, &arg))
  350. srched = true;
  351. if (!srched && !ipc_pid_kill_send(sender, pgid, KILL_PGROUP, sig))
  352. srched = true;
  353. out:
  354. return srched ? 0 : -ESRCH;
  355. }
  356. static int __kill_all_threads(struct shim_thread* thread, void* arg, bool* unlocked) {
  357. __UNUSED(unlocked); // Retained for API compatibility
  358. int srched = 0;
  359. struct walk_arg* warg = (struct walk_arg*)arg;
  360. if (thread->tgid != thread->tid)
  361. return 0;
  362. if (warg->current == thread)
  363. return 1;
  364. lock(&thread->lock);
  365. if (thread->in_vm) {
  366. __append_signal(thread, warg->sig, warg->sender);
  367. srched = 1;
  368. }
  369. unlock(&thread->lock);
  370. return srched;
  371. }
  372. int kill_all_threads(struct shim_thread* cur, IDTYPE sender, int sig) {
  373. struct walk_arg arg;
  374. arg.current = cur;
  375. arg.sender = sender;
  376. arg.id = 0;
  377. arg.sig = sig;
  378. arg.use_ipc = false;
  379. walk_thread_list(__kill_all_threads, &arg);
  380. return 0;
  381. }
/*
 * kill(2): send `sig` to a process, a process group, or every reachable
 * process, depending on the sign of `pid`.
 * Returns 0 on success or a negative errno.
 * NOTE(review): the pid == 0 branch passes cur->tgid as the sender while the
 * other branches pass cur->tid — confirm this asymmetry is intentional.
 */
int shim_do_kill(pid_t pid, int sig) {
    INC_PROFILE_OCCURENCE(syscall_use_ipc);

    if (sig < 0 || sig > NUM_SIGS)
        return -EINVAL;

    struct shim_thread* cur = get_cur_thread();
    int ret = 0;
    bool send_to_self = false; /* also deliver to the calling process? */

    /* If pid equals 0, then sig is sent to every process in the process group
       of the calling process. */
    if (pid == 0) {
        ret = do_kill_pgroup(cur->tgid, 0, sig, true);
        send_to_self = true;
    }

    /* If pid equals -1, then sig is sent to every process for which the
       calling process has permission to send */
    else if (pid == -1) {
        ipc_pid_kill_send(cur->tid, /*target=*/0, KILL_ALL, sig);
        kill_all_threads(cur, cur->tid, sig);
        send_to_self = true;
    }

    /* If pid is positive, then signal sig is sent to the process with the ID
       specified by pid. */
    else if (pid > 0) {
        ret = do_kill_proc(cur->tid, pid, sig, true);
        send_to_self = (IDTYPE)pid == cur->tgid;
    }

    /* If pid is less than -1, then sig is sent to every process in the
       process group whose id is -pid */
    else {
        ret = do_kill_pgroup(cur->tid, -pid, sig, true);
        send_to_self = (IDTYPE)-pid == cur->pgid;
    }

    if (send_to_self) {
        /* Finding no *other* member is not an error when we target ourselves. */
        if (ret == -ESRCH)
            ret = 0;

        /* sig == 0 only probes for existence; nothing to deliver. */
        if (sig) {
            siginfo_t info;
            memset(&info, 0, sizeof(siginfo_t));
            info.si_signo = sig;
            info.si_pid = cur->tid;
            deliver_signal(&info, NULL);
        }
    }

    return ret < 0 ? ret : 0;
}
/* Deliver `sig` to thread `tid`, optionally constrained to thread group
 * `tgid` (tgid == 0 matches any group), on behalf of `sender`.
 * Falls back to IPC when the thread is not in this VM and `use_ipc` allows.
 * Returns 0 on success, -EINVAL/-ESRCH on error, or the IPC send result. */
int do_kill_thread(IDTYPE sender, IDTYPE tgid, IDTYPE tid, int sig, bool use_ipc) {
    if (sig < 0 || sig > NUM_SIGS)
        return -EINVAL;

    struct shim_thread* thread = lookup_thread(tid);
    int ret = -ESRCH;

    if (thread) {
        lock(&thread->lock);

        if (thread->in_vm) {
            if (!tgid || thread->tgid == tgid) {
                __append_signal(thread, sig, sender);
                ret = 0;
            }
            /* Thread is local; never forward to IPC (even on a tgid
             * mismatch, which stays -ESRCH). */
            use_ipc = false;
        } else {
            /* Known but lives elsewhere: must go through IPC. */
            use_ipc = true;
        }

        unlock(&thread->lock);
        put_thread(thread);
    }

    if (!use_ipc) {
        return ret;
    }

    return ipc_pid_kill_send(sender, tid, KILL_THREAD, sig);
}
  451. int shim_do_tkill(pid_t tid, int sig) {
  452. INC_PROFILE_OCCURENCE(syscall_use_ipc);
  453. if (tid <= 0)
  454. return -EINVAL;
  455. struct shim_thread* cur = get_cur_thread();
  456. if ((IDTYPE)tid == cur->tid) {
  457. if (sig) {
  458. siginfo_t info;
  459. memset(&info, 0, sizeof(siginfo_t));
  460. info.si_signo = sig;
  461. info.si_pid = cur->tid;
  462. deliver_signal(&info, NULL);
  463. }
  464. return 0;
  465. }
  466. return do_kill_thread(cur->tgid, 0, tid, sig, true);
  467. }
  468. int shim_do_tgkill(pid_t tgid, pid_t tid, int sig) {
  469. INC_PROFILE_OCCURENCE(syscall_use_ipc);
  470. if (tgid < -1 || tgid == 0 || tid <= 0)
  471. return -EINVAL;
  472. if (tgid == -1)
  473. tgid = 0;
  474. struct shim_thread* cur = get_cur_thread();
  475. if ((IDTYPE)tid == cur->tid) {
  476. if (sig) {
  477. siginfo_t info;
  478. memset(&info, 0, sizeof(siginfo_t));
  479. info.si_signo = sig;
  480. info.si_pid = cur->tid;
  481. deliver_signal(&info, NULL);
  482. }
  483. return 0;
  484. }
  485. return do_kill_thread(cur->tgid, tgid, tid, sig, true);
  486. }