12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799 |
- /* Copyright (C) 2014 Stony Brook University
- This file is part of Graphene Library OS.
- Graphene Library OS is free software: you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public License
- as published by the Free Software Foundation, either version 3 of the
- License, or (at your option) any later version.
- Graphene Library OS is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU Lesser General Public License for more details.
- You should have received a copy of the GNU Lesser General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>. */
- /*
- * shim_signal.c
- *
- * This file contains codes to handle signals and exceptions passed from PAL.
- */
- #include <shim_internal.h>
- #include <shim_utils.h>
- #include <shim_table.h>
- #include <shim_thread.h>
- #include <shim_handle.h>
- #include <shim_vma.h>
- #include <shim_checkpoint.h>
- #include <shim_signal.h>
- #include <shim_unistd.h>
- #include <pal.h>
- static struct shim_signal **
- allocate_signal_log (struct shim_thread * thread, int sig)
- {
- if (!thread->signal_logs)
- return NULL;
- struct shim_signal_log * log = &thread->signal_logs[sig - 1];
- int head, tail, old_tail;
- do {
- head = atomic_read(&log->head);
- old_tail = tail = atomic_read(&log->tail);
- if (head == tail + 1 || (!head && tail == (MAX_SIGNAL_LOG - 1)))
- return NULL;
- tail = (tail == MAX_SIGNAL_LOG - 1) ? 0 : tail + 1;
- } while (atomic_cmpxchg(&log->tail, old_tail, tail) == tail);
- debug("signal_logs[%d]: head=%d, tail=%d (counter = %ld)\n", sig - 1,
- head, tail, thread->has_signal.counter + 1);
- atomic_inc(&thread->has_signal);
- return &log->logs[old_tail];
- }
/*
 * Dequeue one pending signal of number SIG from THREAD's ring buffer
 * (lock-free consumer side, paired with allocate_signal_log()).
 *
 * Returns the dequeued signal — ownership passes to the caller, who must
 * free it — or NULL when the ring for SIG is empty.
 * NOTE(review): 'tcb' is unused in this function.
 */
static struct shim_signal *
fetch_signal_log (shim_tcb_t * tcb, struct shim_thread * thread, int sig)
{
    struct shim_signal_log * log = &thread->signal_logs[sig - 1];
    struct shim_signal * signal = NULL;
    int head, tail, old_head;

    while (1) {
        old_head = head = atomic_read(&log->head);
        tail = atomic_read(&log->tail);

        /* head == tail: ring is empty. */
        if (head == tail)
            return NULL;

        /* Slot may already have been claimed (and cleared) by a racer. */
        if (!(signal = log->logs[head]))
            return NULL;

        /* Claim the slot, then try to publish the advanced head. */
        log->logs[head] = NULL;
        head = (head == MAX_SIGNAL_LOG - 1) ? 0 : head + 1;

        if (atomic_cmpxchg(&log->head, old_head, head) == old_head)
            break;

        /* Lost the race for the head: put the signal back and retry. */
        log->logs[old_head] = signal;
    }

    debug("signal_logs[%d]: head=%d, tail=%d\n", sig -1, head, tail);

    atomic_dec(&thread->has_signal);

    return signal;
}
- static void
- __handle_one_signal (shim_tcb_t * tcb, int sig, struct shim_signal * signal);
- static void __store_info (siginfo_t * info, struct shim_signal * signal)
- {
- if (info)
- memcpy(&signal->info, info, sizeof(siginfo_t));
- }
/*
 * Capture the interrupted user-visible context into SIGNAL->context so a
 * user signal handler can observe it.
 *
 * If the thread is inside a system call (tcb->context.syscall_nr set),
 * the user registers are reconstructed from the shim's saved context
 * rather than from PAL_CONTEXT, since the latter reflects LibOS-internal
 * state at that point.  Otherwise the PAL context is copied wholesale.
 */
void __store_context (shim_tcb_t * tcb, PAL_CONTEXT * pal_context,
                      struct shim_signal * signal)
{
    ucontext_t * context = &signal->context;

    if (tcb && tcb->context.syscall_nr) {
        struct shim_context * ct = &tcb->context;

        /* Stack pointer and return address of the interrupted syscall. */
        context->uc_mcontext.gregs[REG_RSP] = (unsigned long) ct->sp;
        context->uc_mcontext.gregs[REG_RIP] = (unsigned long) ct->ret_ip;

        if (ct->regs) {
            struct shim_regs * regs = ct->regs;
            context->uc_mcontext.gregs[REG_EFL] = regs->rflags;
            context->uc_mcontext.gregs[REG_R15] = regs->r15;
            context->uc_mcontext.gregs[REG_R14] = regs->r14;
            context->uc_mcontext.gregs[REG_R13] = regs->r13;
            context->uc_mcontext.gregs[REG_R12] = regs->r12;
            context->uc_mcontext.gregs[REG_R11] = regs->r11;
            context->uc_mcontext.gregs[REG_R10] = regs->r10;
            context->uc_mcontext.gregs[REG_R9]  = regs->r9;
            context->uc_mcontext.gregs[REG_R8]  = regs->r8;
            context->uc_mcontext.gregs[REG_RCX] = regs->rcx;
            context->uc_mcontext.gregs[REG_RDX] = regs->rdx;
            context->uc_mcontext.gregs[REG_RSI] = regs->rsi;
            context->uc_mcontext.gregs[REG_RDI] = regs->rdi;
            context->uc_mcontext.gregs[REG_RBX] = regs->rbx;
            context->uc_mcontext.gregs[REG_RBP] = regs->rbp;
        }

        signal->context_stored = true;
        return;
    }

    if (pal_context) {
        /* NOTE(review): this memcpy assumes PAL_CONTEXT's leading members
         * line up with the mcontext gregs[] layout — confirm against pal.h. */
        memcpy(context->uc_mcontext.gregs, pal_context, sizeof(PAL_CONTEXT));
        signal->context_stored = true;
    }
}
/*
 * Deliver a signal described by INFO to the current thread: handle it
 * immediately when possible, otherwise queue it on the thread's
 * pending-signal log.
 */
void deliver_signal (siginfo_t * info, PAL_CONTEXT * context)
{
    shim_tcb_t * tcb = shim_get_tls();
    assert(tcb);

    // Signals should not be delivered before the user process starts
    // or after the user process dies.
    if (!tcb->tp || !cur_thread_is_alive())
        return;

    struct shim_thread * cur_thread = (struct shim_thread *) tcb->tp;
    int sig = info->si_signo;

    __disable_preempt(tcb);

    struct shim_signal * signal = __alloca(sizeof(struct shim_signal));
    /* save in signal */
    memset(signal, 0, sizeof(struct shim_signal));
    __store_info(info, signal);
    __store_context(tcb, context, signal);
    signal->pal_context = context;

    /* Queue when preemption was already disabled before this delivery
     * (count > 1) or when the signal is blocked by the thread's mask;
     * otherwise handle it right away. */
    if ((tcb->context.preempt & ~SIGNAL_DELAYED) > 1 ||
        __sigismember(&cur_thread->signal_mask, sig)) {
        struct shim_signal ** signal_log = NULL;
        /* The stack copy must be promoted to heap storage before queueing. */
        if ((signal = malloc_copy(signal,sizeof(struct shim_signal))) &&
            (signal_log = allocate_signal_log(cur_thread, sig))) {
            *signal_log = signal;
        }
        if (signal && !signal_log) {
            SYS_PRINTF("signal queue is full (TID = %u, SIG = %d)\n",
                       tcb->tid, sig);
            free(signal);
        }
    } else {
        /* Drain any already-pending instances of SIG first, then this one. */
        __handle_signal(tcb, sig, &signal->context);
        __handle_one_signal(tcb, sig, signal);
    }

    __enable_preempt(tcb);
}
/*
 * Build a zero-filled siginfo_t on the caller's stack with the given
 * signal number, code, and one extra member (e.g. si_addr or si_pid).
 * Evaluates to a pointer that is valid only within the enclosing frame.
 */
#define ALLOC_SIGINFO(signo, code, member, value)           \
    ({                                                      \
        siginfo_t * _info = __alloca(sizeof(siginfo_t));    \
        memset(_info, 0, sizeof(siginfo_t));                \
        _info->si_signo = (signo);                          \
        _info->si_code = (code);                            \
        _info->member = (value);                            \
        _info;                                              \
    })
- #ifdef __x86_64__
- #define IP rip
- #else
- #define IP eip
- #endif
- static inline bool context_is_internal(PAL_CONTEXT * context)
- {
- return context &&
- (void *) context->IP >= (void *) &__code_address &&
- (void *) context->IP < (void *) &__code_address_end;
- }
/*
 * Report a fault inside LibOS code and stop the thread (PAUSE).  For
 * internal code the IP is printed relative to __load_address so it can
 * be matched against the binary's symbol addresses.
 */
static inline void internal_fault(const char* errstr,
                                  PAL_NUM addr, PAL_CONTEXT * context)
{
    IDTYPE tid = get_cur_tid();
    if (context_is_internal(context))
        SYS_PRINTF("%s at 0x%08lx (IP = +0x%lx, VMID = %u, TID = %u)\n", errstr,
                   addr, (void *) context->IP - (void *) &__load_address,
                   cur_process.vmid, is_internal_tid(tid) ? 0 : tid);
    else
        SYS_PRINTF("%s at 0x%08lx (IP = 0x%08lx, VMID = %u, TID = %u)\n", errstr,
                   addr, context ? context->IP : 0,
                   cur_process.vmid, is_internal_tid(tid) ? 0 : tid);

    PAUSE();
}
- static void arithmetic_error_upcall (PAL_PTR event, PAL_NUM arg, PAL_CONTEXT * context)
- {
- if (is_internal_tid(get_cur_tid()) || context_is_internal(context)) {
- internal_fault("Internal arithmetic fault", arg, context);
- } else {
- if (context)
- debug("arithmetic fault at 0x%08lx\n", context->IP);
- deliver_signal(ALLOC_SIGINFO(SIGFPE, FPE_INTDIV,
- si_addr, (void *) arg), context);
- }
- DkExceptionReturn(event);
- }
- static void memfault_upcall (PAL_PTR event, PAL_NUM arg, PAL_CONTEXT * context)
- {
- shim_tcb_t * tcb = shim_get_tls();
- assert(tcb);
- if (tcb->test_range.cont_addr && arg
- && (void *) arg >= tcb->test_range.start
- && (void *) arg <= tcb->test_range.end) {
- assert(context);
- context->rip = (PAL_NUM) tcb->test_range.cont_addr;
- goto ret_exception;
- }
- if (is_internal_tid(get_cur_tid()) || context_is_internal(context)) {
- internal_fault("Internal memory fault", arg, context);
- goto ret_exception;
- }
- if (context)
- debug("memory fault at 0x%08lx (IP = 0x%08lx)\n", arg, context->IP);
- struct shim_vma_val vma;
- int signo = SIGSEGV;
- int code;
- if (!arg) {
- code = SEGV_MAPERR;
- } else if (!lookup_vma((void *) arg, &vma)) {
- if (vma.flags & VMA_INTERNAL) {
- internal_fault("Internal memory fault with VMA", arg, context);
- goto ret_exception;
- }
- if (vma.file && vma.file->type == TYPE_FILE) {
- /* DEP 3/3/17: If the mapping exceeds end of a file (but is in the VMA)
- * then return a SIGBUS. */
- uint64_t eof_in_vma = (uint64_t) vma.addr + vma.offset + vma.file->info.file.size;
- if (arg > eof_in_vma) {
- signo = SIGBUS;
- code = BUS_ADRERR;
- } else if ((context->err & 4) && !(vma.flags & PROT_WRITE)) {
- /* DEP 3/3/17: If the page fault gives a write error, and
- * the VMA is read-only, return SIGSEGV+SEGV_ACCERR */
- signo = SIGSEGV;
- code = SEGV_ACCERR;
- } else {
- /* XXX: need more sophisticated judgement */
- signo = SIGBUS;
- code = BUS_ADRERR;
- }
- } else {
- code = SEGV_ACCERR;
- }
- } else {
- code = SEGV_MAPERR;
- }
- deliver_signal(ALLOC_SIGINFO(signo, code, si_addr, (void *) arg), context);
- ret_exception:
- DkExceptionReturn(event);
- }
- /*
- * Helper function for test_user_memory / test_user_string; they behave
- * differently for different PALs:
- *
- * - For Linux-SGX, the faulting address is not propagated in memfault
- * exception (SGX v1 does not write address in SSA frame, SGX v2 writes
- * it only at a granularity of 4K pages). Thus, we cannot rely on
- * exception handling to compare against tcb.test_range.start/end.
- * Instead, traverse VMAs to see if [addr, addr+size) is addressable;
- * before traversing VMAs, grab a VMA lock.
- *
- * - For other PALs, we touch one byte of each page in [addr, addr+size).
- * If some byte is not addressable, exception is raised. memfault_upcall
- * handles this exception and resumes execution from ret_fault.
- *
- * The second option is faster in fault-free case but cannot be used under
- * SGX PAL. We use the best option for each PAL for now. */
/*
 * Lazily detect (once) whether we run on the Linux-SGX PAL.  The result
 * is cached in 'sgx_pal'; 'inited' plus the memory barriers make the
 * one-time initialization safe against concurrent callers.
 */
static bool is_sgx_pal(void) {
    static struct atomic_int sgx_pal = { .counter = 0 };
    static struct atomic_int inited = { .counter = 0 };

    if (!atomic_read(&inited)) {
        /* Ensure that is_sgx_pal is updated before initialized */
        atomic_set(&sgx_pal, strcmp_static(PAL_CB(host_type), "Linux-SGX"));
        MB();
        atomic_set(&inited, 1);
    }
    MB();

    return atomic_read(&sgx_pal) != 0;
}
- /*
- * 'test_user_memory' and 'test_user_string' are helper functions for testing
- * if a user-given buffer or data structure is readable / writable (according
- * to the system call semantics). If the memory test fails, the system call
- * should return -EFAULT or -EINVAL accordingly. These helper functions cannot
- * guarantee further corruption of the buffer, or if the buffer is unmapped
- * with a concurrent system call. The purpose of these functions is simply for
- * the compatibility with programs that rely on the error numbers, such as the
- * LTP test suite. */
/*
 * Test whether [addr, addr+size) is readable (or writable, when WRITE is
 * set).  Returns true when a fault or invalid access is detected, false
 * when the whole range is accessible.
 */
bool test_user_memory (void * addr, size_t size, bool write)
{
    if (!size)
        return false;

    if (!access_ok(addr, size))
        return true;

    /* SGX path: check if [addr, addr+size) is addressable (in some VMA) */
    if (is_sgx_pal())
        return !is_in_one_vma(addr, size);

    /* Non-SGX path: check if [addr, addr+size) is addressable by touching
     * a byte of each page; invalid access will be caught in memfault_upcall */
    shim_tcb_t * tcb = shim_get_tls();
    assert(tcb && tcb->tp);
    __disable_preempt(tcb);

    /* Pessimistically assume a fault; cleared only if every probe passes. */
    bool has_fault = true;

    /* Add the memory region to the watch list. This is not racy because
     * each thread has its own record. */
    assert(!tcb->test_range.cont_addr);
    tcb->test_range.cont_addr = &&ret_fault; /* GCC labels-as-values */
    tcb->test_range.start = addr;
    tcb->test_range.end = addr + size - 1;

    /* Try to read or write into one byte inside each page */
    void * tmp = addr;
    while (tmp <= addr + size - 1) {
        if (write) {
            *(volatile char *) tmp = *(volatile char *) tmp;
        } else {
            *(volatile char *) tmp;
        }
        tmp = ALIGN_UP(tmp + 1);
    }

    has_fault = false; /* All accesses have passed. Nothing wrong. */

ret_fault:
    /* If any read or write into the target region causes an exception,
     * the control flow will immediately jump to here (memfault_upcall
     * rewrites the faulting RIP to cont_addr). */
    tcb->test_range.cont_addr = NULL;
    tcb->test_range.start = tcb->test_range.end = NULL;
    __enable_preempt(tcb);
    return has_fault;
}
- /*
- * This function tests a user string with unknown length. It only tests
- * whether the memory is readable.
- */
- bool test_user_string (const char * addr)
- {
- if (!access_ok(addr, 1))
- return true;
- size_t size, maxlen;
- const char * next = ALIGN_UP(addr + 1);
- /* SGX path: check if [addr, addr+size) is addressable (in some VMA). */
- if (is_sgx_pal()) {
- /* We don't know length but using unprotected strlen() is dangerous
- * so we check string in chunks of 4K pages. */
- do {
- maxlen = next - addr;
- if (!access_ok(addr, maxlen) || !is_in_one_vma((void*) addr, maxlen))
- return true;
- size = strnlen(addr, maxlen);
- addr = next;
- next = ALIGN_UP(addr + 1);
- } while (size == maxlen);
- return false;
- }
- /* Non-SGX path: check if [addr, addr+size) is addressable by touching
- * a byte of each page; invalid access will be caught in memfault_upcall. */
- shim_tcb_t * tcb = shim_get_tls();
- assert(tcb && tcb->tp);
- __disable_preempt(tcb);
- bool has_fault = true;
- assert(!tcb->test_range.cont_addr);
- tcb->test_range.cont_addr = &&ret_fault;
- do {
- /* Add the memory region to the watch list. This is not racy because
- * each thread has its own record. */
- tcb->test_range.start = (void *) addr;
- tcb->test_range.end = (void *) (next - 1);
- maxlen = next - addr;
- if (!access_ok(addr, maxlen))
- return true;
- *(volatile char *) addr; /* try to read one byte from the page */
- size = strnlen(addr, maxlen);
- addr = next;
- next = ALIGN_UP(addr + 1);
- } while (size == maxlen);
- has_fault = false; /* All accesses have passed. Nothing wrong. */
- ret_fault:
- /* If any read or write into the target region causes an exception,
- * the control flow will immediately jump to here. */
- tcb->test_range.cont_addr = NULL;
- tcb->test_range.start = tcb->test_range.end = NULL;
- __enable_preempt(tcb);
- return has_fault;
- }
/*
 * Weak no-op definition of the syscall trampoline.
 *
 * work around for link.
 * syscalldb.S is excluded for libsysdb_debug.so so it fails to link
 * due to missing syscall_wrapper.
 */
void __attribute__((weak)) syscall_wrapper(void)
{
    /* intentionally empty: the real implementation lives in syscalldb.S */
}
- static void illegal_upcall (PAL_PTR event, PAL_NUM arg, PAL_CONTEXT * context)
- {
- struct shim_vma_val vma;
- if (!is_internal_tid(get_cur_tid()) &&
- !context_is_internal(context) &&
- !(lookup_vma((void *) arg, &vma)) &&
- !(vma.flags & VMA_INTERNAL)) {
- if (context)
- debug("illegal instruction at 0x%08lx\n", context->IP);
- uint8_t * rip = (uint8_t*)context->IP;
- /*
- * Emulate syscall instruction (opcode 0x0f 0x05);
- * syscall instruction is prohibited in
- * Linux-SGX PAL and raises a SIGILL exception and
- * Linux PAL with seccomp and raise SIGSYS exception.
- */
- #if 0
- if (rip[-2] == 0x0f && rip[-1] == 0x05) {
- /* TODO: once finished, remove "#if 0" above. */
- /*
- * SIGSYS case (can happen with Linux PAL with seccomp)
- * rip points to the address after syscall instruction
- * %rcx: syscall instruction must put an
- * instruction-after-syscall in rcx
- */
- context->rax = siginfo->si_syscall; /* PAL_CONTEXT doesn't
- * include a member
- * corresponding to
- * siginfo_t::si_syscall yet.
- */
- context->rcx = (long)rip;
- context->r11 = context->efl;
- context->rip = (long)&syscall_wrapper;
- } else
- #endif
- if (rip[0] == 0x0f && rip[1] == 0x05) {
- /*
- * SIGILL case (can happen in Linux-SGX PAL)
- * %rcx: syscall instruction must put an instruction-after-syscall
- * in rcx. See the syscall_wrapper in syscallas.S
- * TODO: check SIGILL and ILL_ILLOPN
- */
- context->rcx = (long)rip + 2;
- context->r11 = context->efl;
- context->rip = (long)&syscall_wrapper;
- } else {
- deliver_signal(ALLOC_SIGINFO(SIGILL, ILL_ILLOPC,
- si_addr, (void *) arg), context);
- }
- } else {
- internal_fault("Internal illegal fault", arg, context);
- }
- DkExceptionReturn(event);
- }
- static void quit_upcall (PAL_PTR event, PAL_NUM arg, PAL_CONTEXT * context)
- {
- if (!is_internal_tid(get_cur_tid())) {
- deliver_signal(ALLOC_SIGINFO(SIGTERM, SI_USER, si_pid, 0), NULL);
- }
- DkExceptionReturn(event);
- }
- static void suspend_upcall (PAL_PTR event, PAL_NUM arg, PAL_CONTEXT * context)
- {
- if (!is_internal_tid(get_cur_tid())) {
- deliver_signal(ALLOC_SIGINFO(SIGINT, SI_USER, si_pid, 0), NULL);
- }
- DkExceptionReturn(event);
- }
/*
 * PAL upcall for the "resume" event: if the thread is in a
 * preemption-disabled region, mark pending signals as delayed (they are
 * handled when preemption is re-enabled); otherwise drain them now.
 */
static void resume_upcall (PAL_PTR event, PAL_NUM arg, PAL_CONTEXT * context)
{
    shim_tcb_t * tcb = shim_get_tls();
    if (!tcb || !tcb->tp)
        return;

    if (!is_internal_tid(get_cur_tid())) {
        __disable_preempt(tcb);
        if ((tcb->context.preempt & ~SIGNAL_DELAYED) > 1) {
            /* Nested inside another disabled region: defer. */
            tcb->context.preempt |= SIGNAL_DELAYED;
        } else {
            __handle_signal(tcb, 0, NULL);
        }
        __enable_preempt(tcb);
    }
    DkExceptionReturn(event);
}
/*
 * Register the LibOS upcalls for every PAL exception event.
 * Always returns 0.
 */
int init_signal (void)
{
    DkSetExceptionHandler(&arithmetic_error_upcall, PAL_EVENT_ARITHMETIC_ERROR);
    DkSetExceptionHandler(&memfault_upcall,         PAL_EVENT_MEMFAULT);
    DkSetExceptionHandler(&illegal_upcall,          PAL_EVENT_ILLEGAL);
    DkSetExceptionHandler(&quit_upcall,             PAL_EVENT_QUIT);
    DkSetExceptionHandler(&suspend_upcall,          PAL_EVENT_SUSPEND);
    DkSetExceptionHandler(&resume_upcall,           PAL_EVENT_RESUME);
    return 0;
}
- __sigset_t * get_sig_mask (struct shim_thread * thread)
- {
- if (!thread)
- thread = get_cur_thread();
- assert(thread);
- return &(thread->signal_mask);
- }
- __sigset_t * set_sig_mask (struct shim_thread * thread,
- const __sigset_t * set)
- {
- if (!thread)
- thread = get_cur_thread();
- assert(thread);
- if (set)
- memcpy(&thread->signal_mask, set, sizeof(__sigset_t));
- return &thread->signal_mask;
- }
- static void (*default_sighandler[NUM_SIGS]) (int, siginfo_t *, void *);
- static void
- __handle_one_signal (shim_tcb_t * tcb, int sig, struct shim_signal * signal)
- {
- struct shim_thread * thread = (struct shim_thread *) tcb->tp;
- struct shim_signal_handle * sighdl = &thread->signal_handles[sig - 1];
- void (*handler) (int, siginfo_t *, void *) = NULL;
- if (signal->info.si_signo == SIGCP) {
- join_checkpoint(thread, &signal->context, SI_CP_SESSION(&signal->info));
- return;
- }
- debug("%s handled\n", signal_name(sig));
- lock(&thread->lock);
- if (sighdl->action) {
- struct __kernel_sigaction * act = sighdl->action;
- /* This is a workaround. The truth is that many program will
- use sa_handler as sa_sigaction, because sa_sigaction is
- not supported in amd64 */
- #ifdef __i386__
- handler = (void (*) (int, siginfo_t *, void *)) act->_u._sa_handler;
- if (act->sa_flags & SA_SIGINFO)
- sa_handler = act->_u._sa_sigaction;
- #else
- handler = (void (*) (int, siginfo_t *, void *)) act->k_sa_handler;
- #endif
- if (act->sa_flags & SA_RESETHAND) {
- sighdl->action = NULL;
- free(act);
- }
- }
- unlock(&thread->lock);
- if ((void *) handler == (void *) 1) /* SIG_IGN */
- return;
- if (!handler && !(handler = default_sighandler[sig - 1]))
- return;
- /* if the context is never stored in the signal, it means the
- signal is handled during system calls, and before the thread
- is resumed. */
- if (!signal->context_stored)
- __store_context(tcb, NULL, signal);
- struct shim_context * context = NULL;
- if (tcb->context.syscall_nr) {
- context = __alloca(sizeof(struct shim_context));
- memcpy(context, &tcb->context, sizeof(struct shim_context));
- tcb->context.syscall_nr = 0;
- tcb->context.next = context;
- }
- debug("run signal handler %p (%d, %p, %p)\n", handler, sig, &signal->info,
- &signal->context);
- (*handler) (sig, &signal->info, &signal->context);
- if (context)
- memcpy(&tcb->context, context, sizeof(struct shim_context));
- if (signal->pal_context)
- memcpy(signal->pal_context, signal->context.uc_mcontext.gregs,
- sizeof(PAL_CONTEXT));
- }
/*
 * Drain pending signals for the current thread.  When SIG is nonzero,
 * only that signal number is drained; otherwise all known signals are
 * scanned in ascending order.  Blocked signals are skipped.
 * NOTE(review): 'uc' is unused in this function.
 */
void __handle_signal (shim_tcb_t * tcb, int sig, ucontext_t * uc)
{
    struct shim_thread * thread = (struct shim_thread *) tcb->tp;
    int begin_sig = 1, end_sig = NUM_KNOWN_SIGS;

    if (sig)
        end_sig = (begin_sig = sig) + 1;

    sig = begin_sig;

    while (atomic_read(&thread->has_signal)) {
        struct shim_signal * signal = NULL;

        /* Find the lowest unblocked signal with something queued. */
        for ( ; sig < end_sig ; sig++)
            if (!__sigismember(&thread->signal_mask, sig) &&
                (signal = fetch_signal_log(tcb, thread, sig)))
                break;

        if (!signal)
            break;

        if (!signal->context_stored)
            __store_context(tcb, NULL, signal);

        __handle_one_signal(tcb, sig, signal);
        free(signal);
        DkThreadYieldExecution();
        /* A signal was just handled; clear the delayed-signal marker. */
        tcb->context.preempt &= ~SIGNAL_DELAYED;
    }
}
/*
 * Public entry point for processing this thread's pending signals.  With
 * DELAYED_ONLY set, signals are handled only if some were previously
 * delayed inside a preemption-disabled region.
 */
void handle_signal (bool delayed_only)
{
    shim_tcb_t * tcb = shim_get_tls();
    assert(tcb);

    struct shim_thread * thread = (struct shim_thread *) tcb->tp;

    /* Fast path */
    if (!thread || !thread->has_signal.counter)
        return;

    __disable_preempt(tcb);

    if ((tcb->context.preempt & ~SIGNAL_DELAYED) > 1) {
        /* Nested inside another preemption-disabled region: defer. */
        debug("signal delayed (%ld)\n", tcb->context.preempt & ~SIGNAL_DELAYED);
        tcb->context.preempt |= SIGNAL_DELAYED;
    } else if (!(delayed_only && !(tcb->context.preempt & SIGNAL_DELAYED))) {
        __handle_signal(tcb, 0, NULL);
    }

    __enable_preempt(tcb);
    debug("__enable_preempt: %s:%d\n", __FILE__, __LINE__);
}
- void append_signal (struct shim_thread * thread, int sig, siginfo_t * info,
- bool wakeup)
- {
- struct shim_signal * signal = malloc(sizeof(struct shim_signal));
- if (!signal)
- return;
- /* save in signal */
- if (info) {
- __store_info(info, signal);
- signal->context_stored = false;
- } else {
- memset(signal, 0, sizeof(struct shim_signal));
- }
- struct shim_signal ** signal_log = allocate_signal_log(thread, sig);
- if (signal_log) {
- *signal_log = signal;
- if (wakeup) {
- debug("resuming thread %u\n", thread->tid);
- thread_wakeup(thread);
- DkThreadResume(thread->pal_handle);
- }
- } else {
- SYS_PRINTF("signal queue is full (TID = %u, SIG = %d)\n",
- thread->tid, sig);
- free(signal);
- }
- }
- static void sighandler_kill (int sig, siginfo_t * info, void * ucontext)
- {
- debug("killed by %s\n", signal_name(sig));
- if (!info->si_pid)
- switch(sig) {
- case SIGTERM:
- case SIGINT:
- shim_do_kill(-1, sig);
- break;
- }
- try_process_exit(0, sig);
- DkThreadExit();
- }
/* We don't currently implement core dumps, but put a wrapper
 * in case we do in the future: core-dump signals simply fall through to
 * the kill handler. */
static void sighandler_core (int sig, siginfo_t * info, void * ucontext)
{
    sighandler_kill(sig, info, ucontext);
}
/* Default dispositions, indexed by (signal number - 1).  A NULL entry
 * means the signal is ignored when no user handler is installed
 * (see __handle_one_signal()). */
static void (*default_sighandler[NUM_SIGS]) (int, siginfo_t *, void *) =
{
    /* SIGHUP */    &sighandler_kill,
    /* SIGINT */    &sighandler_kill,
    /* SIGQUIT */   &sighandler_kill,
    /* SIGILL */    &sighandler_kill,
    /* SIGTRAP */   &sighandler_core,
    /* SIGABRT */   &sighandler_kill,
    /* SIGBUS */    &sighandler_kill,
    /* SIGFPE */    &sighandler_kill,
    /* SIGKILL */   &sighandler_kill,
    /* SIGUSR1 */   NULL,
    /* SIGSEGV */   &sighandler_kill,
    /* SIGUSR2 */   NULL,
    /* SIGPIPE */   &sighandler_kill,
    /* SIGALRM */   &sighandler_kill,
    /* SIGTERM */   &sighandler_kill,
    /* SIGSTKFLT */ NULL,
    /* SIGCHLD */   NULL,
    /* SIGCONT */   NULL,
    /* SIGSTOP */   NULL,
    /* SIGTSTP */   NULL,
    /* SIGTTIN */   NULL,
    /* SIGTTOU */   NULL,
};
|