|
@@ -0,0 +1,641 @@
|
|
|
+diff -ruNp a/elf/dl-load.c b/elf/dl-load.c
|
|
|
+--- a/elf/dl-load.c
|
|
|
++++ b/elf/dl-load.c
|
|
|
+@@ -45,6 +45,7 @@
|
|
|
+ #include <dl-machine-reject-phdr.h>
|
|
|
+ #include <dl-sysdep-open.h>
|
|
|
+
|
|
|
++#include <glibc-version.h>
|
|
|
+
|
|
|
+ #include <endian.h>
|
|
|
+ #if BYTE_ORDER == BIG_ENDIAN
|
|
|
+@@ -1415,6 +1416,9 @@ cannot enable executable stack as shared
|
|
|
+ DL_AFTER_LOAD (l);
|
|
|
+ #endif
|
|
|
+
|
|
|
++ /* Register the newly loaded library with the SHIM layer. */
|
|
|
++ register_library(l->l_name, l->l_addr);
|
|
|
++
|
|
|
+ /* Now that the object is fully initialized add it to the object list. */
|
|
|
+ _dl_add_to_namespace_list (l, nsid);
|
|
|
+
|
|
|
+diff -ruNp a/elf/Makefile b/elf/Makefile
|
|
|
+--- a/elf/Makefile
|
|
|
++++ b/elf/Makefile
|
|
|
+@@ -21,7 +21,7 @@ subdir := elf
|
|
|
+
|
|
|
+ include ../Makeconfig
|
|
|
+
|
|
|
+-headers = elf.h bits/elfclass.h link.h bits/link.h
|
|
|
++headers = elf.h bits/elfclass.h link.h bits/link.h syscalldb.h
|
|
|
+ routines = $(all-dl-routines) dl-support dl-iteratephdr \
|
|
|
+ dl-addr enbl-secure dl-profstub \
|
|
|
+ dl-origin dl-libc dl-sym dl-tsd dl-sysdep
|
|
|
+@@ -31,7 +31,8 @@ routines = $(all-dl-routines) dl-support
|
|
|
+ dl-routines = $(addprefix dl-,load lookup object reloc deps hwcaps \
|
|
|
+ runtime error init fini debug misc \
|
|
|
+ version profile conflict tls origin scope \
|
|
|
+- execstack caller open close trampoline)
|
|
|
++ execstack caller open close trampoline) \
|
|
|
++ syscalldb syscallas
|
|
|
+ ifeq (yes,$(use-ldconfig))
|
|
|
+ dl-routines += dl-cache
|
|
|
+ endif
|
|
|
+diff -ruNp a/elf/rtld.c b/elf/rtld.c
|
|
|
+--- a/elf/rtld.c
|
|
|
++++ b/elf/rtld.c
|
|
|
+@@ -332,6 +332,23 @@ _dl_start_final (void *arg, struct dl_st
|
|
|
+ return start_addr;
|
|
|
+ }
|
|
|
+
|
|
|
++/* For Graphene, check whether the glibc version matches the compatible SHIM
|
|
|
++ library.  If not, tell the user to rebuild glibc. */
|
|
|
++#include "glibc-version.h"
|
|
|
++
|
|
|
++const unsigned int glibc_version __attribute__((weak)) = GLIBC_VERSION;
|
|
|
++
|
|
|
++static void __attribute__((noinline,optimize("-O0")))
|
|
|
++check_glibc_version (void)
|
|
|
++{
|
|
|
++ if (glibc_version != GLIBC_VERSION)
|
|
|
++ {
|
|
|
++ _dl_fatal_printf ("Warning from Graphene: "
|
|
|
++ "Glibc version is incorrect. Please rebuild Glibc.\n");
|
|
|
++ _exit (1);
|
|
|
++ }
|
|
|
++}
|
|
|
++
|
|
|
+ static ElfW(Addr) __attribute_used__ internal_function
|
|
|
+ _dl_start (void *arg)
|
|
|
+ {
|
|
|
+@@ -402,6 +419,9 @@ _dl_start (void *arg)
|
|
|
+ therefore need not test whether we have to allocate the array
|
|
|
+ for the relocation results (as done in dl-reloc.c). */
|
|
|
+
|
|
|
++ /* For Graphene, check if the glibc version is correct. */
|
|
|
++ check_glibc_version();
|
|
|
++
|
|
|
+ /* Now life is sane; we can call functions and access global data.
|
|
|
+ Set up to use the operating system facilities, and find out from
|
|
|
+ the operating system's program loader where to find the program
|
|
|
+diff -ruNp a/elf/Versions b/elf/Versions
|
|
|
+--- a/elf/Versions
|
|
|
++++ b/elf/Versions
|
|
|
+@@ -65,4 +68,7 @@ ld {
|
|
|
+ # Pointer protection.
|
|
|
+ __pointer_chk_guard;
|
|
|
+ }
|
|
|
++ SHIM {
|
|
|
++ syscalldb; glibc_version; glibc_option; register_library;
|
|
|
++ }
|
|
|
+ }
|
|
|
+diff -ruNp a/Makeconfig b/Makeconfig
|
|
|
+--- a/Makeconfig
|
|
|
++++ b/Makeconfig
|
|
|
+@@ -841,7 +841,8 @@ endif # $(+cflags) == ""
|
|
|
+ # current directory.
|
|
|
+ +includes = -I$(..)include $(if $(subdir),$(objpfx:%/=-I%)) \
|
|
|
+ $(+sysdep-includes) $(includes) \
|
|
|
+- $(patsubst %/,-I%,$(..)) $(libio-include) -I. $(sysincludes)
|
|
|
++ $(patsubst %/,-I%,$(..)) $(libio-include) -I. $(sysincludes) \
|
|
|
++ -I$(common-objpfx)../shim/include
|
|
|
+
|
|
|
+ # Since libio has several internal header files, we use a -I instead
|
|
|
+ # of many little headers in the include directory.
|
|
|
+diff -ruNp a/Makefile b/Makefile
|
|
|
+--- a/Makefile
|
|
|
++++ b/Makefile
|
|
|
+@@ -178,6 +178,8 @@ $(inst_includedir)/gnu/stubs.h: $(+force
|
|
|
+ install-others-nosubdir: $(installed-stubs)
|
|
|
+ endif
|
|
|
+
|
|
|
++# For Graphene
|
|
|
++CFLAGS-syscalldb.c = -fPIC
|
|
|
+
|
|
|
+ # Since stubs.h is never needed when building the library, we simplify the
|
|
|
+ # hairy installation process by producing it in place only as the last part
|
|
|
+diff -ruNp a/sysdeps/unix/sysv/linux/_exit.c b/sysdeps/unix/sysv/linux/_exit.c
|
|
|
+--- a/sysdeps/unix/sysv/linux/_exit.c
|
|
|
++++ b/sysdeps/unix/sysv/linux/_exit.c
|
|
|
+@@ -28,9 +28,9 @@ _exit (int status)
|
|
|
+ while (1)
|
|
|
+ {
|
|
|
+ #ifdef __NR_exit_group
|
|
|
+- INLINE_SYSCALL (exit_group, 1, status);
|
|
|
++ INLINE_SYSCALL_ASM (exit_group, 1, status);
|
|
|
+ #endif
|
|
|
+- INLINE_SYSCALL (exit, 1, status);
|
|
|
++ INLINE_SYSCALL_ASM (exit, 1, status);
|
|
|
+
|
|
|
+ #ifdef ABORT_INSTRUCTION
|
|
|
+ ABORT_INSTRUCTION;
|
|
|
+diff -ruNp a/sysdeps/unix/sysv/linux/x86_64/cancellation.S b/sysdeps/unix/sysv/linux/x86_64/cancellation.S
|
|
|
+--- a/sysdeps/unix/sysv/linux/x86_64/cancellation.S
|
|
|
++++ b/sysdeps/unix/sysv/linux/x86_64/cancellation.S
|
|
|
+@@ -111,7 +111,7 @@ ENTRY(__pthread_disable_asynccancel)
|
|
|
+ xorq %r10, %r10
|
|
|
+ addq $CANCELHANDLING, %rdi
|
|
|
+ LOAD_PRIVATE_FUTEX_WAIT (%esi)
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+ movl %fs:CANCELHANDLING, %eax
|
|
|
+ jmp 3b
|
|
|
+ END(__pthread_disable_asynccancel)
|
|
|
+diff -ruNp a/sysdeps/unix/sysv/linux/x86_64/clone.S b/sysdeps/unix/sysv/linux/x86_64/clone.S
|
|
|
+--- a/sysdeps/unix/sysv/linux/x86_64/clone.S
|
|
|
++++ b/sysdeps/unix/sysv/linux/x86_64/clone.S
|
|
|
+@@ -76,7 +76,7 @@ ENTRY (__clone)
|
|
|
+ /* End FDE now, because in the child the unwind info will be
|
|
|
+ wrong. */
|
|
|
+ cfi_endproc;
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+
|
|
|
+ testq %rax,%rax
|
|
|
+ jl SYSCALL_ERROR_LABEL
|
|
|
+@@ -98,7 +98,7 @@ L(thread_start):
|
|
|
+ movl $-1, %eax
|
|
|
+ jne 2f
|
|
|
+ movl $SYS_ify(getpid), %eax
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+ 2: movl %eax, %fs:PID
|
|
|
+ movl %eax, %fs:TID
|
|
|
+ 1:
|
|
|
+diff -ruNp a/sysdeps/unix/sysv/linux/x86_64/getcontext.S b/sysdeps/unix/sysv/linux/x86_64/getcontext.S
|
|
|
+--- a/sysdeps/unix/sysv/linux/x86_64/getcontext.S
|
|
|
++++ b/sysdeps/unix/sysv/linux/x86_64/getcontext.S
|
|
|
+@@ -75,7 +75,7 @@ ENTRY(__getcontext)
|
|
|
+ #endif
|
|
|
+ movl $_NSIG8,%r10d
|
|
|
+ movl $__NR_rt_sigprocmask, %eax
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+ cmpq $-4095, %rax /* Check %rax for error. */
|
|
|
+ jae SYSCALL_ERROR_LABEL /* Jump to error handler if error. */
|
|
|
+
|
|
|
+diff -ruNp a/sysdeps/unix/sysv/linux/x86_64/____longjmp_chk.S b/sysdeps/unix/sysv/linux/x86_64/____longjmp_chk.S
|
|
|
+--- a/sysdeps/unix/sysv/linux/x86_64/____longjmp_chk.S
|
|
|
++++ b/sysdeps/unix/sysv/linux/x86_64/____longjmp_chk.S
|
|
|
+@@ -84,7 +84,8 @@ ENTRY(____longjmp_chk)
|
|
|
+ xorl %edi, %edi
|
|
|
+ lea -sizeSS(%rsp), %RSI_LP
|
|
|
+ movl $__NR_sigaltstack, %eax
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
++
|
|
|
+ /* Without working sigaltstack we cannot perform the test. */
|
|
|
+ testl %eax, %eax
|
|
|
+ jne .Lok2
|
|
|
+diff -ruNp a/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S b/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S
|
|
|
+--- a/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S
|
|
|
++++ b/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S
|
|
|
+@@ -90,7 +90,7 @@ __lll_lock_wait_private:
|
|
|
+
|
|
|
+ 1: LIBC_PROBE (lll_lock_wait_private, 1, %rdi)
|
|
|
+ movl $SYS_futex, %eax
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+
|
|
|
+ 2: movl %edx, %eax
|
|
|
+ xchgl %eax, (%rdi) /* NB: lock is implied */
|
|
|
+@@ -130,7 +130,7 @@ __lll_lock_wait:
|
|
|
+
|
|
|
+ 1: LIBC_PROBE (lll_lock_wait, 2, %rdi, %rsi)
|
|
|
+ movl $SYS_futex, %eax
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+
|
|
|
+ 2: movl %edx, %eax
|
|
|
+ xchgl %eax, (%rdi) /* NB: lock is implied */
|
|
|
+@@ -185,7 +185,7 @@ __lll_timedlock_wait:
|
|
|
+
|
|
|
+ 1: movl $SYS_futex, %eax
|
|
|
+ movl $2, %edx
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+
|
|
|
+ 2: xchgl %edx, (%rdi) /* NB: lock is implied */
|
|
|
+
|
|
|
+@@ -279,7 +279,7 @@ __lll_timedlock_wait:
|
|
|
+ LOAD_FUTEX_WAIT (%esi)
|
|
|
+ movq %r12, %rdi
|
|
|
+ movl $SYS_futex, %eax
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+
|
|
|
+ /* NB: %edx == 2 */
|
|
|
+ xchgl %edx, (%r12)
|
|
|
+@@ -336,7 +336,7 @@ __lll_unlock_wake_private:
|
|
|
+ LOAD_PRIVATE_FUTEX_WAKE (%esi)
|
|
|
+ movl $1, %edx /* Wake one thread. */
|
|
|
+ movl $SYS_futex, %eax
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+
|
|
|
+ popq %rdx
|
|
|
+ cfi_adjust_cfa_offset(-8)
|
|
|
+@@ -366,7 +366,7 @@ __lll_unlock_wake:
|
|
|
+ LOAD_FUTEX_WAKE (%esi)
|
|
|
+ movl $1, %edx /* Wake one thread. */
|
|
|
+ movl $SYS_futex, %eax
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+
|
|
|
+ popq %rdx
|
|
|
+ cfi_adjust_cfa_offset(-8)
|
|
|
+@@ -436,7 +436,7 @@ __lll_timedwait_tid:
|
|
|
+ #endif
|
|
|
+ movq %r12, %rdi
|
|
|
+ movl $SYS_futex, %eax
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+
|
|
|
+ cmpl $0, (%rdi)
|
|
|
+ jne 1f
|
|
|
+diff -ruNp a/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S b/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S
|
|
|
+--- a/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S
|
|
|
++++ b/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S
|
|
|
+@@ -80,7 +80,7 @@ __lll_robust_lock_wait:
|
|
|
+ jnz 2f
|
|
|
+
|
|
|
+ 1: movl $SYS_futex, %eax
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+
|
|
|
+ movl (%rdi), %eax
|
|
|
+
|
|
|
+@@ -145,7 +145,7 @@ __lll_robust_timedlock_wait:
|
|
|
+ jnz 6f
|
|
|
+
|
|
|
+ 5: movl $SYS_futex, %eax
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+ movl %eax, %ecx
|
|
|
+
|
|
|
+ movl (%rdi), %eax
|
|
|
+@@ -257,7 +257,7 @@ __lll_robust_timedlock_wait:
|
|
|
+ LOAD_FUTEX_WAIT (%esi)
|
|
|
+ movq %r12, %rdi
|
|
|
+ movl $SYS_futex, %eax
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+ movq %rax, %rcx
|
|
|
+
|
|
|
+ movl (%r12), %eax
|
|
|
+diff -ruNp a/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S b/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S
|
|
|
+--- a/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S
|
|
|
++++ b/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S
|
|
|
+@@ -87,7 +87,7 @@ ENTRY(__pthread_cond_broadcast)
|
|
|
+ movl $SYS_futex, %eax
|
|
|
+ movl $1, %edx
|
|
|
+ movl $0x7fffffff, %r10d
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+
|
|
|
+ /* For any kind of error, which mainly is EAGAIN, we try again
|
|
|
+ with WAKE. The general test also covers running on old
|
|
|
+@@ -103,7 +103,7 @@ ENTRY(__pthread_cond_broadcast)
|
|
|
+ movl $SYS_futex, %eax
|
|
|
+ movl $1, %edx
|
|
|
+ movl $0x7fffffff, %r10d
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+
|
|
|
+ /* For any kind of error, which mainly is EAGAIN, we try again
|
|
|
+ with WAKE. The general test also covers running on old
|
|
|
+@@ -169,7 +169,7 @@ ENTRY(__pthread_cond_broadcast)
|
|
|
+ orl $FUTEX_WAKE, %esi
|
|
|
+ #endif
|
|
|
+ movl $SYS_futex, %eax
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+ jmp 10b
|
|
|
+ END(__pthread_cond_broadcast)
|
|
|
+
|
|
|
+diff -ruNp a/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S b/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S
|
|
|
+--- a/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S
|
|
|
++++ b/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S
|
|
|
+@@ -78,7 +78,7 @@ ENTRY(__pthread_cond_signal)
|
|
|
+ addq $cond_lock, %r8
|
|
|
+ #endif
|
|
|
+ movl $FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, %r9d
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+ #if cond_lock != 0
|
|
|
+ subq $cond_lock, %r8
|
|
|
+ #endif
|
|
|
+@@ -95,7 +95,7 @@ ENTRY(__pthread_cond_signal)
|
|
|
+ movq %rcx, %r8
|
|
|
+ xorq %r10, %r10
|
|
|
+ movl (%rdi), %r9d // XXX Can this be right?
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+
|
|
|
+ leaq -cond_futex(%rdi), %r8
|
|
|
+
|
|
|
+@@ -114,7 +114,7 @@ ENTRY(__pthread_cond_signal)
|
|
|
+ movl $SYS_futex, %eax
|
|
|
+ /* %rdx should be 1 already from $FUTEX_WAKE_OP syscall.
|
|
|
+ movl $1, %edx */
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+
|
|
|
+ /* Unlock. */
|
|
|
+ 4: LOCK
|
|
|
+diff -ruNp a/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S b/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
|
|
|
+--- a/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
|
|
|
++++ b/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
|
|
|
+@@ -175,7 +175,7 @@ __pthread_cond_timedwait:
|
|
|
+ movq %r12, %rdx
|
|
|
+ addq $cond_futex, %rdi
|
|
|
+ movl $SYS_futex, %eax
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+
|
|
|
+ cmpl $0, %eax
|
|
|
+ sete %r15b
|
|
|
+@@ -221,7 +221,7 @@ __pthread_cond_timedwait:
|
|
|
+ movq %r12, %rdx
|
|
|
+ addq $cond_futex, %rdi
|
|
|
+ movl $SYS_futex, %eax
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+ 62: movq %rax, %r14
|
|
|
+
|
|
|
+ movl (%rsp), %edi
|
|
|
+@@ -308,7 +308,7 @@ __pthread_cond_timedwait:
|
|
|
+ orl $FUTEX_WAKE, %esi
|
|
|
+ #endif
|
|
|
+ movl $SYS_futex, %eax
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+ subq $cond_nwaiters, %rdi
|
|
|
+
|
|
|
+ 55: LOCK
|
|
|
+@@ -521,7 +521,7 @@ __condvar_cleanup2:
|
|
|
+ orl $FUTEX_WAKE, %esi
|
|
|
+ #endif
|
|
|
+ movl $SYS_futex, %eax
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+ subq $cond_nwaiters, %rdi
|
|
|
+ movl $1, %r12d
|
|
|
+
|
|
|
+@@ -558,7 +558,7 @@ __condvar_cleanup2:
|
|
|
+ orl $FUTEX_WAKE, %esi
|
|
|
+ #endif
|
|
|
+ movl $SYS_futex, %eax
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+
|
|
|
+ /* Lock the mutex only if we don't own it already. This only happens
|
|
|
+ in case of PI mutexes, if we got cancelled after a successful
|
|
|
+diff -ruNp a/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S b/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
|
|
|
+--- a/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
|
|
|
++++ b/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
|
|
|
+@@ -138,7 +138,7 @@ __pthread_cond_wait:
|
|
|
+
|
|
|
+ movl $(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi
|
|
|
+ movl $SYS_futex, %eax
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+
|
|
|
+ cmpl $0, %eax
|
|
|
+ sete %r8b
|
|
|
+@@ -180,7 +180,7 @@ __pthread_cond_wait:
|
|
|
+ #endif
|
|
|
+ 60: xorb %r8b, %r8b
|
|
|
+ movl $SYS_futex, %eax
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+
|
|
|
+ 62: movl (%rsp), %edi
|
|
|
+ callq __pthread_disable_asynccancel
|
|
|
+@@ -239,7 +239,7 @@ __pthread_cond_wait:
|
|
|
+ orl $FUTEX_WAKE, %esi
|
|
|
+ #endif
|
|
|
+ movl $SYS_futex, %eax
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+ subq $cond_nwaiters, %rdi
|
|
|
+
|
|
|
+ 17: LOCK
|
|
|
+@@ -455,7 +455,7 @@ __condvar_cleanup1:
|
|
|
+ orl $FUTEX_WAKE, %esi
|
|
|
+ #endif
|
|
|
+ movl $SYS_futex, %eax
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+ subq $cond_nwaiters, %rdi
|
|
|
+ movl $1, %ecx
|
|
|
+
|
|
|
+@@ -493,7 +493,7 @@ __condvar_cleanup1:
|
|
|
+ orl $FUTEX_WAKE, %esi
|
|
|
+ #endif
|
|
|
+ movl $SYS_futex, %eax
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+
|
|
|
+ /* Lock the mutex only if we don't own it already. This only happens
|
|
|
+ in case of PI mutexes, if we got cancelled after a successful
|
|
|
+diff -ruNp a/sysdeps/unix/sysv/linux/x86_64/setcontext.S b/sysdeps/unix/sysv/linux/x86_64/setcontext.S
|
|
|
+--- a/sysdeps/unix/sysv/linux/x86_64/setcontext.S
|
|
|
++++ b/sysdeps/unix/sysv/linux/x86_64/setcontext.S
|
|
|
+@@ -43,7 +43,7 @@ ENTRY(__setcontext)
|
|
|
+ movl $SIG_SETMASK, %edi
|
|
|
+ movl $_NSIG8,%r10d
|
|
|
+ movl $__NR_rt_sigprocmask, %eax
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+ popq %rdi /* Reload %rdi, adjust stack. */
|
|
|
+ cfi_adjust_cfa_offset(-8)
|
|
|
+ cmpq $-4095, %rax /* Check %rax for error. */
|
|
|
+diff -ruNp a/sysdeps/unix/sysv/linux/x86_64/sigaction.c b/sysdeps/unix/sysv/linux/x86_64/sigaction.c
|
|
|
+--- a/sysdeps/unix/sysv/linux/x86_64/sigaction.c
|
|
|
++++ b/sysdeps/unix/sysv/linux/x86_64/sigaction.c
|
|
|
+@@ -120,7 +120,7 @@ asm \
|
|
|
+ " .type __" #name ",@function\n" \
|
|
|
+ "__" #name ":\n" \
|
|
|
+ " movq $" #syscall ", %rax\n" \
|
|
|
+- " syscall\n" \
|
|
|
++ SYSCALLDB_ASM \
|
|
|
+ ".LEND_" #name ":\n" \
|
|
|
+ ".section .eh_frame,\"a\",@progbits\n" \
|
|
|
+ ".LSTARTFRAME_" #name ":\n" \
|
|
|
+diff -ruNp a/sysdeps/unix/sysv/linux/x86_64/swapcontext.S b/sysdeps/unix/sysv/linux/x86_64/swapcontext.S
|
|
|
+--- a/sysdeps/unix/sysv/linux/x86_64/swapcontext.S
|
|
|
++++ b/sysdeps/unix/sysv/linux/x86_64/swapcontext.S
|
|
|
+@@ -75,7 +75,7 @@ ENTRY(__swapcontext)
|
|
|
+ movl $SIG_SETMASK, %edi
|
|
|
+ movl $_NSIG8,%r10d
|
|
|
+ movl $__NR_rt_sigprocmask, %eax
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+ cmpq $-4095, %rax /* Check %rax for error. */
|
|
|
+ jae SYSCALL_ERROR_LABEL /* Jump to error handler if error. */
|
|
|
+
|
|
|
+diff -ruNp a/sysdeps/unix/sysv/linux/x86_64/syscall.S b/sysdeps/unix/sysv/linux/x86_64/syscall.S
|
|
|
+--- a/sysdeps/unix/sysv/linux/x86_64/syscall.S
|
|
|
++++ b/sysdeps/unix/sysv/linux/x86_64/syscall.S
|
|
|
+@@ -34,7 +34,7 @@ ENTRY (syscall)
|
|
|
+ movq %r8, %r10
|
|
|
+ movq %r9, %r8
|
|
|
+ movq 8(%rsp),%r9 /* arg6 is on the stack. */
|
|
|
+- syscall /* Do the system call. */
|
|
|
++ SYSCALLDB /* Do the system call. */
|
|
|
+ cmpq $-4095, %rax /* Check %rax for error. */
|
|
|
+ jae SYSCALL_ERROR_LABEL /* Jump to error handler if error. */
|
|
|
+ ret /* Return to caller. */
|
|
|
+diff -ruNp a/sysdeps/unix/sysv/linux/x86_64/sysdep.h b/sysdeps/unix/sysv/linux/x86_64/sysdep.h
|
|
|
+--- a/sysdeps/unix/sysv/linux/x86_64/sysdep.h
|
|
|
++++ b/sysdeps/unix/sysv/linux/x86_64/sysdep.h
|
|
|
+@@ -22,6 +22,7 @@
|
|
|
+ #include <sysdeps/unix/sysv/linux/sysdep.h>
|
|
|
+ #include <sysdeps/unix/x86_64/sysdep.h>
|
|
|
+ #include <tls.h>
|
|
|
++#include "syscalldb.h"
|
|
|
+
|
|
|
+ #if IS_IN (rtld)
|
|
|
+ # include <dl-sysdep.h> /* Defines RTLD_PRIVATE_ERRNO. */
|
|
|
+@@ -177,7 +178,7 @@
|
|
|
+ # define DO_CALL(syscall_name, args) \
|
|
|
+ DOARGS_##args \
|
|
|
+ movl $SYS_ify (syscall_name), %eax; \
|
|
|
+- syscall;
|
|
|
++ SYSCALLDB;
|
|
|
+
|
|
|
+ # define DOARGS_0 /* nothing */
|
|
|
+ # define DOARGS_1 /* nothing */
|
|
|
+@@ -191,9 +192,20 @@
|
|
|
+ /* Define a macro which expands inline into the wrapper code for a system
|
|
|
+ call. */
|
|
|
+ # undef INLINE_SYSCALL
|
|
|
+-# define INLINE_SYSCALL(name, nr, args...) \
|
|
|
++# define INLINE_SYSCALL(name, nr_args...) \
|
|
|
+ ({ \
|
|
|
+- unsigned long int resultvar = INTERNAL_SYSCALL (name, , nr, args); \
|
|
|
++ unsigned long int resultvar = INTERNAL_SYSCALL (name, , ##nr_args); \
|
|
|
++ if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (resultvar, ))) \
|
|
|
++ { \
|
|
|
++ __set_errno (INTERNAL_SYSCALL_ERRNO (resultvar, )); \
|
|
|
++ resultvar = (unsigned long int) -1; \
|
|
|
++ } \
|
|
|
++ (long int) resultvar; })
|
|
|
++
|
|
|
++# undef INLINE_SYSCALL_ASM
|
|
|
++# define INLINE_SYSCALL_ASM(name, nr_args...) \
|
|
|
++ ({ \
|
|
|
++ unsigned long int resultvar = INTERNAL_SYSCALL_ASM (name, , ##nr_args); \
|
|
|
+ if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (resultvar, ))) \
|
|
|
+ { \
|
|
|
+ __set_errno (INTERNAL_SYSCALL_ERRNO (resultvar, )); \
|
|
|
+@@ -205,9 +217,9 @@
|
|
|
+ into the wrapper code for a system call. It should be used when size
|
|
|
+ of any argument > size of long int. */
|
|
|
+ # undef INLINE_SYSCALL_TYPES
|
|
|
+-# define INLINE_SYSCALL_TYPES(name, nr, args...) \
|
|
|
++# define INLINE_SYSCALL_TYPES(name, nr_args...) \
|
|
|
+ ({ \
|
|
|
+- unsigned long int resultvar = INTERNAL_SYSCALL_TYPES (name, , nr, args); \
|
|
|
++ unsigned long int resultvar = INTERNAL_SYSCALL_TYPES (name, , ##nr_args); \
|
|
|
+ if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (resultvar, ))) \
|
|
|
+ { \
|
|
|
+ __set_errno (INTERNAL_SYSCALL_ERRNO (resultvar, )); \
|
|
|
+@@ -227,13 +239,19 @@
|
|
|
+ LOAD_ARGS_##nr (args) \
|
|
|
+ LOAD_REGS_##nr \
|
|
|
+ asm volatile ( \
|
|
|
+- "syscall\n\t" \
|
|
|
++ SYSCALLDB \
|
|
|
+ : "=a" (resultvar) \
|
|
|
+ : "0" (name) ASM_ARGS_##nr : "memory", REGISTERS_CLOBBERED_BY_SYSCALL); \
|
|
|
+ (long int) resultvar; })
|
|
|
++# define INTERNAL_SYSCALL_NCS_ASM INTERNAL_SYSCALL_NCS
|
|
|
++
|
|
|
+ # undef INTERNAL_SYSCALL
|
|
|
+-# define INTERNAL_SYSCALL(name, err, nr, args...) \
|
|
|
+- INTERNAL_SYSCALL_NCS (__NR_##name, err, nr, ##args)
|
|
|
++# define INTERNAL_SYSCALL(name, err, nr_args...) \
|
|
|
++ INTERNAL_SYSCALL_NCS (__NR_##name, err, ##nr_args)
|
|
|
++
|
|
|
++# undef INTERNAL_SYSCALL_ASM
|
|
|
++# define INTERNAL_SYSCALL_ASM(name, err, nr_args...) \
|
|
|
++ INTERNAL_SYSCALL_NCS_ASM (__NR_##name, err, ##nr_args)
|
|
|
+
|
|
|
+ # define INTERNAL_SYSCALL_NCS_TYPES(name, err, nr, args...) \
|
|
|
+ ({ \
|
|
|
+@@ -241,7 +259,7 @@
|
|
|
+ LOAD_ARGS_TYPES_##nr (args) \
|
|
|
+ LOAD_REGS_TYPES_##nr (args) \
|
|
|
+ asm volatile ( \
|
|
|
+- "syscall\n\t" \
|
|
|
++ SYSCALLDB \
|
|
|
+ : "=a" (resultvar) \
|
|
|
+ : "0" (name) ASM_ARGS_##nr : "memory", REGISTERS_CLOBBERED_BY_SYSCALL); \
|
|
|
+ (long int) resultvar; })
|
|
|
+diff -ruNp a/sysdeps/unix/sysv/linux/x86_64/vfork.S b/sysdeps/unix/sysv/linux/x86_64/vfork.S
|
|
|
+--- a/sysdeps/unix/sysv/linux/x86_64/vfork.S
|
|
|
++++ b/sysdeps/unix/sysv/linux/x86_64/vfork.S
|
|
|
+@@ -46,7 +46,7 @@ ENTRY (__vfork)
|
|
|
+
|
|
|
+ /* Stuff the syscall number in RAX and enter into the kernel. */
|
|
|
+ movl $SYS_ify (vfork), %eax
|
|
|
+- syscall
|
|
|
++ SYSCALLDB
|
|
|
+
|
|
|
+ /* Push back the return PC. */
|
|
|
+ pushq %rdi
|
|
|
+diff -ruNp a/sysdeps/x86_64/dl-machine.h b/sysdeps/x86_64/dl-machine.h
|
|
|
+--- a/sysdeps/x86_64/dl-machine.h
|
|
|
++++ b/sysdeps/x86_64/dl-machine.h
|
|
|
+@@ -554,7 +554,8 @@ elf_machine_lazy_rel (struct link_map *m
|
|
|
+ value = ((ElfW(Addr) (*) (void)) value) ();
|
|
|
+ *reloc_addr = value;
|
|
|
+ }
|
|
|
+- else
|
|
|
++ /* For Graphene, silently skip R_X86_64_NONE relocations instead of
|
|
|
++ else if (__builtin_expect (r_type != R_X86_64_NONE, 1))
|
|
|
+ _dl_reloc_bad_type (map, r_type, 1);
|
|
|
+ }
|
|
|
+
|
|
|
+diff -ruNp a/sysdeps/x86_64/nptl/tls.h b/sysdeps/x86_64/nptl/tls.h
|
|
|
+--- a/sysdeps/x86_64/nptl/tls.h
|
|
|
++++ b/sysdeps/x86_64/nptl/tls.h
|
|
|
+@@ -28,6 +28,8 @@
|
|
|
+ # include <sysdep.h>
|
|
|
+ # include <libc-internal.h>
|
|
|
+ # include <kernel-features.h>
|
|
|
++# include <shim_tls.h>
|
|
|
++# include <syscalldb.h>
|
|
|
+
|
|
|
+ /* Replacement type for __m128 since this file is included by ld.so,
|
|
|
+ which is compiled with -mno-sse. It must not change the alignment
|
|
|
+@@ -67,6 +69,10 @@ typedef struct
|
|
|
+ # else
|
|
|
+ int __glibc_reserved1;
|
|
|
+ # endif
|
|
|
++
|
|
|
++ shim_tcb_t shim_tcb; /* For graphene, we allocate a shim_tcb
|
|
|
++ in the real tcb. */
|
|
|
++
|
|
|
+ int __glibc_unused1;
|
|
|
+ /* Reservation of some values for the TM ABI. */
|
|
|
+ void *__private_tm[4];
|
|
|
+@@ -138,7 +144,6 @@ typedef struct
|
|
|
+ # define GET_DTV(descr) \
|
|
|
+ (((tcbhead_t *) (descr))->dtv)
|
|
|
+
|
|
|
+-
|
|
|
+ /* Code to initially initialize the thread pointer. This might need
|
|
|
+ special attention since 'errno' is not yet available and if the
|
|
|
+ operation can cause a failure 'errno' must not be touched.
|
|
|
+@@ -155,7 +160,7 @@ typedef struct
|
|
|
+ _head->self = _thrdescr; \
|
|
|
+ \
|
|
|
+ /* It is a simple syscall to set the %fs value for the thread. */ \
|
|
|
+- asm volatile ("syscall" \
|
|
|
++ asm volatile (SYSCALLDB \
|
|
|
+ : "=a" (_result) \
|
|
|
+ : "0" ((unsigned long int) __NR_arch_prctl), \
|
|
|
+ "D" ((unsigned long int) ARCH_SET_FS), \
|