|
@@ -364,6 +364,39 @@ enclave_entry:
|
|
|
movq $0, %gs:SGX_OCALL_PREPARED
|
|
|
4:
|
|
|
|
|
|
+ # The last instructions of _restore_sgx_context need to be atomic for
|
|
|
+ # the code below (see _restore_sgx_context for more details). So
|
|
|
+ # emulate this if we were interrupted there.
|
|
|
+ leaq .Ltmp_rip_saved0(%rip), %rax
|
|
|
+ cmpq %rax, SGX_GPR_RIP(%rbx)
|
|
|
+ je .Lemulate_tmp_rip_saved0
|
|
|
+
|
|
|
+ leaq .Ltmp_rip_saved1(%rip), %rax
|
|
|
+ cmpq %rax, SGX_GPR_RIP(%rbx)
|
|
|
+ je .Lemulate_tmp_rip_saved1
|
|
|
+
|
|
|
+ leaq .Ltmp_rip_saved2(%rip), %rax
|
|
|
+ cmpq %rax, SGX_GPR_RIP(%rbx)
|
|
|
+ je .Lemulate_tmp_rip_saved2
|
|
|
+
|
|
|
+ jmp .Lemulate_tmp_rip_end
|
|
|
+
|
|
|
+.Lemulate_tmp_rip_saved0:
|
|
|
+ # emulate movq SGX_CPU_CONTEXT_R15 - SGX_CPU_CONTEXT_RIP(%rsp), %r15
|
|
|
+ movq SGX_GPR_RSP(%rbx), %rax
|
|
|
+ movq SGX_CPU_CONTEXT_R15 - SGX_CPU_CONTEXT_RIP(%rax), %rax
|
|
|
+ movq %rax, SGX_GPR_R15(%rbx)
|
|
|
+.Lemulate_tmp_rip_saved1:
|
|
|
+ # emulate movq SGX_CPU_CONTEXT_RSP - SGX_CPU_CONTEXT_RIP(%rsp), %rsp
|
|
|
+ movq SGX_GPR_RSP(%rbx), %rax
|
|
|
+ movq SGX_CPU_CONTEXT_RSP - SGX_CPU_CONTEXT_RIP(%rax), %rax
|
|
|
+ movq %rax, SGX_GPR_RSP(%rbx)
|
|
|
+.Lemulate_tmp_rip_saved2:
|
|
|
+ # emulate jmp *%gs:SGX_TMP_RIP
|
|
|
+ movq %gs:SGX_TMP_RIP, %rax
|
|
|
+ movq %rax, SGX_GPR_RIP(%rbx)
|
|
|
+.Lemulate_tmp_rip_end:
|
|
|
+
|
|
|
movq SGX_GPR_RSP(%rbx), %rsi
|
|
|
subq $(SGX_CPU_CONTEXT_SIZE + RED_ZONE_SIZE), %rsi
|
|
|
|
|
@@ -680,52 +713,74 @@ __morestack:
|
|
|
popq %rbp
|
|
|
retq
|
|
|
|
|
|
-/*
|
|
|
- * Restore an sgx_cpu_context_t as generated by .Lhandle_exception. Execution will
|
|
|
- * continue as specified by the rip in the context.
|
|
|
- *
|
|
|
- * It is required that:
|
|
|
- *
|
|
|
- * %rdi == *(%rdi + SGX_CPU_CONTEXT_RSP) - (SGX_CPU_CONTEXT_SIZE + RED_ZONE_SIZE)
|
|
|
- *
|
|
|
- * This holds for the original sgx_context allocated by .Lhandle_exception.
|
|
|
- * restore_sgx_context is a safe wrapper which checks this.
|
|
|
- */
|
|
|
+ # noreturn void _restore_sgx_context(sgx_cpu_context_t* uc);
|
|
|
+ # Restore an sgx_cpu_context_t as generated by .Lhandle_exception. Execution will
|
|
|
+ # continue as specified by the rip in the context.
|
|
|
+ # If RDI (uc) points into the signal stack, we need RSP to keep
|
|
|
+ # pointing into the signal stack until the last read from there;
|
|
|
+ # otherwise .Lsetup_exception_handler might mess with it, because it
|
|
|
+ # would think that the signal stack is not in use. In this case we
|
|
|
+ # assume that RSP points into the signal stack when we get called.
|
|
|
+ # (Also keep the redzone in mind, see asserts for sgx_cpu_context_t in sgx_arch.h)
|
|
|
.global _restore_sgx_context
|
|
|
.type _restore_sgx_context, @function
|
|
|
-
|
|
|
_restore_sgx_context:
|
|
|
- movq SGX_CPU_CONTEXT_RAX(%rdi), %rax
|
|
|
- movq SGX_CPU_CONTEXT_RCX(%rdi), %rcx
|
|
|
- movq SGX_CPU_CONTEXT_RDX(%rdi), %rdx
|
|
|
- movq SGX_CPU_CONTEXT_RBX(%rdi), %rbx
|
|
|
+ .cfi_startproc
|
|
|
+ movq %rdi, %r15
|
|
|
+
|
|
|
+ movq SGX_CPU_CONTEXT_RAX(%r15), %rax
|
|
|
+ movq SGX_CPU_CONTEXT_RCX(%r15), %rcx
|
|
|
+ movq SGX_CPU_CONTEXT_RDX(%r15), %rdx
|
|
|
+ movq SGX_CPU_CONTEXT_RBX(%r15), %rbx
|
|
|
# For %rsp see below.
|
|
|
- movq SGX_CPU_CONTEXT_RBP(%rdi), %rbp
|
|
|
- movq SGX_CPU_CONTEXT_RSI(%rdi), %rsi
|
|
|
- # For %rdi see below.
|
|
|
- movq SGX_CPU_CONTEXT_R8(%rdi), %r8
|
|
|
- movq SGX_CPU_CONTEXT_R9(%rdi), %r9
|
|
|
- movq SGX_CPU_CONTEXT_R10(%rdi), %r10
|
|
|
- movq SGX_CPU_CONTEXT_R11(%rdi), %r11
|
|
|
- movq SGX_CPU_CONTEXT_R12(%rdi), %r12
|
|
|
- movq SGX_CPU_CONTEXT_R13(%rdi), %r13
|
|
|
- movq SGX_CPU_CONTEXT_R14(%rdi), %r14
|
|
|
- movq SGX_CPU_CONTEXT_R15(%rdi), %r15
|
|
|
-
|
|
|
- # We need to make sure that %rsp - RED_ZONE_SIZE never points above
|
|
|
- # anything we still need. Otherwise .Lhandle_exception might mess with
|
|
|
- # it. SGX_CPU_CONTEXT_RDI - SGX_CPU_CONTEXT_RFLAGS <= RED_ZONE_SIZE, see
|
|
|
- # sgx_arch.h.
|
|
|
- leaq SGX_CPU_CONTEXT_RFLAGS(%rdi), %rsp
|
|
|
- popfq # remember to not touch any flags after here
|
|
|
-
|
|
|
- movq SGX_CPU_CONTEXT_RDI(%rdi), %rdi
|
|
|
- # Now %rdi is restored so we need to use the stack to access the
|
|
|
- # context.
|
|
|
-
|
|
|
- # Now pop %rip and fix stack pointer in one operation (to avoid
|
|
|
- # problems with nesting, see comment above). SGX_CPU_CONTEXT_RIP is
|
|
|
- # directly after SGX_CPU_CONTEXT_RFLAGS, see sgx_arch.h. Note that retq
|
|
|
- # decreases %rsp by 8 for the popped %rip additionally to the passed
|
|
|
- # offset.
|
|
|
- retq $(SGX_CPU_CONTEXT_SIZE + RED_ZONE_SIZE - SGX_CPU_CONTEXT_RIP - 8)
|
|
|
+ movq SGX_CPU_CONTEXT_RBP(%r15), %rbp
|
|
|
+ movq SGX_CPU_CONTEXT_RSI(%r15), %rsi
|
|
|
+ movq SGX_CPU_CONTEXT_RDI(%r15), %rdi
|
|
|
+ movq SGX_CPU_CONTEXT_R8(%r15), %r8
|
|
|
+ movq SGX_CPU_CONTEXT_R9(%r15), %r9
|
|
|
+ movq SGX_CPU_CONTEXT_R10(%r15), %r10
|
|
|
+ movq SGX_CPU_CONTEXT_R11(%r15), %r11
|
|
|
+ movq SGX_CPU_CONTEXT_R12(%r15), %r12
|
|
|
+ movq SGX_CPU_CONTEXT_R13(%r15), %r13
|
|
|
+ movq SGX_CPU_CONTEXT_R14(%r15), %r14
|
|
|
+ # R15 will be restored below
|
|
|
+
|
|
|
+ leaq SGX_CPU_CONTEXT_RFLAGS(%r15), %rsp
|
|
|
+ popfq
|
|
|
+
|
|
|
+ # See the comment at .Lsetup_exception_handler.
|
|
|
+ #
|
|
|
+ # The use of SGX_TMP_RIP (enclave_tls::tmp_rip per-enclave-thread field) must be atomic.
|
|
|
+ # Consider a data race:
|
|
|
+ # (1) thread handles a previous exception in SSA=0,
|
|
|
+ # (2) thread is done and returns from exception handler via restore_sgx_context(),
|
|
|
+ # (3) in the middle of _restore_sgx_context() a new exception arrives,
|
|
|
+ # (4) the exception handler for this new exception is prepared in SSA=1,
|
|
|
+ # (5) thread returns back to SSA=0 and handles this new exception,
|
|
|
+ # (6) thread is done and returns from exception handler via _restore_sgx_context()
|
|
|
+ # and updates SGX_TMP_RIP (overwrites enclave_tls::tmp_rip). Now the thread returned in
|
|
|
+ # the middle of _restore_sgx_context() and will try to jmp *%gs:SGX_TMP_RIP but this value
|
|
|
+ # is lost, and SIGILL/SEGFAULT follows.
|
|
|
+ #
|
|
|
+ # The last 4 instructions that restore RIP, RSP and R15 (needed
|
|
|
+ # as tmp reg) need to be atomic from the point of view of
|
|
|
+ # .Lsetup_exception_handler.
|
|
|
+ #
|
|
|
+ # The reason is that .Lsetup_exception_handler can interrupt us in the
|
|
|
+ # middle and the nested exception handler that it injects would mess
|
|
|
+ # with %gs:SGX_TMP_RIP when it calls us to return (%gs:SGX_TMP_RIP is a
|
|
|
+ # single memory location per thread, so not re-entry safe).
|
|
|
+ #
|
|
|
+ # Since they are not atomic, .Lsetup_exception_handler will emulate this
|
|
|
+ # behavior if it gets called while executing them (see there).
|
|
|
+
|
|
|
+ # RSP currently points to RIP, so we need relative addressing to restore RIP, R15, and RSP.
|
|
|
+ movq SGX_CPU_CONTEXT_RIP - SGX_CPU_CONTEXT_RIP(%rsp), %r15
|
|
|
+ movq %r15, %gs:SGX_TMP_RIP
|
|
|
+.Ltmp_rip_saved0:
|
|
|
+ movq SGX_CPU_CONTEXT_R15 - SGX_CPU_CONTEXT_RIP(%rsp), %r15
|
|
|
+.Ltmp_rip_saved1:
|
|
|
+ movq SGX_CPU_CONTEXT_RSP - SGX_CPU_CONTEXT_RIP(%rsp), %rsp
|
|
|
+.Ltmp_rip_saved2:
|
|
|
+ jmp *%gs:SGX_TMP_RIP
|
|
|
+ .cfi_endproc
|