Adding dynamic memory management features (EDMM) with SGX2 support #234

3 changes: 3 additions & 0 deletions Pal/src/host/Linux-SGX/ecall_types.h
@@ -4,6 +4,9 @@
enum {
ECALL_ENCLAVE_START = 0,
ECALL_THREAD_START,
ECALL_STACK_EXPAND,
ECALL_THREAD_SETUP,
ECALL_THREAD_CREATE,
ECALL_NR,
};

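The three new ecall numbers are dispatched from the untrusted PAL. As a rough illustration of how the untrusted side might invoke them (not part of this diff; the wrapper names are hypothetical and it assumes the untrusted PAL's existing sgx_ecall(long ecall_no, void * ms) helper):

static int ecall_stack_expand(void * fault_addr)
{
    return sgx_ecall(ECALL_STACK_EXPAND, fault_addr);
}

static int ecall_thread_setup(struct thread_map * map)
{
    return sgx_ecall(ECALL_THREAD_SETUP, map);
}

static int ecall_thread_create(struct thread_map * map)
{
    return sgx_ecall(ECALL_THREAD_CREATE, map);
}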
125 changes: 122 additions & 3 deletions Pal/src/host/Linux-SGX/enclave_ecalls.c
@@ -17,6 +17,111 @@ void pal_start_thread (void);

extern void * enclave_base, * enclave_top;

struct thread_map {
unsigned int tid;
unsigned int thread_index;
unsigned int status;
sgx_arch_tcs_t * tcs;
unsigned long tcs_addr;
unsigned long ssa_addr;
unsigned long tls_addr;
unsigned long aux_stack_addr; /* only applicable to EDMM */
unsigned long enclave_entry;
};

/* pal_expand_stack grows the stack dynamically in EDMM mode.
* The growing strategy is to (1) commit EPC pages to the space between the
* fault address and the current committed stack top, and (2) commit one more
* EPC page below the fault address for future stack growth.
*
* fault_addr: the address whose access (e.g., by a push instruction) caused the #PF
*/
void pal_expand_stack(unsigned long fault_addr)
{
unsigned long stack_commit_top = GET_ENCLAVE_TLS(stack_commit_top);
unsigned long accept_flags = SGX_SECINFO_FLAGS_R | SGX_SECINFO_FLAGS_W |
SGX_SECINFO_FLAGS_REG | SGX_SECINFO_FLAGS_PENDING;
unsigned long stack_init_addr = GET_ENCLAVE_TLS(initial_stack_offset);
unsigned long end_addr = fault_addr - PRESET_PAGESIZE;

SGX_DBG(DBG_M, "fault_addr, stack_commit_top, stack_init_addr: %p, %p, %p\n",
fault_addr, stack_commit_top, stack_init_addr);
if (fault_addr < (stack_init_addr - ENCLAVE_STACK_SIZE * PRESET_PAGESIZE)) {
SGX_DBG(DBG_E, "stack overrun, stop!\n");
return;
}
/* Bridge the gap between fault addr and top if any */
sgx_accept_pages(accept_flags, fault_addr, stack_commit_top, 0);

stack_commit_top = fault_addr;

/* Overgrow one more page */
if (end_addr >= stack_init_addr - ENCLAVE_STACK_SIZE * PRESET_PAGESIZE) {
sgx_accept_pages(accept_flags, end_addr, fault_addr, 0);
stack_commit_top = fault_addr;
}

}
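/* A worked example of the strategy above, with illustrative values only
* (assuming PRESET_PAGESIZE == 0x1000):
*
*   stack_commit_top = 0x80000;   // currently committed stack top
*   fault_addr       = 0x7d000;   // page where the push faulted
*   end_addr         = 0x7c000;   // fault_addr - PRESET_PAGESIZE
*
* The first sgx_accept_pages() call EACCEPTs [0x7d000, 0x80000) to bridge
* the gap, and the second EACCEPTs the overgrowth page [0x7c000, 0x7d000).
*/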

/* This function sets up the pages necessary for running a thread, including:
* (1) SSAs (2) TLS (3) TCS (4) stack
* ecall_args: pointer to the thread-dependent information for setting up the new thread
*/
void pal_thread_setup(void * ecall_args){
struct thread_map * thread_info = (struct thread_map *)ecall_args;
unsigned long regular_flags = SGX_SECINFO_FLAGS_R | SGX_SECINFO_FLAGS_W |
SGX_SECINFO_FLAGS_REG | SGX_SECINFO_FLAGS_PENDING;
SGX_DBG(DBG_M, "the created thread using tcs at %p, tls at %p, ssa at %p\n",
thread_info->tcs_addr, thread_info->tls_addr, thread_info->ssa_addr);
sgx_accept_pages(regular_flags, thread_info->tcs_addr, thread_info->tcs_addr + PRESET_PAGESIZE, 0);
sgx_accept_pages(regular_flags, thread_info->tls_addr, thread_info->tls_addr + PRESET_PAGESIZE, 0);
sgx_accept_pages(regular_flags, thread_info->ssa_addr, thread_info->ssa_addr + 2 * PRESET_PAGESIZE, 0);

// Setup TLS
struct enclave_tls* tls = (struct enclave_tls *) thread_info->tls_addr;
tls->enclave_size = GET_ENCLAVE_TLS(enclave_size);
tls->tcs_offset = thread_info->tcs_addr;

unsigned long stack_gap = thread_info->thread_index * (ENCLAVE_STACK_SIZE + PRESET_PAGESIZE); // leave a one-page gap between thread stacks
tls->initial_stack_offset = GET_ENCLAVE_TLS(initial_stack_offset) - stack_gap;

tls->ssa = (void *)thread_info->ssa_addr;
tls->gpr = tls->ssa + PRESET_PAGESIZE - sizeof(sgx_arch_gpr_t);
tls->aux_stack_offset = thread_info->aux_stack_addr;
tls->stack_commit_top = tls->initial_stack_offset;
tls->ocall_pending = 0;

// Setup TCS
thread_info->tcs = (sgx_arch_tcs_t *) thread_info->tcs_addr;
memset((void*)thread_info->tcs_addr, 0, PRESET_PAGESIZE);
thread_info->tcs->ossa = thread_info->ssa_addr;
thread_info->tcs->nssa = 2;
thread_info->tcs->oentry = thread_info->enclave_entry;
thread_info->tcs->ofsbasgx = 0;
thread_info->tcs->ogsbasgx = thread_info->tls_addr;
thread_info->tcs->fslimit = 0xfff;
thread_info->tcs->gslimit = 0xfff;

// Pre-allocate two pages for the stack
unsigned long accept_flags = SGX_SECINFO_FLAGS_R | SGX_SECINFO_FLAGS_W |
SGX_SECINFO_FLAGS_REG | SGX_SECINFO_FLAGS_PENDING;

sgx_accept_pages(accept_flags, tls->initial_stack_offset - 2 * PRESET_PAGESIZE, tls->initial_stack_offset, 0);
}

/* pal_thread_create finalizes the creation of a thread by changing
* the type of its TCS page from regular (REG) to TCS.
* ecall_args: pointer to the thread_map whose tcs_addr page is converted to TCS type
*/
void pal_thread_create(void * ecall_args){
struct thread_map * thread_info = (struct thread_map *)ecall_args;
unsigned long tcs_flags = SGX_SECINFO_FLAGS_TCS | SGX_SECINFO_FLAGS_MODIFIED;

int rs = sgx_accept_pages(tcs_flags, thread_info->tcs_addr, thread_info->tcs_addr + PRESET_PAGESIZE, 0);
if (rs != 0) SGX_DBG(DBG_E, "EACCEPT TCS Change failed: %d\n", rs);
}
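/* A minimal sketch of the expected call order from the untrusted PAL for
* dynamically creating a thread (illustrative only; the untrusted-side code
* is not part of this hunk). The host is assumed to EAUG the TCS, TLS, SSA
* and stack pages first, then:
*
*   struct thread_map map = {
*       .thread_index   = index,
*       .tcs_addr       = tcs_addr,
*       .tls_addr       = tls_addr,
*       .ssa_addr       = ssa_addr,
*       .aux_stack_addr = aux_stack_addr,
*       .enclave_entry  = enclave_entry,
*   };
*   sgx_ecall(ECALL_THREAD_SETUP, &map);   // EACCEPT pages, fill TLS and TCS
*   sgx_ecall(ECALL_THREAD_CREATE, &map);  // EACCEPT the REG -> TCS type change
*
* Only after the type change succeeds can the new TCS be entered with EENTER.
*/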

/* handle_ecall is the main entry of all ecall functions */
int handle_ecall (long ecall_index, void * ecall_args, void * exit_target,
void * untrusted_stack, void * enclave_base_addr)
{
@@ -37,7 +142,8 @@ int handle_ecall (long ecall_index, void * ecall_args, void * exit_target,
SET_ENCLAVE_TLS(exit_target, exit_target);
SET_ENCLAVE_TLS(ustack_top, untrusted_stack);
SET_ENCLAVE_TLS(ustack, untrusted_stack);

SET_ENCLAVE_TLS(ocall_pending, 0);

switch(ecall_index) {
case ECALL_ENCLAVE_START: {
ms_ecall_enclave_start_t * ms =
@@ -47,14 +153,27 @@ int handle_ecall (long ecall_index, void * ecall_args, void * exit_target,

pal_linux_main(ms->ms_arguments, ms->ms_environments,
ms->ms_sec_info);
ocall_exit();
break;
}

case ECALL_THREAD_START:
pal_start_thread();
ocall_exit();
break;
case ECALL_STACK_EXPAND:
pal_expand_stack((unsigned long)ecall_args);
break;
case ECALL_THREAD_SETUP:
pal_thread_setup(ecall_args);
break;
case ECALL_THREAD_CREATE:
pal_thread_create(ecall_args);
break;
default:
SGX_DBG(DBG_E, "Ecall error, invalid ecall index!\n");
ocall_exit();
}

ocall_exit();

return 0;
}
110 changes: 102 additions & 8 deletions Pal/src/host/Linux-SGX/enclave_entry.S
@@ -13,10 +13,19 @@ enclave_entry:

# current SSA is in RAX (Trusted)
cmp $0, %rax
jne .Lhandle_resume
je .Lnormal_enter

# Exception to the rule: the stack-expanding ecall (index 2) is allowed
# to enter even when the current SSA index is not 0
cmp $2, %rdi
je .Lnormal_enter

# If no OCALL was made in this exception context, handle the resume
cmp $1, %gs:SGX_OCALL_PENDING
jne .Lhandle_resume

# TCS is in RBX (Trusted)

.Lnormal_enter:
# AEP address in RCX (Trusted)
mov %rcx, %gs:SGX_AEP

@@ -35,9 +44,7 @@ enclave_entry:
# from an OCALL in the untrusted PAL. Attackers can manipulate RDI
# to deceive the trusted PAL.

# A safe design: check if %gs:SGX_EXIT_TARGET is ever assigned
mov %gs:SGX_EXIT_TARGET, %rcx
cmp $0, %rcx
cmp $0, %gs:SGX_OCALL_PENDING
jne .Lreturn_from_ocall

# PAL convention:
Expand All @@ -54,10 +61,22 @@ enclave_entry:
# pass the untrusted stack address in RCX
mov %rsp, %rcx

# handle stack growth (ECALL_STACK_EXPAND = 2) on the auxiliary stack
cmp $2, %rdi
je .Lhandle_aux_stack

# set up the thread context (ECALL_THREAD_SETUP = 3) on the auxiliary stack
cmp $3, %rdi
je .Lhandle_aux_stack

# finalize thread creation (ECALL_THREAD_CREATE = 4) on the auxiliary stack
cmp $4, %rdi
je .Lhandle_aux_stack

# switch to the enclave stack: enclave base + %gs:SGX_INITIAL_STACK_OFFSET
add %gs:SGX_INITIAL_STACK_OFFSET, %rbx
mov %rbx, %rsp

.Ldo_handle_ecall:
# clear the rest of register states
xor %rax, %rax
xor %rbx, %rbx
Expand All @@ -72,12 +91,25 @@ enclave_entry:
# register states need to be carefully checked, so we move the handling
# to handle_ecall() in enclave_ecalls.c
call handle_ecall

# never return to this point (should die)

# ecall_enclave_start/ecall_thread_start never return to this point
# other ecalls return here
xor %rdi, %rdi
xor %rsi, %rsi
jmp .Leexit

.Lhandle_aux_stack:
add %gs:SGX_AUX_STACK_OFFSET, %rbx
mov %rbx, %rsp

# An ecall made from the exception-handling path returns via the AEP:
# RCX would normally hold the return address, but it is already in use here,
# so store SGX_AEP as the ecall return target instead

mov %gs:SGX_AEP, %r9
mov %r9, %gs:SGX_ECALL_RET_TARGET
jmp .Ldo_handle_ecall

.Lhandle_resume:
# PAL convention:
# RDI - external event
@@ -233,7 +265,9 @@ sgx_ocall:

push %rbp
mov %rsp, %gs:SGX_STACK


# mark the OCALL as pending before exiting the enclave
movq $1, %gs:SGX_OCALL_PENDING
jmp .Leexit

.Lexception_handler:
@@ -251,20 +285,34 @@ sgx_ocall:
xor %rbp, %rbp

mov %gs:SGX_USTACK, %rsp
and $STACK_ALIGN, %rsp

# If no OCALL is pending, this is an ecall return:
# use SGX_ECALL_RET_TARGET as the exit target
cmp $1, %gs:SGX_OCALL_PENDING
jne .Lecall_return_setup

mov %gs:SGX_EXIT_TARGET, %rbx
mov %gs:SGX_AEP, %rcx

.Lexecute_exit:
mov $EEXIT, %rax
ENCLU

.Lecall_return_setup:
mov %gs:SGX_ECALL_RET_TARGET, %rbx
mov %gs:SGX_AEP, %rcx
jmp .Lexecute_exit

.Lreturn_from_ocall:
# PAL convention:
# RDI - return value
# RSI - external event (if there is any)

mov %rdi, %rax

# clear ocall_pending
movq $0, %gs:SGX_OCALL_PENDING

# restore FSBASE if necessary
mov %gs:SGX_FSBASE, %rbx
cmp $0, %rbx
@@ -353,6 +401,52 @@ sgx_getkey:
.cfi_endproc
.size sgx_getkey, .-sgx_getkey

/*
* sgx_accept:
* EACCEPT pages for dynamic memory management
*/
.global sgx_accept
.type sgx_accept, @function

sgx_accept:
.cfi_startproc

push %rbx
push %rcx
mov %rdi, %rbx
mov %rsi, %rcx
mov $EACCEPT, %rax
ENCLU
pop %rcx
pop %rbx
ret

.cfi_endproc
.size sgx_accept, .-sgx_accept

/*
* sgx_modpe:
* EMODPE pages for dynamic memory management
*/
.global sgx_modpe
.type sgx_modpe, @function

sgx_modpe:
.cfi_startproc

push %rbx
push %rcx
mov %rdi, %rbx
mov %rsi, %rcx
mov $EMODPE, %rax
ENCLU
pop %rcx
pop %rbx
ret

.cfi_endproc
.size sgx_modpe, .-sgx_modpe
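
/*
 * A sketch of the C-side prototypes these two ENCLU wrappers are assumed to
 * have (not part of this diff; inferred from the callers in
 * enclave_framework.c, where RDI carries the SECINFO and RSI the page address):
 *
 *   int sgx_accept(const sgx_arch_secinfo_t * si, const void * addr);
 *   int sgx_modpe(const sgx_arch_secinfo_t * si, const void * addr);
 */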

/*
* rdrand:
* Get hardware generated random value.
34 changes: 34 additions & 0 deletions Pal/src/host/Linux-SGX/enclave_framework.c
@@ -97,6 +97,40 @@ int sgx_verify_report (sgx_arch_report_t * report)
return 0;
}


#define SE_DECLSPEC_ALIGN(x) __attribute__((aligned(x)))

/* sgx_accept_pages does EACCEPT on the pages from address lo up to address hi */
int sgx_accept_pages(uint64_t sfl, size_t lo, size_t hi, bool executable)
{
size_t addr = hi;
SE_DECLSPEC_ALIGN(sizeof(sgx_arch_secinfo_t)) sgx_arch_secinfo_t si;
si.flags = sfl;

for (uint16_t i = 0; i < (sizeof(si.reserved)/sizeof(si.reserved[0])); i++)
si.reserved[i] = 0;

SGX_DBG(DBG_M, "sgx_accept_pages: %p - %p, executable: %d \n", lo, hi, executable);
SE_DECLSPEC_ALIGN(sizeof(sgx_arch_secinfo_t)) sgx_arch_secinfo_t smi = si;
smi.flags |= SGX_SECINFO_FLAGS_X;

while (lo < addr)
{
addr -= PRESET_PAGESIZE;
int rc = sgx_accept(&si, (const void *)addr);

/* FIXME: Needs better handling here; add a flow that checks for multiple EACCEPTs on the same page */
if (rc != 0) {
// SGX_DBG(DBG_E, "eaccept fails: %d\n", rc);
// return rc;
continue;
}
if (executable)
rc = sgx_modpe(&smi, (const void *)addr);
}
return 0;
}
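
/* Usage sketch (illustrative only, not part of this patch): to accept one
* dynamically added page and make it executable, the executable flag makes
* sgx_accept_pages() follow the EACCEPT with an EMODPE that adds the X
* permission:
*
*   uint64_t flags = SGX_SECINFO_FLAGS_R | SGX_SECINFO_FLAGS_W |
*                    SGX_SECINFO_FLAGS_REG | SGX_SECINFO_FLAGS_PENDING;
*   sgx_accept_pages(flags, addr, addr + PRESET_PAGESIZE, 1);
*/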

int init_enclave_key (void)
{
sgx_arch_keyrequest_t keyrequest;