Split vcpu lock
Change-Id: I24e714a3db3c873860f0dab5a424e5c3e585eb0c
jeehoonkang authored and efenniht committed Apr 15, 2020
1 parent 287cdf6 commit 927c334
Showing 9 changed files with 85 additions and 91 deletions.
33 changes: 16 additions & 17 deletions inc/hf/vcpu.h
@@ -31,9 +31,6 @@ enum vcpu_state {
/** The vCPU is ready to be run. */
VCPU_STATE_READY,

/** The vCPU is currently running. */
VCPU_STATE_RUNNING,

/** The vCPU is waiting for a message. */
VCPU_STATE_BLOCKED_MAILBOX,

@@ -65,7 +62,16 @@ struct vcpu_fault_info {
};

struct vcpu {
struct spinlock lock;
/*
* Protects accesses to the vCPU's state and architecture registers. If a
* vCPU is running, its execution lock is logically held by the
* running pCPU.
*/
struct spinlock execution_lock;
/*
* Protects accesses to the vCPU's interrupts.
*/
struct spinlock interrupts_lock;

/*
* The state is only changed in the context of the vCPU being run. This
@@ -78,26 +84,19 @@ struct vcpu {
struct vm *vm;
struct arch_regs regs;
struct interrupts interrupts;

/*
* Determine whether the 'regs' field is available for use. This is set
* to false when a vCPU is about to run on a physical CPU, and is set
* back to true when it is descheduled.
*/
bool regs_available;
};

/** Encapsulates a vCPU whose lock is held. */
struct vcpu_locked {
/** Encapsulates a vCPU whose execution lock is held. */
struct vcpu_execution_locked {
struct vcpu *vcpu;
};

struct vcpu_locked vcpu_lock(struct vcpu *vcpu);
void vcpu_unlock(struct vcpu_locked *locked);
struct vcpu_execution_locked vcpu_lock(struct vcpu *vcpu);
void vcpu_unlock(struct vcpu_execution_locked *locked);
void vcpu_init(struct vcpu *vcpu, struct vm *vm);
void vcpu_on(struct vcpu_locked vcpu, ipaddr_t entry, uintreg_t arg);
void vcpu_on(struct vcpu_execution_locked vcpu, ipaddr_t entry, uintreg_t arg);
spci_vcpu_index_t vcpu_index(const struct vcpu *vcpu);
bool vcpu_is_off(struct vcpu_locked vcpu);
bool vcpu_is_off(struct vcpu_execution_locked vcpu);
bool vcpu_secondary_reset_and_start(struct vcpu *vcpu, ipaddr_t entry,
uintreg_t arg);

73 changes: 28 additions & 45 deletions src/api.c
@@ -39,7 +39,7 @@
* acquisition of locks held concurrently by the same physical CPU. Our current
* ordering requirements are as follows:
*
* vm::lock -> vcpu::lock -> mm_stage1_lock -> dlog sl
* vcpu::execution_lock -> vm::lock -> vcpu::interrupts_lock -> mm_stage1_lock -> dlog sl
*
* Locks of the same kind require the lock of lowest address to be locked first,
* see `sl_lock_both()`.
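
For reference, the same-kind rule is what `sl_lock_both()` implements: it orders the two acquisitions by address so that two CPUs locking the same pair cannot deadlock. A rough sketch of that helper, which is unchanged by this commit and shown here only to make the rule concrete:

static inline void sl_lock_both(struct spinlock *a, struct spinlock *b)
{
	/* Always take the lower-addressed lock first. */
	if (a < b) {
		sl_lock(a);
		sl_lock(b);
	} else {
		sl_lock(b);
		sl_lock(a);
	}
}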
@@ -117,10 +117,8 @@ static struct vcpu *api_switch_to_primary(struct vcpu *current,
/* Set the return value for the primary VM's call to HF_VCPU_RUN. */
arch_regs_set_retval(&next->regs, primary_ret);

/* Mark the current vCPU as waiting. */
sl_lock(&current->lock);
/* Mark the current vcpu as waiting. */
current->state = secondary_state;
sl_unlock(&current->lock);

return next;
}
@@ -277,9 +275,7 @@ spci_vcpu_count_t api_vcpu_get_count(spci_vm_id_t vm_id,
*/
void api_regs_state_saved(struct vcpu *vcpu)
{
sl_lock(&vcpu->lock);
vcpu->regs_available = true;
sl_unlock(&vcpu->lock);
sl_unlock(&vcpu->execution_lock);
}

/**
@@ -323,7 +319,7 @@ static int64_t internal_interrupt_inject(struct vcpu *target_vcpu,
uint32_t intid_mask = 1U << (intid % INTERRUPT_REGISTER_BITS);
int64_t ret = 0;

sl_lock(&target_vcpu->lock);
sl_lock(&target_vcpu->interrupts_lock);

/*
* We only need to change state and (maybe) trigger a virtual IRQ if it
@@ -364,7 +360,7 @@ static int64_t internal_interrupt_inject(struct vcpu *target_vcpu,
/* Either way, make it pending. */
target_vcpu->interrupts.interrupt_pending[intid_index] |= intid_mask;

sl_unlock(&target_vcpu->lock);
sl_unlock(&target_vcpu->interrupts_lock);

return ret;
}
@@ -399,31 +395,18 @@ static struct spci_value spci_msg_recv_return(const struct vm *receiver)
/**
* Prepares the vCPU to run by updating its state and fetching whether a return
* value needs to be forced onto the vCPU.
*
* Returns:
* - false if it fails to prepare `vcpu` to run.
* - true if it succeeds in preparing `vcpu` to run. In this case,
*   `vcpu->execution_lock` has been acquired.
*/
static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu,
struct spci_value *run_ret)
{
bool need_vm_lock;
bool need_vm_lock = false;
bool ret;

/*
* Check that the registers are available so that the vCPU can be run.
*
* The VM lock is not needed in the common case so it must only be taken
* when it is going to be needed. This ensures there are no inter-vCPU
* dependencies in the common run case meaning the sensitive context
* switch performance is consistent.
*/
sl_lock(&vcpu->lock);

/* The VM needs to be locked to deliver mailbox messages. */
need_vm_lock = vcpu->state == VCPU_STATE_BLOCKED_MAILBOX;
if (need_vm_lock) {
sl_unlock(&vcpu->lock);
sl_lock(&vcpu->vm->lock);
sl_lock(&vcpu->lock);
}

/*
* If the vCPU is already running somewhere then we can't run it here
* simultaneously. While it is actually running then the state should be
@@ -435,7 +418,7 @@ static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu,
* until this has finished, so count this state as still running for the
* purposes of this check.
*/
if (vcpu->state == VCPU_STATE_RUNNING || !vcpu->regs_available) {
if (!sl_try_lock(&vcpu->execution_lock)) {
/*
* vCPU is running on another pCPU.
*
@@ -458,8 +441,13 @@ static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu,
goto out;
}

/* The VM needs to be locked to deliver mailbox messages. */
need_vm_lock = vcpu->state == VCPU_STATE_BLOCKED_MAILBOX;
if (need_vm_lock) {
sl_lock(&vcpu->vm->lock);
}

switch (vcpu->state) {
case VCPU_STATE_RUNNING:
case VCPU_STATE_OFF:
case VCPU_STATE_ABORTED:
ret = false;
@@ -517,23 +505,18 @@ static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu,

/* It has been decided that the vCPU should be run. */
vcpu->cpu = current->cpu;
vcpu->state = VCPU_STATE_RUNNING;

/*
* Mark the registers as unavailable now that we're about to reflect
* them onto the real registers. This will also prevent another physical
* CPU from trying to read these registers.
*/
vcpu->regs_available = false;

ret = true;

out:
sl_unlock(&vcpu->lock);
if (need_vm_lock) {
sl_unlock(&vcpu->vm->lock);
}

if (!ret) {
sl_unlock(&vcpu->execution_lock);
}

return ret;
}
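
Taken together, these changes replace the old VCPU_STATE_RUNNING/regs_available pair with ownership of `execution_lock`: api_vcpu_prepare_run() acquires it with sl_try_lock(), a successful return leaves it held across the world switch, and api_regs_state_saved() releases it once the vCPU's registers have been written back. A hypothetical caller inside src/api.c would rely on that contract roughly as below; run_one_vcpu is illustrative only, not a function from this commit:

static struct vcpu *run_one_vcpu(struct vcpu *current, struct vcpu *vcpu,
				 struct spci_value *run_ret)
{
	if (!api_vcpu_prepare_run(current, vcpu, run_ret)) {
		/* execution_lock was not acquired, e.g. the vCPU is busy. */
		return NULL;
	}

	/*
	 * execution_lock is now held by this pCPU and stays held while the
	 * vCPU runs; it is dropped again in api_regs_state_saved() after the
	 * registers have been saved.
	 */
	return vcpu;
}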

@@ -1051,15 +1034,15 @@ bool api_spci_msg_recv_block_interrupted(struct vcpu *current)
{
bool interrupted;

sl_lock(&current->lock);
sl_lock(&current->interrupts_lock);

/*
* Don't block if there are enabled and pending interrupts, to match
* behaviour of wait_for_interrupt.
*/
interrupted = (current->interrupts.enabled_and_pending_count > 0);

sl_unlock(&current->lock);
sl_unlock(&current->interrupts_lock);

return interrupted;
}
@@ -1253,7 +1236,7 @@ int64_t api_interrupt_enable(uint32_t intid, bool enable, struct vcpu *current)
return -1;
}

sl_lock(&current->lock);
sl_lock(&current->interrupts_lock);
if (enable) {
/*
* If it is pending and was not enabled before, increment the
@@ -1279,7 +1262,7 @@ int64_t api_interrupt_enable(uint32_t intid, bool enable, struct vcpu *current)
~intid_mask;
}

sl_unlock(&current->lock);
sl_unlock(&current->interrupts_lock);
return 0;
}

@@ -1297,7 +1280,7 @@ uint32_t api_interrupt_get(struct vcpu *current)
* Find the first enabled and pending interrupt ID, return it, and
* deactivate it.
*/
sl_lock(&current->lock);
sl_lock(&current->interrupts_lock);
for (i = 0; i < HF_NUM_INTIDS / INTERRUPT_REGISTER_BITS; ++i) {
uint32_t enabled_and_pending =
current->interrupts.interrupt_enabled[i] &
@@ -1317,7 +1300,7 @@ uint32_t api_interrupt_get(struct vcpu *current)
}
}

sl_unlock(&current->lock);
sl_unlock(&current->interrupts_lock);
return first_interrupt;
}

9 changes: 5 additions & 4 deletions src/arch/aarch64/hypervisor/handler.c
@@ -27,6 +27,7 @@
#include "hf/dlog.h"
#include "hf/panic.h"
#include "hf/spci.h"
#include "hf/spinlock.h"
#include "hf/vm.h"

#include "vmapi/hf/call.h"
@@ -398,20 +399,20 @@ static void update_vi(struct vcpu *next)
*/
struct vcpu *vcpu = current();

sl_lock(&vcpu->lock);
sl_lock(&vcpu->interrupts_lock);
set_virtual_interrupt_current(
vcpu->interrupts.enabled_and_pending_count > 0);
sl_unlock(&vcpu->lock);
sl_unlock(&vcpu->interrupts_lock);
} else {
/*
* About to switch vCPUs, set the bit for the vCPU to which we
* are switching in the saved copy of the register.
*/
sl_lock(&next->lock);
sl_lock(&next->interrupts_lock);
set_virtual_interrupt(
&next->regs,
next->interrupts.enabled_and_pending_count > 0);
sl_unlock(&next->lock);
sl_unlock(&next->interrupts_lock);
}
}

2 changes: 1 addition & 1 deletion src/arch/aarch64/hypervisor/psci_handler.c
@@ -296,7 +296,7 @@ bool psci_secondary_vm_handler(struct vcpu *vcpu, uint32_t func, uintreg_t arg0,
cpu_id_t target_affinity = arg0;
uint32_t lowest_affinity_level = arg1;
struct vm *vm = vcpu->vm;
struct vcpu_locked target_vcpu;
struct vcpu_execution_locked target_vcpu;
spci_vcpu_index_t target_vcpu_index =
vcpu_id_to_index(target_affinity);

6 changes: 6 additions & 0 deletions src/arch/aarch64/inc/hf/arch/spinlock.h
@@ -29,6 +29,7 @@
*/

#include <stdint.h>
#include <stdatomic.h>

#include "hf/arch/types.h"

@@ -61,6 +62,11 @@ static inline void sl_lock(struct spinlock *l)
: "cc");
}

static inline bool sl_try_lock(struct spinlock *l)
{
return !atomic_flag_test_and_set_explicit(
	(volatile atomic_flag *)&l->v, memory_order_acquire);
}

static inline void sl_unlock(struct spinlock *l)
{
/*
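The header adds sl_try_lock() without a doc comment; the intended usage pattern, inferred from its boolean return value (an assumption, not code from the commit), is the usual try-lock shape:

static inline bool example_try_critical_section(struct spinlock *l)
{
	if (!sl_try_lock(l)) {
		/* Lock is held elsewhere; back off instead of spinning. */
		return false;
	}

	/* ... critical section ... */

	sl_unlock(l);
	return true;
}

This is how api_vcpu_prepare_run() above uses it on vcpu->execution_lock: failure to acquire means the vCPU is already running on another pCPU, so the caller reports it as busy rather than blocking.
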
5 changes: 5 additions & 0 deletions src/arch/fake/inc/hf/arch/spinlock.h
@@ -36,6 +36,11 @@ static inline void sl_lock(struct spinlock *l)
}
}

static inline bool sl_try_lock(struct spinlock *l)
{
return !atomic_flag_test_and_set_explicit(&l->v, memory_order_acquire);
}

static inline void sl_unlock(struct spinlock *l)
{
atomic_flag_clear_explicit(&l->v, memory_order_release);
8 changes: 4 additions & 4 deletions src/cpu.c
@@ -141,11 +141,11 @@ bool cpu_on(struct cpu *c, ipaddr_t entry, uintreg_t arg)
if (!prev) {
struct vm *vm = vm_find(HF_PRIMARY_VM_ID);
struct vcpu *vcpu = vm_get_vcpu(vm, cpu_index(c));
struct vcpu_locked vcpu_locked;
struct vcpu_execution_locked vcpu_execution_locked;

vcpu_locked = vcpu_lock(vcpu);
vcpu_on(vcpu_locked, entry, arg);
vcpu_unlock(&vcpu_locked);
vcpu_execution_locked = vcpu_lock(vcpu);
vcpu_on(vcpu_execution_locked, entry, arg);
vcpu_unlock(&vcpu_execution_locked);
}

return prev;
8 changes: 4 additions & 4 deletions src/load.c
@@ -121,7 +121,7 @@ static bool load_primary(struct mm_stage1_locked stage1_locked,
{
struct vm *vm;
struct vm_locked vm_locked;
struct vcpu_locked vcpu_locked;
struct vcpu_execution_locked vcpu_execution_locked;
size_t i;
bool ret;

@@ -218,9 +218,9 @@ static bool load_primary(struct mm_stage1_locked stage1_locked,
dlog_info("Loaded primary VM with %u vCPUs, entry at %#x.\n",
vm->vcpu_count, pa_addr(primary_begin));

vcpu_locked = vcpu_lock(vm_get_vcpu(vm, 0));
vcpu_on(vcpu_locked, ipa_from_pa(primary_begin), params->kernel_arg);
vcpu_unlock(&vcpu_locked);
vcpu_execution_locked = vcpu_lock(vm_get_vcpu(vm, 0));
vcpu_on(vcpu_execution_locked, ipa_from_pa(primary_begin), params->kernel_arg);
vcpu_unlock(&vcpu_execution_locked);
ret = true;

out:
