introduce a type to handle the CoreId
- increase the readability of the code
stlankes committed Apr 14, 2020
1 parent 2c3dab3 commit 92df04d
Showing 8 changed files with 56 additions and 38 deletions.
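
The change itself is small: every place that passed a bare `usize` core number now uses a dedicated `CoreId` alias, defined in src/scheduler/mod.rs as `u32`. A minimal standalone sketch of the pattern (the function name here is hypothetical, not from the kernel sources):

```rust
/// Unique identifier for a core, as defined in src/scheduler/mod.rs.
pub type CoreId = u32;

/// Hypothetical function: the alias documents that the parameter is a core
/// number while remaining a plain u32 for callers.
fn notify_core(core_id: CoreId) {
    println!("notifying core {}", core_id);
}

fn main() {
    let boot_core: CoreId = 0;
    notify_core(boot_core);
    notify_core(3); // any u32 still works; an alias adds readability, not new type checking
}
```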
14 changes: 8 additions & 6 deletions src/arch/x86_64/kernel/apic.rs
@@ -21,11 +21,13 @@ use arch::x86_64::mm::paging;
use arch::x86_64::mm::paging::{BasePageSize, PageSize, PageTableEntryFlags};
use arch::x86_64::mm::virtualmem;
use config::*;
+use core::convert::TryInto;
use core::sync::atomic::spin_loop_hint;
use core::{cmp, fmt, intrinsics, mem, ptr, u32};
use environment;
use mm;
use scheduler;
+use scheduler::CoreId;
use x86::controlregs::*;
use x86::msr::*;

@@ -476,7 +478,7 @@ pub fn init_x2apic() {
}

/// Initialize the required entry.asm variables for the next CPU to be booted.
-pub fn init_next_processor_variables(core_id: usize) {
+pub fn init_next_processor_variables(core_id: CoreId) {
// Allocate stack and PerCoreVariables structure for the CPU and pass the addresses.
// Keep the stack executable to possibly support dynamically generated code on the stack (see https://security.stackexchange.com/a/47825).
let stack = mm::allocate(KERNEL_STACK_SIZE, false);
@@ -538,15 +540,15 @@ pub fn boot_application_processors() {
let core_id = core_id();

for core_id_to_boot in 0..apic_ids.len() {
-if core_id_to_boot != core_id {
+if core_id_to_boot != core_id.try_into().unwrap() {
let apic_id = apic_ids[core_id_to_boot];
let destination = u64::from(apic_id) << 32;

debug!(
"Waking up CPU {} with Local APIC ID {}",
core_id_to_boot, apic_id
);
-init_next_processor_variables(core_id_to_boot);
+init_next_processor_variables(core_id_to_boot.try_into().unwrap());

// Save the current number of initialized CPUs.
let current_processor_count = arch::get_processor_count();
@@ -597,7 +599,7 @@ pub fn ipi_tlb_flush() {

// Send an IPI with our TLB Flush interrupt number to all other CPUs.
for core_id_to_interrupt in 0..apic_ids.len() {
-if core_id_to_interrupt != core_id {
+if core_id_to_interrupt != core_id.try_into().unwrap() {
let local_apic_id = apic_ids[core_id_to_interrupt];
let destination = u64::from(local_apic_id) << 32;
local_apic_write(
@@ -612,10 +614,10 @@
}

/// Send an inter-processor interrupt to wake up a CPU Core that is in a HALT state.
-pub fn wakeup_core(core_id_to_wakeup: usize) {
+pub fn wakeup_core(core_id_to_wakeup: CoreId) {
if core_id_to_wakeup != core_id() {
let apic_ids = unsafe { CPU_LOCAL_APIC_IDS.as_ref().unwrap() };
-let local_apic_id = apic_ids[core_id_to_wakeup];
+let local_apic_id = apic_ids[core_id_to_wakeup as usize];
let destination = u64::from(local_apic_id) << 32;
local_apic_write(
IA32_X2APIC_ICR,
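
In apic.rs the loop indices over `apic_ids` stay `usize`, while `core_id()` now returns the `u32`-based `CoreId`, hence the `try_into().unwrap()` at the comparison sites. A runnable sketch of that conversion pattern, with a made-up APIC ID table standing in for `CPU_LOCAL_APIC_IDS`:

```rust
use std::convert::TryInto;

type CoreId = u32;

fn main() {
    // Hypothetical stand-in for the kernel's CPU_LOCAL_APIC_IDS table.
    let apic_ids: Vec<u8> = vec![0, 2, 4, 6];
    let core_id: CoreId = 0; // the core running this code, in this sketch

    for core_id_to_boot in 0..apic_ids.len() {
        // The index is a usize; the CoreId must be converted before comparing.
        if core_id_to_boot != core_id.try_into().unwrap() {
            println!(
                "would boot core {} with Local APIC ID {}",
                core_id_to_boot, apic_ids[core_id_to_boot]
            );
        }
    }
}
```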
4 changes: 2 additions & 2 deletions src/arch/x86_64/kernel/mod.rs
@@ -164,8 +164,8 @@ pub fn get_mbinfo() -> usize {
unsafe { intrinsics::volatile_load(&(*BOOT_INFO).mb_info) as usize }
}

-pub fn get_processor_count() -> usize {
-unsafe { intrinsics::volatile_load(&(*BOOT_INFO).cpu_online) as usize }
+pub fn get_processor_count() -> u32 {
+unsafe { intrinsics::volatile_load(&(*BOOT_INFO).cpu_online) as u32 }
}

/// Whether HermitCore is running under the "uhyve" hypervisor.
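
`get_processor_count()` now returns `cpu_online` with its natural 32-bit width instead of widening it to `usize` at the read site; callers that need a `usize` convert explicitly (see src/syscalls/processor.rs below). A sketch of the same idea with a stubbed boot-info structure and the stable `ptr::read_volatile` in place of `intrinsics::volatile_load`:

```rust
use std::ptr;

// Hypothetical stand-in for the bootloader-defined BootInfo; only the
// cpu_online field is visible in this diff.
#[repr(C)]
struct BootInfoStub {
    cpu_online: u32,
}

static BOOT_INFO_STUB: BootInfoStub = BootInfoStub { cpu_online: 4 };

/// Read the online-CPU counter with its native width.
fn get_processor_count() -> u32 {
    unsafe { ptr::read_volatile(&BOOT_INFO_STUB.cpu_online) }
}

fn main() {
    println!("{} CPUs online", get_processor_count());
}
```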
10 changes: 5 additions & 5 deletions src/arch/x86_64/kernel/percore.rs
@@ -7,23 +7,23 @@

use arch::x86_64::kernel::BOOT_INFO;
use core::{intrinsics, ptr};
-use scheduler::PerCoreScheduler;
+use scheduler::{CoreId, PerCoreScheduler};
use x86::bits64::task::TaskStateSegment;
use x86::msr::*;

pub static mut PERCORE: PerCoreVariables = PerCoreVariables::new(0);

pub struct PerCoreVariables {
/// Sequential ID of this CPU Core.
-core_id: PerCoreVariable<usize>,
+core_id: PerCoreVariable<CoreId>,
/// Scheduler for this CPU Core.
scheduler: PerCoreVariable<*mut PerCoreScheduler>,
/// Task State Segment (TSS) allocated for this CPU Core.
pub tss: PerCoreVariable<*mut TaskStateSegment>,
}

impl PerCoreVariables {
-pub const fn new(core_id: usize) -> Self {
+pub const fn new(core_id: CoreId) -> Self {
Self {
core_id: PerCoreVariable::new(core_id),
scheduler: PerCoreVariable::new(ptr::null_mut() as *mut PerCoreScheduler),
@@ -94,12 +94,12 @@ impl<T: Is32BitVariable> PerCoreVariableMethods<T> for PerCoreVariable<T> {

#[cfg(not(test))]
#[inline]
-pub fn core_id() -> usize {
+pub fn core_id() -> CoreId {
unsafe { PERCORE.core_id.get() }
}

#[cfg(test)]
-pub fn core_id() -> usize {
+pub fn core_id() -> CoreId {
0
}

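
The per-core `core_id` variable changes from `PerCoreVariable<usize>` to `PerCoreVariable<CoreId>`; since `CoreId` is `u32`, it still satisfies the `Is32BitVariable` bound visible in the hunk header above. A simplified, hypothetical model of that bound (the real accessors use gs-relative reads, which this sketch does not reproduce):

```rust
type CoreId = u32;

// Simplified stand-ins for the kernel's PerCoreVariable and the
// Is32BitVariable marker trait mentioned in the hunk header.
struct PerCoreVariable<T> {
    data: T,
}

impl<T> PerCoreVariable<T> {
    const fn new(data: T) -> Self {
        Self { data }
    }
}

/// Marker for payloads that fit the 32-bit access path.
trait Is32BitVariable {}
impl Is32BitVariable for u32 {}

trait PerCoreVariableMethods<T> {
    fn get(&self) -> T;
}

impl<T: Is32BitVariable + Copy> PerCoreVariableMethods<T> for PerCoreVariable<T> {
    fn get(&self) -> T {
        self.data
    }
}

fn main() {
    // CoreId is u32, so it still satisfies the Is32BitVariable bound.
    let core_id: PerCoreVariable<CoreId> = PerCoreVariable::new(0);
    println!("core id: {}", core_id.get());
}
```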
10 changes: 5 additions & 5 deletions src/collections/mod.rs
@@ -14,15 +14,15 @@ pub use self::doublylinkedlist::*;
pub struct AvoidInterrupts(bool);

impl AvoidInterrupts {
-#[inline]
-pub fn new() -> Self {
-Self(irq::nested_disable())
-}
+#[inline]
+pub fn new() -> Self {
+Self(irq::nested_disable())
+}
}

impl Drop for AvoidInterrupts {
#[inline]
fn drop(&mut self) {
irq::nested_enable(self.0);
}
}
}
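
The collections/mod.rs hunk only re-indents `AvoidInterrupts`, but the scheduler code below leans on its RAII behavior: interrupts are disabled in `new()` and restored on `Drop`. A self-contained model with the kernel's `irq` module replaced by a stub:

```rust
// Illustrative only: irq::nested_disable()/nested_enable() are stubbed here.
mod irq {
    use std::sync::atomic::{AtomicBool, Ordering};
    static ENABLED: AtomicBool = AtomicBool::new(true);

    /// "Disable interrupts" and report whether they were enabled before.
    pub fn nested_disable() -> bool {
        ENABLED.swap(false, Ordering::SeqCst)
    }

    /// Restore the previous interrupt state.
    pub fn nested_enable(was_enabled: bool) {
        ENABLED.store(was_enabled, Ordering::SeqCst);
    }
}

pub struct AvoidInterrupts(bool);

impl AvoidInterrupts {
    #[inline]
    pub fn new() -> Self {
        Self(irq::nested_disable())
    }
}

impl Drop for AvoidInterrupts {
    #[inline]
    fn drop(&mut self) {
        irq::nested_enable(self.0);
    }
}

fn main() {
    // Binding the guard to a name keeps interrupts "disabled" until the end of
    // the scope; binding to the `_` wildcard drops it at the end of the statement.
    let _guard = AvoidInterrupts::new();
    // ... critical section ...
}
```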
24 changes: 16 additions & 8 deletions src/scheduler/mod.rs
@@ -18,22 +18,23 @@ use arch::switch;
use collections::AvoidInterrupts;
use config::*;
use core::cell::RefCell;
-use core::sync::atomic::{AtomicU32, AtomicUsize, Ordering};
+use core::sync::atomic::{AtomicU32, Ordering};
use scheduler::task::*;
use synch::spinlock::*;

/// Time slice of a task in microseconds.
/// When this time has elapsed and the scheduler is called, it may switch to another ready task.
pub const TASK_TIME_SLICE: u64 = 10_000;

-static NEXT_CORE_ID: AtomicUsize = AtomicUsize::new(1);
static NO_TASKS: AtomicU32 = AtomicU32::new(0);
/// Map between Core ID and per-core scheduler
-static mut SCHEDULERS: Option<BTreeMap<usize, &PerCoreScheduler>> = None;
+static mut SCHEDULERS: Option<BTreeMap<CoreId, &PerCoreScheduler>> = None;
/// Map between Task ID and Task Control Block
static TASKS: SpinlockIrqSave<Option<BTreeMap<TaskId, VecDeque<TaskHandle>>>> =
SpinlockIrqSave::new(None);
-static TID_COUNTER: AtomicU32 = AtomicU32::new(0);
+
+/// Unique identifier for a core.
+pub type CoreId = u32;

struct SchedulerInput {
/// Queue of new tasks
@@ -52,7 +53,7 @@ impl SchedulerInput {
}
pub struct PerCoreScheduler {
/// Core ID of this per-core scheduler
-core_id: usize,
+core_id: CoreId,
/// Task which is currently running
current_task: Rc<RefCell<Task>>,
/// Idle Task
Expand All @@ -73,7 +74,12 @@ pub struct PerCoreScheduler {

impl PerCoreScheduler {
/// Spawn a new task.
-pub fn spawn(func: extern "C" fn(usize), arg: usize, prio: Priority, core_id: usize) -> TaskId {
+pub fn spawn(
+func: extern "C" fn(usize),
+arg: usize,
+prio: Priority,
+core_id: CoreId,
+) -> TaskId {
// Create the new task.
let tid = get_tid();
let task = Rc::new(RefCell::new(Task::new(
@@ -139,10 +145,11 @@ impl PerCoreScheduler {
}

pub fn clone(&self, func: extern "C" fn(usize), arg: usize) -> TaskId {
+static NEXT_CORE_ID: AtomicU32 = AtomicU32::new(1);
let _ = AvoidInterrupts::new();

// Get the Core ID of the next CPU.
-let core_id = {
+let core_id: CoreId = {
// Increase the CPU number by 1.
let id = NEXT_CORE_ID.fetch_add(1, Ordering::SeqCst);

@@ -447,6 +454,7 @@ impl PerCoreScheduler {
}

fn get_tid() -> TaskId {
+static TID_COUNTER: AtomicU32 = AtomicU32::new(0);
let mut guard = TASKS.lock();

loop {
@@ -503,7 +511,7 @@ pub fn add_current_core() {
}

#[inline]
-fn get_scheduler(core_id: usize) -> &'static PerCoreScheduler {
+fn get_scheduler(core_id: CoreId) -> &'static PerCoreScheduler {
// Get the scheduler for the desired core.
if let Some(result) = unsafe { SCHEDULERS.as_ref().unwrap().get(&core_id) } {
result
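
Besides the new alias, the scheduler hunks move `NEXT_CORE_ID` and `TID_COUNTER` from module scope into the only functions that use them, as function-local statics. A sketch of that scoping, combining the `fetch_add` shown in `clone()` with the wrap-around modulo that `sys_spawn` uses (the rest of `clone()` is truncated in this diff); `processor_count` is a stand-in for `arch::get_processor_count()`:

```rust
use std::sync::atomic::{AtomicU32, Ordering};

type CoreId = u32;

// Hypothetical helper: a function-local static keeps the counter visible only
// where it is actually used.
fn next_core_id(processor_count: u32) -> CoreId {
    static NEXT_CORE_ID: AtomicU32 = AtomicU32::new(1);
    // Increase the CPU number by 1 and wrap around the number of cores.
    NEXT_CORE_ID.fetch_add(1, Ordering::SeqCst) % processor_count
}

fn main() {
    for _ in 0..5 {
        println!("next core: {}", next_core_id(4));
    }
}
```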
15 changes: 8 additions & 7 deletions src/scheduler/task.rs
@@ -15,6 +15,7 @@ use arch::scheduler::{TaskStacks, TaskTLS};
use collections::{DoublyLinkedList, Node};
use core::cell::RefCell;
use core::fmt;
+use scheduler::CoreId;

/// The status of the task - used for scheduling
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
@@ -90,19 +91,19 @@ pub const NO_PRIORITIES: usize = 31;
pub struct TaskHandle {
id: TaskId,
priority: Priority,
-core_id: usize,
+core_id: CoreId,
}

impl TaskHandle {
-pub fn new(id: TaskId, priority: Priority, core_id: usize) -> Self {
+pub fn new(id: TaskId, priority: Priority, core_id: CoreId) -> Self {
Self {
id: id,
priority: priority,
core_id: core_id,
}
}

-pub fn get_core_id(&self) -> usize {
+pub fn get_core_id(&self) -> CoreId {
self.core_id
}

@@ -362,7 +363,7 @@ pub struct Task {
/// Last FPU state before a context switch to another task using the FPU
pub last_fpu_state: arch::processor::FPUState,
/// ID of the core this task is running on
-pub core_id: usize,
+pub core_id: CoreId,
/// Stack of the task
pub stacks: TaskStacks,
/// next task in queue
@@ -384,7 +385,7 @@ pub trait TaskFrame {
}

impl Task {
-pub fn new(tid: TaskId, core_id: usize, task_status: TaskStatus, task_prio: Priority) -> Task {
+pub fn new(tid: TaskId, core_id: CoreId, task_status: TaskStatus, task_prio: Priority) -> Task {
debug!("Creating new task {}", tid);

Task {
@@ -404,7 +405,7 @@
}
}

-pub fn new_idle(tid: TaskId, core_id: usize) -> Task {
+pub fn new_idle(tid: TaskId, core_id: CoreId) -> Task {
debug!("Creating idle task {}", tid);

Task {
@@ -424,7 +425,7 @@
}
}

-pub fn clone(tid: TaskId, core_id: usize, task: &Task) -> Task {
+pub fn clone(tid: TaskId, core_id: CoreId, task: &Task) -> Task {
debug!("Cloning task {} from task {}", tid, task.id);

Task {
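
Because `CoreId` is a type alias rather than a newtype, the updated `TaskHandle` and `Task` signatures accept any `u32` where a core ID is expected; the gain is readability, not extra type checking. A hypothetical sketch contrasting the alias with a stricter newtype alternative, using simplified stand-ins for the task types:

```rust
type CoreId = u32; // what the commit uses: better names, no new type checking

#[allow(dead_code)]
struct StrictCoreId(u32); // hypothetical newtype: would reject bare integers

#[derive(Debug)]
struct TaskHandle {
    id: u32,       // simplified stand-in for TaskId
    priority: u8,  // simplified stand-in for Priority
    core_id: CoreId,
}

impl TaskHandle {
    fn new(id: u32, priority: u8, core_id: CoreId) -> Self {
        // Field-init shorthand, equivalent to `core_id: core_id`.
        Self { id, priority, core_id }
    }

    fn get_core_id(&self) -> CoreId {
        self.core_id
    }
}

fn main() {
    let handle = TaskHandle::new(1, 16, 0); // a bare 0 is accepted as a CoreId
    println!("{:?} runs on core {}", handle, handle.get_core_id());
    // With StrictCoreId, the call above would need an explicit StrictCoreId(0).
}
```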
3 changes: 2 additions & 1 deletion src/syscalls/processor.rs
@@ -6,11 +6,12 @@
// copied, modified, or distributed except according to those terms.

use arch;
+use core::convert::TryInto;

/** Returns the number of processors currently online. */
#[no_mangle]
pub extern "C" fn sys_get_processor_count() -> usize {
-arch::get_processor_count()
+arch::get_processor_count().try_into().unwrap()
}

/** Returns the processor frequency in MHz. */
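
`sys_get_processor_count()` keeps its C-facing `usize` return type and widens the new `u32` with `try_into().unwrap()`; on 64-bit targets that conversion cannot fail, and it documents the width change more explicitly than an `as` cast would. A minimal sketch with a stubbed counter:

```rust
use std::convert::TryInto;

// Stand-in for the real arch::get_processor_count().
fn get_processor_count() -> u32 {
    4
}

/// Widen the 32-bit counter at the syscall boundary.
pub extern "C" fn sys_get_processor_count() -> usize {
    get_processor_count().try_into().unwrap()
}

fn main() {
    assert_eq!(sys_get_processor_count(), 4);
}
```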
14 changes: 10 additions & 4 deletions src/syscalls/tasks.rs
@@ -8,8 +8,9 @@
use arch;
use arch::kernel::get_processor_count;
use arch::percore::*;
+use core::convert::TryInto;
use core::isize;
-use core::sync::atomic::{AtomicUsize, Ordering};
+use core::sync::atomic::{AtomicU32, Ordering};
use errno::*;
#[cfg(feature = "newlib")]
use mm::{task_heap_end, task_heap_start};
@@ -175,16 +176,21 @@ pub extern "C" fn sys_spawn(
prio: u8,
selector: isize,
) -> i32 {
-static CORE_COUNTER: AtomicUsize = AtomicUsize::new(1);
+static CORE_COUNTER: AtomicU32 = AtomicU32::new(1);

let core_id = if selector < 0 {
// use Round Robin to schedule the cores
CORE_COUNTER.fetch_add(1, Ordering::SeqCst) % get_processor_count()
} else {
-selector as usize
+selector as u32
};

-let task_id = scheduler::PerCoreScheduler::spawn(func, arg, Priority::from(prio), core_id);
+let task_id = scheduler::PerCoreScheduler::spawn(
+func,
+arg,
+Priority::from(prio),
+core_id.try_into().unwrap(),
+);
if !id.is_null() {
unsafe {
*id = task_id.into() as u32;
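
In `sys_spawn`, `CORE_COUNTER` becomes an `AtomicU32` so the round-robin result already has the `CoreId` width: a negative selector picks the next core round-robin, a non-negative selector pins the task. The selection logic in isolation, with `processor_count` standing in for `get_processor_count()`:

```rust
use std::sync::atomic::{AtomicU32, Ordering};

// Hypothetical helper mirroring the selector handling in sys_spawn.
fn select_core(selector: isize, processor_count: u32) -> u32 {
    static CORE_COUNTER: AtomicU32 = AtomicU32::new(1);

    if selector < 0 {
        // use Round Robin to schedule the cores
        CORE_COUNTER.fetch_add(1, Ordering::SeqCst) % processor_count
    } else {
        selector as u32
    }
}

fn main() {
    // Round-robin picks over 4 cores...
    for _ in 0..4 {
        println!("round robin -> core {}", select_core(-1, 4));
    }
    // ...while an explicit selector is used verbatim.
    println!("pinned -> core {}", select_core(2, 4));
}
```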
