PX4: Add support for CONFIG_SMP=y (NuttX only) #827

Merged · 7 commits · Dec 10, 2024
@@ -89,8 +89,14 @@ class WorkQueue : public IntrusiveSortedListNode<WorkQueue *>
 #ifdef __PX4_NUTTX
 	// In NuttX work can be enqueued from an ISR
 #ifdef CONFIG_BUILD_FLAT
+#ifdef CONFIG_SMP
+	void work_lock() { _flags = spin_lock_irqsave_wo_note(&_spinlock); }
+	void work_unlock() { spin_unlock_irqrestore_wo_note(&_spinlock, _flags); }
+	spinlock_t _spinlock;
+#else
 	void work_lock() { _flags = enter_critical_section(); }
 	void work_unlock() { leave_critical_section(_flags); }
+#endif
 	irqstate_t _flags;
 #else
 	// For non-flat targets, work is enqueued by user threads as well
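For readers unfamiliar with the NuttX locking primitives: the hunk above swaps a plain interrupt-masking critical section for a spinlock-plus-irqsave pair when CONFIG_SMP is set, because masking interrupts only stops the local CPU while another CPU can still enter the queue concurrently. The sketch below shows the same pattern in isolation; it is illustrative only (the struct and function names are not from PX4) and assumes the NuttX <nuttx/spinlock.h> API, where the _wo_note variants take the lock without emitting scheduler-instrumentation notes.

// Illustrative sketch only -- not the PX4 WorkQueue class.
// Assumes the NuttX spinlock API from <nuttx/spinlock.h>.

#include <nuttx/irq.h>
#include <nuttx/spinlock.h>

struct queue_lock {
#ifdef CONFIG_SMP
	spinlock_t sl = SP_UNLOCKED;  // serializes CPUs; must start unlocked
#endif
	irqstate_t flags;             // saved local interrupt state
};

static inline void queue_lock_acquire(struct queue_lock *l)
{
#ifdef CONFIG_SMP
	// Masks interrupts on this CPU and spins until no other CPU holds
	// the lock; _wo_note skips scheduler instrumentation notes.
	l->flags = spin_lock_irqsave_wo_note(&l->sl);
#else
	// Uniprocessor: masking interrupts alone is sufficient.
	l->flags = enter_critical_section();
#endif
}

static inline void queue_lock_release(struct queue_lock *l)
{
#ifdef CONFIG_SMP
	spin_unlock_irqrestore_wo_note(&l->sl, l->flags);
#else
	leave_critical_section(l->flags);
#endif
}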
77 changes: 50 additions & 27 deletions platforms/nuttx/src/px4/common/cpuload.cpp
@@ -69,21 +69,21 @@ void init_task_hash(void)
 static struct system_load_taskinfo_s *get_task_info(pid_t pid)
 {
 	struct system_load_taskinfo_s *ret = NULL;
-	irqstate_t flags = enter_critical_section();
+	irqstate_t flags = px4_enter_critical_section();
 
 	if (hashtab) {
 		ret = hashtab[HASH(pid)];
 	}
 
-	leave_critical_section(flags);
+	px4_leave_critical_section(flags);
 	return ret;
 }
 
 static void drop_task_info(pid_t pid)
 {
-	irqstate_t flags = enter_critical_section();
+	irqstate_t flags = px4_enter_critical_section();
 	hashtab[HASH(pid)] = NULL;
-	leave_critical_section(flags);
+	px4_leave_critical_section(flags);
 }
 
 static int hash_task_info(struct system_load_taskinfo_s *task_info, pid_t pid)
@@ -95,7 +95,7 @@ static int hash_task_info(struct system_load_taskinfo_s *task_info, pid_t pid)
 
 	/* Use critical section to protect the hash table */
 
-	irqstate_t flags = enter_critical_section();
+	irqstate_t flags = px4_enter_critical_section();
 
 	/* Keep trying until we get it or run out of memory */
 
@@ -109,7 +109,7 @@ static int hash_task_info(struct system_load_taskinfo_s *task_info, pid_t pid)
 
 	if (hashtab[hash] == NULL) {
 		hashtab[hash] = task_info;
-		leave_critical_section(flags);
+		px4_leave_critical_section(flags);
 		return OK;
 	}
 
@@ -118,7 +118,7 @@ static int hash_task_info(struct system_load_taskinfo_s *task_info, pid_t pid)
 	newtab = (struct system_load_taskinfo_s **)kmm_zalloc(hashtab_size * 2 * sizeof(*newtab));
 
 	if (newtab == NULL) {
-		leave_critical_section(flags);
+		px4_leave_critical_section(flags);
 		return -ENOMEM;
 	}
 
@@ -160,16 +160,16 @@ void cpuload_monitor_start()
 {
 	if (cpuload_monitor_all_count.fetch_add(1) == 0) {
 		// if the count was previously 0 (idle thread only) then clear any existing runtime data
-		sched_lock();
+		irqstate_t flags = px4_enter_critical_section();
 
 		system_load.start_time = hrt_absolute_time();
 
-		for (int i = 1; i < CONFIG_FS_PROCFS_MAX_TASKS; i++) {
+		for (int i = CONFIG_SMP_NCPUS; i < CONFIG_FS_PROCFS_MAX_TASKS; i++) {
 			system_load.tasks[i].total_runtime = 0;
 			system_load.tasks[i].curr_start_time = 0;
 		}
 
-		sched_unlock();
+		px4_leave_critical_section(flags);
 	}
 }
 
@@ -191,25 +191,14 @@ void cpuload_initialize_once()
 		task.valid = false;
 	}
 
-	int static_tasks_count = 2;	// there are at least 2 threads that should be initialized statically - "idle" and "init"
-
-#ifdef CONFIG_PAGING
-	static_tasks_count++;	// include paging thread in initialization
-#endif /* CONFIG_PAGING */
-#if CONFIG_SCHED_WORKQUEUE
-	static_tasks_count++;	// include high priority work0 thread in initialization
-#endif /* CONFIG_SCHED_WORKQUEUE */
-#if CONFIG_SCHED_LPWORK
-	static_tasks_count++;	// include low priority work1 thread in initialization
-#endif /* CONFIG_SCHED_WORKQUEUE */
-
 	// perform static initialization of "system" threads
-	for (system_load.total_count = 0; system_load.total_count < static_tasks_count; system_load.total_count++) {
+	for (system_load.total_count = 0; system_load.total_count < CONFIG_SMP_NCPUS; system_load.total_count++) {
 		system_load.tasks[system_load.total_count].total_runtime = 0;
 		system_load.tasks[system_load.total_count].curr_start_time = 0;
 		system_load.tasks[system_load.total_count].tcb = nxsched_get_tcb(
 				system_load.total_count);	// it is assumed that these static threads have consecutive PIDs
 		system_load.tasks[system_load.total_count].valid = true;
+		hash_task_info(&system_load.tasks[system_load.total_count], system_load.total_count);
 	}
 
 	system_load.initialized = true;
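The bounds in the two hunks above rely on a NuttX convention: each CPU gets its own idle thread, created before anything else, so the idle threads occupy PIDs 0 through CONFIG_SMP_NCPUS - 1 (the loop comment notes the consecutive-PID assumption). A small self-contained demo of that indexing assumption (hypothetical values, not PX4 code):

#include <stdio.h>

#ifndef CONFIG_SMP_NCPUS
#define CONFIG_SMP_NCPUS 4  // assumed core count for this demo
#endif

// Assumed NuttX convention: the idle thread of CPU n holds PID n, so the
// first CONFIG_SMP_NCPUS task slots belong to the per-CPU idle threads.
static const char *classify(int pid)
{
	return (pid < CONFIG_SMP_NCPUS) ? "per-CPU idle thread" : "regular task";
}

int main(void)
{
	for (int pid = 0; pid < CONFIG_SMP_NCPUS + 2; pid++) {
		printf("pid %d: %s\n", pid, classify(pid));
	}

	return 0;
}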
@@ -265,8 +254,8 @@ void sched_note_stop(FAR struct tcb_s *tcb)
 void sched_note_suspend(FAR struct tcb_s *tcb)
 {
 	if (system_load.initialized) {
-		if (tcb->pid == 0) {
-			system_load.tasks[0].total_runtime += hrt_elapsed_time(&system_load.tasks[0].curr_start_time);
+		if (tcb->pid < CONFIG_SMP_NCPUS) {
+			system_load.tasks[tcb->pid].total_runtime += hrt_elapsed_time(&system_load.tasks[tcb->pid].curr_start_time);
 			return;
 
 		} else {
@@ -294,8 +283,8 @@ void sched_note_suspend(FAR struct tcb_s *tcb)
 void sched_note_resume(FAR struct tcb_s *tcb)
 {
 	if (system_load.initialized) {
-		if (tcb->pid == 0) {
-			hrt_store_absolute_time(&system_load.tasks[0].curr_start_time);
+		if (tcb->pid < CONFIG_SMP_NCPUS) {
+			hrt_store_absolute_time(&system_load.tasks[tcb->pid].curr_start_time);
 			return;
 
 		} else {
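The two hooks above implement simple interval accounting, now per idle thread rather than for PID 0 only: resume stamps the switch-in time, suspend adds the elapsed interval to the task's total. A stripped-down model of that bookkeeping (a sketch with a stand-in clock, not the PX4 implementation):

#include <stdint.h>
#include <time.h>

// Stand-in for hrt_absolute_time(): monotonic microseconds.
static uint64_t now_usec(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000 + (uint64_t)ts.tv_nsec / 1000;
}

struct task_slot {
	uint64_t total_runtime;    // accumulated CPU time in microseconds
	uint64_t curr_start_time;  // timestamp of the last switch-in
};

// Mirrors sched_note_resume(): record when the task starts running.
static void on_switch_in(struct task_slot *t)
{
	t->curr_start_time = now_usec();
}

// Mirrors sched_note_suspend(): add the completed run interval.
static void on_switch_out(struct task_slot *t)
{
	t->total_runtime += now_usec() - t->curr_start_time;
}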
@@ -343,5 +332,39 @@ void sched_note_syscall_enter(int nr);
 
 #endif
 
+#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_INSTRUMENTATION)
+void sched_note_cpu_start(FAR struct tcb_s *tcb, int cpu)
+{
+	/* Not interesting for us */
+}
+
+void sched_note_cpu_started(FAR struct tcb_s *tcb)
+{
+	/* Not interesting for us */
+}
+#endif
+
+#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_INSTRUMENTATION_SWITCH)
+void sched_note_cpu_pause(FAR struct tcb_s *tcb, int cpu)
+{
+	/* Not interesting for us */
+}
+
+void sched_note_cpu_paused(FAR struct tcb_s *tcb)
+{
+	/* Handled via sched_note_suspend */
+}
+
+void sched_note_cpu_resume(FAR struct tcb_s *tcb, int cpu)
+{
+	/* Not interesting for us */
+}
+
+void sched_note_cpu_resumed(FAR struct tcb_s *tcb)
+{
+	/* Handled via sched_note_resume */
+}
+#endif
+
 __END_DECLS
 #endif // PX4_NUTTX && CONFIG_SCHED_INSTRUMENTATION
29 changes: 19 additions & 10 deletions platforms/nuttx/src/px4/common/hrt_ioctl.c
@@ -43,6 +43,7 @@
 
 #include <drivers/drv_hrt.h>
 #include <nuttx/kmalloc.h>
+#include <nuttx/spinlock.h>
 #include <queue.h>
 
 #ifndef MODULE_NAME
@@ -59,6 +60,14 @@ static sq_queue_t callout_queue;
 static sq_queue_t callout_freelist;
 static sq_queue_t callout_inflight;
 
+/* SMP spinlock protecting the HRT ioctl callout queues.
+ *
+ * Note: when SMP=no the spinlock reduces to a normal critical section,
+ * i.e. it only disables interrupts.
+ */
+
+static spinlock_t g_hrt_ioctl_lock = SP_UNLOCKED;
+
 /* Check if entry is in list */
 
 static bool entry_inlist(sq_queue_t *queue, sq_entry_t *item)
@@ -124,7 +133,7 @@ static struct usr_hrt_call *dup_entry(const px4_hrt_handle_t handle, struct hrt_
 {
 	struct usr_hrt_call *e = NULL;
 
-	irqstate_t flags = px4_enter_critical_section();
+	irqstate_t flags = spin_lock_irqsave_wo_note(&g_hrt_ioctl_lock);
 
 	/* check if this is already queued */
 	e = pop_entry(&callout_queue, handle, entry);
@@ -134,7 +143,7 @@ static struct usr_hrt_call *dup_entry(const px4_hrt_handle_t handle, struct hrt_
 		e = (void *)sq_remfirst(&callout_freelist);
 	}
 
-	px4_leave_critical_section(flags);
+	spin_unlock_irqrestore_wo_note(&g_hrt_ioctl_lock, flags);
 
 	if (!e) {
 		/* Allocate a new kernel side item for the user call */
@@ -156,9 +165,9 @@ static struct usr_hrt_call *dup_entry(const px4_hrt_handle_t handle, struct hrt_
 		e->usr_entry = entry;
 
 		/* Add this to the callout_queue list */
-		flags = px4_enter_critical_section();
+		flags = spin_lock_irqsave_wo_note(&g_hrt_ioctl_lock);
 		sq_addfirst(&e->list_item, &callout_queue);
-		px4_leave_critical_section(flags);
+		spin_unlock_irqrestore_wo_note(&g_hrt_ioctl_lock, flags);
 
 	} else {
 		PX4_ERR("out of memory");
@@ -222,7 +231,7 @@ hrt_ioctl(unsigned int cmd, unsigned long arg)
 			px4_sem_wait(callout_sem);
 
 			/* Atomically update the pointer to user side hrt entry */
-			flags = px4_enter_critical_section();
+			flags = spin_lock_irqsave_wo_note(&g_hrt_ioctl_lock);
 			e = pop_user(&callout_inflight, callout_sem);
 
 			if (e) {
@@ -241,7 +250,7 @@ hrt_ioctl(unsigned int cmd, unsigned long arg)
 				PX4_ERR("HRT_WAITEVENT error no entry");
 			}
 
-			px4_leave_critical_section(flags);
+			spin_unlock_irqrestore_wo_note(&g_hrt_ioctl_lock, flags);
 		}
 		break;
 
@@ -279,7 +288,7 @@ hrt_ioctl(unsigned int cmd, unsigned long arg)
 	case HRT_CANCEL:
 		if (h && h->entry) {
 			/* Find the user entry */
-			irqstate_t flags = px4_enter_critical_section();
+			irqstate_t flags = spin_lock_irqsave_wo_note(&g_hrt_ioctl_lock);
 			struct usr_hrt_call *e = pop_entry(&callout_queue, h->handle, h->entry);
 
 			if (e) {
@@ -288,7 +297,7 @@ hrt_ioctl(unsigned int cmd, unsigned long arg)
 
 			}
 
-			px4_leave_critical_section(flags);
+			spin_unlock_irqrestore_wo_note(&g_hrt_ioctl_lock, flags);
 
 		} else {
 			PX4_ERR("HRT_CANCEL called with NULL entry");
@@ -330,7 +339,7 @@ hrt_ioctl(unsigned int cmd, unsigned long arg)
 			struct usr_hrt_call *e;
 			irqstate_t flags;
 
-			flags = px4_enter_critical_section();
+			flags = spin_lock_irqsave_wo_note(&g_hrt_ioctl_lock);
 
 			while ((e = (void *)sq_remfirst(&callout_queue))) {
 				if (callback_sem == e->entry.callout_sem) {
@@ -343,7 +352,7 @@ hrt_ioctl(unsigned int cmd, unsigned long arg)
 
 			px4_sem_destroy(callback_sem);
 
-			px4_leave_critical_section(flags);
+			spin_unlock_irqrestore_wo_note(&g_hrt_ioctl_lock, flags);
 
 			*(px4_sem_t **)arg = NULL;
 			kmm_free(callback_sem);
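All of the hunks in this file follow one pattern: the global px4_enter/leave_critical_section() pair around the callout queues is replaced by the file-local g_hrt_ioctl_lock spinlock. A condensed sketch of that pattern (one queue operation shown, other details elided; not the full PX4 code):

#include <nuttx/spinlock.h>
#include <queue.h>

static sq_queue_t callout_queue;                   // protected data
static spinlock_t g_hrt_ioctl_lock = SP_UNLOCKED;  // static init, no setup call

static void callout_push(sq_entry_t *item)
{
	// On SMP this serializes all CPUs; on a uniprocessor build the
	// spinlock reduces to plain interrupt masking, as the comment on
	// g_hrt_ioctl_lock notes.
	irqstate_t flags = spin_lock_irqsave_wo_note(&g_hrt_ioctl_lock);
	sq_addfirst(item, &callout_queue);
	spin_unlock_irqrestore_wo_note(&g_hrt_ioctl_lock, flags);
}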
@@ -33,9 +33,12 @@
 
 #pragma once
 
+#include <nuttx/config.h>
+
 #include <stdint.h>
 
 #include <nuttx/irq.h>
+#include <nuttx/spinlock.h>
 
 #include <px4_platform_common/sem.h>
 
@@ -54,6 +57,9 @@ class atomic_block
 		px4_sem_t _lock;
 		irqstate_t _irqlock;
 	};
+#ifdef CONFIG_SMP
+	spinlock_t _spinlock;
+#endif
 };
 
 }
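The diff adds only the member; how atomic_block actually takes the spinlock is outside the shown hunks. A hypothetical sketch of the flat-build locking such a member enables (class, method, and field names assumed for illustration, not taken from the PR):

// Hypothetical sketch -- the PR shows only the added _spinlock member.
#include <nuttx/irq.h>
#include <nuttx/spinlock.h>

class atomic_section_sketch
{
public:
	void start()
	{
#ifdef CONFIG_SMP
		// Lock out other CPUs as well as local interrupts.
		_irqlock = spin_lock_irqsave_wo_note(&_spinlock);
#else
		_irqlock = enter_critical_section();
#endif
	}

	void finish()
	{
#ifdef CONFIG_SMP
		spin_unlock_irqrestore_wo_note(&_spinlock, _irqlock);
#else
		leave_critical_section(_irqlock);
#endif
	}

private:
	irqstate_t _irqlock;
#ifdef CONFIG_SMP
	spinlock_t _spinlock = SP_UNLOCKED;
#endif
};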
4 changes: 4 additions & 0 deletions platforms/nuttx/src/px4/common/include/px4_platform/cpuload.h
@@ -39,6 +39,10 @@
 #define CONFIG_FS_PROCFS_MAX_TASKS 64
 #endif
 
+#ifndef CONFIG_SMP_NCPUS
+#define CONFIG_SMP_NCPUS 1
+#endif
+
 #ifdef CONFIG_SCHED_INSTRUMENTATION
 
 #include <sched.h>
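This fallback keeps uniprocessor builds on the old code paths: with CONFIG_SMP_NCPUS defined to 1, the new bounds elsewhere in this PR reduce exactly to the previous behaviour (the clear loop again starts at index 1, and only PID 0 is treated as an idle thread). A tiny check of that equivalence (demo only):

#include <assert.h>

#ifndef CONFIG_SMP_NCPUS
#define CONFIG_SMP_NCPUS 1  // same fallback as above
#endif

int main(void)
{
	// With NCPUS == 1, "pid < CONFIG_SMP_NCPUS" is the old "pid == 0" test.
	for (int pid = 0; pid < 8; pid++) {
		assert((pid < CONFIG_SMP_NCPUS) == (pid == 0));
	}

	return 0;
}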