From 1ab7f7227c511ddc4dea95065f1d0cfcdce05ecf Mon Sep 17 00:00:00 2001 From: Ville Juven Date: Tue, 29 Oct 2024 11:28:24 +0200 Subject: [PATCH 1/7] cpuload.cpp: Support CONFIG_SMP=y for CPU load monitoring. --- platforms/nuttx/src/px4/common/cpuload.cpp | 77 ++++++++++++------- .../px4/common/include/px4_platform/cpuload.h | 4 + platforms/nuttx/src/px4/common/print_load.cpp | 37 ++++++--- src/modules/load_mon/LoadMon.cpp | 4 +- 4 files changed, 81 insertions(+), 41 deletions(-) diff --git a/platforms/nuttx/src/px4/common/cpuload.cpp b/platforms/nuttx/src/px4/common/cpuload.cpp index 84742f4f887f..09f92828d556 100644 --- a/platforms/nuttx/src/px4/common/cpuload.cpp +++ b/platforms/nuttx/src/px4/common/cpuload.cpp @@ -69,21 +69,21 @@ void init_task_hash(void) static struct system_load_taskinfo_s *get_task_info(pid_t pid) { struct system_load_taskinfo_s *ret = NULL; - irqstate_t flags = enter_critical_section(); + irqstate_t flags = px4_enter_critical_section(); if (hashtab) { ret = hashtab[HASH(pid)]; } - leave_critical_section(flags); + px4_leave_critical_section(flags); return ret; } static void drop_task_info(pid_t pid) { - irqstate_t flags = enter_critical_section(); + irqstate_t flags = px4_enter_critical_section(); hashtab[HASH(pid)] = NULL; - leave_critical_section(flags); + px4_leave_critical_section(flags); } static int hash_task_info(struct system_load_taskinfo_s *task_info, pid_t pid) @@ -95,7 +95,7 @@ static int hash_task_info(struct system_load_taskinfo_s *task_info, pid_t pid) /* Use critical section to protect the hash table */ - irqstate_t flags = enter_critical_section(); + irqstate_t flags = px4_enter_critical_section(); /* Keep trying until we get it or run out of memory */ @@ -109,7 +109,7 @@ static int hash_task_info(struct system_load_taskinfo_s *task_info, pid_t pid) if (hashtab[hash] == NULL) { hashtab[hash] = task_info; - leave_critical_section(flags); + px4_leave_critical_section(flags); return OK; } @@ -118,7 +118,7 @@ static int hash_task_info(struct system_load_taskinfo_s *task_info, pid_t pid) newtab = (struct system_load_taskinfo_s **)kmm_zalloc(hashtab_size * 2 * sizeof(*newtab)); if (newtab == NULL) { - leave_critical_section(flags); + px4_leave_critical_section(flags); return -ENOMEM; } @@ -160,16 +160,16 @@ void cpuload_monitor_start() { if (cpuload_monitor_all_count.fetch_add(1) == 0) { // if the count was previously 0 (idle thread only) then clear any existing runtime data - sched_lock(); + irqstate_t flags = px4_enter_critical_section(); system_load.start_time = hrt_absolute_time(); - for (int i = 1; i < CONFIG_FS_PROCFS_MAX_TASKS; i++) { + for (int i = CONFIG_SMP_NCPUS; i < CONFIG_FS_PROCFS_MAX_TASKS; i++) { system_load.tasks[i].total_runtime = 0; system_load.tasks[i].curr_start_time = 0; } - sched_unlock(); + px4_leave_critical_section(flags); } } @@ -191,25 +191,14 @@ void cpuload_initialize_once() task.valid = false; } - int static_tasks_count = 2; // there are at least 2 threads that should be initialized statically - "idle" and "init" - -#ifdef CONFIG_PAGING - static_tasks_count++; // include paging thread in initialization -#endif /* CONFIG_PAGING */ -#if CONFIG_SCHED_WORKQUEUE - static_tasks_count++; // include high priority work0 thread in initialization -#endif /* CONFIG_SCHED_WORKQUEUE */ -#if CONFIG_SCHED_LPWORK - static_tasks_count++; // include low priority work1 thread in initialization -#endif /* CONFIG_SCHED_WORKQUEUE */ - // perform static initialization of "system" threads - for (system_load.total_count = 0; system_load.total_count < 
static_tasks_count; system_load.total_count++) { + for (system_load.total_count = 0; system_load.total_count < CONFIG_SMP_NCPUS; system_load.total_count++) { system_load.tasks[system_load.total_count].total_runtime = 0; system_load.tasks[system_load.total_count].curr_start_time = 0; system_load.tasks[system_load.total_count].tcb = nxsched_get_tcb( system_load.total_count); // it is assumed that these static threads have consecutive PIDs system_load.tasks[system_load.total_count].valid = true; + hash_task_info(&system_load.tasks[system_load.total_count], system_load.total_count); } system_load.initialized = true; @@ -265,8 +254,8 @@ void sched_note_stop(FAR struct tcb_s *tcb) void sched_note_suspend(FAR struct tcb_s *tcb) { if (system_load.initialized) { - if (tcb->pid == 0) { - system_load.tasks[0].total_runtime += hrt_elapsed_time(&system_load.tasks[0].curr_start_time); + if (tcb->pid < CONFIG_SMP_NCPUS) { + system_load.tasks[tcb->pid].total_runtime += hrt_elapsed_time(&system_load.tasks[tcb->pid].curr_start_time); return; } else { @@ -294,8 +283,8 @@ void sched_note_suspend(FAR struct tcb_s *tcb) void sched_note_resume(FAR struct tcb_s *tcb) { if (system_load.initialized) { - if (tcb->pid == 0) { - hrt_store_absolute_time(&system_load.tasks[0].curr_start_time); + if (tcb->pid < CONFIG_SMP_NCPUS) { + hrt_store_absolute_time(&system_load.tasks[tcb->pid].curr_start_time); return; } else { @@ -343,5 +332,39 @@ void sched_note_syscall_enter(int nr); #endif +#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_INSTRUMENTATION) +void sched_note_cpu_start(FAR struct tcb_s *tcb, int cpu) +{ + /* Not interesting for us */ +} + +void sched_note_cpu_started(FAR struct tcb_s *tcb) +{ + /* Not interesting for us */ +} +#endif + +#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_INSTRUMENTATION_SWITCH) +void sched_note_cpu_pause(FAR struct tcb_s *tcb, int cpu) +{ + /* Not interesting for us */ +} + +void sched_note_cpu_paused(FAR struct tcb_s *tcb) +{ + /* Handled via sched_note_suspend */ +} + +void sched_note_cpu_resume(FAR struct tcb_s *tcb, int cpu) +{ + /* Not interesting for us */ +} + +void sched_note_cpu_resumed(FAR struct tcb_s *tcb) +{ + /* Handled via sched_note_resume */ +} +#endif + __END_DECLS #endif // PX4_NUTTX && CONFIG_SCHED_INSTRUMENTATION diff --git a/platforms/nuttx/src/px4/common/include/px4_platform/cpuload.h b/platforms/nuttx/src/px4/common/include/px4_platform/cpuload.h index f7a3215e6c3f..9f7e5f8c5d79 100644 --- a/platforms/nuttx/src/px4/common/include/px4_platform/cpuload.h +++ b/platforms/nuttx/src/px4/common/include/px4_platform/cpuload.h @@ -39,6 +39,10 @@ #define CONFIG_FS_PROCFS_MAX_TASKS 64 #endif +#ifndef CONFIG_SMP_NCPUS +#define CONFIG_SMP_NCPUS 1 +#endif + #ifdef CONFIG_SCHED_INSTRUMENTATION #include diff --git a/platforms/nuttx/src/px4/common/print_load.cpp b/platforms/nuttx/src/px4/common/print_load.cpp index 53db28992f7c..c2e8c3545ce6 100644 --- a/platforms/nuttx/src/px4/common/print_load.cpp +++ b/platforms/nuttx/src/px4/common/print_load.cpp @@ -77,12 +77,16 @@ void init_print_load(struct print_load_s *s) s->new_time = system_load.start_time; s->interval_start_time = system_load.start_time; - sched_lock(); - // special case for IDLE thread - s->last_times[0] = system_load.tasks[0].total_runtime; - sched_unlock(); + irqstate_t flags = px4_enter_critical_section(); - for (int i = 1; i < CONFIG_FS_PROCFS_MAX_TASKS; i++) { + // special case for IDLE thread(s) + for (int i = 0; i < CONFIG_SMP_NCPUS; i++) { + s->last_times[i] = system_load.tasks[i].total_runtime; + } + + 
px4_leave_critical_section(flags); + + for (int i = CONFIG_SMP_NCPUS; i < CONFIG_FS_PROCFS_MAX_TASKS; i++) { s->last_times[i] = 0; } @@ -99,6 +103,9 @@ static constexpr const char *tstate_name(const tstate_t s) return "PEND"; case TSTATE_TASK_READYTORUN: +#ifdef CONFIG_SMP + case TSTATE_TASK_ASSIGNED: +#endif return "READY"; case TSTATE_TASK_RUNNING: @@ -144,7 +151,7 @@ void print_load_buffer(char *buffer, int buffer_length, print_load_callback_f cb // create a copy of the runtimes because this could be updated during the print output uint64_t total_runtime[CONFIG_FS_PROCFS_MAX_TASKS] {}; - sched_lock(); + irqstate_t flags = px4_enter_critical_section(); print_state->new_time = hrt_absolute_time(); @@ -154,7 +161,7 @@ void print_load_buffer(char *buffer, int buffer_length, print_load_callback_f cb } } - sched_unlock(); + px4_leave_critical_section(flags); if (print_state->new_time > print_state->interval_start_time) { print_state->interval_time_us = print_state->new_time - print_state->interval_start_time; @@ -185,10 +192,10 @@ void print_load_buffer(char *buffer, int buffer_length, print_load_callback_f cb for (int i = 0; i < CONFIG_FS_PROCFS_MAX_TASKS; i++) { - sched_lock(); // need to lock the tcb access (but make it as short as possible) + flags = px4_enter_critical_section(); // need to lock the tcb access (but make it as short as possible) if (!system_load.tasks[i].valid) { - sched_unlock(); + px4_leave_critical_section(flags); continue; } @@ -233,12 +240,15 @@ void print_load_buffer(char *buffer, int buffer_length, print_load_callback_f cb } } - sched_unlock(); + px4_leave_critical_section(flags); switch (tcb_task_state) { case TSTATE_TASK_PENDING: case TSTATE_TASK_READYTORUN: case TSTATE_TASK_RUNNING: +#ifdef CONFIG_SMP + case TSTATE_TASK_ASSIGNED: +#endif print_state->running_count++; break; @@ -275,8 +285,8 @@ void print_load_buffer(char *buffer, int buffer_length, print_load_callback_f cb current_load = interval_runtime / print_state->interval_time_us; - if (tcb_pid == 0) { - idle_load = current_load; + if (tcb_pid < CONFIG_SMP_NCPUS) { + idle_load += current_load; } else { print_state->total_user_time += interval_runtime; @@ -325,6 +335,9 @@ void print_load_buffer(char *buffer, int buffer_length, print_load_callback_f cb float task_load = (float)(print_state->total_user_time) / print_state->interval_time_us; + /* Average of idle load over all CPUs */ + idle_load /= CONFIG_SMP_NCPUS; + /* this can happen if one tasks total runtime was not computed correctly by the scheduler instrumentation TODO */ if (task_load > (1.f - idle_load)) { diff --git a/src/modules/load_mon/LoadMon.cpp b/src/modules/load_mon/LoadMon.cpp index ffe1069ad8c9..cec5237ac45d 100644 --- a/src/modules/load_mon/LoadMon.cpp +++ b/src/modules/load_mon/LoadMon.cpp @@ -255,7 +255,7 @@ void LoadMon::stack_usage() static_assert(sizeof(task_stack_info.task_name) == CONFIG_TASK_NAME_SIZE, "task_stack_info.task_name must match NuttX CONFIG_TASK_NAME_SIZE"); - sched_lock(); + irqstate_t flags = px4_enter_critical_section(); if (system_load.tasks[_stack_task_index].valid && (system_load.tasks[_stack_task_index].tcb->pid > 0)) { @@ -282,7 +282,7 @@ void LoadMon::stack_usage() #endif // CONFIG_NFILE_DESCRIPTORS_PER_BLOCK } - sched_unlock(); + px4_leave_critical_section(flags); if (checked_task) { task_stack_info.stack_free = stack_free; From 46fc73eee506c10a210b97293b0b12b13a624c37 Mon Sep 17 00:00:00 2001 From: Ville Juven Date: Tue, 29 Oct 2024 11:29:37 +0200 Subject: [PATCH 2/7] mpfs/hrt.c: Support CONFIG_SMP=y Implement 
the critical sections as spin locks. When CONFIG_SMP=n they fall back to normal critical sections i.e. local interrupts are disabled. --- .../nuttx/src/px4/microchip/mpfs/hrt/hrt.c | 28 +++++++++++++------ 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/platforms/nuttx/src/px4/microchip/mpfs/hrt/hrt.c b/platforms/nuttx/src/px4/microchip/mpfs/hrt/hrt.c index 21dd8bc7591f..fa99c95bd66e 100644 --- a/platforms/nuttx/src/px4/microchip/mpfs/hrt/hrt.c +++ b/platforms/nuttx/src/px4/microchip/mpfs/hrt/hrt.c @@ -47,6 +47,7 @@ #include #include #include +#include #include #include @@ -100,6 +101,13 @@ static struct sq_queue_s callout_queue; /* last loaded value for irq latency calculation */ static hrt_abstime loadval; +/* SMP spinlock for hrt. + * + * Note: when SMP=no the spin lock turns into normal critical section i.e. it + * only disables interrupts + */ +static spinlock_t g_hrt_lock = SP_UNLOCKED; + /* latency histogram */ const uint16_t latency_bucket_count = LATENCY_BUCKET_COUNT; const uint16_t latency_buckets[LATENCY_BUCKET_COUNT] = { 1, 2, 5, 10, 20, 50, 100, 1000 }; @@ -166,11 +174,14 @@ static int hrt_tim_isr(int irq, void *context, void *arg) { uint32_t status; + irqstate_t flags; status = getreg32(MPFS_MSTIMER_LO_BASE + MPFS_MSTIMER_TIM1RIS_OFFSET); /* was this a timer tick? */ if (status & MPFS_MSTIMER_RIS_MASK) { + /* get exclusive access to hrt */ + flags = spin_lock_irqsave_wo_note(&g_hrt_lock); /* do latency calculations */ hrt_latency_update(); @@ -181,6 +192,9 @@ hrt_tim_isr(int irq, void *context, void *arg) /* and schedule the next interrupt */ hrt_call_reschedule(); + /* release exclusive access */ + spin_unlock_irqrestore_wo_note(&g_hrt_lock, flags); + /* clear the interrupt */ putreg32((MPFS_MSTIMER_RIS_MASK), MPFS_MSTIMER_LO_BASE + MPFS_MSTIMER_TIM1RIS_OFFSET); } @@ -204,11 +218,11 @@ hrt_absolute_time(void) hrt_abstime hrt_elapsed_time_atomic(const volatile hrt_abstime *then) { - irqstate_t flags = px4_enter_critical_section(); + irqstate_t flags = spin_lock_irqsave_wo_note(&g_hrt_lock); hrt_abstime delta = hrt_absolute_time() - *then; - px4_leave_critical_section(flags); + spin_unlock_irqrestore_wo_note(&g_hrt_lock, flags); return delta; } @@ -219,9 +233,7 @@ hrt_elapsed_time_atomic(const volatile hrt_abstime *then) void hrt_store_absolute_time(volatile hrt_abstime *t) { - irqstate_t flags = px4_enter_critical_section(); *t = hrt_absolute_time(); - px4_leave_critical_section(flags); } /** @@ -272,7 +284,7 @@ hrt_call_every(struct hrt_call *entry, hrt_abstime delay, hrt_abstime interval, static void hrt_call_internal(struct hrt_call *entry, hrt_abstime deadline, hrt_abstime interval, hrt_callout callout, void *arg) { - irqstate_t flags = px4_enter_critical_section(); + irqstate_t flags = spin_lock_irqsave_wo_note(&g_hrt_lock); /* if the entry is currently queued, remove it */ /* note that we are using a potentially uninitialised @@ -293,7 +305,7 @@ hrt_call_internal(struct hrt_call *entry, hrt_abstime deadline, hrt_abstime inte hrt_call_enter(entry); - px4_leave_critical_section(flags); + spin_unlock_irqrestore_wo_note(&g_hrt_lock, flags); } /** @@ -313,7 +325,7 @@ hrt_called(struct hrt_call *entry) void hrt_cancel(struct hrt_call *entry) { - irqstate_t flags = px4_enter_critical_section(); + irqstate_t flags = spin_lock_irqsave_wo_note(&g_hrt_lock); sq_rem(&entry->link, &callout_queue); entry->deadline = 0; @@ -323,7 +335,7 @@ hrt_cancel(struct hrt_call *entry) */ entry->period = 0; - px4_leave_critical_section(flags); + 
spin_unlock_irqrestore_wo_note(&g_hrt_lock, flags); } static void From e3d6ff3e78fbfdeeb1a710628a53eb1b28131d1e Mon Sep 17 00:00:00 2001 From: Ville Juven Date: Tue, 29 Oct 2024 11:31:49 +0200 Subject: [PATCH 3/7] board_reset.cpp: Support CONFIG_SMP=y Execute the reboot command (jump to __start) on all CPUs. The boot CPU i.e. CPU0 will wait for the other CPUs to reboot first, otherwise when CPU0 does the .data/.bss section initialization it will pull the rug from under the other CPUs feet by modifying global data unexpectedly. --- .../mpfs/board_reset/board_reset.cpp | 97 ++++++++++++++++++- 1 file changed, 92 insertions(+), 5 deletions(-) diff --git a/platforms/nuttx/src/px4/microchip/mpfs/board_reset/board_reset.cpp b/platforms/nuttx/src/px4/microchip/mpfs/board_reset/board_reset.cpp index a532977f9556..1a58f128c0e1 100644 --- a/platforms/nuttx/src/px4/microchip/mpfs/board_reset/board_reset.cpp +++ b/platforms/nuttx/src/px4/microchip/mpfs/board_reset/board_reset.cpp @@ -39,11 +39,41 @@ #include #include +#include #include +#include #include #include "riscv_internal.h" +#ifdef CONFIG_SMP +/* The CPU mask for all (valid) CPUs */ + +#define ALL_CPUS ((1 << CONFIG_SMP_NCPUS) - 1) + +/* PX4 has -nostdinc++ set, which confuses the logic in + * making it pull the C-versions of atomic_fetch_add/atomic_load which use + * __auto_type, which should match the counters here. + */ + +#define __auto_type int * + +/* With SMP, first we need to wait for all CPUs to pause (g_cpus_paused). + * Then the CPUs can be reset, but CPU0 must be reset last (g_cpus_ready). + * Otherwise a race can occur on g_cpus_paused as CPU0 clears .bss. + * + * Note: We cannot use locks that allow synchronization (like semaphores) here + * because we want the system reset to happen as fast as possible. + */ + +static int g_cpus_paused; +static int g_cpus_ready; + +/* Handle for nxsched_smp_call_async */ + +static struct smp_call_data_s g_reboot_data; +#endif + extern "C" void __start(void); static void board_reset_enter_bootloader() @@ -64,19 +94,53 @@ static void board_reset_enter_bootloader_and_continue_boot() for (; ;); } -static void board_reset_enter_app(uintptr_t hartid) +static int board_reset_enter_app(FAR void *arg) { - register long r0 asm("a0") = (long)(hartid); + uintptr_t hartid = riscv_mhartid(); + + /* Mask local interrupts */ + + up_irq_save(); + +#ifdef CONFIG_SMP + /* Notify that this CPU is paused */ + + atomic_fetch_add(&g_cpus_paused, 1); + + /* Wait for ALL CPUs to be ready */ + + while (atomic_load(&g_cpus_paused) < CONFIG_SMP_NCPUS); + + /* Notify that this CPU has now ready */ + + atomic_fetch_add(&g_cpus_ready, 1); + + /* CPU0 must then wait for other CPUs to start */ + + if (this_cpu() == 0) { + while (atomic_load(&g_cpus_ready) < CONFIG_SMP_NCPUS); + } + +#endif + + /* Make sure the CPU cache is flushed so data is not lost */ + + up_flush_dcache_all(); asm volatile ( - "tail __start\n" :: "r"(r0) : "memory" + "fence\n" + "mv a0, %0\n" + "tail __start\n" :: "r"(hartid) : "memory" ); + + /* Never reached */ + + return 0; } int board_reset(int status) { - uintptr_t hartid = riscv_mhartid(); #if defined(BOARD_HAS_ON_RESET) board_on_reset(status); #endif @@ -88,9 +152,32 @@ int board_reset(int status) board_reset_enter_bootloader_and_continue_boot(); } +#ifdef CONFIG_SMP + g_reboot_data.func = board_reset_enter_app; + g_reboot_data.arg = NULL; + g_cpus_paused = 0; + g_cpus_ready = 0; + + /* Reset the other CPUs via SMP call. 
+ * + * The SMP call in fact does run the callback on this CPU as well if + * requested to do so, but it does it BEFORE dispatching the request to the + * other CPUs. This is why we need to remove ourself from the CPU set. + */ + + cpu_set_t cpuset = ALL_CPUS; + CPU_CLR(this_cpu(), &cpuset); + + /* We cannot wait in smp_call because the target CPUs are reset, thus they + * will never acknowledge that they have run the handler! + */ + + nxsched_smp_call_async(cpuset, &g_reboot_data); +#endif + /* Just reboot via reset vector */ - board_reset_enter_app(hartid); + board_reset_enter_app(NULL); return 0; } From 30aaf93cf730951ea7024a705438b301270f5ecc Mon Sep 17 00:00:00 2001 From: Ville Juven Date: Tue, 19 Nov 2024 13:25:01 +0200 Subject: [PATCH 4/7] hrt_ioctl: Support CONFIG_SMP=y Replace the critical sections with a dedicated hrt_ioctl spin lock --- platforms/nuttx/src/px4/common/hrt_ioctl.c | 29 ++++++++++++++-------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/platforms/nuttx/src/px4/common/hrt_ioctl.c b/platforms/nuttx/src/px4/common/hrt_ioctl.c index 4c0a7d6aab26..ca584ae21597 100644 --- a/platforms/nuttx/src/px4/common/hrt_ioctl.c +++ b/platforms/nuttx/src/px4/common/hrt_ioctl.c @@ -43,6 +43,7 @@ #include #include +#include #include #ifndef MODULE_NAME @@ -59,6 +60,14 @@ static sq_queue_t callout_queue; static sq_queue_t callout_freelist; static sq_queue_t callout_inflight; +/* SMP spinlock for g_hrt_ioctl_lock. + * + * Note: when SMP=no the spin lock turns into normal critical section i.e. it + * only disables interrupts + */ + +static spinlock_t g_hrt_ioctl_lock = SP_UNLOCKED; + /* Check if entry is in list */ static bool entry_inlist(sq_queue_t *queue, sq_entry_t *item) @@ -124,7 +133,7 @@ static struct usr_hrt_call *dup_entry(const px4_hrt_handle_t handle, struct hrt_ { struct usr_hrt_call *e = NULL; - irqstate_t flags = px4_enter_critical_section(); + irqstate_t flags = spin_lock_irqsave_wo_note(&g_hrt_ioctl_lock); /* check if this is already queued */ e = pop_entry(&callout_queue, handle, entry); @@ -134,7 +143,7 @@ static struct usr_hrt_call *dup_entry(const px4_hrt_handle_t handle, struct hrt_ e = (void *)sq_remfirst(&callout_freelist); } - px4_leave_critical_section(flags); + spin_unlock_irqrestore_wo_note(&g_hrt_ioctl_lock, flags); if (!e) { /* Allocate a new kernel side item for the user call */ @@ -156,9 +165,9 @@ static struct usr_hrt_call *dup_entry(const px4_hrt_handle_t handle, struct hrt_ e->usr_entry = entry; /* Add this to the callout_queue list */ - flags = px4_enter_critical_section(); + flags = spin_lock_irqsave_wo_note(&g_hrt_ioctl_lock); sq_addfirst(&e->list_item, &callout_queue); - px4_leave_critical_section(flags); + spin_unlock_irqrestore_wo_note(&g_hrt_ioctl_lock, flags); } else { PX4_ERR("out of memory"); @@ -222,7 +231,7 @@ hrt_ioctl(unsigned int cmd, unsigned long arg) px4_sem_wait(callout_sem); /* Atomically update the pointer to user side hrt entry */ - flags = px4_enter_critical_section(); + flags = spin_lock_irqsave_wo_note(&g_hrt_ioctl_lock); e = pop_user(&callout_inflight, callout_sem); if (e) { @@ -241,7 +250,7 @@ hrt_ioctl(unsigned int cmd, unsigned long arg) PX4_ERR("HRT_WAITEVENT error no entry"); } - px4_leave_critical_section(flags); + spin_unlock_irqrestore_wo_note(&g_hrt_ioctl_lock, flags); } break; @@ -279,7 +288,7 @@ hrt_ioctl(unsigned int cmd, unsigned long arg) case HRT_CANCEL: if (h && h->entry) { /* Find the user entry */ - irqstate_t flags = px4_enter_critical_section(); + irqstate_t flags = 
spin_lock_irqsave_wo_note(&g_hrt_ioctl_lock); struct usr_hrt_call *e = pop_entry(&callout_queue, h->handle, h->entry); if (e) { @@ -288,7 +297,7 @@ hrt_ioctl(unsigned int cmd, unsigned long arg) } - px4_leave_critical_section(flags); + spin_unlock_irqrestore_wo_note(&g_hrt_ioctl_lock, flags); } else { PX4_ERR("HRT_CANCEL called with NULL entry"); @@ -330,7 +339,7 @@ hrt_ioctl(unsigned int cmd, unsigned long arg) struct usr_hrt_call *e; irqstate_t flags; - flags = px4_enter_critical_section(); + flags = spin_lock_irqsave_wo_note(&g_hrt_ioctl_lock); while ((e = (void *)sq_remfirst(&callout_queue))) { if (callback_sem == e->entry.callout_sem) { @@ -343,7 +352,7 @@ hrt_ioctl(unsigned int cmd, unsigned long arg) px4_sem_destroy(callback_sem); - px4_leave_critical_section(flags); + spin_unlock_irqrestore_wo_note(&g_hrt_ioctl_lock, flags); *(px4_sem_t **)arg = NULL; kmm_free(callback_sem); From d8fc3e1a200c15dc76c6984ecb892721c27814ca Mon Sep 17 00:00:00 2001 From: Ville Juven Date: Thu, 28 Nov 2024 10:01:23 +0200 Subject: [PATCH 5/7] px4_atomic: Support for CONFIG_SMP=y Use spinlock as the kernel side lock --- .../px4/common/include/px4_platform/atomic_block.h | 6 ++++++ platforms/nuttx/src/px4/common/px4_atomic.cpp | 11 +++++++++++ 2 files changed, 17 insertions(+) diff --git a/platforms/nuttx/src/px4/common/include/px4_platform/atomic_block.h b/platforms/nuttx/src/px4/common/include/px4_platform/atomic_block.h index f95d08ed858c..7d524b95d5e9 100644 --- a/platforms/nuttx/src/px4/common/include/px4_platform/atomic_block.h +++ b/platforms/nuttx/src/px4/common/include/px4_platform/atomic_block.h @@ -33,9 +33,12 @@ #pragma once +#include + #include #include +#include #include @@ -54,6 +57,9 @@ class atomic_block px4_sem_t _lock; irqstate_t _irqlock; }; +#ifdef CONFIG_SMP + spinlock_t _spinlock; +#endif }; } diff --git a/platforms/nuttx/src/px4/common/px4_atomic.cpp b/platforms/nuttx/src/px4/common/px4_atomic.cpp index 819dbc6ecc8b..93fdaebeec53 100644 --- a/platforms/nuttx/src/px4/common/px4_atomic.cpp +++ b/platforms/nuttx/src/px4/common/px4_atomic.cpp @@ -39,6 +39,9 @@ namespace px4 atomic_block::atomic_block(void) { _irqlock = 0; +#ifdef CONFIG_SMP + _spinlock = SP_UNLOCKED; +#endif } atomic_block::~atomic_block(void) @@ -48,12 +51,20 @@ atomic_block::~atomic_block(void) void atomic_block::start(void) { +#ifdef CONFIG_SMP + _irqlock = spin_lock_irqsave_wo_note(&_spinlock); +#else _irqlock = enter_critical_section(); +#endif } void atomic_block::finish(void) { +#ifdef CONFIG_SMP + spin_unlock_irqrestore_wo_note(&_spinlock, _irqlock); +#else leave_critical_section(_irqlock); +#endif } } From 1103d73f35a229440f4154c405e334f039ec41e4 Mon Sep 17 00:00:00 2001 From: Ville Juven Date: Thu, 28 Nov 2024 10:22:52 +0200 Subject: [PATCH 6/7] WorkQueue.hpp: Support CONFIG_SMP=y --- .../px4_platform_common/px4_work_queue/WorkQueue.hpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/platforms/common/include/px4_platform_common/px4_work_queue/WorkQueue.hpp b/platforms/common/include/px4_platform_common/px4_work_queue/WorkQueue.hpp index 5381e76a5f14..f91e66aecb91 100644 --- a/platforms/common/include/px4_platform_common/px4_work_queue/WorkQueue.hpp +++ b/platforms/common/include/px4_platform_common/px4_work_queue/WorkQueue.hpp @@ -89,8 +89,14 @@ class WorkQueue : public IntrusiveSortedListNode #ifdef __PX4_NUTTX // In NuttX work can be enqueued from an ISR #ifdef CONFIG_BUILD_FLAT +#ifdef CONFIG_SMP + void work_lock() { _flags = spin_lock_irqsave_wo_note(&_spinlock); } + void work_unlock() { 
spin_unlock_irqrestore_wo_note(&_spinlock, _flags); } + spinlock_t _spinlock; +#else void work_lock() { _flags = enter_critical_section(); } void work_unlock() { leave_critical_section(_flags); } +#endif irqstate_t _flags; #else // For non-flat targets, work is enqueued by user threads as well From 5450a091eb2ec21fe3d63380976c00861163faaf Mon Sep 17 00:00:00 2001 From: Ville Juven Date: Thu, 28 Nov 2024 12:27:37 +0200 Subject: [PATCH 7/7] hrt.c: Remove hrt_elapsed_time_atomic (it is unused) Remove dead code --- platforms/nuttx/src/px4/microchip/mpfs/hrt/hrt.c | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/platforms/nuttx/src/px4/microchip/mpfs/hrt/hrt.c b/platforms/nuttx/src/px4/microchip/mpfs/hrt/hrt.c index fa99c95bd66e..b2d5b975c109 100644 --- a/platforms/nuttx/src/px4/microchip/mpfs/hrt/hrt.c +++ b/platforms/nuttx/src/px4/microchip/mpfs/hrt/hrt.c @@ -212,21 +212,6 @@ hrt_absolute_time(void) return getreg64(MPFS_CLINT_MTIME); } -/** - * Compare a time value with the current time as atomic operation - */ -hrt_abstime -hrt_elapsed_time_atomic(const volatile hrt_abstime *then) -{ - irqstate_t flags = spin_lock_irqsave_wo_note(&g_hrt_lock); - - hrt_abstime delta = hrt_absolute_time() - *then; - - spin_unlock_irqrestore_wo_note(&g_hrt_lock, flags); - - return delta; -} - /** * Store the absolute time in an interrupt-safe fashion */
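
The recurring change in patches 2, 4, 5 and 6 is one locking pattern: a per-subsystem spinlock_t taken with spin_lock_irqsave_wo_note() instead of enter_critical_section(); as the patch comments note, with CONFIG_SMP=n the spinlock call only disables local interrupts, so it behaves like the normal critical section it replaces. A minimal sketch of that pattern follows, using the same NuttX calls the patches use but with hypothetical names (g_example_lock, g_example_counter, example_update) that are not part of this series:

#include <stdint.h>

#include <nuttx/config.h>
#include <nuttx/irq.h>
#include <nuttx/spinlock.h>

/* Hypothetical shared state; every name below is illustrative only */

static spinlock_t g_example_lock = SP_UNLOCKED;
static uint64_t g_example_counter;

void example_update(uint64_t delta)
{
	/* Spins if another CPU holds the lock and masks interrupts locally;
	 * on CONFIG_SMP=n builds this reduces to disabling local interrupts.
	 */

	irqstate_t flags = spin_lock_irqsave_wo_note(&g_example_lock);

	/* Shared state is only touched while the lock is held */

	g_example_counter += delta;

	/* Release the lock and restore the interrupt state saved above */

	spin_unlock_irqrestore_wo_note(&g_example_lock, flags);
}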
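
Patch 3/7's reboot path is easiest to read as a two-stage rendezvous: every CPU announces that it has paused, waits until all CPUs have paused, then announces that it is ready, and only CPU0 additionally waits for everyone to be ready before it, too, jumps to __start. The standalone program below (not PX4 code; pthreads stand in for CPUs and the thread count is made up) demonstrates the same ordering with C11 atomics:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4

static atomic_int g_paused;   /* CPUs that have stopped normal work */
static atomic_int g_ready;    /* CPUs that are ready to be reset    */

static void *fake_cpu(void *arg)
{
	int cpu = (int)(long)arg;

	/* Stage 1: announce "paused", then wait for every CPU to pause */

	atomic_fetch_add(&g_paused, 1);
	while (atomic_load(&g_paused) < NCPUS);

	/* Stage 2: announce "ready"; CPU0 alone waits so it resets last
	 * and cannot clobber shared state while the others still run.
	 */

	atomic_fetch_add(&g_ready, 1);

	if (cpu == 0) {
		while (atomic_load(&g_ready) < NCPUS);
	}

	printf("cpu%d would now jump to __start\n", cpu);
	return NULL;
}

int main(void)
{
	pthread_t t[NCPUS];

	for (long i = 0; i < NCPUS; i++) {
		pthread_create(&t[i], NULL, fake_cpu, (void *)i);
	}

	for (int i = 0; i < NCPUS; i++) {
		pthread_join(t[i], NULL);
	}

	return 0;
}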
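
Patch 1/7 relies on the NuttX convention that the idle thread of CPU n has PID n, so the first CONFIG_SMP_NCPUS task slots are idle threads and the reported idle load is their average (the idle_load /= CONFIG_SMP_NCPUS step in print_load.cpp). A small, hypothetical helper makes that arithmetic explicit; the name and signature are illustrative and not part of the patch:

#include <stdint.h>

/* idle_runtime_us[cpu] is how long CPU 'cpu's idle thread ran during the
 * sample interval; interval_us is the interval length. Names are hypothetical.
 */

static float compute_idle_load(const uint64_t idle_runtime_us[], int ncpus,
			       uint64_t interval_us)
{
	float idle = 0.0f;

	for (int cpu = 0; cpu < ncpus; cpu++) {
		/* Fraction of the interval this CPU spent in its idle thread */
		idle += (float)idle_runtime_us[cpu] / (float)interval_us;
	}

	/* Average over all CPUs, matching print_load_buffer() */
	return idle / (float)ncpus;
}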