Skip to content

Commit 9cdbc36

Browse files
committed
[libcpu-riscv]: [support SMP]: Fix issues with non-standard formatting
Fix issues with non-standard formatting. Signed-off-by: Mengchen Teng <[email protected]>
1 parent 7a770ce commit 9cdbc36

File tree

6 files changed

+169
-154
lines changed

6 files changed

+169
-154
lines changed

bsp/qemu-virt64-riscv/driver/board.c

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,10 @@
2424
#include "plic.h"
2525
#include "stack.h"
2626

27+
#ifdef RT_USING_SMP
28+
#include "interrupt.h"
29+
#endif /* RT_USING_SMP */
30+
2731
#ifdef RT_USING_SMART
2832
#include "riscv_mmu.h"
2933
#include "mmu.h"
@@ -88,7 +92,7 @@ void rt_hw_board_init(void)
8892
#endif /* RT_USING_CONSOLE */
8993

9094
rt_hw_tick_init();
91-
95+
9296
#ifdef RT_USING_SMP
9397
/* ipi init */
9498
rt_hw_ipi_init();

libcpu/risc-v/common64/atomic_riscv.c

Lines changed: 46 additions & 46 deletions
Original file line numberDiff line numberDiff line change
@@ -14,9 +14,9 @@ rt_atomic_t rt_hw_atomic_exchange(volatile rt_atomic_t *ptr, rt_atomic_t val)
1414
{
1515
rt_atomic_t result = 0;
1616
#if __riscv_xlen == 32
17-
asm volatile ("amoswap.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
17+
asm volatile("amoswap.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
1818
#elif __riscv_xlen == 64
19-
asm volatile ("amoswap.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
19+
asm volatile("amoswap.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
2020
#endif
2121
return result;
2222
}
@@ -25,9 +25,9 @@ rt_atomic_t rt_hw_atomic_add(volatile rt_atomic_t *ptr, rt_atomic_t val)
2525
{
2626
rt_atomic_t result = 0;
2727
#if __riscv_xlen == 32
28-
asm volatile ("amoadd.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
28+
asm volatile("amoadd.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
2929
#elif __riscv_xlen == 64
30-
asm volatile ("amoadd.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
30+
asm volatile("amoadd.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
3131
#endif
3232
return result;
3333
}
@@ -37,9 +37,9 @@ rt_atomic_t rt_hw_atomic_sub(volatile rt_atomic_t *ptr, rt_atomic_t val)
3737
rt_atomic_t result = 0;
3838
val = -val;
3939
#if __riscv_xlen == 32
40-
asm volatile ("amoadd.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
40+
asm volatile("amoadd.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
4141
#elif __riscv_xlen == 64
42-
asm volatile ("amoadd.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
42+
asm volatile("amoadd.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
4343
#endif
4444
return result;
4545
}
@@ -48,9 +48,9 @@ rt_atomic_t rt_hw_atomic_xor(volatile rt_atomic_t *ptr, rt_atomic_t val)
4848
{
4949
rt_atomic_t result = 0;
5050
#if __riscv_xlen == 32
51-
asm volatile ("amoxor.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
51+
asm volatile("amoxor.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
5252
#elif __riscv_xlen == 64
53-
asm volatile ("amoxor.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
53+
asm volatile("amoxor.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
5454
#endif
5555
return result;
5656
}
@@ -59,9 +59,9 @@ rt_atomic_t rt_hw_atomic_and(volatile rt_atomic_t *ptr, rt_atomic_t val)
5959
{
6060
rt_atomic_t result = 0;
6161
#if __riscv_xlen == 32
62-
asm volatile ("amoand.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
62+
asm volatile("amoand.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
6363
#elif __riscv_xlen == 64
64-
asm volatile ("amoand.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
64+
asm volatile("amoand.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
6565
#endif
6666
return result;
6767
}
@@ -70,9 +70,9 @@ rt_atomic_t rt_hw_atomic_or(volatile rt_atomic_t *ptr, rt_atomic_t val)
7070
{
7171
rt_atomic_t result = 0;
7272
#if __riscv_xlen == 32
73-
asm volatile ("amoor.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
73+
asm volatile("amoor.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
7474
#elif __riscv_xlen == 64
75-
asm volatile ("amoor.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
75+
asm volatile("amoor.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
7676
#endif
7777
return result;
7878
}
@@ -81,9 +81,9 @@ rt_atomic_t rt_hw_atomic_load(volatile rt_atomic_t *ptr)
8181
{
8282
rt_atomic_t result = 0;
8383
#if __riscv_xlen == 32
84-
asm volatile ("amoxor.w %0, x0, (%1)" : "=r"(result) : "r"(ptr) : "memory");
84+
asm volatile("amoxor.w %0, x0, (%1)" : "=r"(result) : "r"(ptr) : "memory");
8585
#elif __riscv_xlen == 64
86-
asm volatile ("amoxor.d %0, x0, (%1)" : "=r"(result) : "r"(ptr) : "memory");
86+
asm volatile("amoxor.d %0, x0, (%1)" : "=r"(result) : "r"(ptr) : "memory");
8787
#endif
8888
return result;
8989
}
@@ -92,9 +92,9 @@ void rt_hw_atomic_store(volatile rt_atomic_t *ptr, rt_atomic_t val)
9292
{
9393
rt_atomic_t result = 0;
9494
#if __riscv_xlen == 32
95-
asm volatile ("amoswap.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
95+
asm volatile("amoswap.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
9696
#elif __riscv_xlen == 64
97-
asm volatile ("amoswap.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
97+
asm volatile("amoswap.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
9898
#endif
9999
}
100100

@@ -103,9 +103,9 @@ rt_atomic_t rt_hw_atomic_flag_test_and_set(volatile rt_atomic_t *ptr)
103103
rt_atomic_t result = 0;
104104
rt_atomic_t temp = 1;
105105
#if __riscv_xlen == 32
106-
asm volatile ("amoor.w %0, %1, (%2)" : "=r"(result) : "r"(temp), "r"(ptr) : "memory");
106+
asm volatile("amoor.w %0, %1, (%2)" : "=r"(result) : "r"(temp), "r"(ptr) : "memory");
107107
#elif __riscv_xlen == 64
108-
asm volatile ("amoor.d %0, %1, (%2)" : "=r"(result) : "r"(temp), "r"(ptr) : "memory");
108+
asm volatile("amoor.d %0, %1, (%2)" : "=r"(result) : "r"(temp), "r"(ptr) : "memory");
109109
#endif
110110
return result;
111111
}
@@ -114,9 +114,9 @@ void rt_hw_atomic_flag_clear(volatile rt_atomic_t *ptr)
114114
{
115115
rt_atomic_t result = 0;
116116
#if __riscv_xlen == 32
117-
asm volatile ("amoand.w %0, x0, (%1)" : "=r"(result) :"r"(ptr) : "memory");
117+
asm volatile("amoand.w %0, x0, (%1)" : "=r"(result) : "r"(ptr) : "memory");
118118
#elif __riscv_xlen == 64
119-
asm volatile ("amoand.d %0, x0, (%1)" : "=r"(result) :"r"(ptr) : "memory");
119+
asm volatile("amoand.d %0, x0, (%1)" : "=r"(result) : "r"(ptr) : "memory");
120120
#endif
121121
}
122122

@@ -126,34 +126,34 @@ rt_atomic_t rt_hw_atomic_compare_exchange_strong(volatile rt_atomic_t *ptr, rt_a
126126
rt_atomic_t result = 0;
127127
#if __riscv_xlen == 32
128128
asm volatile(
129-
" fence iorw, ow\n"
130-
"1: lr.w.aq %[result], (%[ptr])\n"
131-
" bne %[result], %[tmp], 2f\n"
132-
" sc.w.rl %[tmp], %[desired], (%[ptr])\n"
133-
" bnez %[tmp], 1b\n"
134-
" li %[result], 1\n"
135-
" j 3f\n"
136-
" 2:sw %[result], (%[old])\n"
137-
" li %[result], 0\n"
138-
" 3:\n"
139-
: [result]"+r" (result), [tmp]"+r" (tmp), [ptr]"+r" (ptr)
140-
: [desired]"r" (desired), [old]"r"(old)
141-
: "memory");
129+
" fence iorw, ow\n"
130+
"1: lr.w.aq %[result], (%[ptr])\n"
131+
" bne %[result], %[tmp], 2f\n"
132+
" sc.w.rl %[tmp], %[desired], (%[ptr])\n"
133+
" bnez %[tmp], 1b\n"
134+
" li %[result], 1\n"
135+
" j 3f\n"
136+
" 2:sw %[result], (%[old])\n"
137+
" li %[result], 0\n"
138+
" 3:\n"
139+
: [result] "+r"(result), [tmp] "+r"(tmp), [ptr] "+r"(ptr)
140+
: [desired] "r"(desired), [old] "r"(old)
141+
: "memory");
142142
#elif __riscv_xlen == 64
143143
asm volatile(
144-
" fence iorw, ow\n"
145-
"1: lr.d.aq %[result], (%[ptr])\n"
146-
" bne %[result], %[tmp], 2f\n"
147-
" sc.d.rl %[tmp], %[desired], (%[ptr])\n"
148-
" bnez %[tmp], 1b\n"
149-
" li %[result], 1\n"
150-
" j 3f\n"
151-
" 2:sd %[result], (%[old])\n"
152-
" li %[result], 0\n"
153-
" 3:\n"
154-
: [result]"+r" (result), [tmp]"+r" (tmp), [ptr]"+r" (ptr)
155-
: [desired]"r" (desired), [old]"r"(old)
156-
: "memory");
144+
" fence iorw, ow\n"
145+
"1: lr.d.aq %[result], (%[ptr])\n"
146+
" bne %[result], %[tmp], 2f\n"
147+
" sc.d.rl %[tmp], %[desired], (%[ptr])\n"
148+
" bnez %[tmp], 1b\n"
149+
" li %[result], 1\n"
150+
" j 3f\n"
151+
" 2:sd %[result], (%[old])\n"
152+
" li %[result], 0\n"
153+
" 3:\n"
154+
: [result] "+r"(result), [tmp] "+r"(tmp), [ptr] "+r"(ptr)
155+
: [desired] "r"(desired), [old] "r"(old)
156+
: "memory");
157157
#endif
158158
return result;
159159
}

libcpu/risc-v/common64/cpuport.c

Lines changed: 22 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -18,16 +18,21 @@
1818
#include <sbi.h>
1919
#include <encoding.h>
2020

21+
#ifdef RT_USING_SMP
22+
#include "tick.h"
23+
#include "interrupt.h"
24+
#endif /* RT_USING_SMP */
25+
2126
#ifdef ARCH_RISCV_FPU
22-
#define K_SSTATUS_DEFAULT_BASE (SSTATUS_SPP | SSTATUS_SPIE | SSTATUS_SUM | SSTATUS_FS)
27+
#define K_SSTATUS_DEFAULT_BASE (SSTATUS_SPP | SSTATUS_SPIE | SSTATUS_SUM | SSTATUS_FS)
2328
#else
24-
#define K_SSTATUS_DEFAULT_BASE (SSTATUS_SPP | SSTATUS_SPIE | SSTATUS_SUM)
29+
#define K_SSTATUS_DEFAULT_BASE (SSTATUS_SPP | SSTATUS_SPIE | SSTATUS_SUM)
2530
#endif
2631

2732
#ifdef ARCH_RISCV_VECTOR
28-
#define K_SSTATUS_DEFAULT (K_SSTATUS_DEFAULT_BASE | SSTATUS_VS)
33+
#define K_SSTATUS_DEFAULT (K_SSTATUS_DEFAULT_BASE | SSTATUS_VS)
2934
#else
30-
#define K_SSTATUS_DEFAULT K_SSTATUS_DEFAULT_BASE
35+
#define K_SSTATUS_DEFAULT K_SSTATUS_DEFAULT_BASE
3136
#endif
3237
#ifdef RT_USING_SMART
3338
#include <lwp_arch.h>
@@ -51,8 +56,7 @@ volatile rt_ubase_t rt_thread_switch_interrupt_flag = 0;
5156

5257
void *_rt_hw_stack_init(rt_ubase_t *sp, rt_ubase_t ra, rt_ubase_t sstatus)
5358
{
54-
rt_hw_switch_frame_t frame = (rt_hw_switch_frame_t)
55-
((rt_ubase_t)sp - sizeof(struct rt_hw_switch_frame));
59+
rt_hw_switch_frame_t frame = (rt_hw_switch_frame_t)((rt_ubase_t)sp - sizeof(struct rt_hw_switch_frame));
5660

5761
rt_memset(frame, 0, sizeof(struct rt_hw_switch_frame));
5862

@@ -68,8 +72,8 @@ int rt_hw_cpu_id(void)
6872
return 0;
6973
#else
7074
/* Currently, the hartid is stored in the satp register. */
71-
uint32_t hart_id;
72-
asm volatile ("csrr %0, satp" : "=r"(hart_id));
75+
rt_ubase_t hart_id;
76+
asm volatile("csrr %0, satp" : "=r"(hart_id));
7377
return hart_id;
7478
#endif /* RT_USING_SMP */
7579
}
@@ -126,7 +130,7 @@ void rt_hw_context_switch_interrupt(rt_ubase_t from, rt_ubase_t to, rt_thread_t
126130
}
127131
#else
128132
void rt_hw_context_switch_interrupt(void *context, rt_ubase_t from, rt_ubase_t to, struct rt_thread *to_thread)
129-
{
133+
{
130134
/* Perform architecture-specific context switch. This call will
131135
* restore the target thread context and should not return when a
132136
* switch is performed. The caller (scheduler) invoked this function
@@ -166,12 +170,16 @@ void rt_hw_secondary_cpu_up(void)
166170
rt_uint64_t entry_pa;
167171
int hart, ret;
168172

169-
/* translate kernel virtual _start to physical address */
170-
entry_pa = (rt_uint64_t)&_start;//(rt_uint64_t)rt_kmem_v2p((void *)&_start);
173+
/* translate kernel virtual _start to physical address.
174+
* TODO: Virtual-to-physical translation is not needed here
175+
* because &_start is already a physical address on this platform.
176+
*/
177+
entry_pa = (rt_uint64_t)&_start;
171178

172179
for (hart = 0; hart < RT_CPUS_NR; hart++)
173180
{
174-
if (hart == boot_hartid) continue;
181+
if (hart == boot_hartid)
182+
continue;
175183

176184
ret = sbi_hsm_hart_start((unsigned long)hart,
177185
(unsigned long)entry_pa,
@@ -188,14 +196,12 @@ void secondary_cpu_entry(void)
188196
/* The PLIC peripheral interrupts are currently handled by the boot_hart. */
189197
/* Enable the Supervisor-Timer bit in SIE */
190198
rt_hw_tick_init();
191-
192-
#ifdef RT_USING_SMP
199+
193200
/* ipi init */
194201
rt_hw_ipi_init();
195-
#endif /* RT_USING_SMP */
196202

197203
rt_hw_spin_lock(&_cpus_lock);
198204
/* invoke system scheduler start for secondary CPU */
199205
rt_system_scheduler_start();
200206
}
201-
#endif /* RT_USING_SMP */
207+
#endif /* RT_USING_SMP */

0 commit comments

Comments
 (0)