diff --git a/.clang-format b/.clang-format deleted file mode 100644 index 1eb9b5dcc..000000000 --- a/.clang-format +++ /dev/null @@ -1,11 +0,0 @@ ---- -BasedOnStyle: Google -BreakAfterJavaFieldAnnotations: 'false' -BreakBeforeBraces: Linux -IncludeBlocks: Preserve -IndentWidth: '4' -SortIncludes: 'false' -AllowShortFunctionsOnASingleLine: 'Empty' -AllowShortLoopsOnASingleLine: 'true' -AlwaysBreakBeforeMultilineStrings: 'false' -... diff --git a/.github/workflows/code-quality.yaml b/.github/workflows/code-quality.yaml new file mode 100644 index 000000000..d1221b90a --- /dev/null +++ b/.github/workflows/code-quality.yaml @@ -0,0 +1,19 @@ +name: Code Quality + +on: + push: + branches: [ main ] + pull_request: + workflow_dispatch: + +jobs: + + coding-style: + runs-on: ubuntu-latest + container: baoproject/bao:latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + submodules: recursive + - run: make format-check diff --git a/Makefile b/Makefile index ce3971a92..e29a487be 100644 --- a/Makefile +++ b/Makefile @@ -307,8 +307,12 @@ all_files= $(realpath \ $(call list_dir_files_recursive, $(scripts_dir), *) \ $(call list_dir_files_recursive, $(config_dir)/example, *) \ ) +c_src_files=$(realpath $(call list_dir_files_recursive, src, *.c)) +c_hdr_files=$(realpath $(call list_dir_files_recursive, src, *.h)) +c_files=$(c_src_files) $(c_hdr_files) $(call ci, license, "Apache-2.0", $(all_files)) +$(call ci, format, $(c_files)) .PHONY: ci -ci: license-check +ci: license-check format-check diff --git a/README.md b/README.md index 19c9b20cf..492712d8f 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,6 @@ # Bao - a lightweight static partitioning hypervisor +![code quality workflow](https://github.com/bao-project/bao-hypervisor/actions/workflows/code-quality.yaml/badge.svg) ![arm build workflow](https://github.com/bao-project/bao-hypervisor/actions/workflows/build-arm.yaml/badge.svg) ![riscv build workflow](https://github.com/bao-project/bao-hypervisor/actions/workflows/build-riscv.yaml/badge.svg) diff --git a/ci b/ci index 715fb594f..3ef5099b3 160000 --- a/ci +++ b/ci @@ -1 +1 @@ -Subproject commit 715fb594f8c9b8bcd82a9ab16b8ff4190f406e24 +Subproject commit 3ef5099b3c15be813ebde724ccdaae18b2a0dfe2 diff --git a/src/arch/armv8/aarch32/aborts.c b/src/arch/armv8/aarch32/aborts.c index da9269810..f65ed4792 100644 --- a/src/arch/armv8/aarch32/aborts.c +++ b/src/arch/armv8/aarch32/aborts.c @@ -7,9 +7,9 @@ #include #include -void internal_abort_handler(unsigned long gprs[]) { - - for(ssize_t i = 14; i >= 0; i--) { +void internal_abort_handler(unsigned long gprs[]) +{ + for (ssize_t i = 14; i >= 0; i--) { console_printk("x%d:\t\t0x%0lx\n", i, gprs[14 - i]); } console_printk("ESR:\t0x%0lx\n", sysreg_esr_el2_read()); diff --git a/src/arch/armv8/aarch32/boot.S b/src/arch/armv8/aarch32/boot.S index bb69c9029..0b613c225 100644 --- a/src/arch/armv8/aarch32/boot.S +++ b/src/arch/armv8/aarch32/boot.S @@ -19,15 +19,14 @@ .data .balign 4 /** - * barrier is used to minimal synchronization in boot - other cores wait for - * bsp to set it. + * barrier is used to minimal synchronization in boot - other cores wait for bsp to set it. */ _barrier: .4byte 0 /** - * The following code MUST be at the base of the image, as this is bao's entry - * point. Therefore .boot section must also be the first in the linker script. - * DO NOT implement any code before the _reset_handler in this section. + * The following code MUST be at the base of the image, as this is bao's entry point. 
Therefore + * .boot section must also be the first in the linker script. DO NOT implement any code before the + * _reset_handler in this section. */ .section ".boot", "ax" .globl _reset_handler @@ -57,10 +56,10 @@ _reset_handler: adr r1, _el2_entry /** - * Linearize cpu id according to the number of clusters and processors - * per cluster. We are only considering two levels of affinity. - * TODO: this should be done some other way. We shouldn't depend on the platform - * description this early in the initialization. + * Linearize cpu id according to the number of clusters and processors per cluster. We are only + * considering two levels of affinity. + * TODO: this should be done some other way. We shouldn't depend on the platform description + * this early in the initialization. */ mov r3, r0, lsr #8 and r3, r3, #0xff @@ -88,8 +87,8 @@ _reset_handler: add r0, r0, r7 /* - * Install vector table physical address early, in case exception occurs - * during this initialization. + * Install vector table physical address early, in case exception occurs during this + * initialization. */ get_phys_addr r3, r4, _hyp_vector_table mcr p15, 4, r3, c12, c0, 0 // write HVBAR @@ -103,9 +102,9 @@ _reset_handler: bne 1f #else /** - * If the cpu master is not fixed, for setting it, we assume only one cpu is - * initially activated which later will turn on all the others. Therefore, there - * is no concurrency when setting CPU_MASTER and no atomic operations are needed. + * If the cpu master is not fixed, for setting it, we assume only one cpu is initially activated + * which later will turn on all the others. Therefore, there is no concurrency when setting + * CPU_MASTER and no atomic operations are needed. */ .pushsection .data _master_set: @@ -124,8 +123,8 @@ _set_master_cpu: 1: /** - * TODO: bring the system to a well known state. This includes disabling - * the MPU, all caches, BP and others, and invalidating them. + * TODO: bring the system to a well known state. This includes disabling the MPU, all caches, + * BP and others, and invalidating them. */ /* Clear stack pointer to avoid unaligned SP exceptions during boot */ @@ -182,7 +181,7 @@ _set_master_cpu: /* This point should never be reached */ b . -/***** Helper functions for boot code. ******/ +/***** Helper functions for boot code. 
******/ .global boot_clear .func boot_clear @@ -199,8 +198,7 @@ boot_clear: .endfunc /* - * Code adapted from "Application Note Bare-metal Boot Code for ARMv8-A - * Processors - Version 1.0" + * Code adapted from "Application Note Bare-metal Boot Code for ARMv8-A Processors - Version 1.0" * * r0 - cache level to be invalidated (0 - dl1$, 1 - il1$) */ diff --git a/src/arch/armv8/aarch32/inc/arch/spinlock.h b/src/arch/armv8/aarch32/inc/arch/spinlock.h index 42d13bea6..b429f49b9 100644 --- a/src/arch/armv8/aarch32/inc/arch/spinlock.h +++ b/src/arch/armv8/aarch32/inc/arch/spinlock.h @@ -13,7 +13,7 @@ typedef struct { uint32_t next; } spinlock_t; -#define SPINLOCK_INITVAL ((spinlock_t){0,0}) +#define SPINLOCK_INITVAL ((spinlock_t){ 0, 0 }) static inline void spinlock_init(spinlock_t* lock) { @@ -48,9 +48,8 @@ static inline void spin_lock(spinlock_t* lock) "beq 3f\n\t" "wfe \n\t" "b 2b\n\t" - "3:\n\t" - : "=&r"(ticket), "=&r"(next), "=&r"(temp) - : "Q"(lock->ticket), "Q"(lock->next) : "memory"); + "3:\n\t" : "=&r"(ticket), "=&r"(next), "=&r"(temp) : "Q"(lock->ticket), "Q"(lock->next) + : "memory"); } static inline void spin_unlock(spinlock_t* lock) @@ -63,9 +62,7 @@ static inline void spin_unlock(spinlock_t* lock) "add %r0, %r0, #1\n\t" "stl %r0, %1\n\t" "dsb ish\n\t" - "sev\n\t" - : "=&r"(temp) - : "Q"(lock->next) : "memory"); + "sev\n\t" : "=&r"(temp) : "Q"(lock->next) : "memory"); } #endif /* __ARCH_SPINLOCK__ */ diff --git a/src/arch/armv8/aarch32/inc/arch/subarch/sysregs.h b/src/arch/armv8/aarch32/inc/arch/subarch/sysregs.h index 9845f5086..512bf6f17 100644 --- a/src/arch/armv8/aarch32/inc/arch/subarch/sysregs.h +++ b/src/arch/armv8/aarch32/inc/arch/subarch/sysregs.h @@ -10,51 +10,57 @@ #ifndef __ASSEMBLER__ -#define SYSREG_GEN_ACCESSORS(name, op1, crn, crm, op2) \ - static inline unsigned long sysreg_##name##_read() {\ - unsigned long _temp;\ - asm volatile("mrc p15, "#op1", %0, "#crn", "#crm", %1\n\r": "=r"(_temp): "i"(op2));\ - return _temp;\ - } \ - static inline void sysreg_##name##_write(unsigned long val) {\ - asm volatile("mcr p15, "#op1", %0, "#crn", "#crm", "#op2"\n\r": :"r"((val)));\ +#define SYSREG_GEN_ACCESSORS(name, op1, crn, crm, op2) \ + static inline unsigned long sysreg_##name##_read() \ + { \ + unsigned long _temp; \ + asm volatile("mrc p15, " #op1 ", %0, " #crn ", " #crm ", %1\n\r" : "=r"(_temp) : "i"(op2)); \ + return _temp; \ + } \ + static inline void sysreg_##name##_write(unsigned long val) \ + { \ + asm volatile("mcr p15, " #op1 ", %0, " #crn ", " #crm ", " #op2 "\n\r" : : "r"(val)); \ } -#define SYSREG_GEN_ACCESSORS_BANKED(name, reg) \ - static inline unsigned long sysreg_##name##_read() {\ - unsigned long _temp;\ - asm volatile("mrs %0, " XSTR(reg) "\n\r": "=r"(_temp));\ - return _temp;\ - } \ - static inline void sysreg_##name##_write(unsigned long val) {\ - asm volatile("msr " XSTR(reg) ", %0\n\r": :"r"((val)));\ +#define SYSREG_GEN_ACCESSORS_BANKED(name, reg) \ + static inline unsigned long sysreg_##name##_read() \ + { \ + unsigned long _temp; \ + asm volatile("mrs %0, " XSTR(reg) "\n\r" : "=r"(_temp)); \ + return _temp; \ + } \ + static inline void sysreg_##name##_write(unsigned long val) \ + { \ + asm volatile("msr " XSTR(reg) ", %0\n\r" : : "r"(val)); \ } -#define SYSREG_GEN_ACCESSORS_64(reg, op1, crm) \ - static inline unsigned long long sysreg_##reg##_read() {\ - unsigned long long _temp, _tempH;\ - asm volatile("mrrc p15, "#op1", %0, %1, "#crm"\n\r": "=r"(_temp), "=r"(_tempH));\ - return ((_tempH << 32) | _temp);\ - } \ - static inline void 
sysreg_##reg##_write(unsigned long long val) {\ - unsigned long long _tempH = (val>>32);\ - asm volatile("mcrr p15, "#op1", %0, %1, "#crm"\n\r": :"r"(val), "r"(_tempH));\ +#define SYSREG_GEN_ACCESSORS_64(reg, op1, crm) \ + static inline unsigned long long sysreg_##reg##_read() \ + { \ + unsigned long long _temp, _tempH; \ + asm volatile("mrrc p15, " #op1 ", %0, %1, " #crm "\n\r" : "=r"(_temp), "=r"(_tempH)); \ + return ((_tempH << 32) | _temp); \ + } \ + static inline void sysreg_##reg##_write(unsigned long long val) \ + { \ + unsigned long long _tempH = (val >> 32); \ + asm volatile("mcrr p15, " #op1 ", %0, %1, " #crm "\n\r" : : "r"(val), "r"(_tempH)); \ } -#define SYSREG_GEN_ACCESSORS_MERGE(reg, reg1, reg2) \ - static inline unsigned long long sysreg_##reg##_read() {\ - return ((unsigned long long)sysreg_##reg2##_read() << 32) |\ - sysreg_##reg1##_read();\ - }\ - static inline void sysreg_##reg##_write(unsigned long long val) {\ - sysreg_##reg1##_write(val);\ - sysreg_##reg2##_write(val >> 32);\ +#define SYSREG_GEN_ACCESSORS_MERGE(reg, reg1, reg2) \ + static inline unsigned long long sysreg_##reg##_read() \ + { \ + return ((unsigned long long)sysreg_##reg2##_read() << 32) | sysreg_##reg1##_read(); \ + } \ + static inline void sysreg_##reg##_write(unsigned long long val) \ + { \ + sysreg_##reg1##_write(val); \ + sysreg_##reg2##_write(val >> 32); \ } - /** - * We give aarch32 registers the same name as aarch64's to which they are - * architecturally mapped to, so that we can use the same name in common code. + * We give aarch32 registers the same name as aarch64's to which they are architecturally mapped + * to, so that we can use the same name in common code. */ SYSREG_GEN_ACCESSORS(esr_el2, 4, c5, c2, 0); // hsr SYSREG_GEN_ACCESSORS_BANKED(elr_el2, elr_hyp); @@ -66,13 +72,13 @@ SYSREG_GEN_ACCESSORS(ctr_el0, 0, c0, c0, 1); SYSREG_GEN_ACCESSORS(mpidr_el1, 0, c0, c0, 5); SYSREG_GEN_ACCESSORS(vmpidr_el2, 4, c0, c0, 5); SYSREG_GEN_ACCESSORS_64(cntvoff_el2, 4, c14); -SYSREG_GEN_ACCESSORS(sctlr_el1, 0, c1, c0, 0); +SYSREG_GEN_ACCESSORS(sctlr_el1, 0, c1, c0, 0); SYSREG_GEN_ACCESSORS(cntkctl_el1, 0, c14, c1, 0); SYSREG_GEN_ACCESSORS(pmcr_el0, 0, c9, c12, 0); SYSREG_GEN_ACCESSORS_64(par_el1, 0, c7); -SYSREG_GEN_ACCESSORS(tcr_el2, 4, c2, c0, 2); // htcr -SYSREG_GEN_ACCESSORS_64(ttbr0_el2, 4, c2); // httbr -SYSREG_GEN_ACCESSORS(cptr_el2, 4, c1, c1, 2); // hcptr +SYSREG_GEN_ACCESSORS(tcr_el2, 4, c2, c0, 2); // htcr +SYSREG_GEN_ACCESSORS_64(ttbr0_el2, 4, c2); // httbr +SYSREG_GEN_ACCESSORS(cptr_el2, 4, c1, c1, 2); // hcptr SYSREG_GEN_ACCESSORS(vtcr_el2, 4, c2, c1, 2); SYSREG_GEN_ACCESSORS_64(vttbr_el2, 6, c2); SYSREG_GEN_ACCESSORS(tpidr_el2, 4, c13, c0, 2); // htpidr @@ -109,10 +115,10 @@ SYSREG_GEN_ACCESSORS_64(icc_sgi1r_el1, 0, c12); SYSREG_GEN_ACCESSORS(vsctlr_el2, 4, c2, c0, 0); -#define SYSREG_GEN_GIC_LR(n, crn1, crn2, op2) \ - SYSREG_GEN_ACCESSORS(ich_lr##n, 4, c12, crn1, op2); \ +#define SYSREG_GEN_GIC_LR(n, crn1, crn2, op2) \ + SYSREG_GEN_ACCESSORS(ich_lr##n, 4, c12, crn1, op2); \ SYSREG_GEN_ACCESSORS(ich_lrc##n, 4, c12, crn2, op2); \ - SYSREG_GEN_ACCESSORS_MERGE(ich_lr##n##_el2, ich_lr##n, ich_lrc##n); \ + SYSREG_GEN_ACCESSORS_MERGE(ich_lr##n##_el2, ich_lr##n, ich_lrc##n); SYSREG_GEN_GIC_LR(0, c12, c14, 0); SYSREG_GEN_GIC_LR(1, c12, c14, 1); @@ -132,32 +138,39 @@ SYSREG_GEN_GIC_LR(14, c13, c15, 6); SYSREG_GEN_GIC_LR(15, c13, c15, 7); SYSREG_GEN_ACCESSORS(dccivac, 0, c7, c14, 1); -static inline void arm_dc_civac(vaddr_t cache_addr) { +static inline void arm_dc_civac(vaddr_t cache_addr) +{ 
sysreg_dccivac_write(cache_addr); } -static inline void arm_at_s1e2w(vaddr_t vaddr) { - asm volatile("mcr p15, 4, %0, c7, c8, 1" ::"r"(vaddr)); // ats1hw +static inline void arm_at_s1e2w(vaddr_t vaddr) +{ + asm volatile("mcr p15, 4, %0, c7, c8, 1" ::"r"(vaddr)); // ats1hw } -static inline void arm_at_s12e1w(vaddr_t vaddr) { - asm volatile("mcr p15, 0, %0, c7, c8, 5" ::"r"(vaddr)); // ats12nsopw +static inline void arm_at_s12e1w(vaddr_t vaddr) +{ + asm volatile("mcr p15, 0, %0, c7, c8, 5" ::"r"(vaddr)); // ats12nsopw } -static inline void arm_tlbi_alle2is() { +static inline void arm_tlbi_alle2is() +{ asm volatile("mcr p15, 4, r0, c8, c7, 0"); } -static inline void arm_tlbi_vmalls12e1is() { +static inline void arm_tlbi_vmalls12e1is() +{ asm volatile("mcr p15, 0, r0, c8, c7, 0"); } -static inline void arm_tlbi_vae2is(vaddr_t vaddr) { - asm volatile("mcr p15, 4, %0, c8, c7, 1" :: "r"(vaddr)); +static inline void arm_tlbi_vae2is(vaddr_t vaddr) +{ + asm volatile("mcr p15, 4, %0, c8, c7, 1" ::"r"(vaddr)); } -static inline void arm_tlbi_ipas2e1is(vaddr_t vaddr) { - asm volatile("mcr p15, 4, %0, c8, c0, 1" :: "r"(vaddr >> 12)); +static inline void arm_tlbi_ipas2e1is(vaddr_t vaddr) +{ + asm volatile("mcr p15, 4, %0, c8, c0, 1" ::"r"(vaddr >> 12)); } #endif /* |__ASSEMBLER__ */ diff --git a/src/arch/armv8/aarch32/vm.c b/src/arch/armv8/aarch32/vm.c index 1957c9ad2..e47899926 100644 --- a/src/arch/armv8/aarch32/vm.c +++ b/src/arch/armv8/aarch32/vm.c @@ -8,13 +8,17 @@ unsigned long vcpu_readreg(struct vcpu* vcpu, unsigned long reg) { - if (reg > 14) return 0; + if (reg > 14) { + return 0; + } return vcpu->regs.x[reg]; } void vcpu_writereg(struct vcpu* vcpu, unsigned long reg, unsigned long val) { - if (reg > 14) return; + if (reg > 14) { + return; + } vcpu->regs.x[reg] = val; } @@ -28,6 +32,7 @@ void vcpu_writepc(struct vcpu* vcpu, unsigned long pc) vcpu->regs.elr_hyp = pc; } -void vcpu_subarch_reset(struct vcpu* vcpu) { +void vcpu_subarch_reset(struct vcpu* vcpu) +{ vcpu->regs.spsr_hyp = SPSR_SVC | SPSR_F | SPSR_I | SPSR_A; } diff --git a/src/arch/armv8/aarch64/aborts.c b/src/arch/armv8/aarch64/aborts.c index 0f71eb3c3..a1a47ab87 100644 --- a/src/arch/armv8/aarch64/aborts.c +++ b/src/arch/armv8/aarch64/aborts.c @@ -7,9 +7,9 @@ #include #include -void internal_abort_handler(unsigned long gprs[]) { - - for(size_t i = 0; i < 31; i++) { +void internal_abort_handler(unsigned long gprs[]) +{ + for (size_t i = 0; i < 31; i++) { console_printk("x%d:\t\t0x%0lx\n", i, gprs[i]); } console_printk("SP:\t\t0x%0lx\n", gprs[31]); diff --git a/src/arch/armv8/aarch64/boot.S b/src/arch/armv8/aarch64/boot.S index 6863ef3e8..5c2a159ca 100644 --- a/src/arch/armv8/aarch64/boot.S +++ b/src/arch/armv8/aarch64/boot.S @@ -12,16 +12,15 @@ .data .align 3 /** - * barrier is used to minimal synchronization in boot - other cores wait for - * bsp to set it. + * barrier is used to minimal synchronization in boot - other cores wait for bsp to set it. */ .global _boot_barrier _boot_barrier: .8byte 0 /** - * The following code MUST be at the base of the image, as this is bao's entry - * point. Therefore .boot section must also be the first in the linker script. - * DO NOT implement any code before the _reset_handler in this section. + * The following code MUST be at the base of the image, as this is bao's entry point. Therefore + * .boot section must also be the first in the linker script. DO NOT implement any code before the + * _reset_handler in this section. 
*/ .section ".boot", "ax" .globl _reset_handler @@ -31,9 +30,8 @@ _reset_handler: /** * TODO: before anything... - * perform sanity checks on ID registers to ensure support for - * VE and TZ, 4K granule and possibly other needed features. - * Also, check current exception level. Act accordingly. + * perform sanity checks on ID registers to ensure support for VE and TZ, 4K granule and + * possibly other needed features. Also, check current exception level. Act accordingly. * However, we expect to be running at EL2 at this point. */ @@ -51,17 +49,17 @@ _reset_handler: adrp x1, _image_start /* - * Install vector table physical address early, in case exception occurs - * during this initialization. + * Install vector table physical address early, in case exception occurs during this + * initialization. */ adr x3, _hyp_vector_table msr VBAR_EL2, x3 /** - * Linearize cpu id according to the number of clusters and processors per - * cluster. We are only considering two levels of affinity. - * TODO: this should be done some other way. We shouldn't depend on the platform - * description this early in the initialization. + * Linearize cpu id according to the number of clusters and processors per cluster. We are + * only considering two levels of affinity. + * TODO: this should be done some other way. We shouldn't depend on the platform description + * this early in the initialization. */ mov x3, x0, lsr #8 @@ -96,9 +94,9 @@ _reset_handler: cbnz x9, 1f #else /** - * If the cpu master is not fixed, for setting it, we assume only one cpu is - * initially activated which later will turn on all the others. Therefore, there - * is no concurrency when setting CPU_MASTER and no atomic operations are needed. + * If the cpu master is not fixed, for setting it, we assume only one cpu is initially activated + * which later will turn on all the others. Therefore, there is no concurrency when setting + * CPU_MASTER and no atomic operations are needed. */ .pushsection .data _master_set: @@ -116,9 +114,8 @@ _set_master_cpu: 1: /** - * TODO: bring the system to a well known state. This includes disabling - * the MMU (done), all caches (missing i$), BP and others... - * and invalidating them. + * TODO: bring the system to a well known state. This includes disabling the MMU (done), + * all caches (missing i$), BP and others... and invalidating them. */ /* boot_clear stack pointer to avoid unaligned SP exceptions during boot */ @@ -195,8 +192,7 @@ boot_clear: .endfunc /* - * Code taken from "Application Note Bare-metal Boot Code for ARMv8-A - * Processors - Version 1.0" + * Code taken from "Application Note Bare-metal Boot Code for ARMv8-A Processors - Version 1.0" * * x0 - cache level to be invalidated (0 - dl1$, 1 - il1$, 2 - l2$) */ diff --git a/src/arch/armv8/aarch64/inc/arch/spinlock.h b/src/arch/armv8/aarch64/inc/arch/spinlock.h index 662c8ebaa..31143a067 100644 --- a/src/arch/armv8/aarch64/inc/arch/spinlock.h +++ b/src/arch/armv8/aarch64/inc/arch/spinlock.h @@ -13,7 +13,7 @@ typedef struct { uint32_t next; } spinlock_t; -#define SPINLOCK_INITVAL ((spinlock_t){0,0}) +#define SPINLOCK_INITVAL ((spinlock_t){ 0, 0 }) static inline void spinlock_init(spinlock_t* lock) { @@ -22,8 +22,7 @@ static inline void spinlock_init(spinlock_t* lock) } /** - * This lock follows the ticket lock algorithm described in Arm's ARM DDI0487I.a - * Appendix K13. + * This lock follows the ticket lock algorithm described in Arm's ARM DDI0487I.a Appendix K13. 
*/ static inline void spin_lock(spinlock_t* lock) @@ -47,9 +46,8 @@ static inline void spin_lock(spinlock_t* lock) "b.eq 3f\n\t" "wfe\n\t" "b 2b\n\t" - "3:\n\t" - : "=&r"(ticket), "=&r"(next), "=&r"(temp) - : "Q"(lock->ticket), "Q"(lock->next) : "memory"); + "3:\n\t" : "=&r"(ticket), "=&r"(next), "=&r"(temp) : "Q"(lock->ticket), "Q"(lock->next) + : "memory"); } static inline void spin_unlock(spinlock_t* lock) @@ -62,10 +60,7 @@ static inline void spin_unlock(spinlock_t* lock) "add %w0, %w0, 1\n\t" "stlr %w0, %1\n\t" "dsb ish\n\t" - "sev\n\t" - : "=&r"(temp) - : "Q"(lock->next) : "memory"); + "sev\n\t" : "=&r"(temp) : "Q"(lock->next) : "memory"); } - #endif /* __ARCH_SPINLOCK__ */ diff --git a/src/arch/armv8/aarch64/inc/arch/subarch/sysregs.h b/src/arch/armv8/aarch64/inc/arch/subarch/sysregs.h index 8fb64496a..b64b239b3 100644 --- a/src/arch/armv8/aarch64/inc/arch/subarch/sysregs.h +++ b/src/arch/armv8/aarch64/inc/arch/subarch/sysregs.h @@ -8,55 +8,57 @@ #include -#define mpuir_el2 S3_4_C0_C0_4 -#define prselr_el2 S3_4_C6_C2_1 -#define prbar_el2 S3_4_C6_C8_0 -#define prlar_el2 S3_4_C6_C8_1 -#define prenr_el2 S3_4_C6_C1_1 -#define ich_misr_el2 S3_4_C12_C11_2 -#define ich_eisr_el2 S3_4_C12_C11_3 -#define ich_elrsr_el2 S3_4_C12_C11_5 -#define icc_iar1_el1 S3_0_C12_C12_0 -#define icc_eoir1_el1 S3_0_C12_C12_1 -#define icc_dir_el1 S3_0_C12_C11_1 -#define ich_vtr_el2 S3_4_C12_C11_1 -#define icc_sre_el2 S3_4_C12_C9_5 -#define icc_pmr_el1 S3_0_C4_C6_0 -#define icc_bpr1_el1 S3_0_C12_C12_3 -#define icc_ctlr_el1 S3_0_C12_C12_4 -#define icc_igrpen1_el1 S3_0_C12_C12_7 -#define ich_hcr_el2 S3_4_C12_C11_0 -#define icc_sgi1r_el1 S3_0_C12_C11_5 -#define ich_lr0_el2 S3_4_C12_C12_0 -#define ich_lr1_el2 S3_4_C12_C12_1 -#define ich_lr2_el2 S3_4_C12_C12_2 -#define ich_lr3_el2 S3_4_C12_C12_3 -#define ich_lr4_el2 S3_4_C12_C12_4 -#define ich_lr5_el2 S3_4_C12_C12_5 -#define ich_lr6_el2 S3_4_C12_C12_6 -#define ich_lr7_el2 S3_4_C12_C12_7 -#define ich_lr8_el2 S3_4_C12_C13_0 -#define ich_lr9_el2 S3_4_C12_C13_1 -#define ich_lr10_el2 S3_4_C12_C13_2 -#define ich_lr11_el2 S3_4_C12_C13_3 -#define ich_lr12_el2 S3_4_C12_C13_4 -#define ich_lr13_el2 S3_4_C12_C13_5 -#define ich_lr14_el2 S3_4_C12_C13_6 -#define ich_lr15_el2 S3_4_C12_C13_7 +#define mpuir_el2 S3_4_C0_C0_4 +#define prselr_el2 S3_4_C6_C2_1 +#define prbar_el2 S3_4_C6_C8_0 +#define prlar_el2 S3_4_C6_C8_1 +#define prenr_el2 S3_4_C6_C1_1 +#define ich_misr_el2 S3_4_C12_C11_2 +#define ich_eisr_el2 S3_4_C12_C11_3 +#define ich_elrsr_el2 S3_4_C12_C11_5 +#define icc_iar1_el1 S3_0_C12_C12_0 +#define icc_eoir1_el1 S3_0_C12_C12_1 +#define icc_dir_el1 S3_0_C12_C11_1 +#define ich_vtr_el2 S3_4_C12_C11_1 +#define icc_sre_el2 S3_4_C12_C9_5 +#define icc_pmr_el1 S3_0_C4_C6_0 +#define icc_bpr1_el1 S3_0_C12_C12_3 +#define icc_ctlr_el1 S3_0_C12_C12_4 +#define icc_igrpen1_el1 S3_0_C12_C12_7 +#define ich_hcr_el2 S3_4_C12_C11_0 +#define icc_sgi1r_el1 S3_0_C12_C11_5 +#define ich_lr0_el2 S3_4_C12_C12_0 +#define ich_lr1_el2 S3_4_C12_C12_1 +#define ich_lr2_el2 S3_4_C12_C12_2 +#define ich_lr3_el2 S3_4_C12_C12_3 +#define ich_lr4_el2 S3_4_C12_C12_4 +#define ich_lr5_el2 S3_4_C12_C12_5 +#define ich_lr6_el2 S3_4_C12_C12_6 +#define ich_lr7_el2 S3_4_C12_C12_7 +#define ich_lr8_el2 S3_4_C12_C13_0 +#define ich_lr9_el2 S3_4_C12_C13_1 +#define ich_lr10_el2 S3_4_C12_C13_2 +#define ich_lr11_el2 S3_4_C12_C13_3 +#define ich_lr12_el2 S3_4_C12_C13_4 +#define ich_lr13_el2 S3_4_C12_C13_5 +#define ich_lr14_el2 S3_4_C12_C13_6 +#define ich_lr15_el2 S3_4_C12_C13_7 #ifndef __ASSEMBLER__ -#define 
SYSREG_GEN_ACCESSORS_NAME(reg, name) \ - static inline unsigned long sysreg##reg##read() {\ - unsigned long _temp;\ - asm volatile("mrs %0, " XSTR(name) "\n\r" : "=r"(_temp));\ - return _temp;\ - } \ - static inline void sysreg##reg##write(unsigned long val) {\ - asm volatile("msr " XSTR(name) ", %0\n\r" ::"r"(val));\ +#define SYSREG_GEN_ACCESSORS_NAME(reg, name) \ + static inline unsigned long sysreg##reg##read() \ + { \ + unsigned long _temp; \ + asm volatile("mrs %0, " XSTR(name) "\n\r" : "=r"(_temp)); \ + return _temp; \ + } \ + static inline void sysreg##reg##write(unsigned long val) \ + { \ + asm volatile("msr " XSTR(name) ", %0\n\r" ::"r"(val)); \ } -#define SYSREG_GEN_ACCESSORS(reg) SYSREG_GEN_ACCESSORS_NAME(_##reg##_, reg) +#define SYSREG_GEN_ACCESSORS(reg) SYSREG_GEN_ACCESSORS_NAME(_##reg##_, reg) SYSREG_GEN_ACCESSORS(esr_el2); SYSREG_GEN_ACCESSORS(elr_el2); @@ -121,31 +123,38 @@ SYSREG_GEN_ACCESSORS(ich_lr13_el2); SYSREG_GEN_ACCESSORS(ich_lr14_el2); SYSREG_GEN_ACCESSORS(ich_lr15_el2); -static inline void arm_dc_civac(vaddr_t cache_addr) { - asm volatile ("dc civac, %0\n\t" :: "r"(cache_addr)); +static inline void arm_dc_civac(vaddr_t cache_addr) +{ + asm volatile("dc civac, %0\n\t" ::"r"(cache_addr)); } -static inline void arm_at_s1e2w(vaddr_t vaddr) { - asm volatile("at s1e2w, %0" ::"r"(vaddr)); +static inline void arm_at_s1e2w(vaddr_t vaddr) +{ + asm volatile("at s1e2w, %0" ::"r"(vaddr)); } -static inline void arm_at_s12e1w(vaddr_t vaddr) { - asm volatile("at s12e1w, %0" ::"r"(vaddr)); +static inline void arm_at_s12e1w(vaddr_t vaddr) +{ + asm volatile("at s12e1w, %0" ::"r"(vaddr)); } -static inline void arm_tlbi_alle2is() { +static inline void arm_tlbi_alle2is() +{ asm volatile("tlbi alle2is"); } -static inline void arm_tlbi_vmalls12e1is() { +static inline void arm_tlbi_vmalls12e1is() +{ asm volatile("tlbi vmalls12e1is"); } -static inline void arm_tlbi_vae2is(vaddr_t vaddr) { +static inline void arm_tlbi_vae2is(vaddr_t vaddr) +{ asm volatile("tlbi vae2is, %0" ::"r"(vaddr >> 12)); } -static inline void arm_tlbi_ipas2e1is(vaddr_t vaddr) { +static inline void arm_tlbi_ipas2e1is(vaddr_t vaddr) +{ asm volatile("tlbi ipas2e1is, %0" ::"r"(vaddr >> 12)); } diff --git a/src/arch/armv8/aarch64/inc/arch/subarch/vm.h b/src/arch/armv8/aarch64/inc/arch/subarch/vm.h index 176f0a3de..7f6704de8 100644 --- a/src/arch/armv8/aarch64/inc/arch/subarch/vm.h +++ b/src/arch/armv8/aarch64/inc/arch/subarch/vm.h @@ -12,7 +12,6 @@ struct arch_regs { uint64_t x[31]; uint64_t elr_el2; uint64_t spsr_el2; -} __attribute__((aligned(16))); // makes size always aligned to 16 to respect - // stack alignment +} __attribute__((aligned(16))); // makes size always aligned to 16 to respect stack alignment -#endif /* VM_SUBARCH_H */ +#endif /* VM_SUBARCH_H */ diff --git a/src/arch/armv8/aarch64/vm.c b/src/arch/armv8/aarch64/vm.c index 1fd594275..e474da433 100644 --- a/src/arch/armv8/aarch64/vm.c +++ b/src/arch/armv8/aarch64/vm.c @@ -8,13 +8,17 @@ unsigned long vcpu_readreg(struct vcpu* vcpu, unsigned long reg) { - if (reg > 30) return 0; + if (reg > 30) { + return 0; + } return vcpu->regs.x[reg]; } void vcpu_writereg(struct vcpu* vcpu, unsigned long reg, unsigned long val) { - if (reg > 30) return; + if (reg > 30) { + return; + } vcpu->regs.x[reg] = val; } @@ -28,6 +32,7 @@ void vcpu_writepc(struct vcpu* vcpu, unsigned long pc) vcpu->regs.elr_el2 = pc; } -void vcpu_subarch_reset(struct vcpu* vcpu) { +void vcpu_subarch_reset(struct vcpu* vcpu) +{ vcpu->regs.spsr_el2 = SPSR_EL1h | SPSR_F | SPSR_I | SPSR_A | SPSR_D; } 
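The ticket-lock hunks above compress the inline assembly onto fewer lines, which makes the underlying algorithm harder to see at a glance. For reference, here is a minimal C11-atomics sketch of the same ticket-lock scheme the spinlock.h comment attributes to Arm's ARM DDI0487I.a Appendix K13 — illustrative only, not part of this patch; `ticket_lock_t`, `ticket_lock`, and `ticket_unlock` are hypothetical names:

```c
#include <stdatomic.h>

/* Sketch only: field roles mirror Bao's spinlock_t -- `ticket` is the next
 * ticket to hand out, `next` is the ticket currently being served. */
typedef struct {
    atomic_uint ticket;
    atomic_uint next;
} ticket_lock_t;

static inline void ticket_lock(ticket_lock_t* lock)
{
    /* Atomically take a ticket (the ldaxr/stxr retry loop in the asm). */
    unsigned int mine = atomic_fetch_add_explicit(&lock->ticket, 1, memory_order_relaxed);
    /* Spin until our ticket is being served (the ldar/wfe loop in the asm). */
    while (atomic_load_explicit(&lock->next, memory_order_acquire) != mine) {
        /* busy-wait; the assembly version sleeps with wfe instead */
    }
}

static inline void ticket_unlock(ticket_lock_t* lock)
{
    /* Serve the next waiter (the add + stlr + sev sequence in the asm). */
    atomic_fetch_add_explicit(&lock->next, 1, memory_order_release);
}
```

The acquire load in `ticket_lock` and the release increment in `ticket_unlock` play the role of the `ldar`/`stlr` acquire-release pairs in the reformatted assembly.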
diff --git a/src/arch/armv8/aborts.c b/src/arch/armv8/aborts.c index c3bfc4b44..825a24842 100644 --- a/src/arch/armv8/aborts.c +++ b/src/arch/armv8/aborts.c @@ -20,8 +20,7 @@ void aborts_data_lower(unsigned long iss, unsigned long far, unsigned long il, u ERROR("no information to handle data abort (0x%x)", far); } - unsigned long DSFC = - bit64_extract(iss, ESR_ISS_DA_DSFC_OFF, ESR_ISS_DA_DSFC_LEN) & (0xf << 2); + unsigned long DSFC = bit64_extract(iss, ESR_ISS_DA_DSFC_OFF, ESR_ISS_DA_DSFC_LEN) & (0xf << 2); if (DSFC != ESR_ISS_DA_DSFC_TRNSLT && DSFC != ESR_ISS_DA_DSFC_PERMIS) { ERROR("data abort is not translation fault - cant deal with it"); @@ -32,17 +31,13 @@ void aborts_data_lower(unsigned long iss, unsigned long far, unsigned long il, u if (handler != NULL) { struct emul_access emul; emul.addr = addr; - emul.width = - (1 << bit64_extract(iss, ESR_ISS_DA_SAS_OFF, ESR_ISS_DA_SAS_LEN)); + emul.width = (1 << bit64_extract(iss, ESR_ISS_DA_SAS_OFF, ESR_ISS_DA_SAS_LEN)); emul.write = iss & ESR_ISS_DA_WnR_BIT ? true : false; emul.reg = bit64_extract(iss, ESR_ISS_DA_SRT_OFF, ESR_ISS_DA_SRT_LEN); - emul.reg_width = - 4 + (4 * bit64_extract(iss, ESR_ISS_DA_SF_OFF, ESR_ISS_DA_SF_LEN)); - emul.sign_ext = - bit64_extract(iss, ESR_ISS_DA_SSE_OFF, ESR_ISS_DA_SSE_LEN); + emul.reg_width = 4 + (4 * bit64_extract(iss, ESR_ISS_DA_SF_OFF, ESR_ISS_DA_SF_LEN)); + emul.sign_ext = bit64_extract(iss, ESR_ISS_DA_SSE_OFF, ESR_ISS_DA_SSE_LEN); - // TODO: check if the access is aligned. If not, inject an exception in - // the vm + // TODO: check if the access is aligned. If not, inject an exception in the vm if (handler(&emul)) { unsigned long pc_step = 2 + (2 * il); @@ -51,13 +46,12 @@ void aborts_data_lower(unsigned long iss, unsigned long far, unsigned long il, u ERROR("data abort emulation failed (0x%x)", far); } } else { - ERROR("no emulation handler for abort(0x%x at 0x%x)", far, - vcpu_readpc(cpu()->vcpu)); + ERROR("no emulation handler for abort(0x%x at 0x%x)", far, vcpu_readpc(cpu()->vcpu)); } } -long int standard_service_call(unsigned long _fn_num) { - +long int standard_service_call(unsigned long _fn_num) +{ int64_t ret = -1; unsigned long smc_fid = vcpu_readreg(cpu()->vcpu, 0); @@ -74,13 +68,13 @@ long int standard_service_call(unsigned long _fn_num) { return ret; } -static inline void syscall_handler(unsigned long iss, unsigned long far, - unsigned long il, unsigned long ec) +static inline void syscall_handler(unsigned long iss, unsigned long far, unsigned long il, + unsigned long ec) { unsigned long fid = vcpu_readreg(cpu()->vcpu, 0); long ret = SMCC_E_NOT_SUPPORTED; - switch(fid & ~SMCC_FID_FN_NUM_MSK) { + switch (fid & ~SMCC_FID_FN_NUM_MSK) { case SMCC32_FID_STD_SRVC: case SMCC64_FID_STD_SRVC: ret = standard_service_call(fid); @@ -106,9 +100,9 @@ void smc_handler(unsigned long iss, unsigned long far, unsigned long il, unsigne syscall_handler(iss, far, il, ec); /** - * Since SMCs are trapped due to setting hcr_el2.tsc, the "preferred - * exception return address" is the address of the actual smc instruction. - * Thus, we need to adjust it to the next instruction. + * Since SMCs are trapped due to setting hcr_el2.tsc, the "preferred exception return address" + * is the address of the actual smc instruction. Thus, we need to adjust it to the next + * instruction. 
*/ vcpu_writepc(cpu()->vcpu, vcpu_readpc(cpu()->vcpu) + 4); } @@ -117,9 +111,9 @@ static regaddr_t reg_addr_translate(unsigned long iss) { iss &= ESR_ISS_SYSREG_ADDR_64; if (iss == ICC_SGI1R_CASE) { - return (regaddr_t) ICC_SGI1R_ADDR; + return (regaddr_t)ICC_SGI1R_ADDR; } else { - return (regaddr_t) UNDEFINED_REG_ADDR; + return (regaddr_t)UNDEFINED_REG_ADDR; } } @@ -133,7 +127,7 @@ void sysreg_handler(unsigned long iss, unsigned long far, unsigned long il, unsi } emul_handler_t handler = vm_emul_get_reg(cpu()->vcpu->vm, reg_addr); - if(handler != NULL){ + if (handler != NULL) { struct emul_access emul; emul.addr = reg_addr; emul.width = 8; @@ -141,7 +135,7 @@ void sysreg_handler(unsigned long iss, unsigned long far, unsigned long il, unsi emul.reg = bit64_extract(iss, ESR_ISS_SYSREG_REG_OFF, ESR_ISS_SYSREG_REG_LEN); emul.reg_high = bit64_extract(iss, ESR_ISS_SYSREG_REG2_OFF, ESR_ISS_SYSREG_REG2_LEN); emul.reg_width = 8; - emul.multi_reg = (ec == ESR_EC_RG_64)? true : false; + emul.multi_reg = (ec == ESR_EC_RG_64) ? true : false; emul.sign_ext = false; if (handler(&emul)) { @@ -152,18 +146,20 @@ void sysreg_handler(unsigned long iss, unsigned long far, unsigned long il, unsi } } else { ERROR("no emulation handler for register access (0x%x at 0x%x)", reg_addr, - vcpu_readpc(cpu()->vcpu)); + vcpu_readpc(cpu()->vcpu)); } } -abort_handler_t abort_handlers[64] = {[ESR_EC_DALEL] = aborts_data_lower, - [ESR_EC_SMC32] = smc_handler, - [ESR_EC_SMC64] = smc_handler, - [ESR_EC_SYSRG] = sysreg_handler, - [ESR_EC_RG_32] = sysreg_handler, - [ESR_EC_RG_64] = sysreg_handler, - [ESR_EC_HVC32] = hvc_handler, - [ESR_EC_HVC64] = hvc_handler,}; +abort_handler_t abort_handlers[64] = { + [ESR_EC_DALEL] = aborts_data_lower, + [ESR_EC_SMC32] = smc_handler, + [ESR_EC_SMC64] = smc_handler, + [ESR_EC_SYSRG] = sysreg_handler, + [ESR_EC_RG_32] = sysreg_handler, + [ESR_EC_RG_64] = sysreg_handler, + [ESR_EC_HVC32] = hvc_handler, + [ESR_EC_HVC64] = hvc_handler, +}; void aborts_sync_handler() { @@ -183,8 +179,9 @@ void aborts_sync_handler() unsigned long iss = bit64_extract(esr, ESR_ISS_OFF, ESR_ISS_LEN); abort_handler_t handler = abort_handlers[ec]; - if (handler) + if (handler) { handler(iss, ipa_fault_addr, il, ec); - else - ERROR("no handler for abort ec = 0x%x", ec); // unknown guest exception + } else { + ERROR("no handler for abort ec = 0x%x", ec); // unknown guest exception + } } diff --git a/src/arch/armv8/armv8-a/aarch32/boot.S b/src/arch/armv8/armv8-a/aarch32/boot.S index 48ceb0698..65420a275 100644 --- a/src/arch/armv8/armv8-a/aarch32/boot.S +++ b/src/arch/armv8/armv8-a/aarch32/boot.S @@ -24,8 +24,8 @@ boot_arch_profile_init: mov r13, lr /* - * Register r12 contains the size of the allocated physical memory between - * the loadable sections of the image and the non-loadable. + * Register r12 contains the size of the allocated physical memory between the loadable + * sections of the image and the non-loadable. 
*/ ldr r10, =extra_allocated_phys_mem diff --git a/src/arch/armv8/armv8-a/aarch32/inc/arch/bao.h b/src/arch/armv8/armv8-a/aarch32/inc/arch/bao.h index 13f05e832..7226a98cb 100644 --- a/src/arch/armv8/armv8-a/aarch32/inc/arch/bao.h +++ b/src/arch/armv8/armv8-a/aarch32/inc/arch/bao.h @@ -6,19 +6,17 @@ #ifndef __ARCH_BAO_H__ #define __ARCH_BAO_H__ -#define BAO_VAS_BASE (0x40000000) -#define BAO_CPU_BASE (0x50000000) -#define BAO_VM_BASE (0x60000000) -#define BAO_VAS_TOP (0x80000000) -#define PAGE_SIZE (0x1000) -#define STACK_SIZE (PAGE_SIZE) +#define BAO_VAS_BASE (0x40000000) +#define BAO_CPU_BASE (0x50000000) +#define BAO_VM_BASE (0x60000000) +#define BAO_VAS_TOP (0x80000000) +#define PAGE_SIZE (0x1000) +#define STACK_SIZE (PAGE_SIZE) -#define GPR(N) "r"#N +#define GPR(N) "r" #N #ifndef __ASSEMBLER__ - #endif /* !__ASSEMBLER__ */ - #endif /* __ARCH_BAO_H__ */ diff --git a/src/arch/armv8/armv8-a/aarch32/vmm.c b/src/arch/armv8/armv8-a/aarch32/vmm.c index ed0bdeb7f..134cee29d 100644 --- a/src/arch/armv8/armv8-a/aarch32/vmm.c +++ b/src/arch/armv8/armv8-a/aarch32/vmm.c @@ -7,20 +7,17 @@ #include #include -void vmm_arch_init_tcr() { - +void vmm_arch_init_tcr() +{ if (cpu()->id == CPU_MASTER) { - /* Despite LPAE, we only support 32-bit guest physical address spaces. */ parange = PAR_32BIT; - } cpu_sync_barrier(&cpu_glb_sync); - uint64_t vtcr = VTCR_RES1 | VTCR_ORGN0_WB_RA_WA | VTCR_IRGN0_WB_RA_WA | - VTCR_T0SZ(0) | VTCR_SH0_IS | VTCR_SL0_12; + uint64_t vtcr = VTCR_RES1 | VTCR_ORGN0_WB_RA_WA | VTCR_IRGN0_WB_RA_WA | VTCR_T0SZ(0) | + VTCR_SH0_IS | VTCR_SL0_12; sysreg_vtcr_el2_write(vtcr); - } diff --git a/src/arch/armv8/armv8-a/aarch64/boot.S b/src/arch/armv8/armv8-a/aarch64/boot.S index 6f77a0bba..708d6ba14 100644 --- a/src/arch/armv8/armv8-a/aarch64/boot.S +++ b/src/arch/armv8/armv8-a/aarch64/boot.S @@ -18,8 +18,8 @@ boot_arch_profile_init: mov x20, x30 /* - * Register x18 contains the size of the allocated physical memory between - * the loadable sections of the image and the non-loadable. + * Register x18 contains the size of the allocated physical memory between the loadable + * sections of the image and the non-loadable. */ ldr x18, =extra_allocated_phys_mem @@ -172,8 +172,8 @@ map_cpu: setup_cpu: /** - * The operation is purposely commented out. - * We are assuming monitor code already enabled smp coherency. + * The operation is purposely commented out. We are assuming monitor code already enabled smp + * coherency. */ /* setup translation configurations */ @@ -195,14 +195,12 @@ setup_cpu: msr TTBR0_EL2, x3 /** - * TODO: set implementation defined registers such as ACTLR or AMAIR. - * Maybe define a macro for this in a implementation oriented directory - * inside arch. + * TODO: set implementation defined registers such as ACTLR or AMAIR. Maybe define a macro for + * this in a implementation oriented directory inside arch. */ /** - * TODO: invalidate caches, TLBs and branch prediction. - * Need for barriers? + * TODO: invalidate caches, TLBs and branch prediction. Need for barriers? 
*/ ldr x5, =_enter_vas @@ -248,7 +246,7 @@ warm_boot: /* save x0 which contains pointer to saved state psci context */ mov x19, x0 - /* invalidate l1$ */ + /* invalidate l1$ */ mov x0, #0 bl boot_cache_invalidate diff --git a/src/arch/armv8/armv8-a/aarch64/inc/arch/bao.h b/src/arch/armv8/armv8-a/aarch64/inc/arch/bao.h index 9efbf8881..245bcb2b0 100644 --- a/src/arch/armv8/armv8-a/aarch64/inc/arch/bao.h +++ b/src/arch/armv8/armv8-a/aarch64/inc/arch/bao.h @@ -6,19 +6,17 @@ #ifndef __ARCH_BAO_H__ #define __ARCH_BAO_H__ -#define BAO_VAS_BASE (0xfd8000000000) -#define BAO_CPU_BASE (0xfe0000000000) -#define BAO_VM_BASE (0xfe8000000000) -#define BAO_VAS_TOP (0xff0000000000) -#define PAGE_SIZE (0x1000) -#define STACK_SIZE (PAGE_SIZE) +#define BAO_VAS_BASE (0xfd8000000000) +#define BAO_CPU_BASE (0xfe0000000000) +#define BAO_VM_BASE (0xfe8000000000) +#define BAO_VAS_TOP (0xff0000000000) +#define PAGE_SIZE (0x1000) +#define STACK_SIZE (PAGE_SIZE) -#define GPR(N) "x"#N +#define GPR(N) "x" #N #ifndef __ASSEMBLER__ - #endif /* !__ASSEMBLER__ */ - #endif /* __ARCH_BAO_H__ */ diff --git a/src/arch/armv8/armv8-a/aarch64/relocate.S b/src/arch/armv8/armv8-a/aarch64/relocate.S index b10139224..cf45a2abe 100644 --- a/src/arch/armv8/armv8-a/aarch64/relocate.S +++ b/src/arch/armv8/armv8-a/aarch64/relocate.S @@ -50,8 +50,7 @@ memcpy: switch_space: /** - * update flat maping page table entry to feature new physical address space - * entry page + * update flat maping page table entry to feature new physical address space entry page */ adr x3, _image_start PTE_INDEX_ASM x4, x3, 1 diff --git a/src/arch/armv8/armv8-a/aarch64/vmm.c b/src/arch/armv8/armv8-a/aarch64/vmm.c index 26c0daa6c..6e9c17149 100644 --- a/src/arch/armv8/armv8-a/aarch64/vmm.c +++ b/src/arch/armv8/armv8-a/aarch64/vmm.c @@ -10,14 +10,12 @@ void vmm_arch_init_tcr() { /** - * Check available physical address range which will limit - * IPA size. Patch 2-stage page table descriptors if this forces - * the initial lookup to level 1. + * Check available physical address range which will limit IPA size. Patch 2-stage page table + * descriptors if this forces the initial lookup to level 1. * - * In multi-cluster heterogenous we only support the minimum parange - * for a vm's physicall adress space. - * TODO: we could make this more dynamic and adapt it to each virtual - * machine. + * In multi-cluster heterogenous we only support the minimum parange for a vm's physicall + * adress space. + * TODO: we could make this more dynamic and adapt it to each virtual machine. */ static size_t min_parange = 0b111; @@ -25,7 +23,7 @@ void vmm_arch_init_tcr() size_t temp_parange = sysreg_id_aa64mmfr0_el1_read() & ID_AA64MMFR0_PAR_MSK; spin_lock(&lock); - if(temp_parange < min_parange) { + if (temp_parange < min_parange) { min_parange = temp_parange; } spin_unlock(&lock); @@ -47,10 +45,9 @@ void vmm_arch_init_tcr() cpu_sync_barrier(&cpu_glb_sync); - uint64_t vtcr = VTCR_RES1 | ((parange << VTCR_PS_OFF) & VTCR_PS_MSK) | - VTCR_TG0_4K | VTCR_ORGN0_WB_RA_WA | VTCR_IRGN0_WB_RA_WA | - VTCR_T0SZ(64 - parange_table[parange]) | VTCR_SH0_IS | - ((parange_table[parange] < 44) ? VTCR_SL0_12 : VTCR_SL0_01); + uint64_t vtcr = VTCR_RES1 | ((parange << VTCR_PS_OFF) & VTCR_PS_MSK) | VTCR_TG0_4K | + VTCR_ORGN0_WB_RA_WA | VTCR_IRGN0_WB_RA_WA | VTCR_T0SZ(64 - parange_table[parange]) | + VTCR_SH0_IS | ((parange_table[parange] < 44) ? 
VTCR_SL0_12 : VTCR_SL0_01); sysreg_vtcr_el2_write(vtcr); } diff --git a/src/arch/armv8/armv8-a/cpu.c b/src/arch/armv8/armv8-a/cpu.c index 251b17c36..f715b986d 100644 --- a/src/arch/armv8/armv8-a/cpu.c +++ b/src/arch/armv8/armv8-a/cpu.c @@ -8,13 +8,15 @@ #include #include -void cpu_arch_profile_init(cpuid_t cpuid, paddr_t load_addr) { +void cpu_arch_profile_init(cpuid_t cpuid, paddr_t load_addr) +{ if (cpuid == CPU_MASTER) { - /* power on necessary, but still sleeping, secondary cpu cores - * Assumes CPU zero is doing this */ - for (size_t cpu_core_id = 0; cpu_core_id < platform.cpu_num; - cpu_core_id++) { - if(cpu_core_id == cpuid) continue; + /* power on necessary, but still sleeping, secondary cpu cores Assumes CPU zero is doing + * this */ + for (size_t cpu_core_id = 0; cpu_core_id < platform.cpu_num; cpu_core_id++) { + if (cpu_core_id == cpuid) { + continue; + } unsigned long mpidr = cpu_id_to_mpidr(cpu_core_id); // TODO: pass config addr in contextid (x0 register) int32_t result = psci_cpu_on(mpidr, load_addr, 0); @@ -25,10 +27,10 @@ void cpu_arch_profile_init(cpuid_t cpuid, paddr_t load_addr) { } } -void cpu_arch_profile_idle() { - +void cpu_arch_profile_idle() +{ int64_t err = psci_power_down(PSCI_WAKEUP_IDLE); - if(err) { + if (err) { switch (err) { case PSCI_E_NOT_SUPPORTED: /** @@ -42,7 +44,7 @@ void cpu_arch_profile_idle() { } /** - * Power down was sucessful but did not jump to requested entry - * point. Just return to the architectural + * Power down was sucessful but did not jump to requested entry point. Just return to the + * architectural */ } diff --git a/src/arch/armv8/armv8-a/inc/arch/iommu.h b/src/arch/armv8/armv8-a/inc/arch/iommu.h index 66de86fc6..080f293e3 100644 --- a/src/arch/armv8/armv8-a/inc/arch/iommu.h +++ b/src/arch/armv8/armv8-a/inc/arch/iommu.h @@ -1,5 +1,5 @@ /** - * SPDX-License-Identifier: Apache-2.0 + * SPDX-License-Identifier: Apache-2.0 * Copyright (c) Bao Project and Contributors. All rights reserved */ diff --git a/src/arch/armv8/armv8-a/inc/arch/page_table.h b/src/arch/armv8/armv8-a/inc/arch/page_table.h index bcfd2e4d6..bd955b4cf 100644 --- a/src/arch/armv8/armv8-a/inc/arch/page_table.h +++ b/src/arch/armv8/armv8-a/inc/arch/page_table.h @@ -10,125 +10,125 @@ #include #ifdef __ASSEMBLER__ -#define PT_SIZE PAGE_SIZE +#define PT_SIZE PAGE_SIZE #define PTE_INDEX_SHIFT(LEVEL) ((9 * (3 - LEVEL)) + 12) #define PTE_INDEX(LEVEL, ADDR) ((ADDR >> PTE_INDEX_SHIFT(LEVEL)) & (0x1FF)) +// We turn clang-format off at this point since this is an assembly macro and thus is incorrectly +// formatted. Despite this being assembly we keep this macro here so that is next to its C macro +// counter-part defined above. 
+// clang-format off .macro PTE_INDEX_ASM index, addr, level lsr \index, \addr, #PTE_INDEX_SHIFT(\level) and \index, \index, #0x1ff lsl \index, \index, #3 .endm +// clang-format on #endif #ifdef AARCH32 -#define PT_SHARED_LVL (1) +#define PT_SHARED_LVL (1) #else -#define PT_SHARED_LVL (0) +#define PT_SHARED_LVL (0) #endif -#define HYP_ROOT_PT_SIZE PAGE_SIZE - -#define ADDR_MSK(MSB, LSB) (((1ULL << (MSB + 1)) - 1) & ~((1ULL << (LSB)) - 1)) -#define PTE_ADDR_MSK ADDR_MSK(47, 12) -#define PTE_FLAGS_MSK (~PTE_ADDR_MSK) -#define PTE_MASK(OFF, LEN) BIT64_MASK(OFF, LEN) - -#define PTE_TYPE_MSK (0x3) -#define PTE_VALID (0x1) -#define PTE_SUPERPAGE (0x1) -#define PTE_TABLE (0x3) -#define PTE_PAGE (0x3) - -#define PTE_NSTable (1LL << 63) -#define PTE_APTable_OFF (61) -#define PTE_APTable_MSK (0x3LL << PTE_APTable_OFF) -#define PTE_APTable_ALL (0x0LL << PTE_APTable_OFF) -#define PTE_APTable_NOEL0 (0x1LL << PTE_APTable_OFF) -#define PTE_APTable_RO (0x2LL << PTE_APTable_OFF) -#define PTE_APTable_RO_NOEL0 (0x3LL << PTE_APTable_OFF) -#define PTE_XNTable (1LL << 60) -#define PTE_PXNTable (1LL << 59) - -#define PTE_PBHA_OFF (59) -#define PTE_PBHA_MSK (0xf << PTE_PBHA_OFF) -#define PTE_PBHA(VAL) ((VAL << PTE_PBHA_OFF) & PTE_PBHA_MSK) -#define PTE_XN (1LL << 54) -#define PTE_PXN (1LL << 53) -#define PTE_Con (1LL << 52) -#define PTE_DBM (1LL << 51) -#define PTE_nG (1LL << 11) -#define PTE_AF (1LL << 10) -#define PTE_SH_OFF (8) -#define PTE_SH_MSK (0x3LL << PTE_SH_OFF) -#define PTE_SH_NS (0x0LL << PTE_SH_OFF) -#define PTE_SH_OS (0x2LL << PTE_SH_OFF) -#define PTE_SH_IS (0x3LL << PTE_SH_OFF) -#define PTE_AP_OFF (6) -#define PTE_AP_MSK (0x3LL << PTE_AP_OFF) -#define PTE_AP_RW_PRIV (0x0LL << PTE_AP_OFF) -#define PTE_AP_RO_PRIV (0x2LL << PTE_AP_OFF) -#define PTE_AP_RW (0x1LL << PTE_AP_OFF) -#define PTE_AP_RO (0x3LL << PTE_AP_OFF) -#define PTE_NS (1 << 5) -#define PTE_ATTR_OFF (2) -#define PTE_ATTR_MSK (0x7LL << PTE_ATTR_OFF) -#define PTE_ATTR(N) ((N << PTE_ATTR_OFF) & PTE_ATTR_MSK) +#define HYP_ROOT_PT_SIZE PAGE_SIZE + +#define ADDR_MSK(MSB, LSB) (((1ULL << (MSB + 1)) - 1) & ~((1ULL << (LSB)) - 1)) +#define PTE_ADDR_MSK ADDR_MSK(47, 12) +#define PTE_FLAGS_MSK (~PTE_ADDR_MSK) +#define PTE_MASK(OFF, LEN) BIT64_MASK(OFF, LEN) + +#define PTE_TYPE_MSK (0x3) +#define PTE_VALID (0x1) +#define PTE_SUPERPAGE (0x1) +#define PTE_TABLE (0x3) +#define PTE_PAGE (0x3) + +#define PTE_NSTable (1LL << 63) +#define PTE_APTable_OFF (61) +#define PTE_APTable_MSK (0x3LL << PTE_APTable_OFF) +#define PTE_APTable_ALL (0x0LL << PTE_APTable_OFF) +#define PTE_APTable_NOEL0 (0x1LL << PTE_APTable_OFF) +#define PTE_APTable_RO (0x2LL << PTE_APTable_OFF) +#define PTE_APTable_RO_NOEL0 (0x3LL << PTE_APTable_OFF) +#define PTE_XNTable (1LL << 60) +#define PTE_PXNTable (1LL << 59) + +#define PTE_PBHA_OFF (59) +#define PTE_PBHA_MSK (0xf << PTE_PBHA_OFF) +#define PTE_PBHA(VAL) ((VAL << PTE_PBHA_OFF) & PTE_PBHA_MSK) +#define PTE_XN (1LL << 54) +#define PTE_PXN (1LL << 53) +#define PTE_Con (1LL << 52) +#define PTE_DBM (1LL << 51) +#define PTE_nG (1LL << 11) +#define PTE_AF (1LL << 10) +#define PTE_SH_OFF (8) +#define PTE_SH_MSK (0x3LL << PTE_SH_OFF) +#define PTE_SH_NS (0x0LL << PTE_SH_OFF) +#define PTE_SH_OS (0x2LL << PTE_SH_OFF) +#define PTE_SH_IS (0x3LL << PTE_SH_OFF) +#define PTE_AP_OFF (6) +#define PTE_AP_MSK (0x3LL << PTE_AP_OFF) +#define PTE_AP_RW_PRIV (0x0LL << PTE_AP_OFF) +#define PTE_AP_RO_PRIV (0x2LL << PTE_AP_OFF) +#define PTE_AP_RW (0x1LL << PTE_AP_OFF) +#define PTE_AP_RO (0x3LL << PTE_AP_OFF) +#define PTE_NS (1 << 5) +#define PTE_ATTR_OFF (2) 
+#define PTE_ATTR_MSK (0x7LL << PTE_ATTR_OFF) +#define PTE_ATTR(N) ((N << PTE_ATTR_OFF) & PTE_ATTR_MSK) /* Stage 2 fields */ -#define PTE_MEMATTR_OFF (2) -#define PTE_MEMATTR_DEV_nGnRnE ((0x00 << 0) << PTE_MEMATTR_OFF) -#define PTE_MEMATTR_DEV_nGnRE ((0x01 << 0) << PTE_MEMATTR_OFF) -#define PTE_MEMATTR_DEV_nGRE ((0x02 << 0) << PTE_MEMATTR_OFF) -#define PTE_MEMATTR_DEV_GRE ((0x03 << 0) << PTE_MEMATTR_OFF) -#define PTE_MEMATTR_NRML_ONC ((0x01 << 2) << PTE_MEMATTR_OFF) -#define PTE_MEMATTR_NRML_OWTC ((0x02 << 2) << PTE_MEMATTR_OFF) -#define PTE_MEMATTR_NRML_OWBC ((0x03 << 2) << PTE_MEMATTR_OFF) -#define PTE_MEMATTR_NRML_INC ((0x01 << 0) << PTE_MEMATTR_OFF) -#define PTE_MEMATTR_NRML_IWTC ((0x02 << 0) << PTE_MEMATTR_OFF) -#define PTE_MEMATTR_NRML_IWBC ((0x03 << 0) << PTE_MEMATTR_OFF) - -#define PTE_S2AP_RO (0x1 << PTE_AP_OFF) -#define PTE_S2AP_WO (0x2 << PTE_AP_OFF) -#define PTE_S2AP_RW (0x3 << PTE_AP_OFF) - -#define PTE_RSW_OFF (55) -#define PTE_RSW_WDT (4) -#define PTE_RSW_MSK \ - (((1ULL << (PTE_RSW_OFF + PTE_RSW_WDT)) - 1) - ((1ULL << (PTE_RSW_OFF)) - 1)) +#define PTE_MEMATTR_OFF (2) +#define PTE_MEMATTR_DEV_nGnRnE ((0x00 << 0) << PTE_MEMATTR_OFF) +#define PTE_MEMATTR_DEV_nGnRE ((0x01 << 0) << PTE_MEMATTR_OFF) +#define PTE_MEMATTR_DEV_nGRE ((0x02 << 0) << PTE_MEMATTR_OFF) +#define PTE_MEMATTR_DEV_GRE ((0x03 << 0) << PTE_MEMATTR_OFF) +#define PTE_MEMATTR_NRML_ONC ((0x01 << 2) << PTE_MEMATTR_OFF) +#define PTE_MEMATTR_NRML_OWTC ((0x02 << 2) << PTE_MEMATTR_OFF) +#define PTE_MEMATTR_NRML_OWBC ((0x03 << 2) << PTE_MEMATTR_OFF) +#define PTE_MEMATTR_NRML_INC ((0x01 << 0) << PTE_MEMATTR_OFF) +#define PTE_MEMATTR_NRML_IWTC ((0x02 << 0) << PTE_MEMATTR_OFF) +#define PTE_MEMATTR_NRML_IWBC ((0x03 << 0) << PTE_MEMATTR_OFF) + +#define PTE_S2AP_RO (0x1 << PTE_AP_OFF) +#define PTE_S2AP_WO (0x2 << PTE_AP_OFF) +#define PTE_S2AP_RW (0x3 << PTE_AP_OFF) + +#define PTE_RSW_OFF (55) +#define PTE_RSW_WDT (4) +#define PTE_RSW_MSK (((1ULL << (PTE_RSW_OFF + PTE_RSW_WDT)) - 1) - ((1ULL << (PTE_RSW_OFF)) - 1)) /* ------------------------------------------------------------- */ -#define PTE_RSW_EMPT (0x0LL << PTE_RSW_OFF) -#define PTE_RSW_OPEN (0x1LL << PTE_RSW_OFF) -#define PTE_RSW_FULL (0x2LL << PTE_RSW_OFF) -#define PTE_RSW_RSRV (0x3LL << PTE_RSW_OFF) +#define PTE_RSW_EMPT (0x0LL << PTE_RSW_OFF) +#define PTE_RSW_OPEN (0x1LL << PTE_RSW_OFF) +#define PTE_RSW_FULL (0x2LL << PTE_RSW_OFF) +#define PTE_RSW_RSRV (0x3LL << PTE_RSW_OFF) #define PT_ROOT_FLAGS_REC_IND_OFF (0) #define PT_ROOT_FLAGS_REC_IND_LEN (13) -#define PT_ROOT_FLAGS_REC_IND_MSK \ - BIT64_MASK(PT_ROOT_FLAGS_REC_IND_OFF, PT_ROOT_FLAGS_REC_IND_LEN) +#define PT_ROOT_FLAGS_REC_IND_MSK BIT64_MASK(PT_ROOT_FLAGS_REC_IND_OFF, PT_ROOT_FLAGS_REC_IND_LEN) -#define PT_CPU_REC_IND (pt_nentries(&cpu()->as.pt, 0) - 1) -#define PT_VM_REC_IND (pt_nentries(&cpu()->as.pt, 0) - 2) +#define PT_CPU_REC_IND (pt_nentries(&cpu()->as.pt, 0) - 1) +#define PT_VM_REC_IND (pt_nentries(&cpu()->as.pt, 0) - 2) -#define PTE_INVALID (0) -#define PTE_HYP_FLAGS (PTE_ATTR(1) | PTE_AP_RW | PTE_SH_IS | PTE_AF) -#define PTE_HYP_DEV_FLAGS \ - (PTE_ATTR(2) | PTE_AP_RW | PTE_SH_IS | PTE_AF | PTE_XN) +#define PTE_INVALID (0) +#define PTE_HYP_FLAGS (PTE_ATTR(1) | PTE_AP_RW | PTE_SH_IS | PTE_AF) +#define PTE_HYP_DEV_FLAGS (PTE_ATTR(2) | PTE_AP_RW | PTE_SH_IS | PTE_AF | PTE_XN) -#define PTE_VM_FLAGS \ - (PTE_MEMATTR_NRML_OWBC | PTE_MEMATTR_NRML_IWBC | PTE_SH_NS | PTE_S2AP_RW | \ - PTE_AF) +#define PTE_VM_FLAGS \ + (PTE_MEMATTR_NRML_OWBC | PTE_MEMATTR_NRML_IWBC | PTE_SH_NS | PTE_S2AP_RW | PTE_AF) -#define 
PTE_VM_DEV_FLAGS \ - (PTE_MEMATTR_DEV_GRE | PTE_SH_NS | PTE_S2AP_RW | PTE_AF) +#define PTE_VM_DEV_FLAGS (PTE_MEMATTR_DEV_GRE | PTE_SH_NS | PTE_S2AP_RW | PTE_AF) #ifndef __ASSEMBLER__ -typedef uint64_t pte_t; + typedef uint64_t pte_t; typedef pte_t pte_type_t; typedef pte_t pte_flags_t; diff --git a/src/arch/armv8/armv8-a/inc/arch/profile/cpu.h b/src/arch/armv8/armv8-a/inc/arch/profile/cpu.h index d3cb14017..a72863646 100644 --- a/src/arch/armv8/armv8-a/inc/arch/profile/cpu.h +++ b/src/arch/armv8/armv8-a/inc/arch/profile/cpu.h @@ -13,7 +13,8 @@ struct cpu_arch_profile { struct psci_off_state psci_off_state; }; -static inline struct cpu* cpu() { +static inline struct cpu* cpu() +{ return (struct cpu*)BAO_CPU_BASE; } diff --git a/src/arch/armv8/armv8-a/inc/arch/smc.h b/src/arch/armv8/armv8-a/inc/arch/smc.h index f85914bc9..85a28f911 100644 --- a/src/arch/armv8/armv8-a/inc/arch/smc.h +++ b/src/arch/armv8/armv8-a/inc/arch/smc.h @@ -15,7 +15,7 @@ struct smc_res { unsigned long x3; }; -unsigned long smc_call(unsigned long x0, unsigned long x1, unsigned long x2, - unsigned long x3, struct smc_res *res); +unsigned long smc_call(unsigned long x0, unsigned long x1, unsigned long x2, unsigned long x3, + struct smc_res* res); #endif diff --git a/src/arch/armv8/armv8-a/inc/arch/smmuv2.h b/src/arch/armv8/armv8-a/inc/arch/smmuv2.h index 24d2238fa..dcd0b00fc 100644 --- a/src/arch/armv8/armv8-a/inc/arch/smmuv2.h +++ b/src/arch/armv8/armv8-a/inc/arch/smmuv2.h @@ -8,108 +8,108 @@ #include -#define SMMUV2_CR0_GFRE (0x1 << 1) -#define SMMUV2_CR0_GFIE (0x1 << 2) -#define SMMUV2_CR0_GCFGFRE (0x1 << 4) -#define SMMUV2_CR0_GCFGFIE (0x1 << 5) -#define SMMUV2_CR0_USFCFG (0x1 << 10) -#define SMMUV2_CR0_SMCFCFG (0x1 << 21) -#define SMMUV2_CR0_CLIENTPD (0x1 << 0) - -#define SMMUV2_CR0_CLEAR(cr0) (cr0 & (0x3 << 30 | 0x1 << 11)) - -#define SMMUV2_IDR0_MASK (0xFF) -#define SMMUV2_IDR0_S2TS_BIT (0x1 << 30) -#define SMMUV2_IDR0_SMS_BIT (0x1 << 27) -#define SMMUV2_IDR0_CTTW_BIT (0x1 << 14) -#define SMMUV2_IDR0_BTM_BIT (0x1 << 13) - -#define SMMUV2_IDR1_PAGESIZE_BIT (0x1 << 31) -#define SMMUV2_IDR1_NUMCB_OFF (0) -#define SMMUV2_IDR1_NUMCB_LEN (8) -#define SMMUV2_IDR1_NUMPAGEDXB_OFF (28) -#define SMMUV2_IDR1_NUMPAGEDXB_LEN (3) - -#define SMMUV2_IDR2_PTFSv8_4kB_BIT (0x1 << 12) -#define SMMUV2_IDR2_OAS_OFF (4) -#define SMMUV2_IDR2_OAS_LEN (4) -#define SMMUV2_IDR2_IAS_OFF (0) -#define SMMUV2_IDR2_IAS_LEN (4) - -#define SMMUV2_IDR7_MAJOR_OFF (4) -#define SMMUV2_IDR7_MAJOR_LEN (4) - -#define SMMU_SMR_ID_OFF 0 -#define SMMU_SMR_ID_LEN 15 -#define SMMU_SMR_ID(smr) bit32_extract(smr, SMMU_SMR_ID_OFF, SMMU_SMR_ID_LEN) - -#define SMMU_ID_MSK BIT32_MASK(0, SMMU_SMR_ID_LEN) - -#define SMMU_SMR_MASK_OFF 16 -#define SMMU_SMR_MASK_LEN 15 -#define SMMU_SMR_MASK(smr) bit32_extract(smr, SMMU_SMR_MASK_OFF, SMMU_SMR_MASK_LEN) - -#define SMMUV2_SMR_VALID (0x1 << 31) - -#define S2CR_IMPL_OFF (30) -#define S2CR_IMPL_LEN (2) -#define S2CR_IMPL_MASK BIT32_MASK(S2CR_IMPL_OFF, S2CR_IMPL_LEN) - -#define S2CR_TRANSIENTCFG_OFF (28) -#define S2CR_TRANSIENTCFG_LEN (2) -#define S2CR_TRANSIENTCFG_MASK BIT32_MASK(S2CR_TRANSIENTCFG_OFF, S2CR_TRANSIENTCFG_LEN) +#define SMMUV2_CR0_GFRE (0x1 << 1) +#define SMMUV2_CR0_GFIE (0x1 << 2) +#define SMMUV2_CR0_GCFGFRE (0x1 << 4) +#define SMMUV2_CR0_GCFGFIE (0x1 << 5) +#define SMMUV2_CR0_USFCFG (0x1 << 10) +#define SMMUV2_CR0_SMCFCFG (0x1 << 21) +#define SMMUV2_CR0_CLIENTPD (0x1 << 0) + +#define SMMUV2_CR0_CLEAR(cr0) (cr0 & (0x3 << 30 | 0x1 << 11)) + +#define SMMUV2_IDR0_MASK (0xFF) +#define SMMUV2_IDR0_S2TS_BIT (0x1 << 30) 
+#define SMMUV2_IDR0_SMS_BIT (0x1 << 27) +#define SMMUV2_IDR0_CTTW_BIT (0x1 << 14) +#define SMMUV2_IDR0_BTM_BIT (0x1 << 13) + +#define SMMUV2_IDR1_PAGESIZE_BIT (0x1 << 31) +#define SMMUV2_IDR1_NUMCB_OFF (0) +#define SMMUV2_IDR1_NUMCB_LEN (8) +#define SMMUV2_IDR1_NUMPAGEDXB_OFF (28) +#define SMMUV2_IDR1_NUMPAGEDXB_LEN (3) + +#define SMMUV2_IDR2_PTFSv8_4kB_BIT (0x1 << 12) +#define SMMUV2_IDR2_OAS_OFF (4) +#define SMMUV2_IDR2_OAS_LEN (4) +#define SMMUV2_IDR2_IAS_OFF (0) +#define SMMUV2_IDR2_IAS_LEN (4) + +#define SMMUV2_IDR7_MAJOR_OFF (4) +#define SMMUV2_IDR7_MAJOR_LEN (4) + +#define SMMU_SMR_ID_OFF 0 +#define SMMU_SMR_ID_LEN 15 +#define SMMU_SMR_ID(smr) bit32_extract(smr, SMMU_SMR_ID_OFF, SMMU_SMR_ID_LEN) + +#define SMMU_ID_MSK BIT32_MASK(0, SMMU_SMR_ID_LEN) + +#define SMMU_SMR_MASK_OFF 16 +#define SMMU_SMR_MASK_LEN 15 +#define SMMU_SMR_MASK(smr) bit32_extract(smr, SMMU_SMR_MASK_OFF, SMMU_SMR_MASK_LEN) + +#define SMMUV2_SMR_VALID (0x1 << 31) + +#define S2CR_IMPL_OFF (30) +#define S2CR_IMPL_LEN (2) +#define S2CR_IMPL_MASK BIT32_MASK(S2CR_IMPL_OFF, S2CR_IMPL_LEN) + +#define S2CR_TRANSIENTCFG_OFF (28) +#define S2CR_TRANSIENTCFG_LEN (2) +#define S2CR_TRANSIENTCFG_MASK BIT32_MASK(S2CR_TRANSIENTCFG_OFF, S2CR_TRANSIENTCFG_LEN) #define S2CR_TRANSIENTCFG_NON_TRANSIENT (0x2 << S2CR_TRANSIENTCFG_OFF) -#define S2CR_INSTCFG_OFF (26) -#define S2CR_INSTCFG_LEN (2) -#define S2CR_INSTCFG_MASK BIT32_MASK(S2CR_INSTCFG_OFF, S2CR_INSTCFG_LEN) -#define S2CR_INSTCFG_DATA_ONLY (0x2 << S2CR_INSTCFG_OFF) +#define S2CR_INSTCFG_OFF (26) +#define S2CR_INSTCFG_LEN (2) +#define S2CR_INSTCFG_MASK BIT32_MASK(S2CR_INSTCFG_OFF, S2CR_INSTCFG_LEN) +#define S2CR_INSTCFG_DATA_ONLY (0x2 << S2CR_INSTCFG_OFF) -#define S2CR_PRIVCFG_OFF (24) -#define S2CR_PRIVCFG_LEN (2) -#define S2CR_PRIVCFG_MASK BIT32_MASK(S2CR_PRIVCFG_OFF, S2CR_PRIVCFG_LEN) +#define S2CR_PRIVCFG_OFF (24) +#define S2CR_PRIVCFG_LEN (2) +#define S2CR_PRIVCFG_MASK BIT32_MASK(S2CR_PRIVCFG_OFF, S2CR_PRIVCFG_LEN) -#define S2CR_WACFG_OFF (22) -#define S2CR_WACFG_LEN (2) -#define S2CR_WACFG_MASK BIT32_MASK(S2CR_WACFG_OFF, S2CR_WACFG_LEN) +#define S2CR_WACFG_OFF (22) +#define S2CR_WACFG_LEN (2) +#define S2CR_WACFG_MASK BIT32_MASK(S2CR_WACFG_OFF, S2CR_WACFG_LEN) -#define S2CR_RACFG_OFF (20) -#define S2CR_RACFG_LEN (2) -#define S2CR_RACFG_MASK BIT32_MASK(S2CR_RACFG_OFF, S2CR_RACFG_LEN) +#define S2CR_RACFG_OFF (20) +#define S2CR_RACFG_LEN (2) +#define S2CR_RACFG_MASK BIT32_MASK(S2CR_RACFG_OFF, S2CR_RACFG_LEN) -#define S2CR_NSCFG_OFF (18) -#define S2CR_NSCFG_LEN (2) -#define S2CR_NSCFG_MASK BIT32_MASK(S2CR_NSCFG_OFF, S2CR_NSCFG_LEN) +#define S2CR_NSCFG_OFF (18) +#define S2CR_NSCFG_LEN (2) +#define S2CR_NSCFG_MASK BIT32_MASK(S2CR_NSCFG_OFF, S2CR_NSCFG_LEN) -#define S2CR_TYPE_OFF (16) -#define S2CR_TYPE_LEN (2) -#define S2CR_TYPE_MASK BIT32_MASK(S2CR_TYPE_OFF, S2CR_TYPE_LEN) +#define S2CR_TYPE_OFF (16) +#define S2CR_TYPE_LEN (2) +#define S2CR_TYPE_MASK BIT32_MASK(S2CR_TYPE_OFF, S2CR_TYPE_LEN) -#define S2CR_MemAttr_OFF (12) -#define S2CR_MemAttr_LEN (4) -#define S2CR_MemAttr_MASK BIT32_MASK(S2CR_MemAttr_OFF, S2CR_MemAttr_LEN) +#define S2CR_MemAttr_OFF (12) +#define S2CR_MemAttr_LEN (4) +#define S2CR_MemAttr_MASK BIT32_MASK(S2CR_MemAttr_OFF, S2CR_MemAttr_LEN) -#define S2CR_MTCFG_OFF (11) -#define S2CR_MTCFG_LEN (1) -#define S2CR_MTCFG_MASK BIT32_MASK(S2CR_MTCFG_OFF, S2CR_MTCFG_LEN) +#define S2CR_MTCFG_OFF (11) +#define S2CR_MTCFG_LEN (1) +#define S2CR_MTCFG_MASK BIT32_MASK(S2CR_MTCFG_OFF, S2CR_MTCFG_LEN) -#define S2CR_EXIDVALID_OFF (10) -#define S2CR_EXIDVALID_LEN (1) -#define 
S2CR_EXIDVALID_MASK BIT32_MASK(S2CR_EXIDVALID_OFF, S2CR_EXIDVALID_LEN) +#define S2CR_EXIDVALID_OFF (10) +#define S2CR_EXIDVALID_LEN (1) +#define S2CR_EXIDVALID_MASK BIT32_MASK(S2CR_EXIDVALID_OFF, S2CR_EXIDVALID_LEN) -#define S2CR_SHCFG_OFF (8) -#define S2CR_SHCFG_LEN (2) -#define S2CR_SHCFG_MASK BIT32_MASK(S2CR_SHCFG_OFF, S2CR_SHCFG_LEN) -#define S2CR_SHCFG_IN_SHR (0x2 << S2CR_SHCFG_OFF) +#define S2CR_SHCFG_OFF (8) +#define S2CR_SHCFG_LEN (2) +#define S2CR_SHCFG_MASK BIT32_MASK(S2CR_SHCFG_OFF, S2CR_SHCFG_LEN) +#define S2CR_SHCFG_IN_SHR (0x2 << S2CR_SHCFG_OFF) -#define S2CR_CBNDX_OFF (0) -#define S2CR_CBNDX_LEN (8) -#define S2CR_CBNDX_MASK BIT32_MASK(S2CR_CBNDX_OFF, S2CR_CBNDX_LEN) -#define S2CR_CBNDX(s2cr) bit32_extract(s2cr, S2CR_CBNDX_OFF, S2CR_CBNDX_LEN) +#define S2CR_CBNDX_OFF (0) +#define S2CR_CBNDX_LEN (8) +#define S2CR_CBNDX_MASK BIT32_MASK(S2CR_CBNDX_OFF, S2CR_CBNDX_LEN) +#define S2CR_CBNDX(s2cr) bit32_extract(s2cr, S2CR_CBNDX_OFF, S2CR_CBNDX_LEN) /* Don't clear implementation defined bits, clear everything else. */ -#define S2CR_CLEAR(s2cr) (s2cr & S2CR_IMPL_MASK) -#define S2CR_DFLT (0) +#define S2CR_CLEAR(s2cr) (s2cr & S2CR_IMPL_MASK) +#define S2CR_DFLT (0) struct smmu_glbl_rs0_hw { uint32_t CR0; @@ -199,65 +199,64 @@ struct smmu_glbl_rs0_hw { uint8_t impl3[0x1000 - 0xFD0]; } __attribute__((__packed__, __aligned__(PAGE_SIZE))); -#define SMMUV2_CBAR_TYPE_S2 (0) -#define SMMUV2_CBAR_TYPE_S1_S2FAULT (0x2 << 16) -#define SMMUV2_CBAR_VMID_MASK (0xFF) -#define SMMUV2_CBAR_VMID(ID) ((ID) & SMMUV2_CBAR_VMID_MASK) -#define SMMUV2_CBAR_VA64 (0x1 << 0) +#define SMMUV2_CBAR_TYPE_S2 (0) +#define SMMUV2_CBAR_TYPE_S1_S2FAULT (0x2 << 16) +#define SMMUV2_CBAR_VMID_MASK (0xFF) +#define SMMUV2_CBAR_VMID(ID) ((ID) & SMMUV2_CBAR_VMID_MASK) +#define SMMUV2_CBAR_VA64 (0x1 << 0) -#define SMMUV2_CB_TTBA_END (48) -#define SMMUV2_CB_TTBA(x) BIT64_MASK(x, (SMMUV2_CB_TTBA_END - x)) +#define SMMUV2_CB_TTBA_END (48) +#define SMMUV2_CB_TTBA(x) BIT64_MASK(x, (SMMUV2_CB_TTBA_END - x)) -#define S2CR_IMPL_OFF (30) -#define S2CR_IMPL_LEN (2) -#define S2CR_IMPL_MASK BIT32_MASK(S2CR_IMPL_OFF, S2CR_IMPL_LEN) +#define S2CR_IMPL_OFF (30) +#define S2CR_IMPL_LEN (2) +#define S2CR_IMPL_MASK BIT32_MASK(S2CR_IMPL_OFF, S2CR_IMPL_LEN) -#define S2CR_INSTCFG_OFF (26) -#define S2CR_INSTCFG_LEN (2) -#define S2CR_INSTCFG_MASK BIT32_MASK(S2CR_INSTCFG_OFF, S2CR_INSTCFG_LEN) -#define S2CR_INSTCFG_DATA_ONLY (0x2 << S2CR_INSTCFG_OFF) +#define S2CR_INSTCFG_OFF (26) +#define S2CR_INSTCFG_LEN (2) +#define S2CR_INSTCFG_MASK BIT32_MASK(S2CR_INSTCFG_OFF, S2CR_INSTCFG_LEN) +#define S2CR_INSTCFG_DATA_ONLY (0x2 << S2CR_INSTCFG_OFF) -#define S2CR_PRIVCFG_OFF (24) -#define S2CR_PRIVCFG_LEN (2) -#define S2CR_PRIVCFG_MASK BIT32_MASK(S2CR_PRIVCFG_OFF, S2CR_PRIVCFG_LEN) +#define S2CR_PRIVCFG_OFF (24) +#define S2CR_PRIVCFG_LEN (2) +#define S2CR_PRIVCFG_MASK BIT32_MASK(S2CR_PRIVCFG_OFF, S2CR_PRIVCFG_LEN) -#define S2CR_WACFG_OFF (22) -#define S2CR_WACFG_LEN (2) -#define S2CR_WACFG_MASK BIT32_MASK(S2CR_WACFG_OFF, S2CR_WACFG_LEN) +#define S2CR_WACFG_OFF (22) +#define S2CR_WACFG_LEN (2) +#define S2CR_WACFG_MASK BIT32_MASK(S2CR_WACFG_OFF, S2CR_WACFG_LEN) -#define S2CR_RACFG_OFF (20) -#define S2CR_RACFG_LEN (2) -#define S2CR_RACFG_MASK BIT32_MASK(S2CR_RACFG_OFF, S2CR_RACFG_LEN) +#define S2CR_RACFG_OFF (20) +#define S2CR_RACFG_LEN (2) +#define S2CR_RACFG_MASK BIT32_MASK(S2CR_RACFG_OFF, S2CR_RACFG_LEN) -#define S2CR_NSCFG_OFF (18) -#define S2CR_NSCFG_LEN (2) -#define S2CR_NSCFG_MASK BIT32_MASK(S2CR_NSCFG_OFF, S2CR_NSCFG_LEN) +#define S2CR_NSCFG_OFF (18) +#define 
S2CR_NSCFG_LEN (2) +#define S2CR_NSCFG_MASK BIT32_MASK(S2CR_NSCFG_OFF, S2CR_NSCFG_LEN) -#define S2CR_TYPE_OFF (16) -#define S2CR_TYPE_LEN (2) -#define S2CR_TYPE_MASK BIT32_MASK(S2CR_TYPE_OFF, S2CR_TYPE_LEN) +#define S2CR_TYPE_OFF (16) +#define S2CR_TYPE_LEN (2) +#define S2CR_TYPE_MASK BIT32_MASK(S2CR_TYPE_OFF, S2CR_TYPE_LEN) -#define S2CR_MemAttr_OFF (12) -#define S2CR_MemAttr_LEN (4) -#define S2CR_MemAttr_MASK BIT32_MASK(S2CR_MemAttr_OFF, S2CR_MemAttr_LEN) +#define S2CR_MemAttr_OFF (12) +#define S2CR_MemAttr_LEN (4) +#define S2CR_MemAttr_MASK BIT32_MASK(S2CR_MemAttr_OFF, S2CR_MemAttr_LEN) -#define S2CR_MTCFG_OFF (11) -#define S2CR_MTCFG_LEN (1) -#define S2CR_MTCFG_MASK BIT32_MASK(S2CR_MTCFG_OFF, S2CR_MTCFG_LEN) +#define S2CR_MTCFG_OFF (11) +#define S2CR_MTCFG_LEN (1) +#define S2CR_MTCFG_MASK BIT32_MASK(S2CR_MTCFG_OFF, S2CR_MTCFG_LEN) -#define S2CR_EXIDVALID_OFF (10) -#define S2CR_EXIDVALID_LEN (1) -#define S2CR_EXIDVALID_MASK BIT32_MASK(S2CR_EXIDVALID_OFF, S2CR_EXIDVALID_LEN) +#define S2CR_EXIDVALID_OFF (10) +#define S2CR_EXIDVALID_LEN (1) +#define S2CR_EXIDVALID_MASK BIT32_MASK(S2CR_EXIDVALID_OFF, S2CR_EXIDVALID_LEN) -#define S2CR_SHCFG_OFF (8) -#define S2CR_SHCFG_LEN (2) -#define S2CR_SHCFG_MASK BIT32_MASK(S2CR_SHCFG_OFF, S2CR_SHCFG_LEN) -#define S2CR_SHCFG_IN_SHR (0x2 << S2CR_SHCFG_OFF) - -#define S2CR_CBNDX_OFF (0) -#define S2CR_CBNDX_LEN (8) -#define S2CR_CBNDX_MASK BIT32_MASK(S2CR_CBNDX_OFF, S2CR_CBNDX_LEN) +#define S2CR_SHCFG_OFF (8) +#define S2CR_SHCFG_LEN (2) +#define S2CR_SHCFG_MASK BIT32_MASK(S2CR_SHCFG_OFF, S2CR_SHCFG_LEN) +#define S2CR_SHCFG_IN_SHR (0x2 << S2CR_SHCFG_OFF) +#define S2CR_CBNDX_OFF (0) +#define S2CR_CBNDX_LEN (8) +#define S2CR_CBNDX_MASK BIT32_MASK(S2CR_CBNDX_OFF, S2CR_CBNDX_LEN) struct smmu_glbl_rs1_hw { uint32_t CBAR[128]; @@ -265,67 +264,66 @@ struct smmu_glbl_rs1_hw { uint32_t CBFRSYNRA[128]; uint8_t res2[0x800 - 0x600]; uint32_t CBA2R[128]; - uint8_t res3[0x1000-0xa00]; -} __attribute__((__packed__,__aligned__(PAGE_SIZE))); - -#define SMMUV2_SCTLR_M (0x1 << 0) -#define SMMUV2_SCTLR_TRE (0x1 << 1) -#define SMMUV2_SCTLR_AFE (0x1 << 2) -#define SMMUV2_SCTLR_AFFD (0x1 << 3) -#define SMMUV2_SCTLR_E (0x1 << 4) -#define SMMUV2_SCTLR_CFRE (0x1 << 5) -#define SMMUV2_SCTLR_CFIE (0x1 << 6) -#define SMMUV2_SCTLR_CFCFG (0x1 << 7) -#define SMMUV2_SCTLR_HUPCF (0x1 << 8) -#define SMMUV2_SCTLR_PTW (0x1 << 13) -#define SMMUV2_SCTLR_BSU_NO (0x0 << 14) -#define SMMUV2_SCTLR_BSU_ISH (0x1 << 14) -#define SMMUV2_SCTLR_BSU_OSH (0x2 << 14) -#define SMMUV2_SCTLR_BSU_SYS (0x3 << 14) - -#define SMMUV2_SCTLR_CLEAR(sctlr) \ - (sctlr & (0xF << 28 | 0x1 << 20 | 0xF << 9 | 0x1 << 11)) - -#define SMMUV2_SCTLR_DEFAULT (SMMUV2_SCTLR_CFCFG | SMMUV2_SCTLR_M) - -#define SMMUV2_TCR_T0SZ_MSK (0x1F) -#define SMMUV2_TCR_T0SZ(SZ) ((SZ) & SMMUV2_TCR_T0SZ_MSK) -#define SMMUV2_TCR_SL0_OFF (6) -#define SMMUV2_TCR_SL0_MSK (0x3 << SMMUV2_TCR_SL0_OFF) -#define SMMUV2_TCR_SL0_0 ((0x2 << SMMUV2_TCR_SL0_OFF) & SMMUV2_TCR_SL0_MSK) -#define SMMUV2_TCR_SL0_1 ((0x1 << SMMUV2_TCR_SL0_OFF) & SMMUV2_TCR_SL0_MSK) -#define SMMUV2_TCR_SL0_2 (0) -#define SMMUV2_TCR_IRGN0_OFF (8) -#define SMMUV2_TCR_IRGN0_MSK (0x3 << SMMUV2_TCR_IRGN0_OFF) -#define SMMUV2_TCR_IRGN0_NC (0x0 << SMMUV2_TCR_IRGN0_OFF) -#define SMMUV2_TCR_IRGN0_WB_RA_WA (0x1 << SMMUV2_TCR_IRGN0_OFF) -#define SMMUV2_TCR_IRGN0_WT_RA_NWA (0x2 << SMMUV2_TCR_IRGN0_OFF) -#define SMMUV2_TCR_IRGN0_WB_RA_NWA (0x3 << SMMUV2_TCR_IRGN0_OFF) -#define SMMUV2_TCR_ORGN0_OFF (10) -#define SMMUV2_TCR_ORGN0_MSK (0x3 << SMMUV2_TCR_ORGN0_OFF) -#define SMMUV2_TCR_ORGN0_NC (0x0 << 
SMMUV2_TCR_ORGN0_OFF) -#define SMMUV2_TCR_ORGN0_WB_RA_WA (0x1 << SMMUV2_TCR_ORGN0_OFF) -#define SMMUV2_TCR_ORGN0_WT_RA_NWA (0x2 << SMMUV2_TCR_ORGN0_OFF) -#define SMMUV2_TCR_ORGN0_WB_RA_NWA (0x3 << SMMUV2_TCR_ORGN0_OFF) -#define SMMUV2_TCR_SH0_OFF (12) -#define SMMUV2_TCR_SH0_MSK (0x3 << SMMUV2_TCR_SH0_OFF) -#define SMMUV2_TCR_SH0_NS (0x0 << SMMUV2_TCR_SH0_OFF) -#define SMMUV2_TCR_SH0_OS (0x2 << SMMUV2_TCR_SH0_OFF) -#define SMMUV2_TCR_SH0_IS (0x3 << SMMUV2_TCR_SH0_OFF) -#define SMMUV2_TCR_TG0_OFF (14) -#define SMMUV2_TCR_TG0_MSK (0x3 << SMMUV2_TCR_TG0_OFF) -#define SMMUV2_TCR_TG0_4K (0x0 << SMMUV2_TCR_TG0_OFF) -#define SMMUV2_TCR_TG0_16K (0x2 << SMMUV2_TCR_TG0_OFF) -#define SMMUV2_TCR_TG0_64K (0x1 << SMMUV2_TCR_TG0_OFF) -#define SMMUV2_TCR_PS_OFF (16) -#define SMMUV2_TCR_PS_MSK (0x7 << SMMUV2_TCR_PS_OFF) -#define SMMUV2_TCR_PS_32B (0x0 << SMMUV2_TCR_PS_OFF) -#define SMMUV2_TCR_PS_36B (0x1 << SMMUV2_TCR_PS_OFF) -#define SMMUV2_TCR_PS_40B (0x2 << SMMUV2_TCR_PS_OFF) -#define SMMUV2_TCR_PS_42B (0x3 << SMMUV2_TCR_PS_OFF) -#define SMMUV2_TCR_PS_44B (0x4 << SMMUV2_TCR_PS_OFF) -#define SMMUV2_TCR_PS_48B (0x5 << SMMUV2_TCR_PS_OFF) -#define SMMUV2_TCR_PS_52B (0x6 << SMMUV2_TCR_PS_OFF) + uint8_t res3[0x1000 - 0xa00]; +} __attribute__((__packed__, __aligned__(PAGE_SIZE))); + +#define SMMUV2_SCTLR_M (0x1 << 0) +#define SMMUV2_SCTLR_TRE (0x1 << 1) +#define SMMUV2_SCTLR_AFE (0x1 << 2) +#define SMMUV2_SCTLR_AFFD (0x1 << 3) +#define SMMUV2_SCTLR_E (0x1 << 4) +#define SMMUV2_SCTLR_CFRE (0x1 << 5) +#define SMMUV2_SCTLR_CFIE (0x1 << 6) +#define SMMUV2_SCTLR_CFCFG (0x1 << 7) +#define SMMUV2_SCTLR_HUPCF (0x1 << 8) +#define SMMUV2_SCTLR_PTW (0x1 << 13) +#define SMMUV2_SCTLR_BSU_NO (0x0 << 14) +#define SMMUV2_SCTLR_BSU_ISH (0x1 << 14) +#define SMMUV2_SCTLR_BSU_OSH (0x2 << 14) +#define SMMUV2_SCTLR_BSU_SYS (0x3 << 14) + +#define SMMUV2_SCTLR_CLEAR(sctlr) (sctlr & (0xF << 28 | 0x1 << 20 | 0xF << 9 | 0x1 << 11)) + +#define SMMUV2_SCTLR_DEFAULT (SMMUV2_SCTLR_CFCFG | SMMUV2_SCTLR_M) + +#define SMMUV2_TCR_T0SZ_MSK (0x1F) +#define SMMUV2_TCR_T0SZ(SZ) ((SZ) & SMMUV2_TCR_T0SZ_MSK) +#define SMMUV2_TCR_SL0_OFF (6) +#define SMMUV2_TCR_SL0_MSK (0x3 << SMMUV2_TCR_SL0_OFF) +#define SMMUV2_TCR_SL0_0 ((0x2 << SMMUV2_TCR_SL0_OFF) & SMMUV2_TCR_SL0_MSK) +#define SMMUV2_TCR_SL0_1 ((0x1 << SMMUV2_TCR_SL0_OFF) & SMMUV2_TCR_SL0_MSK) +#define SMMUV2_TCR_SL0_2 (0) +#define SMMUV2_TCR_IRGN0_OFF (8) +#define SMMUV2_TCR_IRGN0_MSK (0x3 << SMMUV2_TCR_IRGN0_OFF) +#define SMMUV2_TCR_IRGN0_NC (0x0 << SMMUV2_TCR_IRGN0_OFF) +#define SMMUV2_TCR_IRGN0_WB_RA_WA (0x1 << SMMUV2_TCR_IRGN0_OFF) +#define SMMUV2_TCR_IRGN0_WT_RA_NWA (0x2 << SMMUV2_TCR_IRGN0_OFF) +#define SMMUV2_TCR_IRGN0_WB_RA_NWA (0x3 << SMMUV2_TCR_IRGN0_OFF) +#define SMMUV2_TCR_ORGN0_OFF (10) +#define SMMUV2_TCR_ORGN0_MSK (0x3 << SMMUV2_TCR_ORGN0_OFF) +#define SMMUV2_TCR_ORGN0_NC (0x0 << SMMUV2_TCR_ORGN0_OFF) +#define SMMUV2_TCR_ORGN0_WB_RA_WA (0x1 << SMMUV2_TCR_ORGN0_OFF) +#define SMMUV2_TCR_ORGN0_WT_RA_NWA (0x2 << SMMUV2_TCR_ORGN0_OFF) +#define SMMUV2_TCR_ORGN0_WB_RA_NWA (0x3 << SMMUV2_TCR_ORGN0_OFF) +#define SMMUV2_TCR_SH0_OFF (12) +#define SMMUV2_TCR_SH0_MSK (0x3 << SMMUV2_TCR_SH0_OFF) +#define SMMUV2_TCR_SH0_NS (0x0 << SMMUV2_TCR_SH0_OFF) +#define SMMUV2_TCR_SH0_OS (0x2 << SMMUV2_TCR_SH0_OFF) +#define SMMUV2_TCR_SH0_IS (0x3 << SMMUV2_TCR_SH0_OFF) +#define SMMUV2_TCR_TG0_OFF (14) +#define SMMUV2_TCR_TG0_MSK (0x3 << SMMUV2_TCR_TG0_OFF) +#define SMMUV2_TCR_TG0_4K (0x0 << SMMUV2_TCR_TG0_OFF) +#define SMMUV2_TCR_TG0_16K (0x2 << SMMUV2_TCR_TG0_OFF) +#define SMMUV2_TCR_TG0_64K (0x1 << 
SMMUV2_TCR_TG0_OFF) +#define SMMUV2_TCR_PS_OFF (16) +#define SMMUV2_TCR_PS_MSK (0x7 << SMMUV2_TCR_PS_OFF) +#define SMMUV2_TCR_PS_32B (0x0 << SMMUV2_TCR_PS_OFF) +#define SMMUV2_TCR_PS_36B (0x1 << SMMUV2_TCR_PS_OFF) +#define SMMUV2_TCR_PS_40B (0x2 << SMMUV2_TCR_PS_OFF) +#define SMMUV2_TCR_PS_42B (0x3 << SMMUV2_TCR_PS_OFF) +#define SMMUV2_TCR_PS_44B (0x4 << SMMUV2_TCR_PS_OFF) +#define SMMUV2_TCR_PS_48B (0x5 << SMMUV2_TCR_PS_OFF) +#define SMMUV2_TCR_PS_52B (0x6 << SMMUV2_TCR_PS_OFF) struct smmu_cntxt_hw { uint32_t SCTLR; @@ -384,7 +382,6 @@ size_t smmu_sme_get_ctx(size_t sme); streamid_t smmu_sme_get_id(size_t sme); streamid_t smmu_sme_get_mask(size_t sme); bool smmu_sme_is_group(size_t sme); -bool smmu_compatible_sme_exists(streamid_t mask, streamid_t id, size_t ctx, - bool group); +bool smmu_compatible_sme_exists(streamid_t mask, streamid_t id, size_t ctx, bool group); #endif diff --git a/src/arch/armv8/armv8-a/inc/arch/tlb.h b/src/arch/armv8/armv8-a/inc/arch/tlb.h index 227a21046..41f6161ca 100644 --- a/src/arch/armv8/armv8-a/inc/arch/tlb.h +++ b/src/arch/armv8/armv8-a/inc/arch/tlb.h @@ -30,11 +30,10 @@ static inline void tlb_vm_inv_va(asid_t vmid, vaddr_t va) { uint64_t vttbr = 0; vttbr = sysreg_vttbr_el2_read(); - bool switch_vmid = - bit64_extract(vttbr, VTTBR_VMID_OFF, VTTBR_VMID_LEN) != vmid; + bool switch_vmid = bit64_extract(vttbr, VTTBR_VMID_OFF, VTTBR_VMID_LEN) != vmid; if (switch_vmid) { - sysreg_vttbr_el2_write((((uint64_t)vmid << VTTBR_VMID_OFF) & VTTBR_VMID_MSK)); + sysreg_vttbr_el2_write(((uint64_t)vmid << VTTBR_VMID_OFF) & VTTBR_VMID_MSK); DSB(ish); ISB(); } @@ -43,7 +42,7 @@ static inline void tlb_vm_inv_va(asid_t vmid, vaddr_t va) if (switch_vmid) { DSB(ish); - sysreg_vttbr_el2_write((((uint64_t)vmid << VTTBR_VMID_OFF) & VTTBR_VMID_MSK)); + sysreg_vttbr_el2_write(((uint64_t)vmid << VTTBR_VMID_OFF) & VTTBR_VMID_MSK); } } @@ -51,11 +50,10 @@ static inline void tlb_vm_inv_all(asid_t vmid) { uint64_t vttbr = 0; vttbr = sysreg_vttbr_el2_read(); - bool switch_vmid = - bit64_extract(vttbr, VTTBR_VMID_OFF, VTTBR_VMID_LEN) != vmid; + bool switch_vmid = bit64_extract(vttbr, VTTBR_VMID_OFF, VTTBR_VMID_LEN) != vmid; if (switch_vmid) { - sysreg_vttbr_el2_write((((uint64_t)vmid << VTTBR_VMID_OFF) & VTTBR_VMID_MSK)); + sysreg_vttbr_el2_write(((uint64_t)vmid << VTTBR_VMID_OFF) & VTTBR_VMID_MSK); DSB(ish); ISB(); } @@ -64,7 +62,7 @@ static inline void tlb_vm_inv_all(asid_t vmid) if (switch_vmid) { DSB(ish); - sysreg_vttbr_el2_write((((uint64_t)vmid << VTTBR_VMID_OFF) & VTTBR_VMID_MSK)); + sysreg_vttbr_el2_write(((uint64_t)vmid << VTTBR_VMID_OFF) & VTTBR_VMID_MSK); } } diff --git a/src/arch/armv8/armv8-a/iommu.c b/src/arch/armv8/armv8-a/iommu.c index cc6f9f19a..704133ec5 100644 --- a/src/arch/armv8/armv8-a/iommu.c +++ b/src/arch/armv8/armv8-a/iommu.c @@ -10,8 +10,8 @@ #include bool iommu_arch_init() -{ - if(cpu()->id == CPU_MASTER && platform.arch.smmu.base){ +{ + if (cpu()->id == CPU_MASTER && platform.arch.smmu.base) { smmu_init(); return true; } @@ -19,11 +19,10 @@ bool iommu_arch_init() return false; } -static ssize_t iommu_vm_arch_init_ctx(struct vm *vm) +static ssize_t iommu_vm_arch_init_ctx(struct vm* vm) { ssize_t ctx_id = vm->io.prot.mmu.ctx_id; if (ctx_id < 0) { - /* Set up ctx bank to vm address space in an available ctx. 
*/ ctx_id = smmu_alloc_ctxbnk(); if (ctx_id >= 0) { @@ -40,21 +39,21 @@ static ssize_t iommu_vm_arch_init_ctx(struct vm *vm) return ctx_id; } -static bool iommu_vm_arch_add(struct vm *vm, streamid_t mask, streamid_t id) +static bool iommu_vm_arch_add(struct vm* vm, streamid_t mask, streamid_t id) { ssize_t vm_ctx = iommu_vm_arch_init_ctx(vm); streamid_t glbl_mask = vm->io.prot.mmu.global_mask; streamid_t prep_mask = (mask & SMMU_ID_MSK) | glbl_mask; streamid_t prep_id = (id & SMMU_ID_MSK); - bool group = (bool) mask; - - if(vm_ctx < 0){ + bool group = (bool)mask; + + if (vm_ctx < 0) { return false; } if (!smmu_compatible_sme_exists(prep_mask, prep_id, vm_ctx, group)) { ssize_t sme = smmu_alloc_sme(); - if(sme < 0){ + if (sme < 0) { INFO("iommu: smmuv2 no more free sme available."); return false; } @@ -65,23 +64,22 @@ static bool iommu_vm_arch_add(struct vm *vm, streamid_t mask, streamid_t id) return true; } -inline bool iommu_arch_vm_add_device(struct vm *vm, streamid_t id) +inline bool iommu_arch_vm_add_device(struct vm* vm, streamid_t id) { return iommu_vm_arch_add(vm, 0, id); } -bool iommu_arch_vm_init(struct vm *vm, const struct vm_config *config) +bool iommu_arch_vm_init(struct vm* vm, const struct vm_config* config) { - vm->io.prot.mmu.global_mask = + vm->io.prot.mmu.global_mask = config->platform.arch.smmu.global_mask | platform.arch.smmu.global_mask; vm->io.prot.mmu.ctx_id = -1; /* This section relates only to arm's iommu so we parse it here. */ for (size_t i = 0; i < config->platform.arch.smmu.group_num; i++) { /* Register each group. */ - const struct smmu_group *group = - &config->platform.arch.smmu.groups[i]; - if(!iommu_vm_arch_add(vm, group->mask, group->id)){ + const struct smmu_group* group = &config->platform.arch.smmu.groups[i]; + if (!iommu_vm_arch_add(vm, group->mask, group->id)) { return false; } } diff --git a/src/arch/armv8/armv8-a/mem.c b/src/arch/armv8/armv8-a/mem.c index 8805a108e..8728ea17a 100644 --- a/src/arch/armv8/armv8-a/mem.c +++ b/src/arch/armv8/armv8-a/mem.c @@ -13,9 +13,8 @@ void as_arch_init(struct addr_space* as) size_t index; /* - * If the address space is a copy of an existing hypervisor space it's not - * possible to use the PT_CPU_REC index to navigate it, so we have to use - * the PT_VM_REC_IND. + * If the address space is a copy of an existing hypervisor space it's not possible to use the + * PT_CPU_REC index to navigate it, so we have to use the PT_VM_REC_IND. 
*/ if (as->type == AS_HYP_CPY || as->type == AS_VM) { index = PT_VM_REC_IND; @@ -35,10 +34,11 @@ bool mem_translate(struct addr_space* as, vaddr_t va, paddr_t* pa) par_saved = sysreg_par_el1_read(); - if (as->type == AS_HYP || as->type == AS_HYP_CPY) + if (as->type == AS_HYP || as->type == AS_HYP_CPY) { arm_at_s1e2w(va); - else + } else { arm_at_s12e1w(va); + } ISB(); par = sysreg_par_el1_read(); @@ -46,8 +46,9 @@ bool mem_translate(struct addr_space* as, vaddr_t va, paddr_t* pa) if (par & PAR_F) { return false; } else { - if (pa != NULL) + if (pa != NULL) { *pa = (par & PAR_PA_MSK) | (va & (PAGE_SIZE - 1)); + } return true; } } diff --git a/src/arch/armv8/armv8-a/page_table.c b/src/arch/armv8/armv8-a/page_table.c index a8a28dd46..e1370b9d6 100644 --- a/src/arch/armv8/armv8-a/page_table.c +++ b/src/arch/armv8/armv8-a/page_table.c @@ -12,42 +12,41 @@ struct page_table_dscr armv8_pt_dscr = { .lvls = 3, - .lvl_wdt = (size_t[]){32, 30, 21}, - .lvl_off = (size_t[]){30, 21, 12}, - .lvl_term = (bool[]){true, true, true}, + .lvl_wdt = (size_t[]){ 32, 30, 21 }, + .lvl_off = (size_t[]){ 30, 21, 12 }, + .lvl_term = (bool[]){ true, true, true }, }; - struct page_table_dscr armv8_pt_s2_dscr = { .lvls = 3, - .lvl_wdt = (size_t[]){39, 30, 21}, - .lvl_off = (size_t[]){30, 21, 12}, - .lvl_term = (bool[]){true, true, true}, + .lvl_wdt = (size_t[]){ 39, 30, 21 }, + .lvl_off = (size_t[]){ 30, 21, 12 }, + .lvl_term = (bool[]){ true, true, true }, }; #else struct page_table_dscr armv8_pt_dscr = { .lvls = 4, - .lvl_wdt = (size_t[]){48, 39, 30, 21}, - .lvl_off = (size_t[]){39, 30, 21, 12}, - .lvl_term = (bool[]){false, true, true, true}, + .lvl_wdt = (size_t[]){ 48, 39, 30, 21 }, + .lvl_off = (size_t[]){ 39, 30, 21, 12 }, + .lvl_term = (bool[]){ false, true, true, true }, }; /** - * This might be modified at initialization depending on the - * value of parange and consequently SL0 in VTCR_EL2. + * This might be modified at initialization depending on the value of parange and consequently SL0 + * in VTCR_EL2. 
*/ struct page_table_dscr armv8_pt_s2_dscr = { .lvls = 4, - .lvl_wdt = (size_t[]){48, 39, 30, 21}, - .lvl_off = (size_t[]){39, 30, 21, 12}, - .lvl_term = (bool[]){false, true, true, true}, + .lvl_wdt = (size_t[]){ 48, 39, 30, 21 }, + .lvl_off = (size_t[]){ 39, 30, 21, 12 }, + .lvl_term = (bool[]){ false, true, true, true }, }; #endif -size_t parange_table[] = {32, 36, 40, 42, 44, 48}; +size_t parange_table[] = { 32, 36, 40, 42, 44, 48 }; struct page_table_dscr* hyp_pt_dscr = &armv8_pt_dscr; struct page_table_dscr* vm_pt_dscr = &armv8_pt_s2_dscr; @@ -85,7 +84,9 @@ pte_t* pt_get_pte(struct page_table* pt, size_t lvl, vaddr_t va) pte_t* pt_get(struct page_table* pt, size_t lvl, vaddr_t va) { - if (lvl == 0) return pt->root; + if (lvl == 0) { + return pt->root; + } uintptr_t pte = (uintptr_t)pt_get_pte(pt, lvl, va); pte &= ~(pt_size(pt, lvl) - 1); @@ -94,7 +95,7 @@ pte_t* pt_get(struct page_table* pt, size_t lvl, vaddr_t va) bool pte_page(struct page_table* pt, pte_t* pte, size_t lvl) { - if(!pt_lvl_terminal(pt, lvl)) { + if (!pt_lvl_terminal(pt, lvl)) { return false; } diff --git a/src/arch/armv8/armv8-a/psci.c b/src/arch/armv8/armv8-a/psci.c index bbe19cd05..aff0b8e07 100644 --- a/src/arch/armv8/armv8-a/psci.c +++ b/src/arch/armv8/armv8-a/psci.c @@ -11,8 +11,8 @@ extern uint8_t root_l1_flat_pt; -static void psci_save_state(enum wakeup_reason wakeup_reason){ - +static void psci_save_state(enum wakeup_reason wakeup_reason) +{ cpu()->arch.profile.psci_off_state.tcr_el2 = sysreg_tcr_el2_read(); cpu()->arch.profile.psci_off_state.ttbr0_el2 = sysreg_ttbr0_el2_read(); cpu()->arch.profile.psci_off_state.mair_el2 = sysreg_mair_el2_read(); @@ -22,34 +22,32 @@ static void psci_save_state(enum wakeup_reason wakeup_reason){ cpu()->arch.profile.psci_off_state.vtcr_el2 = sysreg_vtcr_el2_read(); cpu()->arch.profile.psci_off_state.vttbr_el2 = sysreg_vttbr_el2_read(); mem_translate(&cpu()->as, (vaddr_t)&root_l1_flat_pt, - &cpu()->arch.profile.psci_off_state.flat_map); + &cpu()->arch.profile.psci_off_state.flat_map); cpu()->arch.profile.psci_off_state.wakeup_reason = wakeup_reason; /** - * Although the real PSCI implementation is responsible for managing cache - * state, make sure the saved state is in memory as we'll use this on wake - * up before enabling cache to restore basic processor state. + * Although the real PSCI implementation is responsible for managing cache state, make sure the + * saved state is in memory as we'll use this on wake up before enabling cache to restore basic + * processor state. */ cache_flush_range((vaddr_t)&cpu()->arch.profile.psci_off_state, - sizeof(cpu()->arch.profile.psci_off_state)); + sizeof(cpu()->arch.profile.psci_off_state)); gicc_save_state(&cpu()->arch.profile.psci_off_state.gicc_state); } - -static void psci_restore_state(){ - +static void psci_restore_state() +{ /** - * The majority of the state is already restored in assembly routine - * psci_boot_entry. + * The majority of the state is already restored in assembly routine psci_boot_entry. 
*/ - + gicc_restore_state(&cpu()->arch.profile.psci_off_state.gicc_state); } -void psci_wake_from_powerdown(){ - - if(cpu()->vcpu == NULL){ +void psci_wake_from_powerdown() +{ + if (cpu()->vcpu == NULL) { ERROR("cpu woke up but theres no vcpu to run"); } @@ -58,10 +56,9 @@ void psci_wake_from_powerdown(){ vcpu_run(cpu()->vcpu); } -void psci_wake_from_idle(){ - +void psci_wake_from_idle() +{ cpu_idle_wakeup(); - } void psci_wake_from_off(); @@ -73,27 +70,26 @@ void (*psci_wake_handlers[PSCI_WAKEUP_NUM])(void) = { }; void psci_wake(uint32_t handler_id) -{ - +{ psci_restore_state(); - if(handler_id < PSCI_WAKEUP_NUM){ + if (handler_id < PSCI_WAKEUP_NUM) { psci_wake_handlers[handler_id](); } else { ERROR("unkown reason for cpu wake up"); } - } -int32_t psci_standby(){ +int32_t psci_standby() +{ /* only apply request to core level */ uint32_t pwr_state_aux = PSCI_POWER_STATE_LVL_0 | PSCI_STATE_TYPE_STANDBY; return psci_cpu_suspend(pwr_state_aux, 0, 0); } -int32_t psci_power_down(enum wakeup_reason reason){ - +int32_t psci_power_down(enum wakeup_reason reason) +{ extern void psci_boot_entry(unsigned long x0); uint32_t pwr_state_aux = PSCI_POWER_STATE_LVL_0 | PSCI_STATE_TYPE_POWERDOWN; @@ -107,15 +103,12 @@ int32_t psci_power_down(enum wakeup_reason reason){ return psci_cpu_suspend(pwr_state_aux, psci_wakeup_addr, cntxt_paddr); } -int32_t psci_cpu_suspend(uint32_t power_state, unsigned long entrypoint, - unsigned long context_id) +int32_t psci_cpu_suspend(uint32_t power_state, unsigned long entrypoint, unsigned long context_id) { - return smc_call(PSCI_CPU_SUSPEND, power_state, entrypoint, context_id, NULL); } -int32_t psci_cpu_on(unsigned long target_cpu, unsigned long entrypoint, - unsigned long context_id) +int32_t psci_cpu_on(unsigned long target_cpu, unsigned long entrypoint, unsigned long context_id) { return smc_call(PSCI_CPU_ON, target_cpu, entrypoint, context_id, NULL); } diff --git a/src/arch/armv8/armv8-a/smc.c b/src/arch/armv8/armv8-a/smc.c index a4dec34a3..514fdb687 100644 --- a/src/arch/armv8/armv8-a/smc.c +++ b/src/arch/armv8/armv8-a/smc.c @@ -5,8 +5,8 @@ #include -unsigned long smc_call(unsigned long x0, unsigned long x1, unsigned long x2, - unsigned long x3, struct smc_res *res) +unsigned long smc_call(unsigned long x0, unsigned long x1, unsigned long x2, unsigned long x3, + struct smc_res* res) { register unsigned long r0 asm(GPR(0)) = x0; register unsigned long r1 asm(GPR(1)) = x1; diff --git a/src/arch/armv8/armv8-a/smmuv2.c b/src/arch/armv8/armv8-a/smmuv2.c index 84f481a23..34fab7eca 100644 --- a/src/arch/armv8/armv8-a/smmuv2.c +++ b/src/arch/armv8/armv8-a/smmuv2.c @@ -12,14 +12,13 @@ #include #include - #define SME_MAX_NUM 128 #define CTX_MAX_NUM 128 struct smmu_hw { - volatile struct smmu_glbl_rs0_hw *glbl_rs0; - volatile struct smmu_glbl_rs1_hw *glbl_rs1; - volatile struct smmu_cntxt_hw *cntxt; + volatile struct smmu_glbl_rs0_hw* glbl_rs0; + volatile struct smmu_glbl_rs1_hw* glbl_rs1; + volatile struct smmu_cntxt_hw* cntxt; }; struct smmu_priv { @@ -43,11 +42,11 @@ struct smmu_priv smmu; * * @sme: starting point of the loop cursor */ -#define smmu_for_each_sme(sme) \ +#define smmu_for_each_sme(sme) \ for (size_t __bit = bitmap_get(smmu.sme_bitmap, sme); sme < smmu.sme_num; \ - __bit = bitmap_get(smmu.sme_bitmap, ++sme)) \ - if (!__bit) \ - continue; \ + __bit = bitmap_get(smmu.sme_bitmap, ++sme)) \ + if (!__bit) \ + continue; \ else /** @@ -75,8 +74,8 @@ inline streamid_t smmu_sme_get_mask(size_t sme) static void smmu_check_features() { - unsigned version = 
diff --git a/src/arch/armv8/armv8-a/smmuv2.c b/src/arch/armv8/armv8-a/smmuv2.c index 84f481a23..34fab7eca 100644 --- a/src/arch/armv8/armv8-a/smmuv2.c +++ b/src/arch/armv8/armv8-a/smmuv2.c @@ -12,14 +12,13 @@ #include #include - #define SME_MAX_NUM 128 #define CTX_MAX_NUM 128 struct smmu_hw { - volatile struct smmu_glbl_rs0_hw *glbl_rs0; - volatile struct smmu_glbl_rs1_hw *glbl_rs1; - volatile struct smmu_cntxt_hw *cntxt; + volatile struct smmu_glbl_rs0_hw* glbl_rs0; + volatile struct smmu_glbl_rs1_hw* glbl_rs1; + volatile struct smmu_cntxt_hw* cntxt; }; struct smmu_priv { @@ -43,11 +42,11 @@ struct smmu_priv smmu; * * @sme: starting point of the loop cursor */ -#define smmu_for_each_sme(sme) \ for (size_t __bit = bitmap_get(smmu.sme_bitmap, sme); sme < smmu.sme_num; \ - __bit = bitmap_get(smmu.sme_bitmap, ++sme)) \ - if (!__bit) \ - continue; \ +#define smmu_for_each_sme(sme) \ for (size_t __bit = bitmap_get(smmu.sme_bitmap, sme); sme < smmu.sme_num; \ + __bit = bitmap_get(smmu.sme_bitmap, ++sme)) \ + if (!__bit) \ + continue; \ else /** @@ -75,8 +74,8 @@ inline streamid_t smmu_sme_get_mask(size_t sme) static void smmu_check_features() { - unsigned version = bit32_extract(smmu.hw.glbl_rs0->IDR7, - SMMUV2_IDR7_MAJOR_OFF, SMMUV2_IDR7_MAJOR_LEN); + unsigned version = + bit32_extract(smmu.hw.glbl_rs0->IDR7, SMMUV2_IDR7_MAJOR_OFF, SMMUV2_IDR7_MAJOR_LEN); if (version != 2) { ERROR("smmu unsupported version: %d", version); } @@ -90,10 +89,9 @@ static void smmu_check_features() } /** - * TODO: the most common smmuv2 implementation (mmu-500) does not provide - * ptw coherency. So we must add some mechanism software-managed - * coherency mechanism for the vms using the smmu according to the - * result of this feature test. + * TODO: the most common smmuv2 implementation (mmu-500) does not provide ptw coherency. So we + * must add some software-managed coherency mechanism for the vms using the smmu + * according to the result of this feature test. */ if (!(smmu.hw.glbl_rs0->IDR0 & SMMUV2_IDR0_CTTW_BIT)) { WARNING("smmuv2 does not support coherent page table walks"); @@ -107,10 +105,9 @@ ERROR("smmuv2 does not support 4kb page granule"); } - size_t pasize = bit32_extract(smmu.hw.glbl_rs0->IDR2, SMMUV2_IDR2_OAS_OFF, - SMMUV2_IDR2_OAS_LEN); - size_t ipasize = bit32_extract(smmu.hw.glbl_rs0->IDR2, SMMUV2_IDR2_IAS_OFF, - SMMUV2_IDR2_IAS_LEN); + size_t pasize = bit32_extract(smmu.hw.glbl_rs0->IDR2, SMMUV2_IDR2_OAS_OFF, SMMUV2_IDR2_OAS_LEN); + size_t ipasize = + bit32_extract(smmu.hw.glbl_rs0->IDR2, SMMUV2_IDR2_IAS_OFF, SMMUV2_IDR2_IAS_LEN); if (pasize < parange) { ERROR("smmuv2 does not support the full available pa range"); @@ -124,29 +121,25 @@ void smmu_init() { /* * Alloc pages for global address space. * - * Map the first 4k so we can read all the info we need to further - * allocate smmu registers. + * Map the first 4k so we can read all the info we need to further allocate smmu registers. */ vaddr_t smmu_glbl_rs0 = mem_alloc_map_dev(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, - platform.arch.smmu.base, NUM_PAGES(sizeof(struct smmu_glbl_rs0_hw))); + platform.arch.smmu.base, NUM_PAGES(sizeof(struct smmu_glbl_rs0_hw))); smmu.hw.glbl_rs0 = (struct smmu_glbl_rs0_hw*)smmu_glbl_rs0; - size_t pg_size = - smmu.hw.glbl_rs0->IDR1 & SMMUV2_IDR1_PAGESIZE_BIT ? 0x10000 : 0x1000; - size_t num_page = - 1ULL << (bit32_extract(smmu.hw.glbl_rs0->IDR1, SMMUV2_IDR1_NUMPAGEDXB_OFF, - SMMUV2_IDR1_NUMPAGEDXB_LEN) + - 1); - size_t ctx_bank_num = bit32_extract( - smmu.hw.glbl_rs0->IDR1, SMMUV2_IDR1_NUMCB_OFF, SMMUV2_IDR1_NUMCB_LEN); + size_t pg_size = smmu.hw.glbl_rs0->IDR1 & SMMUV2_IDR1_PAGESIZE_BIT ?
0x10000 : 0x1000; + size_t num_page = 1ULL << (bit32_extract(smmu.hw.glbl_rs0->IDR1, SMMUV2_IDR1_NUMPAGEDXB_OFF, + SMMUV2_IDR1_NUMPAGEDXB_LEN) + + 1); + size_t ctx_bank_num = + bit32_extract(smmu.hw.glbl_rs0->IDR1, SMMUV2_IDR1_NUMCB_OFF, SMMUV2_IDR1_NUMCB_LEN); vaddr_t smmu_glbl_rs1 = mem_alloc_map_dev(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, platform.arch.smmu.base + pg_size, NUM_PAGES(sizeof(struct smmu_glbl_rs1_hw))); vaddr_t smmu_cntxt = mem_alloc_map_dev(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, - platform.arch.smmu.base + (num_page * pg_size), - NUM_PAGES(pg_size * ctx_bank_num)); + platform.arch.smmu.base + (num_page * pg_size), NUM_PAGES(pg_size * ctx_bank_num)); smmu.hw.glbl_rs1 = (struct smmu_glbl_rs1_hw*)smmu_glbl_rs1; smmu.hw.cntxt = (struct smmu_cntxt_hw*)smmu_cntxt; @@ -228,9 +221,8 @@ void smmu_write_ctxbnk(size_t ctx_id, paddr_t root_pt, asid_t vm_id) smmu.hw.glbl_rs1->CBA2R[ctx_id] = SMMUV2_CBAR_VA64; /** - * This should closely match to the VTCR configuration set up in - * vmm_arch_init as we're sharing page table between the VM and its - * smmu context. + * This should closely match to the VTCR configuration set up in vmm_arch_init as we're + * sharing page table between the VM and its smmu context. */ uint32_t tcr = ((parange << SMMUV2_TCR_PS_OFF) & SMMUV2_TCR_PS_MSK); size_t t0sz = 64 - parange_table[parange]; @@ -239,11 +231,9 @@ void smmu_write_ctxbnk(size_t ctx_id, paddr_t root_pt, asid_t vm_id) tcr |= SMMUV2_TCR_IRGN0_WB_RA_WA; tcr |= SMMUV2_TCR_T0SZ(t0sz); tcr |= SMMUV2_TCR_SH0_IS; - tcr |= ((parange_table[parange] < 44) ? SMMUV2_TCR_SL0_1 - : SMMUV2_TCR_SL0_0); + tcr |= ((parange_table[parange] < 44) ? SMMUV2_TCR_SL0_1 : SMMUV2_TCR_SL0_0); smmu.hw.cntxt[ctx_id].TCR = tcr; - smmu.hw.cntxt[ctx_id].TTBR0 = - root_pt & SMMUV2_CB_TTBA(smmu_cb_ttba_offset(t0sz)); + smmu.hw.cntxt[ctx_id].TTBR0 = root_pt & SMMUV2_CB_TTBA(smmu_cb_ttba_offset(t0sz)); uint32_t sctlr = smmu.hw.cntxt[ctx_id].SCTLR; sctlr = SMMUV2_SCTLR_CLEAR(sctlr); @@ -258,7 +248,7 @@ ssize_t smmu_alloc_sme() spin_lock(&smmu.sme_lock); /* Find a free sme. */ ssize_t nth = bitmap_find_nth(smmu.sme_bitmap, smmu.sme_num, 1, 0, false); - if(nth >= 0) { + if (nth >= 0) { bitmap_set(smmu.sme_bitmap, nth); } spin_unlock(&smmu.sme_lock); @@ -272,16 +262,14 @@ ssize_t smmu_alloc_sme() * 1. sme is a group; * 2. sme is a device. * - * Groups can be merged together if one is found to be inclusive or equal of - * the other. + * Groups can be merged together if one is found to be inclusive or equal of the other. * * Devices can be added (i.e. merged into) a group, but not together. * - * This function searches for existing smes that are compatible for merging - * with the new sme, raising an ERROR when conflicting attributes are found. + * This function searches for existing smes that are compatible for merging with the new sme, + * raising an ERROR when conflicting attributes are found. */ -bool smmu_compatible_sme_exists(streamid_t mask, streamid_t id, size_t ctx, - bool group) +bool smmu_compatible_sme_exists(streamid_t mask, streamid_t id, size_t ctx, bool group) { bool included = false; size_t sme = 0; @@ -295,16 +283,13 @@ bool smmu_compatible_sme_exists(streamid_t mask, streamid_t id, size_t ctx, if (!diff_id) { /* Only group-to-group or device-to-group can be merged */ - if (((group || smmu_sme_is_group(sme)) && - (mask_r == mask || mask_r == sme_mask)) && + if (((group || smmu_sme_is_group(sme)) && (mask_r == mask || mask_r == sme_mask)) && ctx == smmu_sme_get_ctx(sme)) { - /* Compatible entry found. 
* - * possibility that it will include other existing entries, it - * is therefore necessary to remove the existing entry and keep - * searching. + * If the new entry includes an existing one, there is the possibility that it will + * include other existing entries; it is therefore necessary to remove the existing + * entry and keep searching. */ if (mask > sme_mask) { bitmap_clear(smmu.sme_bitmap, sme); diff --git a/src/arch/armv8/armv8-a/vm.c b/src/arch/armv8/armv8-a/vm.c index 6c00db2c8..33e96aba8 100644 --- a/src/arch/armv8/armv8-a/vm.c +++ b/src/arch/armv8/armv8-a/vm.c @@ -1,5 +1,5 @@ /** - * SPDX-License-Identifier: Apache-2.0 + * SPDX-License-Identifier: Apache-2.0 * Copyright (c) Bao Project and Contributors. All rights reserved. */ @@ -8,12 +8,13 @@ #include #include -void vcpu_arch_profile_init(struct vcpu* vcpu, struct vm* vm) { +void vcpu_arch_profile_init(struct vcpu* vcpu, struct vm* vm) +{ paddr_t root_pt_pa; mem_translate(&cpu()->as, (vaddr_t)vm->as.pt.root, &root_pt_pa); sysreg_vttbr_el2_write((((uint64_t)vm->id << VTTBR_VMID_OFF) & VTTBR_VMID_MSK) | - (root_pt_pa & ~VTTBR_VMID_MSK)); + (root_pt_pa & ~VTTBR_VMID_MSK)); - ISB(); // make sure vmid is commited befor tlbi + ISB(); // make sure vmid is committed before tlbi tlb_vm_inv_all(vm->id); } diff --git a/src/arch/armv8/armv8-r/aarch32/boot.S b/src/arch/armv8/armv8-r/aarch32/boot.S index 7393f00f0..8a69d18a6 100644 --- a/src/arch/armv8/armv8-r/aarch32/boot.S +++ b/src/arch/armv8/armv8-r/aarch32/boot.S @@ -7,10 +7,10 @@ #include #include -/* In Armv8-R there is no virtual address space (VAS). Notwithstanding, - * we define VAS as an identity map of the PAS with MPU rules enforced - * using an equivalent "page size" of (at least) 64 bytes (minimal MPU - * granularity). +/** + * In Armv8-R there is no virtual address space (VAS). Notwithstanding, we define VAS as an + * identity map of the PAS with MPU rules enforced using an equivalent "page size" of (at least) 64 + * bytes (minimal MPU granularity). */ .section ".boot", "ax" .global boot_arch_profile_init @@ -52,9 +52,8 @@ boot_arch_profile_init: /** * Map loadable image (and possibly unloadable) - * If the vm image section is used and has built-in vm images, we need - * to map the loadble and non-loadble region of the image separately. - * Otherwise we can map it as a single region. + * If the vm image section is used and has built-in vm images, we need to map the loadable and + * non-loadable region of the image separately. Otherwise we can map it as a single region. */ add r4, r4, #1 mcr p15, 4, r4, c6, c2, 1 // HPRSELR
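The identity-mapped VAS described above means the hypervisor never translates addresses on Armv8-R; it only gates them at a 64-byte grain, so the usual page-alignment obligations become granule-alignment obligations. A minimal sketch of the check this implies, using the 64-byte figure from the comment (the helper name is ours, not the tree's):

/* Hedged sketch: both base and size must be multiples of the MPU granule. */
#define MPU_GRAN 64UL

static inline bool region_fits_mpu(unsigned long base, unsigned long size)
{
    /* the low six bits of both base and size must be zero */
    return (size != 0) && (((base | size) & (MPU_GRAN - 1)) == 0);
}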
diff --git a/src/arch/armv8/armv8-r/aarch64/boot.S b/src/arch/armv8/armv8-r/aarch64/boot.S index 08fb5e4af..75a90f1af 100644 --- a/src/arch/armv8/armv8-r/aarch64/boot.S +++ b/src/arch/armv8/armv8-r/aarch64/boot.S @@ -7,10 +7,10 @@ #include #include -/* In Armv8-R there is no virtual address space (VAS). Notwithstanding, - * we define VAS as an identity map of the PAS with MPU rules enforced - * using an equivalent "page size" of (at least) 64 bytes (minimal MPU - * granularity). +/** + * In Armv8-R there is no virtual address space (VAS). Notwithstanding, we define VAS as an + * identity map of the PAS with MPU rules enforced using an equivalent "page size" of (at least) 64 + * bytes (minimal MPU granularity). */ .section ".boot", "ax" .global boot_arch_profile_init @@ -50,9 +50,8 @@ boot_arch_profile_init: /** * Map loadable image (and possibly unloadable) - * If the vm image section is used and has built-in vm images, we need - * to map the loadble and non-loadble region of the image separately. - * Otherwise we can map it as a single region. + * If the vm image section is used and has built-in vm images, we need to map the loadable and + * non-loadable region of the image separately. Otherwise we can map it as a single region. */ msr prselr_el2, x4 isb diff --git a/src/arch/armv8/armv8-r/cpu.c b/src/arch/armv8/armv8-r/cpu.c index dc16fc75f..fce39ad6a 100644 --- a/src/arch/armv8/armv8-r/cpu.c +++ b/src/arch/armv8/armv8-r/cpu.c @@ -6,10 +6,9 @@ #include #include -void cpu_arch_profile_init(cpuid_t cpuid, paddr_t load_addr) { +void cpu_arch_profile_init(cpuid_t cpuid, paddr_t load_addr) { } -} - -void cpu_arch_profile_idle() { +void cpu_arch_profile_idle() +{ asm volatile("wfi"); } diff --git a/src/arch/armv8/armv8-r/inc/arch/bao.h b/src/arch/armv8/armv8-r/inc/arch/bao.h index d3c70c1b7..f6589268a 100644 --- a/src/arch/armv8/armv8-r/inc/arch/bao.h +++ b/src/arch/armv8/armv8-r/inc/arch/bao.h @@ -6,14 +6,12 @@ #ifndef __ARCH_BAO_H__ #define __ARCH_BAO_H__ -#define BAO_VAS_BASE CONFIG_HYP_BASE_ADDR -#define PAGE_SIZE (64) -#define STACK_SIZE (0x1000) +#define BAO_VAS_BASE CONFIG_HYP_BASE_ADDR +#define PAGE_SIZE (64) +#define STACK_SIZE (0x1000) #ifndef __ASSEMBLER__ - #endif /* !__ASSEMBLER__ */ - #endif /* __ARCH_BAO_H__ */ diff --git a/src/arch/armv8/armv8-r/inc/arch/mem.h b/src/arch/armv8/armv8-r/inc/arch/mem.h index bcb7acb6c..1c4c1a5eb 100644 --- a/src/arch/armv8/armv8-r/inc/arch/mem.h +++ b/src/arch/armv8/armv8-r/inc/arch/mem.h @@ -36,29 +36,25 @@ typedef union { }; } mem_flags_t; -#define PTE_FLAGS(_prbar, _prlar) ((mem_flags_t) { \ - .prbar = (_prbar), \ - .prlar = (_prlar), \ -}) - -#define PTE_INVALID PTE_FLAGS(0, 0) -#define PTE_HYP_FLAGS \ - PTE_FLAGS(PRBAR_AP_RW_EL2 | PRBAR_SH_IS, PRLAR_ATTR(1) | PRLAR_EN) +#define PTE_FLAGS(_prbar, _prlar) \ + ((mem_flags_t){ \ + .prbar = (_prbar), \ + .prlar = (_prlar), \ + }) + +#define PTE_INVALID PTE_FLAGS(0, 0) +#define PTE_HYP_FLAGS PTE_FLAGS(PRBAR_AP_RW_EL2 | PRBAR_SH_IS, PRLAR_ATTR(1) | PRLAR_EN) #define PTE_HYP_DEV_FLAGS \ - PTE_FLAGS(PRBAR_XN | PRBAR_AP_RW_EL2 | PRBAR_SH_IS, \ - PRLAR_ATTR(2) | PRLAR_EN) -#define PTE_VM_FLAGS \ - PTE_FLAGS(PRBAR_AP_RW_EL1_EL2 | PRBAR_SH_IS, PRLAR_ATTR(1) | PRLAR_EN) + PTE_FLAGS(PRBAR_XN | PRBAR_AP_RW_EL2 | PRBAR_SH_IS, PRLAR_ATTR(2) | PRLAR_EN) +#define PTE_VM_FLAGS PTE_FLAGS(PRBAR_AP_RW_EL1_EL2 | PRBAR_SH_IS, PRLAR_ATTR(1) | PRLAR_EN) #define PTE_VM_DEV_FLAGS \ - PTE_FLAGS(PRBAR_XN |PRBAR_AP_RW_EL1_EL2 | PRBAR_SH_IS,\ - PRLAR_ATTR(2) | PRLAR_EN) + PTE_FLAGS(PRBAR_XN | PRBAR_AP_RW_EL1_EL2 | PRBAR_SH_IS, PRLAR_ATTR(2) | PRLAR_EN) -#define MPU_ARCH_MAX_NUM_ENTRIES (64) +#define MPU_ARCH_MAX_NUM_ENTRIES (64) static inline const size_t mpu_granularity() { - return (size_t) PAGE_SIZE; + return (size_t)PAGE_SIZE; } - #endif /* __ARCH_MEM_H__ */
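Since PTE_FLAGS above just packs a PRBAR/PRLAR pair into the mem_flags_t union, ad hoc flag sets compose the same way the presets do. For instance, the following spells out the same bits as PTE_VM_DEV_FLAGS rather than adding anything new:

/* Hedged sketch: a non-executable, inner-shareable device mapping for EL1/EL2. */
mem_flags_t dev_flags = PTE_FLAGS(PRBAR_XN | PRBAR_AP_RW_EL1_EL2 | PRBAR_SH_IS,
    PRLAR_ATTR(2) | PRLAR_EN); /* memory attribute index 2, region enabled */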
diff --git a/src/arch/armv8/armv8-r/inc/arch/profile/cpu.h b/src/arch/armv8/armv8-r/inc/arch/profile/cpu.h index 3cde4a5ed..996c49a18 100644 --- a/src/arch/armv8/armv8-r/inc/arch/profile/cpu.h +++ b/src/arch/armv8/armv8-r/inc/arch/profile/cpu.h @@ -17,8 +17,7 @@ struct cpu_arch_profile { struct { BITMAP_ALLOC(bitmap, MPU_ARCH_MAX_NUM_ENTRIES); /** - * A locked region means that it can never be removed from the MPU. - * For example, + * A locked region means that it can never be removed from the MPU. For example, */ BITMAP_ALLOC(locked, MPU_ARCH_MAX_NUM_ENTRIES); struct mpu_perms { @@ -26,9 +25,8 @@ struct cpu_arch_profile { perms_t el1; } perms[MPU_ARCH_MAX_NUM_ENTRIES]; /** - * We maintain an ordered list of the regions currently in the mpu - * to simplify the merging algorithm when mapping an overllaping - * region. + * We maintain an ordered list of the regions currently in the mpu to simplify the merging + * algorithm when mapping an overlapping region. */ struct { struct list list; @@ -40,8 +38,9 @@ } mpu; }; -static inline struct cpu* cpu() { - return (struct cpu*) sysreg_tpidr_el2_read(); +static inline struct cpu* cpu() +{ + return (struct cpu*)sysreg_tpidr_el2_read(); } #endif /* ARCH_PROFILE_CPU_H */ diff --git a/src/arch/armv8/armv8-r/mem.c b/src/arch/armv8/armv8-r/mem.c index 77d047626..29bfd6998 100644 --- a/src/arch/armv8/armv8-r/mem.c +++ b/src/arch/armv8/armv8-r/mem.c @@ -5,7 +5,4 @@ #include -void as_arch_init(struct addr_space* as) -{ - -} +void as_arch_init(struct addr_space* as) { } diff --git a/src/arch/armv8/armv8-r/mpu.c b/src/arch/armv8/armv8-r/mpu.c index aad62856d..70d33f5cb 100644 --- a/src/arch/armv8/armv8-r/mpu.c +++ b/src/arch/armv8/armv8-r/mpu.c @@ -10,14 +10,14 @@ static inline const size_t mpu_num_entries() { - return (size_t) MPUIR_REGION(sysreg_mpuir_el2_read()); + return (size_t)MPUIR_REGION(sysreg_mpuir_el2_read()); } -static void mpu_entry_get_region(mpid_t mpid, struct mp_region *mpe) +static void mpu_entry_get_region(mpid_t mpid, struct mp_region* mpe) { sysreg_prselr_el2_write(mpid); ISB(); - unsigned long prbar = sysreg_prbar_el2_read (); + unsigned long prbar = sysreg_prbar_el2_read(); unsigned long prlar = sysreg_prlar_el2_read(); mpe->mem_flags.prbar = PRBAR_FLAGS(prbar); mpe->mem_flags.prlar = PRLAR_FLAGS(prlar); @@ -26,9 +26,10 @@ mpe->as_sec = SEC_UNKNOWN; } -static int mpu_node_cmp(node_t* _n1, node_t* _n2) { - struct mpu_node *n1 = (struct mpu_node*) _n1; - struct mpu_node *n2 = (struct mpu_node*) _n2; +static int mpu_node_cmp(node_t* _n1, node_t* _n2) +{ + struct mpu_node* n1 = (struct mpu_node*)_n1; + struct mpu_node* n2 = (struct mpu_node*)_n2; struct mp_region r1; struct mp_region r2; mpu_entry_get_region(n1->mpid, &r1); @@ -42,32 +43,29 @@ } } -static void mpu_entry_set(mpid_t mpid, struct mp_region* mpr) { +static void mpu_entry_set(mpid_t mpid, struct mp_region* mpr) +{ unsigned long lim = mpr->base + mpr->size - 1; - + sysreg_prselr_el2_write(mpid); ISB(); - sysreg_prbar_el2_write((mpr->base & PRBAR_BASE_MSK) | - mpr->mem_flags.prbar); + sysreg_prbar_el2_write((mpr->base & PRBAR_BASE_MSK) | mpr->mem_flags.prbar); sysreg_prlar_el2_write((lim & PRLAR_LIMIT_MSK) | mpr->mem_flags.prlar); - list_insert_ordered(&cpu()->arch.profile.mpu.order.list, - (node_t*)&cpu()->arch.profile.mpu.order.node[mpid], - mpu_node_cmp); + list_insert_ordered(&cpu()->arch.profile.mpu.order.list, + (node_t*)&cpu()->arch.profile.mpu.order.node[mpid], mpu_node_cmp); } -static void mpu_entry_modify(mpid_t mpid, struct mp_region* mpr) { - - list_rm(&cpu()->arch.profile.mpu.order.list, - (node_t*)&cpu()->arch.profile.mpu.order.node[mpid]); +static void mpu_entry_modify(mpid_t mpid, struct mp_region* mpr) +{ + list_rm(&cpu()->arch.profile.mpu.order.list, (node_t*)&cpu()->arch.profile.mpu.order.node[mpid]); mpu_entry_set(mpid, mpr); }
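One detail of mpu_entry_set worth spelling out: PRLAR takes an inclusive limit address, which is why the code computes base + size - 1 rather than base + size. With illustrative numbers:

/* Hedged sketch: the region [base, base + size) encodes as an inclusive limit. */
unsigned long base = 0x08010000UL;
unsigned long size = 0x8000UL;
unsigned long lim = base + size - 1; /* 0x08017fff, the last covered byte */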
static bool mpu_entry_clear(mpid_t mpid) { - list_rm(&cpu()->arch.profile.mpu.order.list, - (node_t*)&cpu()->arch.profile.mpu.order.node[mpid]); + list_rm(&cpu()->arch.profile.mpu.order.list, (node_t*)&cpu()->arch.profile.mpu.order.node[mpid]); sysreg_prselr_el2_write(mpid); ISB(); @@ -82,17 +80,20 @@ static inline void mpu_entry_free(mpid_t mpid) { bitmap_clear(cpu()->arch.profile.mpu.bitmap, mpid); } -static inline bool mpu_entry_valid(mpid_t mpid) { +static inline bool mpu_entry_valid(mpid_t mpid) +{ sysreg_prselr_el2_write(mpid); ISB(); return !!(sysreg_prlar_el2_read() & PRLAR_EN); } -static inline bool mpu_entry_locked(mpid_t mpid) { +static inline bool mpu_entry_locked(mpid_t mpid) +{ return !!bitmap_get(cpu()->arch.profile.mpu.locked, mpid); } -static bool mpu_entry_has_priv(mpid_t mpid, priv_t priv) { +static bool mpu_entry_has_priv(mpid_t mpid, priv_t priv) +{ if (priv == PRIV_VM) { return cpu()->arch.profile.mpu.perms[mpid].el1 != PERM_NONE; } else { @@ -100,18 +101,17 @@ } } -static inline perms_t mem_vmpu_entry_perms(struct mp_region *mpr) { +static inline perms_t mem_vmpu_entry_perms(struct mp_region* mpr) +{ perms_t perms = PERM_R; perms |= !(mpr->mem_flags.prbar & PRBAR_XN) ? PERM_X : 0; perms |= !(mpr->mem_flags.prbar & PRBAR_NWR_BIT) ? PERM_W : 0; return perms; } -static inline void mpu_entry_set_perms(struct mp_region *mpr, - struct mpu_perms mpu_perms) +static inline void mpu_entry_set_perms(struct mp_region* mpr, struct mpu_perms mpu_perms) { - // TODO: should we check this is following the allowed permission - // combinations? + // TODO: should we check this is following the allowed permission combinations? bool el1_priv = mpu_perms.el1 != PERM_NONE; perms_t perms = mpu_perms.el1 | mpu_perms.el2; @@ -132,7 +132,7 @@ } } -static void mpu_entry_update_priv_perms(priv_t priv, mpid_t mpid, perms_t perms) +static void mpu_entry_update_priv_perms(priv_t priv, mpid_t mpid, perms_t perms) { if (priv == PRIV_VM) { cpu()->arch.profile.mpu.perms[mpid].el1 = perms; @@ -141,16 +141,17 @@ } } -static inline bool mpu_perms_equivalent(struct mpu_perms *p1, struct mpu_perms *p2) +static inline bool mpu_perms_equivalent(struct mpu_perms* p1, struct mpu_perms* p2) { return (p1->el1 == p2->el1) && (p1->el2 == p2->el2); } -static inline mem_attrs_t mpu_entry_attrs(struct mp_region *mpr) { +static inline mem_attrs_t mpu_entry_attrs(struct mp_region* mpr) +{ mem_flags_t flags = mpr->mem_flags; flags.prbar &= PRBAR_MEM_ATTR_FLAGS_MSK; flags.prbar &= PRLAR_MEM_ATTR_FLAGS_MSK; - return (mem_attrs_t) flags.raw; + return (mem_attrs_t)flags.raw; } static mpid_t mpu_entry_allocate() @@ -166,8 +167,8 @@ return reg_num; } -bool mem_region_get_overlap(struct mp_region *reg1, struct mp_region *reg2, - struct mp_region *overlap) +bool mem_region_get_overlap(struct mp_region* reg1, struct mp_region* reg2, + struct mp_region* overlap) { bool regions_overlap = mem_regions_overlap(reg1, reg2); @@ -184,7 +185,6 @@ return regions_overlap; } - bool mpu_map(priv_t priv, struct mp_region* mpr) { size_t size_left = mpr->size; @@ -199,15 +199,13 @@ mpid_t top_mpid = INVALID_MPID; while (size_left > 0 && !failed) {
- /** - * Since we'll be checking for overlapping regions in order, there - * will be at most two regions to map in a given iteration. This - * happens when the previous iteration found an overlapping region - * that is fully contained by the new region. + /** + * Since we'll be checking for overlapping regions in order, there will be at most two + * regions to map in a given iteration. This happens when the previous iteration found an + * overlapping region that is fully contained by the new region. */ - struct mp_region *new_reg; + struct mp_region* new_reg; if (reg1_valid) { new_reg = &reg1; } else if (reg2_valid) { @@ -216,17 +214,15 @@ break; } - // As Armv8-R does not allow overlapping regions, we must first check - // if usch regions already exist. Specifically, for the case where the - // regions has hypervisor permissions only, and this is a map - // targetting a guest mpu, we just need to flip the guest permission - // bit. This will allow us to share regions between guest and hypevisor - // to, for example, (i) share the use of a peripheral (mainly uart for - // debugging purposes), or (ii) share a RW page between hypervisor and - // guest. Although having a RO page for guest while RW for the - // hypervisor is highly useful, this MPU does not allow it. That said, - // in the case we need it in the future, we'll have to implement a - // mechanism for that based on traps. + // As Armv8-R does not allow overlapping regions, we must first check if such regions + // already exist. Specifically, for the case where the region has hypervisor permissions + // only, and this is a map targeting a guest mpu, we just need to flip the guest + // permission bit. This will allow us to share regions between guest and hypervisor to, for + // example, (i) share the use of a peripheral (mainly uart for debugging purposes), or (ii) + // share a RW page between hypervisor and guest. Although having a RO page for guest while + // RW for the hypervisor is highly useful, this MPU does not allow it. That said, in the + // case we need it in the future, we'll have to implement a mechanism for that based on + // traps. bool overlaped = false; perms_t new_perms = mem_vmpu_entry_perms(new_reg); @@ -236,9 +232,8 @@ bottom_mpid = INVALID_MPID; top_mpid = INVALID_MPID; - struct list *mpu_order_list = &cpu()->arch.profile.mpu.order.list; - list_foreach((*mpu_order_list), struct mpu_node, entry) { - + struct list* mpu_order_list = &cpu()->arch.profile.mpu.order.list; + list_foreach ((*mpu_order_list), struct mpu_node, entry) { mpid_t mpid = entry->mpid; struct mp_region overlapped_reg; @@ -246,30 +241,28 @@ if ((new_reg->base + new_reg->size) <= overlapped_reg.base) { next = mpid; - break; + break; } if (!mem_regions_overlap(new_reg, &overlapped_reg)) { - // If we are not overlapping, continue to search for overlapped - // regions until we check all entries. This should be the most - // frequent case, so the overhead for the checks on overllap - // will rarely execute. + // If we are not overlapping, continue to search for overlapped regions until we + // check all entries. This should be the most frequent case, so the overhead for + // the checks on overlap will rarely execute. prev = mpid; continue; } overlaped = true;
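The permission-bit flipping mentioned in the comment above amounts to widening one entry's EL1/EL2 view rather than programming a second region. As a rough illustration, with the struct and constant names as they appear in this file (the allowed combinations are what the checks below enforce):

/* Hedged sketch: one MPU entry, two privilege views. */
struct mpu_perms p = { .el1 = PERM_NONE, .el2 = PERM_RW }; /* hypervisor-only entry */
p.el1 = PERM_RW; /* the same entry now also grants the guest read/write access */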
if (mpu_entry_has_priv(mpid, priv)) { - // We don't allow overlapping regions of the same privilege. - // This is something that should be checked at the vmpu level, - // but we re-check it here anyway. + // We don't allow overlapping regions of the same privilege. This is something that + // should be checked at the vmpu level, but we re-check it here anyway. failed = true; break; } - // We only allow to bump up permissions if the overlapped region - // is a RO hypervisor region. Otherwise permissions have to be - // RW in both regions. We don't allow to overlap executable regions. + // We only allow to bump up permissions if the overlapped region is a RO hypervisor + // region. Otherwise permissions have to be RW in both regions. We don't allow to + // overlap executable regions. struct mpu_perms overlapped_perms = cpu()->arch.profile.mpu.perms[mpid]; struct mpu_perms overlap_perms = overlapped_perms; priv_t overlapped_priv; @@ -285,43 +278,40 @@ } if (((overlap_perms.el1 & PERM_RW) == PERM_R) && - ((overlap_perms.el2 & PERM_W) != PERM_NONE)) - { + ((overlap_perms.el2 & PERM_W) != PERM_NONE)) { - // We allow promoting read/write privielges of the hypervisor - // region to match the guest's. However, this combination - // promotes the guest privielges, which we don't allow. + // We allow promoting read/write privileges of the hypervisor region to match the + // guest's. However, this combination promotes the guest privileges, which we don't + // allow. failed = true; break; } - if ((overlap_perms.el1 & PERM_X) != (overlap_perms.el2 & PERM_X)) - { + if ((overlap_perms.el1 & PERM_X) != (overlap_perms.el2 & PERM_X)) { // Unless explicitly mapped, we don't promote execution privileges. failed = true; break; - } + } - // The Armv8-R MPU does not allow us to have different permissions - // for hypervisor and guest. So we must fail if asked to add an - // overlapping mapping with different permissions or attributes + // The Armv8-R MPU does not allow us to have different permissions for hypervisor and + // guest. So we must fail if asked to add an overlapping mapping with different + // permissions or attributes if (mpu_entry_attrs(new_reg) != mpu_entry_attrs(&overlapped_reg)) { failed = true; break; } vaddr_t new_reg_limit = new_reg->base + new_reg->size; - vaddr_t overlapped_reg_limit = - overlapped_reg.base + overlapped_reg.size; - size_t top_size = new_reg_limit >= overlapped_reg_limit ? 0 : - overlapped_reg_limit - new_reg_limit; - size_t bottom_size = new_reg->base <= overlapped_reg.base ? - 0 : new_reg->base - overlapped_reg.base; - size_t top_left = new_reg_limit <= overlapped_reg_limit ? 0 : - new_reg_limit - overlapped_reg_limit; - size_t bottom_left = new_reg->base >= overlapped_reg.base ? - 0 : overlapped_reg.base - new_reg->base; - bool subset = (new_reg->base >= overlapped_reg.base) && - (new_reg_limit <= overlapped_reg_limit); + vaddr_t overlapped_reg_limit = overlapped_reg.base + overlapped_reg.size; + size_t top_size = + new_reg_limit >= overlapped_reg_limit ? 0 : overlapped_reg_limit - new_reg_limit; + size_t bottom_size = + new_reg->base <= overlapped_reg.base ? 0 : new_reg->base - overlapped_reg.base; + size_t top_left = + new_reg_limit <= overlapped_reg_limit ? 0 : new_reg_limit - overlapped_reg_limit; + size_t bottom_left = + new_reg->base >= overlapped_reg.base ? 0 : overlapped_reg.base - new_reg->base; + bool subset = + (new_reg->base >= overlapped_reg.base) && (new_reg_limit <= overlapped_reg_limit); bool superset = (bottom_left > 0) || (top_left > 0);
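Concrete numbers make the four quantities above easier to follow. If the existing entry spans [0x1000, 0x5000) and the new request spans [0x2000, 0x3000), the request is a strict subset and the entry is split around it:

/* Hedged sketch: mpu_map's split arithmetic for a fully contained request. */
unsigned long old_base = 0x1000, old_limit = 0x5000; /* overlapped region   */
unsigned long new_base = 0x2000, new_limit = 0x3000; /* region being mapped */
unsigned long bottom_size = new_base - old_base;     /* 0x1000 kept below   */
unsigned long top_size = old_limit - new_limit;      /* 0x2000 kept above   */
/* top_left and bottom_left are both 0, so subset holds and size_left drops to 0. */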
struct mp_region middle; @@ -332,15 +322,15 @@ if (bottom_size > 0) { bottom_mpid = mpu_entry_allocate(); - if(bottom_mpid == INVALID_MPID) { + if (bottom_mpid == INVALID_MPID) { failed = true; break; } } - + if (top_size > 0) { top_mpid = mpu_entry_allocate(); - if(top_mpid == INVALID_MPID) { + if (top_mpid == INVALID_MPID) { failed = true; break; } @@ -348,7 +338,7 @@ mpu_entry_update_priv_perms(priv, mpid, new_perms); mpu_entry_modify(mpid, &middle); - + if (bottom_size > 0) { struct mp_region bottom; bottom.base = overlapped_reg.base; @@ -389,7 +379,7 @@ size_left = (top_left + bottom_left); } else if (subset) { size_left = 0; - } else { + } else { size_left -= middle.size; } @@ -397,11 +387,10 @@ } if (!overlaped && !failed) { - mpid_t merge_mpid = INVALID_MPID; size_t mem_size = new_reg->size; - struct mpu_perms *prev_perms = &cpu()->arch.profile.mpu.perms[prev]; - struct mpu_perms *next_perms = &cpu()->arch.profile.mpu.perms[next]; + struct mpu_perms* prev_perms = &cpu()->arch.profile.mpu.perms[prev]; + struct mpu_perms* next_perms = &cpu()->arch.profile.mpu.perms[next]; struct mpu_perms new_reg_perms; if (priv == PRIV_VM) { new_reg_perms.el1 = new_perms; @@ -412,10 +401,9 @@ } /** - * Check if we can merge the current region with the region - * right before and/or right after. This can only be done if - * they are adjacent and have the same exect flags (i.e. - * permissions and memory attribtues). + * Check if we can merge the current region with the region right before and/or right + * after. This can only be done if they are adjacent and have the same exact flags + * (i.e. permissions and memory attributes). */ if ((prev != INVALID_MPID) && !mpu_entry_locked(prev)) { @@ -423,8 +411,7 @@ mpu_entry_get_region(prev, &r); if (((r.base + r.size) == new_reg->base) && (mpu_entry_attrs(&r) == mpu_entry_attrs(new_reg)) && - (mpu_perms_equivalent(prev_perms, &new_reg_perms))) - { + (mpu_perms_equivalent(prev_perms, &new_reg_perms))) { merge_mpid = prev; new_reg->base = r.base; new_reg->size += r.size; @@ -436,8 +423,7 @@ mpu_entry_get_region(next, &r); if ((new_reg->base + new_reg->size) == r.base && (mpu_entry_attrs(&r) == mpu_entry_attrs(new_reg)) && - (mpu_perms_equivalent(next_perms, &new_reg_perms))) - { + (mpu_perms_equivalent(next_perms, &new_reg_perms))) { if (merge_mpid == INVALID_MPID) { merge_mpid = next; } else { @@ -448,8 +434,7 @@ }
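Numerically, the two merge probes above reduce to an exact-adjacency test plus a configuration match:

/* Hedged sketch: the adjacency half of the merge condition. */
unsigned long prev_base = 0x08000000UL, prev_size = 0x20000UL;
unsigned long new_base = 0x08020000UL;
bool adjacent = (prev_base + prev_size) == new_base; /* true: the regions touch exactly */
/* mpu_map additionally demands equal attributes and equivalent EL1/EL2 perms. */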
/** - * If we can merge the region do it. Otherwise, allocate a new - * entry and set it. + * If we can merge the region do it. Otherwise, allocate a new entry and set it. */ if (merge_mpid != INVALID_MPID) { mpu_entry_update_priv_perms(priv, merge_mpid, new_perms); @@ -480,16 +465,15 @@ } bool mpu_unmap(priv_t priv, struct mp_region* mpr) -{ +{ size_t size_left = mpr->size; - while(size_left > 0) { - + while (size_left > 0) { mpid_t mpid = INVALID_MPID; struct mp_region reg; - struct list *mpu_order_list = &cpu()->arch.profile.mpu.order.list; - list_foreach((*mpu_order_list), struct mpu_node, entry) { + struct list* mpu_order_list = &cpu()->arch.profile.mpu.order.list; + list_foreach ((*mpu_order_list), struct mpu_node, entry) { mpu_entry_get_region(entry->mpid, &reg); if ((mpr->base + mpr->size) < reg.base) { @@ -513,12 +497,11 @@ vaddr_t mpr_limit = mpr->base + mpr->size; vaddr_t reg_limit = reg.base + reg.size; size_t top_size = mpr_limit >= reg_limit ? 0 : reg_limit - mpr_limit; - size_t bottom_size = mpr->base <= reg.base ? 0 : mpr->base -reg.base; + size_t bottom_size = mpr->base <= reg.base ? 0 : mpr->base - reg.base; struct mpu_perms orig_perms = cpu()->arch.profile.mpu.perms[mpid]; mpu_entry_update_priv_perms(priv, mpid, PERM_NONE); - bool update_perms = - !((cpu()->arch.profile.mpu.perms[mpid].el1 == PERM_NONE) && + bool update_perms = !((cpu()->arch.profile.mpu.perms[mpid].el1 == PERM_NONE) && (cpu()->arch.profile.mpu.perms[mpid].el2 == PERM_NONE)); if (update_perms) { @@ -550,40 +533,35 @@ size_t overlap_size = reg.size - top_size - bottom_size; size_left -= overlap_size; - } - // TODO: check if we can merge new regions after unmapping a given - // privilege from a shared region + // TODO: check if we can merge new regions after unmapping a given privilege from a shared + // region return size_left == 0; } -void mpu_init() { - +void mpu_init() +{ bitmap_clear_consecutive(cpu()->arch.profile.mpu.bitmap, 0, mpu_num_entries()); list_init(&cpu()->arch.profile.mpu.order.list); for (mpid_t mpid = 0; mpid < mpu_num_entries(); mpid++) { - cpu()->arch.profile.mpu.order.node[mpid].mpid = mpid; - - if (mpu_entry_valid(mpid)) { + if (mpu_entry_valid(mpid)) { bitmap_set(cpu()->arch.profile.mpu.bitmap, mpid); bitmap_set(cpu()->arch.profile.mpu.locked, mpid); /** - * We are assuming all initial regions have all hyp perms. - * This might change in the future. + * We are assuming all initial regions have all hyp perms. This might change in the + * future. */ cpu()->arch.profile.mpu.perms[mpid].el1 = PERM_NONE; cpu()->arch.profile.mpu.perms[mpid].el2 = PERM_RWX; - list_insert_ordered(&cpu()->arch.profile.mpu.order.list, - (node_t*)&cpu()->arch.profile.mpu.order.node[mpid], - mpu_node_cmp); + list_insert_ordered(&cpu()->arch.profile.mpu.order.list, + (node_t*)&cpu()->arch.profile.mpu.order.node[mpid], mpu_node_cmp); } - } }
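Taken together, the mpu.c hunks above give a map/unmap API over plain region descriptors. A usage sketch follows; the field names match this file, while PRIV_HYP is assumed as the hypervisor-side counterpart of the PRIV_VM constant that is actually visible in these hunks:

/* Hedged sketch: a hypervisor-private mapping round trip. */
struct mp_region reg = {
    .base = 0x08100000UL, /* 64-byte aligned, per the MPU granularity */
    .size = 0x10000UL,
    .mem_flags = PTE_HYP_FLAGS,
};
if (mpu_map(PRIV_HYP, &reg)) { /* PRIV_HYP assumed, not shown in this hunk */
    /* ... use the region ... */
    mpu_unmap(PRIV_HYP, &reg);
}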
diff --git a/src/arch/armv8/armv8-r/psci.c b/src/arch/armv8/armv8-r/psci.c index e8658e98e..6e13691bf 100644 --- a/src/arch/armv8/armv8-r/psci.c +++ b/src/arch/armv8/armv8-r/psci.c @@ -6,18 +6,19 @@ #include /** - * In Armv8-R systems there is no standard firmware readily available that - * implements PSCI for each platform. Therefore, we provide a minimal - * implementation of the necessary PSCI functions. - * Note this might change in the future, or we might decide to implement - * PSCI in Bao itself for each platform. + * In Armv8-R systems there is no standard firmware readily available that implements PSCI for each + * platform. Therefore, we provide a minimal implementation of the necessary PSCI functions. Note + * this might change in the future, or we might decide to implement PSCI in Bao itself for each + * platform. */ -int32_t psci_standby() { - asm volatile ("wfi"); +int32_t psci_standby() +{ + asm volatile("wfi"); return PSCI_E_SUCCESS; } -int32_t psci_power_down(enum wakeup_reason reason) { +int32_t psci_power_down(enum wakeup_reason reason) +{ return psci_standby(); } diff --git a/src/arch/armv8/armv8-r/vm.c b/src/arch/armv8/armv8-r/vm.c index a4bb26258..fbbfed186 100644 --- a/src/arch/armv8/armv8-r/vm.c +++ b/src/arch/armv8/armv8-r/vm.c @@ -7,8 +7,9 @@ #include #include -void vcpu_arch_profile_init(struct vcpu* vcpu, struct vm* vm) { - sysreg_vsctlr_el2_write(((vm->id << VSCTLR_EL2_VMID_OFF) & VSCTLR_EL2_VMID_MSK)); +void vcpu_arch_profile_init(struct vcpu* vcpu, struct vm* vm) +{ + sysreg_vsctlr_el2_write((vm->id << VSCTLR_EL2_VMID_OFF) & VSCTLR_EL2_VMID_MSK); if (DEFINED(MEM_PROT_MPU) && DEFINED(AARCH64) && vm->config->platform.mmu) { uint64_t vtcr = VTCR_MSA; diff --git a/src/arch/armv8/armv8-r/vmm.c b/src/arch/armv8/armv8-r/vmm.c index 6d4f4718e..17498f958 100644 --- a/src/arch/armv8/armv8-r/vmm.c +++ b/src/arch/armv8/armv8-r/vmm.c @@ -15,15 +15,13 @@ static uint32_t timer_freq = 0; void vmm_arch_profile_init() { if (cpu()->id == CPU_MASTER) { - /** - * Since there is no firmware in cortex-r platforms, we need to - * initialize the system counter. + /** + * Since there is no firmware in cortex-r platforms, we need to initialize the system + * counter. */ - volatile struct generic_timer_cntctrl *timer_ctl; - timer_ctl = (struct generic_timer_cntctrl* ) mem_alloc_map_dev( - &cpu()->as, SEC_HYP_PRIVATE, - platform.arch.generic_timer.base_addr, - platform.arch.generic_timer.base_addr, + volatile struct generic_timer_cntctrl* timer_ctl; + timer_ctl = (struct generic_timer_cntctrl*)mem_alloc_map_dev(&cpu()->as, SEC_HYP_PRIVATE, + platform.arch.generic_timer.base_addr, platform.arch.generic_timer.base_addr, sizeof(struct generic_timer_cntctrl)); timer_ctl->CNTCR |= GENERIC_TIMER_CNTCTL_CNTCR_EN; @@ -31,8 +29,7 @@ timer_freq = timer_ctl->CNTDIF0; - mem_unmap(&cpu()->as, (vaddr_t) timer_ctl, - sizeof(struct generic_timer_cntctrl), false); + mem_unmap(&cpu()->as, (vaddr_t)timer_ctl, sizeof(struct generic_timer_cntctrl), false); } cpu_sync_barrier(&cpu_glb_sync); diff --git a/src/arch/armv8/asm_defs.c b/src/arch/armv8/asm_defs.c index d24e4fae9..402f10930 100644 --- a/src/arch/armv8/asm_defs.c +++ b/src/arch/armv8/asm_defs.c @@ -34,5 +34,5 @@ void platform_defines() DEFINE_OFFSET(PLAT_ARCH_OFF, struct platform, arch); DEFINE_OFFSET(PLAT_ARCH_CLUSTERS_OFF, struct arch_platform, clusters); DEFINE_OFFSET(PLAT_CLUSTERS_CORES_NUM_OFF, struct clusters, core_num); - DEFINE_SIZE(PLAT_CLUSTERS_CORES_NUM_SIZE, ((struct clusters*)NULL)->core_num[0]); + DEFINE_SIZE(PLAT_CLUSTERS_CORES_NUM_SIZE, ((struct clusters*)NULL)->core_num[0]); } diff --git a/src/arch/armv8/cache.c b/src/arch/armv8/cache.c index dee9ed75e..82bb8b2b4 100644 --- a/src/arch/armv8/cache.c +++ b/src/arch/armv8/cache.c @@ -9,12 +9,12 @@ #include #include -void cache_arch_enumerate(struct cache *dscrp) +void cache_arch_enumerate(struct cache* dscrp) { - if(platform.cache.lvls != 0 ) { + if (platform.cache.lvls != 0) { /** - * No need to probe cache registers, cache topology is described - * in the platform descrption. + * No need to probe cache registers, cache topology is described in the platform + * description.
*/ *dscrp = platform.cache; } @@ -26,75 +26,73 @@ void cache_arch_enumerate(struct cache *dscrp) dscrp->lvls = 0; clidr = sysreg_clidr_el1_read(); - for(size_t i = 0; i < CLIDR_CTYPE_NUM; i++){ - if((temp = bit64_extract(clidr, i*CLIDR_CTYPE_LEN, CLIDR_CTYPE_LEN)) != 0){ + for (size_t i = 0; i < CLIDR_CTYPE_NUM; i++) { + if ((temp = bit64_extract(clidr, i * CLIDR_CTYPE_LEN, CLIDR_CTYPE_LEN)) != 0) { dscrp->lvls++; - switch(temp){ + switch (temp) { case CLIDR_CTYPE_IO: - dscrp->type[i] = INSTRUCTION; - break; + dscrp->type[i] = INSTRUCTION; + break; case CLIDR_CTYPE_DO: - dscrp->type[i] = DATA; - break; + dscrp->type[i] = DATA; + break; case CLIDR_CTYPE_SP: - dscrp->type[i] = SEPARATE; - break; + dscrp->type[i] = SEPARATE; + break; case CLIDR_CTYPE_UN: - dscrp->type[i] = UNIFIED; - break; + dscrp->type[i] = UNIFIED; + break; } } else { break; } } - for(size_t lvl = 0; lvl < dscrp->lvls; lvl++){ - + for (size_t lvl = 0; lvl < dscrp->lvls; lvl++) { uint64_t csselr = 0; uint64_t ccsidr = 0; uint64_t ctr = 0; csselr = bit64_insert(csselr, lvl, CSSELR_LVL_OFF, CSSELR_LVL_LEN); - if(dscrp->type[lvl] == UNIFIED && first_unified == false){ + if (dscrp->type[lvl] == UNIFIED && first_unified == false) { first_unified = true; dscrp->min_shared_lvl = lvl; } - if(dscrp->type[lvl] != INSTRUCTION){ + if (dscrp->type[lvl] != INSTRUCTION) { csselr = bit64_clear(csselr, CSSELR_IND_BIT); sysreg_csselr_el1_write(csselr); ccsidr = sysreg_ccsidr_el1_read(); - dscrp->line_size[lvl][0] = 1UL << (bit64_extract(ccsidr, - CCSIDR_LINESIZE_OFF, CCSIDR_LINESIZE_LEN) + 4); - dscrp->assoc[lvl][0] = bit64_extract(ccsidr, CCSIDR_ASSOCIATIVITY_OFF, - CCSIDR_ASSOCIATIVITY_LEN) + 1; - dscrp->numset[lvl][0] = bit64_extract(ccsidr, CCSIDR_NUMSETS_OFF, - CCSIDR_NUMSETS_LEN) + 1; + dscrp->line_size[lvl][0] = 1UL + << (bit64_extract(ccsidr, CCSIDR_LINESIZE_OFF, CCSIDR_LINESIZE_LEN) + 4); + dscrp->assoc[lvl][0] = + bit64_extract(ccsidr, CCSIDR_ASSOCIATIVITY_OFF, CCSIDR_ASSOCIATIVITY_LEN) + 1; + dscrp->numset[lvl][0] = + bit64_extract(ccsidr, CCSIDR_NUMSETS_OFF, CCSIDR_NUMSETS_LEN) + 1; dscrp->indexed[lvl][0] = PIPT; } - - if(dscrp->type[lvl] == SEPARATE || dscrp->type[lvl] == INSTRUCTION){ + + if (dscrp->type[lvl] == SEPARATE || dscrp->type[lvl] == INSTRUCTION) { csselr = bit64_set(csselr, CSSELR_IND_BIT); sysreg_csselr_el1_write(csselr); ccsidr = sysreg_ccsidr_el1_read(); - dscrp->line_size[lvl][1] = 1UL << (bit64_extract(ccsidr, - CCSIDR_LINESIZE_OFF, CCSIDR_LINESIZE_LEN) + 4); - dscrp->assoc[lvl][1] = bit64_extract(ccsidr, CCSIDR_ASSOCIATIVITY_OFF, - CCSIDR_ASSOCIATIVITY_LEN) + 1; - dscrp->numset[lvl][1] = bit64_extract(ccsidr, CCSIDR_NUMSETS_OFF, - CCSIDR_NUMSETS_LEN) + 1; + dscrp->line_size[lvl][1] = 1UL + << (bit64_extract(ccsidr, CCSIDR_LINESIZE_OFF, CCSIDR_LINESIZE_LEN) + 4); + dscrp->assoc[lvl][1] = + bit64_extract(ccsidr, CCSIDR_ASSOCIATIVITY_OFF, CCSIDR_ASSOCIATIVITY_LEN) + 1; + dscrp->numset[lvl][1] = + bit64_extract(ccsidr, CCSIDR_NUMSETS_OFF, CCSIDR_NUMSETS_LEN) + 1; ctr = sysreg_ctr_el0_read(); - if((ctr & BIT64_MASK(CTR_L1LP_OFF, CTR_L1LP_LEN)) == CTR_L1LP_PIPT){ + if ((ctr & BIT64_MASK(CTR_L1LP_OFF, CTR_L1LP_LEN)) == CTR_L1LP_PIPT) { dscrp->indexed[lvl][1] = PIPT; } else { dscrp->indexed[lvl][1] = VIPT; } } - } } @@ -102,10 +100,9 @@ void cache_flush_range(vaddr_t base, size_t size) { vaddr_t cache_addr = base; uint64_t ctr = sysreg_ctr_el0_read(); - size_t min_line_size = 1UL << bit64_extract(ctr, CTR_DMINLINE_OFF, - CTR_DMINLINE_LEN); + size_t min_line_size = 1UL << bit64_extract(ctr, CTR_DMINLINE_OFF, 
CTR_DMINLINE_LEN); - while(cache_addr < (base + size)){ + while (cache_addr < (base + size)) { arm_dc_civac(cache_addr); cache_addr += min_line_size; } diff --git a/src/arch/armv8/cpu.c b/src/arch/armv8/cpu.c index 701aa4530..5dc834002 100644 --- a/src/arch/armv8/cpu.c +++ b/src/arch/armv8/cpu.c @@ -12,7 +12,7 @@ cpuid_t CPU_MASTER __attribute__((section(".data"))); /* Perform architecture dependent cpu cores initializations */ void cpu_arch_init(cpuid_t cpuid, paddr_t load_addr) -{ +{ cpu()->arch.mpidr = sysreg_mpidr_el1_read(); cpu_arch_profile_init(cpuid, load_addr); } @@ -27,14 +27,12 @@ void cpu_arch_idle() cpu_arch_profile_idle(); /* - * In case the profile implementation does not jump to a predefined wake-up - * point and just returns from the profile, manually rewind stack and jump - * to idle wake up. Therefore, we should not return after this point. + * In case the profile implementation does not jump to a predefined wake-up point and just + * returns from the profile, manually rewind stack and jump to idle wake up. Therefore, we + * should not return after this point. */ - asm volatile( - "mov sp, %0\n\r" - "b cpu_idle_wakeup\n\r" - ::"r"(&cpu()->stack[STACK_SIZE])); + asm volatile("mov sp, %0\n\r" + "b cpu_idle_wakeup\n\r" ::"r"(&cpu()->stack[STACK_SIZE])); ERROR("returned from idle wake up"); } diff --git a/src/arch/armv8/gic.c b/src/arch/armv8/gic.c index f2027a6af..e4217e137 100644 --- a/src/arch/armv8/gic.c +++ b/src/arch/armv8/gic.c @@ -9,18 +9,17 @@ #include #elif (GIC_VERSION == GICV3) #include -#else +#else #error "unknown GIV version " GIC_VERSION #endif - #include #include #include #include #include -volatile struct gicd_hw *gicd; +volatile struct gicd_hw* gicd; spinlock_t gicd_lock; void gicd_init() @@ -30,8 +29,7 @@ void gicd_init() /* Bring distributor to known state */ for (size_t i = GIC_NUM_PRIVINT_REGS; i < GIC_NUM_INT_REGS(int_num); i++) { /** - * Make sure all interrupts are not enabled, non pending, - * non active. + * Make sure all interrupts are not enabled, non pending, non active. 
*/ gicd->IGROUPR[i] = -1; gicd->ICENABLER[i] = -1; @@ -40,15 +38,13 @@ void gicd_init() } /* All interrupts have lowest priority possible by default */ - for (size_t i = GIC_NUM_PRIO_REGS(GIC_CPU_PRIV); - i < GIC_NUM_PRIO_REGS(int_num); i++) { + for (size_t i = GIC_NUM_PRIO_REGS(GIC_CPU_PRIV); i < GIC_NUM_PRIO_REGS(int_num); i++) { gicd->IPRIORITYR[i] = -1; } if (GIC_VERSION == GICV2) { /* No CPU targets for any interrupt by default */ - for (size_t i = GIC_NUM_TARGET_REGS(GIC_CPU_PRIV); - i < GIC_NUM_TARGET_REGS(int_num); i++) { + for (size_t i = GIC_NUM_TARGET_REGS(GIC_CPU_PRIV); i < GIC_NUM_TARGET_REGS(int_num); i++) { gicd->ITARGETSR[i] = 0; } @@ -68,8 +64,7 @@ void gicd_init() /* No need to setup gicd->NSACR as all interrupts are setup to group 1 */ - if(!interrupts_reserve(platform.arch.gic.maintenance_id, - gic_maintenance_handler)) { + if (!interrupts_reserve(platform.arch.gic.maintenance_id, gic_maintenance_handler)) { ERROR("Failed to reserve GIC maintenance interrupt"); } } @@ -102,7 +97,9 @@ void gic_handle() if (id < GIC_FIRST_SPECIAL_INTID) { enum irq_res res = interrupts_handle(id); gicc_eoir(ack); - if (res == HANDLED_BY_HYP) gicc_dir(ack); + if (res == HANDLED_BY_HYP) { + gicc_dir(ack); + } } } @@ -111,8 +108,7 @@ uint8_t gicd_get_prio(irqid_t int_id) size_t reg_ind = GIC_PRIO_REG(int_id); size_t off = GIC_PRIO_OFF(int_id); - uint8_t prio = - gicd->IPRIORITYR[reg_ind] >> off & BIT32_MASK(off, GIC_PRIO_BITS); + uint8_t prio = gicd->IPRIORITYR[reg_ind] >> off & BIT32_MASK(off, GIC_PRIO_BITS); return prio; } @@ -138,8 +134,7 @@ void gicd_set_prio(irqid_t int_id, uint8_t prio) spin_lock(&gicd_lock); - gicd->IPRIORITYR[reg_ind] = - (gicd->IPRIORITYR[reg_ind] & ~mask) | ((prio << off) & mask); + gicd->IPRIORITYR[reg_ind] = (gicd->IPRIORITYR[reg_ind] & ~mask) | ((prio << off) & mask); spin_unlock(&gicd_lock); } diff --git a/src/arch/armv8/gicv2.c b/src/arch/armv8/gicv2.c index 4d93ce7e9..e5aa98ecd 100644 --- a/src/arch/armv8/gicv2.c +++ b/src/arch/armv8/gicv2.c @@ -13,11 +13,11 @@ #include #include -extern volatile struct gicd_hw *gicd; +extern volatile struct gicd_hw* gicd; extern spinlock_t gicd_lock; -volatile struct gicc_hw *gicc; -volatile struct gich_hw *gich; +volatile struct gicc_hw* gicc; +volatile struct gich_hw* gich; static cpuid_t gic_cpu_map[GIC_MAX_TARGETS]; @@ -38,17 +38,17 @@ static inline void gicc_init() gicc->CTLR |= GICC_CTLR_EN_BIT | GICC_CTLR_EOImodeNS_BIT; gich->HCR |= GICH_HCR_LRENPIE_BIT; - + uint32_t sgi_targets = gicd->ITARGETSR[0] & BIT32_MASK(0, GIC_TARGET_BITS); ssize_t gic_cpu_id = bit32_ffs(sgi_targets); - if(gic_cpu_id < 0) { + if (gic_cpu_id < 0) { ERROR("cant find gic cpu id"); } gic_cpu_map[cpu()->id] = (cpuid_t)gic_cpu_id; } -void gicc_save_state(struct gicc_state *state) +void gicc_save_state(struct gicc_state* state) { state->CTLR = gicc->CTLR; state->PMR = gicc->PMR; @@ -69,7 +69,7 @@ void gicc_save_state(struct gicc_state *state) } } -void gicc_restore_state(struct gicc_state *state) +void gicc_restore_state(struct gicc_state* state) { gicc->CTLR = state->CTLR; gicc->PMR = state->PMR; @@ -94,8 +94,7 @@ void gic_cpu_init() { for (size_t i = 0; i < GIC_NUM_INT_REGS(GIC_CPU_PRIV); i++) { /** - * Make sure all private interrupts are not enabled, non pending, - * non active. + * Make sure all private interrupts are not enabled, non pending, non active. 
*/ gicd->ICENABLER[i] = -1; gicd->ICPENDR[i] = -1; @@ -116,27 +115,27 @@ void gic_cpu_init() void gic_map_mmio() { - gicc = (void*) mem_alloc_map_dev(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, + gicc = (void*)mem_alloc_map_dev(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, platform.arch.gic.gicc_addr, NUM_PAGES(sizeof(struct gicc_hw))); - gich = (void*) mem_alloc_map_dev(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, + gich = (void*)mem_alloc_map_dev(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, platform.arch.gic.gich_addr, NUM_PAGES(sizeof(struct gich_hw))); - gicd = (void*) mem_alloc_map_dev(&cpu()->as,SEC_HYP_GLOBAL, INVALID_VA, - platform.arch.gic.gicd_addr, NUM_PAGES(sizeof(struct gicd_hw))); + gicd = (void*)mem_alloc_map_dev(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, + platform.arch.gic.gicd_addr, NUM_PAGES(sizeof(struct gicd_hw))); } void gic_send_sgi(cpuid_t cpu_target, irqid_t sgi_num) { if (sgi_num < GIC_MAX_SGIS && cpu_target < GIC_MAX_TARGETS) { - gicd->SGIR = - (1UL << (GICD_SGIR_CPUTRGLST_OFF + gic_cpu_map[cpu_target])) | + gicd->SGIR = (1UL << (GICD_SGIR_CPUTRGLST_OFF + gic_cpu_map[cpu_target])) | (sgi_num & GICD_SGIR_SGIINTID_MSK); } } -static inline uint8_t gic_translate_cpu_to_trgt(uint8_t cpu_targets) { +static inline uint8_t gic_translate_cpu_to_trgt(uint8_t cpu_targets) +{ uint8_t gic_targets = 0; - for(size_t i = 0; i < GIC_MAX_TARGETS; i++) { - if((1 << i) & cpu_targets) { + for (size_t i = 0; i < GIC_MAX_TARGETS; i++) { + if ((1 << i) & cpu_targets) { gic_targets |= (1 << gic_cpu_map[i]); } } @@ -151,7 +150,7 @@ void gicd_set_trgt(irqid_t int_id, uint8_t cpu_targets) spin_lock(&gicd_lock); - gicd->ITARGETSR[reg_ind] = (gicd->ITARGETSR[reg_ind] & ~mask) | + gicd->ITARGETSR[reg_ind] = (gicd->ITARGETSR[reg_ind] & ~mask) | ((gic_translate_cpu_to_trgt(cpu_targets) << off) & mask); spin_unlock(&gicd_lock); diff --git a/src/arch/armv8/gicv3.c b/src/arch/armv8/gicv3.c index 002fc1de1..dba0dba87 100644 --- a/src/arch/armv8/gicv3.c +++ b/src/arch/armv8/gicv3.c @@ -12,8 +12,8 @@ #include #include -extern volatile struct gicd_hw *gicd; -volatile struct gicr_hw *gicr; +extern volatile struct gicd_hw* gicd; +volatile struct gicr_hw* gicr; static spinlock_t gicd_lock = SPINLOCK_INITVAL; static spinlock_t gicr_lock = SPINLOCK_INITVAL; @@ -41,7 +41,7 @@ static inline void gicc_init() static inline void gicr_init() { gicr[cpu()->id].WAKER &= ~GICR_WAKER_ProcessorSleep_BIT; - while(gicr[cpu()->id].WAKER & GICR_WAKER_ChildrenASleep_BIT); + while (gicr[cpu()->id].WAKER & GICR_WAKER_ChildrenASleep_BIT) { } gicr[cpu()->id].IGROUPR0 = -1; gicr[cpu()->id].ICENABLER0 = -1; @@ -53,7 +53,7 @@ static inline void gicr_init() } } -void gicc_save_state(struct gicc_state *state) +void gicc_save_state(struct gicc_state* state) { state->PMR = sysreg_icc_pmr_el1_read(); state->BPR = sysreg_icc_bpr1_el1_read(); @@ -69,7 +69,7 @@ void gicc_save_state(struct gicc_state *state) } } -void gicc_restore_state(struct gicc_state *state) +void gicc_restore_state(struct gicc_state* state) { sysreg_icc_sre_el2_write(ICC_SRE_SRE_BIT); sysreg_icc_ctlr_el1_write(ICC_CTLR_EOIMode_BIT); @@ -96,11 +96,10 @@ void gic_cpu_init() void gic_map_mmio() { - gicd = (void*) mem_alloc_map_dev(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, + gicd = (void*)mem_alloc_map_dev(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, platform.arch.gic.gicd_addr, NUM_PAGES(sizeof(struct gicd_hw))); - gicr = (void*) mem_alloc_map_dev(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, - platform.arch.gic.gicr_addr, - NUM_PAGES(sizeof(struct gicr_hw) * PLAT_CPU_NUM)); + gicr = 
(void*)mem_alloc_map_dev(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, + platform.arch.gic.gicr_addr, NUM_PAGES(sizeof(struct gicr_hw) * PLAT_CPU_NUM)); } void gicr_set_prio(irqid_t int_id, uint8_t prio, cpuid_t gicr_id) @@ -124,8 +123,7 @@ uint8_t gicr_get_prio(irqid_t int_id, cpuid_t gicr_id) spin_lock(&gicr_lock); - uint8_t prio = - gicr[gicr_id].IPRIORITYR[reg_ind] >> off & BIT32_MASK(off, GIC_PRIO_BITS); + uint8_t prio = gicr[gicr_id].IPRIORITYR[reg_ind] >> off & BIT32_MASK(off, GIC_PRIO_BITS); spin_unlock(&gicr_lock); @@ -141,11 +139,9 @@ void gicr_set_icfgr(irqid_t int_id, uint8_t cfg, cpuid_t gicr_id) spin_lock(&gicr_lock); if (reg_ind == 0) { - gicr[gicr_id].ICFGR0 = - (gicr[gicr_id].ICFGR0 & ~mask) | ((cfg << off) & mask); + gicr[gicr_id].ICFGR0 = (gicr[gicr_id].ICFGR0 & ~mask) | ((cfg << off) & mask); } else { - gicr[gicr_id].ICFGR1 = - (gicr[gicr_id].ICFGR1 & ~mask) | ((cfg << off) & mask); + gicr[gicr_id].ICFGR1 = (gicr[gicr_id].ICFGR1 & ~mask) | ((cfg << off) & mask); } spin_unlock(&gicr_lock); @@ -198,16 +194,19 @@ void gicr_set_enable(irqid_t int_id, bool en, cpuid_t gicr_id) uint32_t bit = GIC_INT_MASK(int_id); spin_lock(&gicr_lock); - if (en) + if (en) { gicr[gicr_id].ISENABLER0 = bit; - else + } else { gicr[gicr_id].ICENABLER0 = bit; + } spin_unlock(&gicr_lock); } void gicd_set_route(irqid_t int_id, unsigned long route) { - if (gic_is_priv(int_id)) return; + if (gic_is_priv(int_id)) { + return; + } spin_lock(&gicd_lock); @@ -222,8 +221,7 @@ void gic_send_sgi(cpuid_t cpu_target, irqid_t sgi_num) unsigned long mpidr = cpu_id_to_mpidr(cpu_target) & MPIDR_AFF_MSK; /* We only support two affinity levels */ uint64_t sgi = (MPIDR_AFF_LVL(mpidr, 1) << ICC_SGIR_AFF1_OFFSET) | - (1UL << MPIDR_AFF_LVL(mpidr, 0)) | - (sgi_num << ICC_SGIR_SGIINTID_OFF); + (1UL << MPIDR_AFF_LVL(mpidr, 0)) | (sgi_num << ICC_SGIR_SGIINTID_OFF); sysreg_icc_sgi1r_el1_write(sgi); } } diff --git a/src/arch/armv8/inc/arch/fences.h b/src/arch/armv8/inc/arch/fences.h index 3c2298e94..b4281e44f 100644 --- a/src/arch/armv8/inc/arch/fences.h +++ b/src/arch/armv8/inc/arch/fences.h @@ -12,7 +12,7 @@ #define DSB(shdmn) asm volatile("dsb " XSTR(shdmn) "\n\t" ::: "memory") -#define ISB() asm volatile("isb\n\t" ::: "memory") +#define ISB() asm volatile("isb\n\t" ::: "memory") static inline void fence_ord_write() { diff --git a/src/arch/armv8/inc/arch/generic_timer.h b/src/arch/armv8/inc/arch/generic_timer.h index 57c47207b..ab78116ea 100644 --- a/src/arch/armv8/inc/arch/generic_timer.h +++ b/src/arch/armv8/inc/arch/generic_timer.h @@ -8,16 +8,16 @@ #include -#define GENERIC_TIMER_CNTCTL_CNTCR_EN (0x1) +#define GENERIC_TIMER_CNTCTL_CNTCR_EN (0x1) -struct generic_timer_cntctrl { +struct generic_timer_cntctrl { uint32_t CNTCR; uint32_t CNTSR; uint64_t CNTCV; - uint8_t res0[0x20-0x10]; + uint8_t res0[0x20 - 0x10]; uint32_t CNTDIF0; uint32_t CNTDIF[0]; - uint8_t res1[0xfd0-0x24]; + uint8_t res1[0xfd0 - 0x24]; uint32_t CounterID[12]; } __attribute__((packed, aligned(PAGE_SIZE))); diff --git a/src/arch/armv8/inc/arch/gic.h b/src/arch/armv8/inc/arch/gic.h index 3105723e3..a4c2ce83e 100644 --- a/src/arch/armv8/inc/arch/gic.h +++ b/src/arch/armv8/inc/arch/gic.h @@ -12,85 +12,82 @@ #include #include -#define GICV2 (2) -#define GICV3 (3) - -#define GIC_FIRST_SPECIAL_INTID (1020) -#define GIC_MAX_INTERUPTS 1024 -#define GIC_MAX_VALID_INTERRUPTS (GIC_FIRST_SPECIAL_INTID) -#define GIC_MAX_SGIS 16 -#define GIC_MAX_PPIS 16 -#define GIC_CPU_PRIV (GIC_MAX_SGIS + GIC_MAX_PPIS) -#define GIC_MAX_SPIS (GIC_MAX_INTERUPTS - GIC_CPU_PRIV) -#define 
GIC_PRIO_BITS 8 -#define GIC_TARGET_BITS 8 -#define GIC_MAX_TARGETS GIC_TARGET_BITS -#define GIC_CONFIG_BITS 2 -#define GIC_SEC_BITS 2 -#define GIC_SGI_BITS 8 -#define GICD_IROUTER_INV (~MPIDR_AFF_MSK) -#define GIC_LOWEST_PRIO (0xff) - -#define GIC_INT_REG(NINT) (NINT / (sizeof(uint32_t) * 8)) -#define GIC_INT_MASK(NINT) (1U << NINT % (sizeof(uint32_t) * 8)) -#define GIC_NUM_INT_REGS(NINT) GIC_INT_REG(NINT) -#define GIC_NUM_PRIVINT_REGS (GIC_CPU_PRIV / (sizeof(uint32_t) * 8)) - -#define GIC_PRIO_REG(NINT) ((NINT * GIC_PRIO_BITS) / (sizeof(uint32_t) * 8)) -#define GIC_NUM_PRIO_REGS(NINT) GIC_PRIO_REG(NINT) -#define GIC_PRIO_OFF(NINT) (NINT * GIC_PRIO_BITS) % (sizeof(uint32_t) * 8) - -#define GIC_TARGET_REG(NINT) ((NINT * GIC_TARGET_BITS) / (sizeof(uint32_t) * 8)) +#define GICV2 (2) +#define GICV3 (3) + +#define GIC_FIRST_SPECIAL_INTID (1020) +#define GIC_MAX_INTERUPTS 1024 +#define GIC_MAX_VALID_INTERRUPTS (GIC_FIRST_SPECIAL_INTID) +#define GIC_MAX_SGIS 16 +#define GIC_MAX_PPIS 16 +#define GIC_CPU_PRIV (GIC_MAX_SGIS + GIC_MAX_PPIS) +#define GIC_MAX_SPIS (GIC_MAX_INTERUPTS - GIC_CPU_PRIV) +#define GIC_PRIO_BITS 8 +#define GIC_TARGET_BITS 8 +#define GIC_MAX_TARGETS GIC_TARGET_BITS +#define GIC_CONFIG_BITS 2 +#define GIC_SEC_BITS 2 +#define GIC_SGI_BITS 8 +#define GICD_IROUTER_INV (~MPIDR_AFF_MSK) +#define GIC_LOWEST_PRIO (0xff) + +#define GIC_INT_REG(NINT) (NINT / (sizeof(uint32_t) * 8)) +#define GIC_INT_MASK(NINT) (1U << NINT % (sizeof(uint32_t) * 8)) +#define GIC_NUM_INT_REGS(NINT) GIC_INT_REG(NINT) +#define GIC_NUM_PRIVINT_REGS (GIC_CPU_PRIV / (sizeof(uint32_t) * 8)) + +#define GIC_PRIO_REG(NINT) ((NINT * GIC_PRIO_BITS) / (sizeof(uint32_t) * 8)) +#define GIC_NUM_PRIO_REGS(NINT) GIC_PRIO_REG(NINT) +#define GIC_PRIO_OFF(NINT) (NINT * GIC_PRIO_BITS) % (sizeof(uint32_t) * 8) + +#define GIC_TARGET_REG(NINT) ((NINT * GIC_TARGET_BITS) / (sizeof(uint32_t) * 8)) #define GIC_NUM_TARGET_REGS(NINT) GIC_TARGET_REG(NINT) -#define GIC_TARGET_OFF(NINT) (NINT * GIC_TARGET_BITS) % (sizeof(uint32_t) * 8) +#define GIC_TARGET_OFF(NINT) (NINT * GIC_TARGET_BITS) % (sizeof(uint32_t) * 8) -#define GIC_CONFIG_REG(NINT) ((NINT * GIC_CONFIG_BITS) / (sizeof(uint32_t) * 8)) +#define GIC_CONFIG_REG(NINT) ((NINT * GIC_CONFIG_BITS) / (sizeof(uint32_t) * 8)) #define GIC_NUM_CONFIG_REGS(NINT) GIC_CONFIG_REG(NINT) -#define GIC_CONFIG_OFF(NINT) (NINT * GIC_CONFIG_BITS) % (sizeof(uint32_t) * 8) +#define GIC_CONFIG_OFF(NINT) (NINT * GIC_CONFIG_BITS) % (sizeof(uint32_t) * 8) -#define GIC_NUM_SEC_REGS(NINT) ((NINT * GIC_SEC_BITS) / (sizeof(uint32_t) * 8)) +#define GIC_NUM_SEC_REGS(NINT) ((NINT * GIC_SEC_BITS) / (sizeof(uint32_t) * 8)) -#define GIC_NUM_SGI_REGS \ - ((GIC_MAX_SGIS * GIC_SGI_BITS) / (sizeof(uint32_t) * 8)) -#define GICD_SGI_REG(NINT) (NINT / 4) -#define GICD_SGI_OFF(NINT) ((NINT % 4) * 8) +#define GIC_NUM_SGI_REGS ((GIC_MAX_SGIS * GIC_SGI_BITS) / (sizeof(uint32_t) * 8)) +#define GICD_SGI_REG(NINT) (NINT / 4) +#define GICD_SGI_OFF(NINT) ((NINT % 4) * 8) -#define GIC_NUM_APR_REGS ((1UL << (GIC_PRIO_BITS - 1)) / (sizeof(uint32_t) * 8)) -#define GIC_NUM_LIST_REGS (64) +#define GIC_NUM_APR_REGS ((1UL << (GIC_PRIO_BITS - 1)) / (sizeof(uint32_t) * 8)) +#define GIC_NUM_LIST_REGS (64) /* Distributor Control Register, GICD_CTLR */ -#define GICD_CTLR_EN_BIT (0x1) -#define GICD_CTLR_ENA_BIT (0x2) -#define GICD_CTLR_ARE_NS_BIT (0x10) +#define GICD_CTLR_EN_BIT (0x1) +#define GICD_CTLR_ENA_BIT (0x2) +#define GICD_CTLR_ARE_NS_BIT (0x10) /* Interrupt Controller Type Register, GICD_TYPER */ -#define GICD_TYPER_ITLINENUM_OFF (0) 
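The GIC_PRIO_REG and GIC_PRIO_OFF macros above pack one GIC_PRIO_BITS-wide priority field per interrupt into consecutive 32-bit IPRIORITYR words; gicd_set_prio() and gicd_get_prio() earlier in this diff use them to locate the word and bit offset for a given INTID. A minimal standalone sketch of that decomposition, assuming 8 priority bits per interrupt (PRIO_REG/PRIO_OFF are illustrative stand-ins, not part of this change):

#include <stdint.h>
#include <stdio.h>

#define PRIO_BITS 8 /* mirrors GIC_PRIO_BITS */
#define PRIO_REG(nint) (((nint) * PRIO_BITS) / (sizeof(uint32_t) * 8))
#define PRIO_OFF(nint) (((nint) * PRIO_BITS) % (sizeof(uint32_t) * 8))

int main(void)
{
    /* Four 8-bit priority fields fit in each 32-bit word, so INTID 42
     * lands in IPRIORITYR[10] at bit offset 16, i.e. bits [23:16]. */
    for (unsigned int id = 40; id < 44; id++) {
        printf("INTID %u -> IPRIORITYR[%zu], bit offset %zu\n", id, PRIO_REG(id), PRIO_OFF(id));
    }
    return 0;
}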
-#define GICD_TYPER_ITLINENUM_LEN (5) -#define GICD_TYPER_CPUNUM_OFF (5) -#define GICD_TYPER_CPUNUM_LEN (3) -#define GICD_TYPER_CPUNUM_MSK BIT32_MASK(GICD_TYPER_CPUNUM_OFF, GICD_TYPER_CPUNUM_LEN) -#define GICD_TYPER_SECUREXT_BIT (1UL << 10) -#define GICD_TYPER_LSPI_OFF (11) -#define GICD_TYPER_LSPI_LEN (6) -#define GICD_TYPER_ITLN_OFF 0 -#define GICD_TYPER_ITLN_LEN 5 -#define GICD_TYPER_ITLN_MSK BIT32_MASK(GICD_TYPER_ITLN_OFF, GICD_TYPER_ITLN_LEN) -#define GICD_TYPER_IDBITS_OFF (19) -#define GICD_TYPER_IDBITS_LEN (5) -#define GICD_TYPER_IDBITS_MSK BIT32_MASK(GICD_TYPER_IDBITS_OFF, GICD_TYPER_IDBITS_LEN) +#define GICD_TYPER_ITLINENUM_OFF (0) +#define GICD_TYPER_ITLINENUM_LEN (5) +#define GICD_TYPER_CPUNUM_OFF (5) +#define GICD_TYPER_CPUNUM_LEN (3) +#define GICD_TYPER_CPUNUM_MSK BIT32_MASK(GICD_TYPER_CPUNUM_OFF, GICD_TYPER_CPUNUM_LEN) +#define GICD_TYPER_SECUREXT_BIT (1UL << 10) +#define GICD_TYPER_LSPI_OFF (11) +#define GICD_TYPER_LSPI_LEN (6) +#define GICD_TYPER_ITLN_OFF 0 +#define GICD_TYPER_ITLN_LEN 5 +#define GICD_TYPER_ITLN_MSK BIT32_MASK(GICD_TYPER_ITLN_OFF, GICD_TYPER_ITLN_LEN) +#define GICD_TYPER_IDBITS_OFF (19) +#define GICD_TYPER_IDBITS_LEN (5) +#define GICD_TYPER_IDBITS_MSK BIT32_MASK(GICD_TYPER_IDBITS_OFF, GICD_TYPER_IDBITS_LEN) /* Software Generated Interrupt Register, GICD_SGIR */ -#define GICD_SGIR_SGIINTID_OFF 0 -#define GICD_SGIR_SGIINTID_LEN 4 -#define GICD_SGIR_SGIINTID_MSK \ - (BIT32_MASK(GICD_SGIR_SGIINTID_OFF, GICD_SGIR_SGIINTID_LEN)) -#define GICD_SGIR_SGIINTID(sgir) \ - bit32_extract(sgir, GICD_SGIR_SGIINTID_OFF, GICD_SGIR_SGIINTID_LEN) -#define GICD_SGIR_CPUTRGLST_OFF 16 -#define GICD_SGIR_CPUTRGLST_LEN 8 +#define GICD_SGIR_SGIINTID_OFF 0 +#define GICD_SGIR_SGIINTID_LEN 4 +#define GICD_SGIR_SGIINTID_MSK (BIT32_MASK(GICD_SGIR_SGIINTID_OFF, GICD_SGIR_SGIINTID_LEN)) +#define GICD_SGIR_SGIINTID(sgir) bit32_extract(sgir, GICD_SGIR_SGIINTID_OFF, GICD_SGIR_SGIINTID_LEN) +#define GICD_SGIR_CPUTRGLST_OFF 16 +#define GICD_SGIR_CPUTRGLST_LEN 8 #define GICD_SGIR_CPUTRGLST(sgir) \ bit32_extract(sgir, GICD_SGIR_CPUTRGLST_OFF, GICD_SGIR_CPUTRGLST_LEN) #define GICD_SGIR_TRGLSTFLT_OFF 24 @@ -100,9 +97,9 @@ /* Interrupt Routing Registers, GICD_IROUTER */ -#define GICD_IROUTER_RES0_MSK ((1ULL << 40)-1) -#define GICD_IROUTER_IRM_BIT (1ULL << 31) -#define GICD_IROUTER_AFF_MSK (GICD_IROUTER_RES0_MSK & ~GICD_IROUTER_IRM_BIT) +#define GICD_IROUTER_RES0_MSK ((1ULL << 40) - 1) +#define GICD_IROUTER_IRM_BIT (1ULL << 31) +#define GICD_IROUTER_AFF_MSK (GICD_IROUTER_RES0_MSK & ~GICD_IROUTER_IRM_BIT) struct gicd_hw { uint32_t CTLR; @@ -119,7 +116,7 @@ struct gicd_hw { uint8_t pad4[0x0058 - 0x0054]; uint32_t CLRSPI_SR; uint8_t pad9[0x0080 - 0x005C]; - uint32_t IGROUPR[GIC_NUM_INT_REGS(GIC_MAX_INTERUPTS)]; // banked CPU + uint32_t IGROUPR[GIC_NUM_INT_REGS(GIC_MAX_INTERUPTS)]; // banked CPU uint32_t ISENABLER[GIC_NUM_INT_REGS(GIC_MAX_INTERUPTS)]; uint32_t ICENABLER[GIC_NUM_INT_REGS(GIC_MAX_INTERUPTS)]; uint32_t ISPENDR[GIC_NUM_INT_REGS(GIC_MAX_INTERUPTS)]; @@ -144,11 +141,11 @@ struct gicd_hw { /* Redistributor Wake Register, GICD_WAKER */ -#define GICR_CTRL_DS_BIT (1 << 6) -#define GICR_CTRL_DS_DPG1NS (1 << 25) -#define GICR_TYPER_LAST_OFF (4) -#define GICR_TYPER_PRCNUM_OFF (8) -#define GICR_TYPER_AFFVAL_OFF (32) +#define GICR_CTRL_DS_BIT (1 << 6) +#define GICR_CTRL_DS_DPG1NS (1 << 25) +#define GICR_TYPER_LAST_OFF (4) +#define GICR_TYPER_PRCNUM_OFF (8) +#define GICR_TYPER_AFFVAL_OFF (32) #define GICR_WAKER_ProcessorSleep_BIT (0x2) #define GICR_WAKER_ChildrenASleep_BIT (0x4) @@ -203,38 +200,36 @@ 
struct gicr_hw { /* CPU Interface Control Register, GICC_CTLR */ -#define GICC_CTLR_EN_BIT (0x1) +#define GICC_CTLR_EN_BIT (0x1) #define GICC_CTLR_EOImodeNS_BIT (1UL << 9) -#define GICC_CTLR_WR_MSK (0x1) -#define GICC_IAR_ID_OFF (0) +#define GICC_CTLR_WR_MSK (0x1) +#define GICC_IAR_ID_OFF (0) #if (GIC_VERSION == GICV2) -#define GICC_IAR_ID_LEN (10) +#define GICC_IAR_ID_LEN (10) #define GICC_IAR_CPU_OFF (10) #define GICC_IAR_CPU_LEN (3) #define GICC_IAR_CPU_MSK (BIT32_MASK(GICC_IAR_CPU_OFF, GICC_IAR_CPU_LEN)) -#else +#else #define GICC_IAR_ID_LEN (24) #endif -#define GICC_IAR_ID_MSK (BIT32_MASK(GICC_IAR_ID_OFF, GICC_IAR_ID_LEN)) - -#define ICC_CTLR_EOIMode_BIT (0x1ULL << 1) -#define ICC_SGIR_SGIINTID_OFF 24 -#define ICC_SGIR_SGIINTID_LEN 4 -#define ICC_SGIR_SGIINTID(sgir) \ - bit64_extract(sgir, ICC_SGIR_SGIINTID_OFF, ICC_SGIR_SGIINTID_LEN) -#define ICC_SGIR_IRM_BIT (1ull << 40) -#define ICC_SGIR_TRGLSTFLT_OFF 0 -#define ICC_SGIR_TRGLSTFLT_LEN 16 -#define ICC_SGIR_TRGLSTFLT_MSK BIT64_MASK(ICC_SGIR_TRGLSTFLT_OFF, ICC_SGIR_TRGLSTFLT_LEN) -#define ICC_SGIR_TRGLSTFLT(sgir) \ - bit64_extract(sgir, ICC_SGIR_TRGLSTFLT_OFF, ICC_SGIR_TRGLSTFLT_LEN) -#define ICC_SGIR_AFF1_OFFSET (16) - -#define ICC_SRE_ENB_BIT (0x8) -#define ICC_SRE_DIB_BIT (0x4) -#define ICC_SRE_DFB_BIT (0x2) -#define ICC_SRE_SRE_BIT (0x1) -#define ICC_IGRPEN_EL1_ENB_BIT (0x1) +#define GICC_IAR_ID_MSK (BIT32_MASK(GICC_IAR_ID_OFF, GICC_IAR_ID_LEN)) + +#define ICC_CTLR_EOIMode_BIT (0x1ULL << 1) +#define ICC_SGIR_SGIINTID_OFF 24 +#define ICC_SGIR_SGIINTID_LEN 4 +#define ICC_SGIR_SGIINTID(sgir) bit64_extract(sgir, ICC_SGIR_SGIINTID_OFF, ICC_SGIR_SGIINTID_LEN) +#define ICC_SGIR_IRM_BIT (1ull << 40) +#define ICC_SGIR_TRGLSTFLT_OFF 0 +#define ICC_SGIR_TRGLSTFLT_LEN 16 +#define ICC_SGIR_TRGLSTFLT_MSK BIT64_MASK(ICC_SGIR_TRGLSTFLT_OFF, ICC_SGIR_TRGLSTFLT_LEN) +#define ICC_SGIR_TRGLSTFLT(sgir) bit64_extract(sgir, ICC_SGIR_TRGLSTFLT_OFF, ICC_SGIR_TRGLSTFLT_LEN) +#define ICC_SGIR_AFF1_OFFSET (16) + +#define ICC_SRE_ENB_BIT (0x8) +#define ICC_SRE_DIB_BIT (0x4) +#define ICC_SRE_DFB_BIT (0x2) +#define ICC_SRE_SRE_BIT (0x1) +#define ICC_IGRPEN_EL1_ENB_BIT (0x1) struct gicc_hw { uint32_t CTLR; @@ -257,92 +252,88 @@ struct gicc_hw { uint32_t DIR; } __attribute__((__packed__, aligned(0x1000))); -#define GICH_HCR_En_BIT (1 << 0) -#define GICH_HCR_UIE_BIT (1 << 1) -#define GICH_HCR_LRENPIE_BIT (1 << 2) -#define GICH_HCR_NPIE_BIT (1 << 3) -#define GICH_HCR_VGrp0DIE_BIT (1 << 4) -#define GICH_HCR_VGrp0EIE_BIT (1 << 5) -#define GICH_HCR_VGrp1EIE_BIT (1 << 6) -#define GICH_HCR_VGrp1DIE_BIT (1 << 7) -#define GICH_HCR_EOICount_OFF (27) -#define GICH_HCR_EOICount_LEN (5) -#define GICH_HCR_EOICount_MASK \ - BIT32_MASK(GICH_HCR_EOICount_OFF, GICH_HCR_EOICount_LEN) - -#define ICH_HCR_VGrp1EIE_BIT (1ULL << 6) -#define ICH_HCR_LRENPIE_BIT GICH_HCR_LRENPIE_BIT - -#define GICH_VTR_OFF (0) -#define GICH_VTR_LEN (6) -#define GICH_VTR_MSK BIT32_MASK(GICH_VTR_OFF, GICH_VTR_LEN) - -#define ICH_VTR_OFF GICH_VTR_OFF -#define ICH_VTR_LEN GICH_VTR_LEN -#define ICH_VTR_MSK GICH_VTR_MSK +#define GICH_HCR_En_BIT (1 << 0) +#define GICH_HCR_UIE_BIT (1 << 1) +#define GICH_HCR_LRENPIE_BIT (1 << 2) +#define GICH_HCR_NPIE_BIT (1 << 3) +#define GICH_HCR_VGrp0DIE_BIT (1 << 4) +#define GICH_HCR_VGrp0EIE_BIT (1 << 5) +#define GICH_HCR_VGrp1EIE_BIT (1 << 6) +#define GICH_HCR_VGrp1DIE_BIT (1 << 7) +#define GICH_HCR_EOICount_OFF (27) +#define GICH_HCR_EOICount_LEN (5) +#define GICH_HCR_EOICount_MASK BIT32_MASK(GICH_HCR_EOICount_OFF, GICH_HCR_EOICount_LEN) + +#define ICH_HCR_VGrp1EIE_BIT 
(1ULL << 6) +#define ICH_HCR_LRENPIE_BIT GICH_HCR_LRENPIE_BIT + +#define GICH_VTR_OFF (0) +#define GICH_VTR_LEN (6) +#define GICH_VTR_MSK BIT32_MASK(GICH_VTR_OFF, GICH_VTR_LEN) + +#define ICH_VTR_OFF GICH_VTR_OFF +#define ICH_VTR_LEN GICH_VTR_LEN +#define ICH_VTR_MSK GICH_VTR_MSK #if (GIC_VERSION == GICV2) -#define GICH_LR_VID_OFF (0) -#define GICH_LR_VID_LEN (10) -#define GICH_LR_PID_OFF (10) -#define GICH_LR_PID_LEN (10) -#define GICH_LR_PRIO_OFF (23) -#define GICH_LR_PRIO_LEN (5) +#define GICH_LR_VID_OFF (0) +#define GICH_LR_VID_LEN (10) +#define GICH_LR_PID_OFF (10) +#define GICH_LR_PID_LEN (10) +#define GICH_LR_PRIO_OFF (23) +#define GICH_LR_PRIO_LEN (5) #define GICH_LR_STATE_OFF (28) #define GICH_LR_STATE_LEN (2) -#define GICH_LR_HW_BIT (1U << 31) -#define GICH_LR_EOI_BIT (1U << 19) -#define GICH_NUM_ELRSR (2) -#define GICH_LR_PRIO_MSK BIT32_MASK(GICH_LR_PRIO_OFF, GICH_LR_PRIO_LEN) -#define GICH_LR_PID_MSK BIT32_MASK(GICH_LR_PID_OFF, GICH_LR_PID_LEN) +#define GICH_LR_HW_BIT (1U << 31) +#define GICH_LR_EOI_BIT (1U << 19) +#define GICH_NUM_ELRSR (2) +#define GICH_LR_PRIO_MSK BIT32_MASK(GICH_LR_PRIO_OFF, GICH_LR_PRIO_LEN) +#define GICH_LR_PID_MSK BIT32_MASK(GICH_LR_PID_OFF, GICH_LR_PID_LEN) #define GICH_LR_STATE_MSK BIT32_MASK(GICH_LR_STATE_OFF, GICH_LR_STATE_LEN) -#define GICH_LR_STATE(LR) \ - (bit32_extract(LR, GICH_LR_STATE_OFF, GICH_LR_STATE_LEN)) +#define GICH_LR_STATE(LR) (bit32_extract(LR, GICH_LR_STATE_OFF, GICH_LR_STATE_LEN)) typedef uint32_t gic_lr_t; #else -#define GICH_LR_VID_OFF (0) -#define GICH_LR_VID_LEN (32) -#define GICH_LR_PID_OFF (32) -#define GICH_LR_PID_LEN (10) -#define GICH_LR_PRIO_OFF (48) -#define GICH_LR_PRIO_LEN (8) +#define GICH_LR_VID_OFF (0) +#define GICH_LR_VID_LEN (32) +#define GICH_LR_PID_OFF (32) +#define GICH_LR_PID_LEN (10) +#define GICH_LR_PRIO_OFF (48) +#define GICH_LR_PRIO_LEN (8) #define GICH_LR_STATE_OFF (62) #define GICH_LR_STATE_LEN (2) -#define GICH_LR_GRP_BIT (1ULL << 60) -#define GICH_LR_HW_BIT (1ULL << 61) -#define GICH_LR_EOI_BIT (1ULL << 41) -#define GICH_NUM_ELRSR (1) -#define GICH_LR_PRIO_MSK BIT64_MASK(GICH_LR_PRIO_OFF, GICH_LR_PRIO_LEN) -#define GICH_LR_PID_MSK BIT64_MASK(GICH_LR_PID_OFF, GICH_LR_PID_LEN) +#define GICH_LR_GRP_BIT (1ULL << 60) +#define GICH_LR_HW_BIT (1ULL << 61) +#define GICH_LR_EOI_BIT (1ULL << 41) +#define GICH_NUM_ELRSR (1) +#define GICH_LR_PRIO_MSK BIT64_MASK(GICH_LR_PRIO_OFF, GICH_LR_PRIO_LEN) +#define GICH_LR_PID_MSK BIT64_MASK(GICH_LR_PID_OFF, GICH_LR_PID_LEN) #define GICH_LR_STATE_MSK BIT64_MASK(GICH_LR_STATE_OFF, GICH_LR_STATE_LEN) -#define GICH_LR_STATE(LR) \ - (bit64_extract(LR, GICH_LR_STATE_OFF, GICH_LR_STATE_LEN)) +#define GICH_LR_STATE(LR) (bit64_extract(LR, GICH_LR_STATE_OFF, GICH_LR_STATE_LEN)) typedef uint64_t gic_lr_t; #endif -#define GICH_LR_CPUID_OFF (10) -#define GICH_LR_CPUID_LEN (3) +#define GICH_LR_CPUID_OFF (10) +#define GICH_LR_CPUID_LEN (3) -#define GICH_LR_VID_MSK BIT_MASK(GICH_LR_VID_OFF, GICH_LR_VID_LEN) -#define GICH_LR_VID(LR) (bit_extract(LR, GICH_LR_VID_OFF, GICH_LR_VID_LEN)) +#define GICH_LR_VID_MSK BIT_MASK(GICH_LR_VID_OFF, GICH_LR_VID_LEN) +#define GICH_LR_VID(LR) (bit_extract(LR, GICH_LR_VID_OFF, GICH_LR_VID_LEN)) -#define GICH_LR_CPUID_MSK BIT_MASK(GICH_LR_CPUID_OFF, GICH_LR_CPUID_LEN) -#define GICH_LR_CPUID(LR) \ - (bit_extract(LR, GICH_LR_CPUID_OFF, GICH_LR_CPUID_LEN)) +#define GICH_LR_CPUID_MSK BIT_MASK(GICH_LR_CPUID_OFF, GICH_LR_CPUID_LEN) +#define GICH_LR_CPUID(LR) (bit_extract(LR, GICH_LR_CPUID_OFF, GICH_LR_CPUID_LEN)) -#define GICH_LR_STATE_INV ((0ULL << 
GICH_LR_STATE_OFF) & GICH_LR_STATE_MSK) -#define GICH_LR_STATE_PND ((1ULL << GICH_LR_STATE_OFF) & GICH_LR_STATE_MSK) -#define GICH_LR_STATE_ACT ((2ULL << GICH_LR_STATE_OFF) & GICH_LR_STATE_MSK) +#define GICH_LR_STATE_INV ((0ULL << GICH_LR_STATE_OFF) & GICH_LR_STATE_MSK) +#define GICH_LR_STATE_PND ((1ULL << GICH_LR_STATE_OFF) & GICH_LR_STATE_MSK) +#define GICH_LR_STATE_ACT ((2ULL << GICH_LR_STATE_OFF) & GICH_LR_STATE_MSK) #define GICH_LR_STATE_ACTPEND ((3ULL << GICH_LR_STATE_OFF) & GICH_LR_STATE_MSK) -#define GICH_MISR_EOI (1U << 0) -#define GICH_MISR_U (1U << 1) -#define GICH_MISR_LRPEN (1U << 2) -#define GICH_MISR_NP (1U << 3) -#define GICH_MISR_VGrp0E (1U << 4) -#define GICH_MISR_VGrp0D (1U << 5) -#define GICH_MISR_VGrp1E (1U << 6) -#define GICH_MISR_VGrp1D (1U << 7) +#define GICH_MISR_EOI (1U << 0) +#define GICH_MISR_U (1U << 1) +#define GICH_MISR_LRPEN (1U << 2) +#define GICH_MISR_NP (1U << 3) +#define GICH_MISR_VGrp0E (1U << 4) +#define GICH_MISR_VGrp0D (1U << 5) +#define GICH_MISR_VGrp1E (1U << 6) +#define GICH_MISR_VGrp1D (1U << 7) struct gich_hw { uint32_t HCR; @@ -380,9 +371,9 @@ struct gicv_hw { uint32_t DIR; } __attribute__((__packed__, aligned(0x1000))); -extern volatile struct gicd_hw *gicd; -extern volatile struct gicc_hw *gicc; -extern volatile struct gich_hw *gich; +extern volatile struct gicd_hw* gicd; +extern volatile struct gicc_hw* gicc; +extern volatile struct gich_hw* gich; enum int_state { INV, PEND, ACT, PENDACT }; @@ -407,8 +398,8 @@ void gic_init(); void gic_cpu_init(); void gic_send_sgi(cpuid_t cpu_target, irqid_t sgi_num); -void gicc_save_state(struct gicc_state *state); -void gicc_restore_state(struct gicc_state *state); +void gicc_save_state(struct gicc_state* state); +void gicc_restore_state(struct gicc_state* state); void gic_set_enable(irqid_t int_id, bool en); void gic_set_prio(irqid_t int_id, uint8_t prio); @@ -439,15 +430,14 @@ uint8_t gicr_get_prio(irqid_t int_id, cpuid_t gicr_id); void gic_maintenance_handler(irqid_t irq_id); -extern volatile struct gicd_hw *gicd; -extern volatile struct gicr_hw *gicr; +extern volatile struct gicd_hw* gicd; +extern volatile struct gicr_hw* gicr; size_t gich_num_lrs(); static inline size_t gic_num_irqs() { - size_t itlinenumber = - bit32_extract(gicd->TYPER, GICD_TYPER_ITLN_OFF, GICD_TYPER_ITLN_LEN); + size_t itlinenumber = bit32_extract(gicd->TYPER, GICD_TYPER_ITLN_OFF, GICD_TYPER_ITLN_LEN); return 32 * itlinenumber + 1; } diff --git a/src/arch/armv8/inc/arch/gicv2.h b/src/arch/armv8/inc/arch/gicv2.h index 9f065c537..10c109bf1 100644 --- a/src/arch/armv8/inc/arch/gicv2.h +++ b/src/arch/armv8/inc/arch/gicv2.h @@ -44,27 +44,34 @@ static inline uint32_t gich_get_misr() static inline uint64_t gich_get_eisr() { uint64_t eisr = gich->EISR[0]; - if (NUM_LRS > 32) eisr |= (((uint64_t)gich->EISR[1] << 32)); + if (NUM_LRS > 32) { + eisr |= ((uint64_t)gich->EISR[1] << 32); + } return eisr; } static inline uint64_t gich_get_elrsr() { uint64_t elsr = gich->ELSR[0]; - if (NUM_LRS > 32) elsr |= (((uint64_t)gich->ELSR[1] << 32)); + if (NUM_LRS > 32) { + elsr |= ((uint64_t)gich->ELSR[1] << 32); + } return elsr; } -static inline uint32_t gicc_iar() { +static inline uint32_t gicc_iar() +{ return gicc->IAR; } -static inline void gicc_eoir(uint32_t eoir) { - gicc->EOIR = eoir; +static inline void gicc_eoir(uint32_t eoir) +{ + gicc->EOIR = eoir; } -static inline void gicc_dir(uint32_t dir) { - gicc->DIR = dir; +static inline void gicc_dir(uint32_t dir) +{ + gicc->DIR = dir; } #endif /* __GICV2_H__ */ diff --git 
a/src/arch/armv8/inc/arch/gicv3.h b/src/arch/armv8/inc/arch/gicv3.h index 508a3b13b..74e507dbd 100644 --- a/src/arch/armv8/inc/arch/gicv3.h +++ b/src/arch/armv8/inc/arch/gicv3.h @@ -15,23 +15,40 @@ static inline uint64_t gich_read_lr(size_t i) } switch (i) { - case 0: return sysreg_ich_lr0_el2_read(); - case 1: return sysreg_ich_lr1_el2_read(); - case 2: return sysreg_ich_lr2_el2_read(); - case 3: return sysreg_ich_lr3_el2_read(); - case 4: return sysreg_ich_lr4_el2_read(); - case 5: return sysreg_ich_lr5_el2_read(); - case 6: return sysreg_ich_lr6_el2_read(); - case 7: return sysreg_ich_lr7_el2_read(); - case 8: return sysreg_ich_lr8_el2_read(); - case 9: return sysreg_ich_lr9_el2_read(); - case 10: return sysreg_ich_lr10_el2_read(); - case 11: return sysreg_ich_lr11_el2_read(); - case 12: return sysreg_ich_lr12_el2_read(); - case 13: return sysreg_ich_lr13_el2_read(); - case 14: return sysreg_ich_lr14_el2_read(); - case 15: return sysreg_ich_lr15_el2_read(); - default: return 0; + case 0: + return sysreg_ich_lr0_el2_read(); + case 1: + return sysreg_ich_lr1_el2_read(); + case 2: + return sysreg_ich_lr2_el2_read(); + case 3: + return sysreg_ich_lr3_el2_read(); + case 4: + return sysreg_ich_lr4_el2_read(); + case 5: + return sysreg_ich_lr5_el2_read(); + case 6: + return sysreg_ich_lr6_el2_read(); + case 7: + return sysreg_ich_lr7_el2_read(); + case 8: + return sysreg_ich_lr8_el2_read(); + case 9: + return sysreg_ich_lr9_el2_read(); + case 10: + return sysreg_ich_lr10_el2_read(); + case 11: + return sysreg_ich_lr11_el2_read(); + case 12: + return sysreg_ich_lr12_el2_read(); + case 13: + return sysreg_ich_lr13_el2_read(); + case 14: + return sysreg_ich_lr14_el2_read(); + case 15: + return sysreg_ich_lr15_el2_read(); + default: + return 0; } } @@ -42,22 +59,54 @@ static inline void gich_write_lr(size_t i, uint64_t val) } switch (i) { - case 0: sysreg_ich_lr0_el2_write(val); break; - case 1: sysreg_ich_lr1_el2_write(val); break; - case 2: sysreg_ich_lr2_el2_write(val); break; - case 3: sysreg_ich_lr3_el2_write(val); break; - case 4: sysreg_ich_lr4_el2_write(val); break; - case 5: sysreg_ich_lr5_el2_write(val); break; - case 6: sysreg_ich_lr6_el2_write(val); break; - case 7: sysreg_ich_lr7_el2_write(val); break; - case 8: sysreg_ich_lr8_el2_write(val); break; - case 9: sysreg_ich_lr9_el2_write(val); break; - case 10: sysreg_ich_lr10_el2_write(val); break; - case 11: sysreg_ich_lr11_el2_write(val); break; - case 12: sysreg_ich_lr12_el2_write(val); break; - case 13: sysreg_ich_lr13_el2_write(val); break; - case 14: sysreg_ich_lr14_el2_write(val); break; - case 15: sysreg_ich_lr15_el2_write(val); break; + case 0: + sysreg_ich_lr0_el2_write(val); + break; + case 1: + sysreg_ich_lr1_el2_write(val); + break; + case 2: + sysreg_ich_lr2_el2_write(val); + break; + case 3: + sysreg_ich_lr3_el2_write(val); + break; + case 4: + sysreg_ich_lr4_el2_write(val); + break; + case 5: + sysreg_ich_lr5_el2_write(val); + break; + case 6: + sysreg_ich_lr6_el2_write(val); + break; + case 7: + sysreg_ich_lr7_el2_write(val); + break; + case 8: + sysreg_ich_lr8_el2_write(val); + break; + case 9: + sysreg_ich_lr9_el2_write(val); + break; + case 10: + sysreg_ich_lr10_el2_write(val); + break; + case 11: + sysreg_ich_lr11_el2_write(val); + break; + case 12: + sysreg_ich_lr12_el2_write(val); + break; + case 13: + sysreg_ich_lr13_el2_write(val); + break; + case 14: + sysreg_ich_lr14_el2_write(val); + break; + case 15: + sysreg_ich_lr15_el2_write(val); + break; } } @@ -86,15 +135,18 @@ static inline uint64_t gich_get_elrsr() 
return sysreg_ich_elrsr_el2_read(); } -static inline uint32_t gicc_iar() { +static inline uint32_t gicc_iar() +{ return sysreg_icc_iar1_el1_read(); } -static inline void gicc_eoir(uint32_t eoir) { +static inline void gicc_eoir(uint32_t eoir) +{ sysreg_icc_eoir1_el1_write(eoir); } -static inline void gicc_dir(uint32_t dir) { +static inline void gicc_dir(uint32_t dir) +{ sysreg_icc_dir_el1_write(dir); } diff --git a/src/arch/armv8/inc/arch/hypercall.h b/src/arch/armv8/inc/arch/hypercall.h index bf1bfa5da..e39857a78 100644 --- a/src/arch/armv8/inc/arch/hypercall.h +++ b/src/arch/armv8/inc/arch/hypercall.h @@ -6,6 +6,6 @@ #ifndef ARCH_HYPERCALL_H #define ARCH_HYPERCALL_H -#define HYPCALL_ARG_REG(ARG) ((ARG) + 1) +#define HYPCALL_ARG_REG(ARG) ((ARG) + 1) #endif /* ARCH_HYPERCALL_H */ diff --git a/src/arch/armv8/inc/arch/interrupts.h b/src/arch/armv8/inc/arch/interrupts.h index ad4e684a1..59dc4bb9f 100644 --- a/src/arch/armv8/inc/arch/interrupts.h +++ b/src/arch/armv8/inc/arch/interrupts.h @@ -8,7 +8,7 @@ #include -#define IPI_CPU_MSG 1 +#define IPI_CPU_MSG 1 #define MAX_INTERRUPTS GIC_MAX_INTERUPTS #endif /* __ARCH_INTERRUPTS_H__ */ diff --git a/src/arch/armv8/inc/arch/platform.h b/src/arch/armv8/inc/arch/platform.h index 558315007..ff322c06d 100644 --- a/src/arch/armv8/inc/arch/platform.h +++ b/src/arch/armv8/inc/arch/platform.h @@ -7,7 +7,7 @@ #define __ARCH_PLATFORM_H__ #include -#ifdef MEM_PROT_MMU +#ifdef MEM_PROT_MMU #include #endif @@ -22,7 +22,7 @@ struct arch_platform { irqid_t maintenance_id; } gic; -#ifdef MEM_PROT_MMU +#ifdef MEM_PROT_MMU struct { paddr_t base; irqid_t interrupt_id; @@ -41,6 +41,5 @@ struct arch_platform { }; struct platform; -unsigned long platform_arch_cpuid_to_mpidr(const struct platform* plat, - cpuid_t cpuid); +unsigned long platform_arch_cpuid_to_mpidr(const struct platform* plat, cpuid_t cpuid); #endif /* __ARCH_PLATFORM_H__ */ diff --git a/src/arch/armv8/inc/arch/psci.h b/src/arch/armv8/inc/arch/psci.h index da4f43819..c500173d1 100644 --- a/src/arch/armv8/inc/arch/psci.h +++ b/src/arch/armv8/inc/arch/psci.h @@ -11,67 +11,63 @@ #include #include -#define PSCI_VERSION (0x84000000) -#define PSCI_CPU_SUSPEND_SMC32 (0x84000001) -#define PSCI_CPU_SUSPEND_SMC64 (0xc4000001) -#define PSCI_CPU_OFF (0x84000002) -#define PSCI_CPU_ON_SMC32 (0x84000003) -#define PSCI_CPU_ON_SMC64 (0xc4000003) +#define PSCI_VERSION (0x84000000) +#define PSCI_CPU_SUSPEND_SMC32 (0x84000001) +#define PSCI_CPU_SUSPEND_SMC64 (0xc4000001) +#define PSCI_CPU_OFF (0x84000002) +#define PSCI_CPU_ON_SMC32 (0x84000003) +#define PSCI_CPU_ON_SMC64 (0xc4000003) #define PSCI_AFFINITY_INFO_SMC32 (0x84000004) #define PSCI_AFFINITY_INFO_SMC64 (0xc4000004) -#define PSCI_FEATURES (0x8400000A) -#define PSCI_MIG_INFO_TYPE (0x84000006) +#define PSCI_FEATURES (0x8400000A) +#define PSCI_MIG_INFO_TYPE (0x84000006) #ifdef AARCH32 -#define PSCI_CPU_SUSPEND PSCI_CPU_SUSPEND_SMC32 -#define PSCI_CPU_ON PSCI_CPU_ON_SMC32 -#define PSCI_AFFINITY_INFO PSCI_AFFINITY_INFO_SMC32 -#else -#define PSCI_CPU_SUSPEND PSCI_CPU_SUSPEND_SMC64 -#define PSCI_CPU_ON PSCI_CPU_ON_SMC64 -#define PSCI_AFFINITY_INFO PSCI_AFFINITY_INFO_SMC64 +#define PSCI_CPU_SUSPEND PSCI_CPU_SUSPEND_SMC32 +#define PSCI_CPU_ON PSCI_CPU_ON_SMC32 +#define PSCI_AFFINITY_INFO PSCI_AFFINITY_INFO_SMC32 +#else +#define PSCI_CPU_SUSPEND PSCI_CPU_SUSPEND_SMC64 +#define PSCI_CPU_ON PSCI_CPU_ON_SMC64 +#define PSCI_AFFINITY_INFO PSCI_AFFINITY_INFO_SMC64 #endif -#define PSCI_TOS_NOT_PRESENT_MP 2 +#define PSCI_TOS_NOT_PRESENT_MP 2 -#define PSCI_CPU_IS_ON 0 -#define 
PSCI_CPU_IS_OFF 1 +#define PSCI_CPU_IS_ON 0 +#define PSCI_CPU_IS_OFF 1 -#define PSCI_INVALID_ADDRESS (-1L) +#define PSCI_INVALID_ADDRESS (-1L) -#define PSCI_VERSION_0_2 (2U) -#define PSCI_GET_VERSION_MAJOR(ver) (u16)((ver) >> 16) +#define PSCI_VERSION_0_2 (2U) +#define PSCI_GET_VERSION_MAJOR(ver) (u16)((ver) >> 16) #define PSCI_GET_VERSION(major, minor) (((major) << 16) | (minor)) -#define PSCI_E_SUCCESS 0 -#define PSCI_E_NOT_SUPPORTED -1 -#define PSCI_E_INVALID_PARAMS -2 -#define PSCI_E_DENIED -3 -#define PSCI_E_ALREADY_ON -4 -#define PSCI_E_ON_PENDING -5 -#define PSCI_E_INTERN_FAIL -6 -#define PSCI_E_NOT_PRESENT -7 -#define PSCI_E_DISABLED -8 -#define PSCI_E_INVALID_ADDRESS -9 +#define PSCI_E_SUCCESS 0 +#define PSCI_E_NOT_SUPPORTED -1 +#define PSCI_E_INVALID_PARAMS -2 +#define PSCI_E_DENIED -3 +#define PSCI_E_ALREADY_ON -4 +#define PSCI_E_ON_PENDING -5 +#define PSCI_E_INTERN_FAIL -6 +#define PSCI_E_NOT_PRESENT -7 +#define PSCI_E_DISABLED -8 +#define PSCI_E_INVALID_ADDRESS -9 /* The macros below are used to identify PSCI calls from the SMC function ID */ -#define SMC_FID_MASK (0xff000000) +#define SMC_FID_MASK (0xff000000) -#define SMC32_STDSRVC_FID_VALUE (0x84000000) -#define is_smc32_stdsrvc_fid(_fid) \ - (((_fid)&SMC_FID_MASK) == SMC32_STDSRVC_FID_VALUE) +#define SMC32_STDSRVC_FID_VALUE (0x84000000) +#define is_smc32_stdsrvc_fid(_fid) (((_fid) & SMC_FID_MASK) == SMC32_STDSRVC_FID_VALUE) -#define SMC64_STDSRVC_FID_VALUE (0xc4000000) -#define is_smc64_stdsrvc_fid(_fid) \ - (((_fid)&SMC_FID_MASK) == SMC64_STDSRVC_FID_VALUE) +#define SMC64_STDSRVC_FID_VALUE (0xc4000000) +#define is_smc64_stdsrvc_fid(_fid) (((_fid) & SMC_FID_MASK) == SMC64_STDSRVC_FID_VALUE) -#define is_smc_stdsrvc_fid(_fid) \ - (is_smc64_stdsrvc_fid(_fid) || is_smc32_stdsrvc_fid(_fid)) +#define is_smc_stdsrvc_fid(_fid) (is_smc64_stdsrvc_fid(_fid) || is_smc32_stdsrvc_fid(_fid)) -#define PSCI_FID_MASK (0xffe0) -#define PSCI_FID_VALUE (00) -#define is_psci_fid(_fid) \ - (is_smc_stdsrvc_fid(_fid) && (((_fid)&PSCI_FID_MASK) == PSCI_FID_VALUE)) +#define PSCI_FID_MASK (0xffe0) +#define PSCI_FID_VALUE (00) +#define is_psci_fid(_fid) (is_smc_stdsrvc_fid(_fid) && (((_fid) & PSCI_FID_MASK) == PSCI_FID_VALUE)) struct psci_ctx { spinlock_t lock; @@ -105,8 +101,7 @@ enum wakeup_reason { SMC Trapping --------------------------------- */ -int32_t psci_smc_handler(uint32_t smc_fid, unsigned long x1, unsigned long x2, - unsigned long x3); +int32_t psci_smc_handler(uint32_t smc_fid, unsigned long x1, unsigned long x2, unsigned long x3); int32_t psci_standby(); int32_t psci_power_down(enum wakeup_reason reason); @@ -115,10 +110,8 @@ int32_t psci_power_down(enum wakeup_reason reason); SMC PSCI interface --------------------------------- */ -int32_t psci_cpu_suspend(uint32_t power_state, unsigned long entrypoint, - unsigned long context_id); +int32_t psci_cpu_suspend(uint32_t power_state, unsigned long entrypoint, unsigned long context_id); -int32_t psci_cpu_on(unsigned long target_cpu, unsigned long entrypoint, - unsigned long context_id); +int32_t psci_cpu_on(unsigned long target_cpu, unsigned long entrypoint, unsigned long context_id); #endif /* __PSCI_H__ */ diff --git a/src/arch/armv8/inc/arch/smcc.h b/src/arch/armv8/inc/arch/smcc.h index 0abbf78cb..ff75756f3 100644 --- a/src/arch/armv8/inc/arch/smcc.h +++ b/src/arch/armv8/inc/arch/smcc.h @@ -6,14 +6,14 @@ #ifndef SMCC_H #define SMCC_H -#define SMCC64_BIT (0x40000000) +#define SMCC64_BIT (0x40000000) -#define SMCC_E_NOT_SUPPORTED (-1) +#define SMCC_E_NOT_SUPPORTED (-1) #define 
SMCC32_FID_STD_SRVC (0x84000000) #define SMCC64_FID_STD_SRVC (SMCC32_FID_STD_SRVC | SMCC64_BIT) #define SMCC32_FID_VND_HYP_SRVC (0x86000000) -#define SMCC64_FID_VND_HYP_SRVC (SMCC32_FID_VND_HYP_SRVC | SMCC64_BIT) +#define SMCC64_FID_VND_HYP_SRVC (SMCC32_FID_VND_HYP_SRVC | SMCC64_BIT) #define SMCC_FID_FN_NUM_MSK (0xFFFF) #endif /* SMCC_H */ diff --git a/src/arch/armv8/inc/arch/sysregs.h b/src/arch/armv8/inc/arch/sysregs.h index ba0f11ee1..8d8414fc7 100644 --- a/src/arch/armv8/inc/arch/sysregs.h +++ b/src/arch/armv8/inc/arch/sysregs.h @@ -11,524 +11,515 @@ #include /* ID_AA64MMFR0_EL1, AArch64 Memory Model Feature Register 0 */ -#define ID_AA64MMFR0_PAR_OFF 0 -#define ID_AA64MMFR0_PAR_LEN 4 -#define ID_AA64MMFR0_PAR_MSK \ - BIT64_MASK(ID_AA64MMFR0_PAR_OFF, ID_AA64MMFR0_PAR_LEN) +#define ID_AA64MMFR0_PAR_OFF 0 +#define ID_AA64MMFR0_PAR_LEN 4 +#define ID_AA64MMFR0_PAR_MSK BIT64_MASK(ID_AA64MMFR0_PAR_OFF, ID_AA64MMFR0_PAR_LEN) -#define PAR_32BIT (0) +#define PAR_32BIT (0) -#define SPSel_SP (1 << 0) +#define SPSel_SP (1 << 0) /* PSTATE */ -#define PSTATE_DAIF_I_BIT (1UL << 1) +#define PSTATE_DAIF_I_BIT (1UL << 1) /* MPIDR_EL1, Multiprocessor Affinity Register */ -#define MPIDR_RES1 (0x80000000) -#define MPIDR_RES0_MSK ~(0x1ful << 25) -#define MPIDR_AFFINITY_BITS (8) -#define MPIDR_U_BIT (1UL << 30) -#define MPIDR_AFF_MSK (0xffff) //we are only supporting 2 affinity levels -#define MPIDR_AFF_LVL(MPIDR, LVL) (((MPIDR) >> (8*(LVL))) & 0xff) +#define MPIDR_RES1 (0x80000000) +#define MPIDR_RES0_MSK ~(0x1ful << 25) +#define MPIDR_AFFINITY_BITS (8) +#define MPIDR_U_BIT (1UL << 30) +#define MPIDR_AFF_MSK (0xffff) // we are only supporting 2 affinity levels +#define MPIDR_AFF_LVL(MPIDR, LVL) (((MPIDR) >> (8 * (LVL))) & 0xff) /* SPSR - Saved Program Status Register */ -#define SPSR_EL_MSK (0x0f) -#define SPSR_EL0t (0x0) -#define SPSR_EL1t (0x4) -#define SPSR_EL1h (0x5) -#define SPSR_EL2t (0x8) -#define SPSR_EL2h (0x9) -#define SPSR_EL3t (0xc) -#define SPSR_EL3h (0xd) - -#define SPSR_F (1 << 6) -#define SPSR_I (1 << 7) -#define SPSR_A (1 << 8) -#define SPSR_D (1 << 9) -#define SPSR_IL (1 << 20) -#define SPSR_SS (1 << 21) - -#define SPSR_USR (0x10) -#define SPSR_IRQ (0x12) -#define SPSR_SVC (0x13) -#define SPSR_ABT (0x17) -#define SPSR_UND (0x1b) -#define SPSR_SYS (0x1f) +#define SPSR_EL_MSK (0x0f) +#define SPSR_EL0t (0x0) +#define SPSR_EL1t (0x4) +#define SPSR_EL1h (0x5) +#define SPSR_EL2t (0x8) +#define SPSR_EL2h (0x9) +#define SPSR_EL3t (0xc) +#define SPSR_EL3h (0xd) + +#define SPSR_F (1 << 6) +#define SPSR_I (1 << 7) +#define SPSR_A (1 << 8) +#define SPSR_D (1 << 9) +#define SPSR_IL (1 << 20) +#define SPSR_SS (1 << 21) + +#define SPSR_USR (0x10) +#define SPSR_IRQ (0x12) +#define SPSR_SVC (0x13) +#define SPSR_ABT (0x17) +#define SPSR_UND (0x1b) +#define SPSR_SYS (0x1f) /* SCR - Secure Configuration Register */ -#define SCR_NS (1 << 0) -#define SCR_IRQ (1 << 1) -#define SCR_FIQ (1 << 2) -#define SCR_EA (1 << 3) -#define SCR_SMD (1 << 7) -#define SCR_HCE (1 << 8) -#define SCR_SIF (1 << 9) -#define SCR_RW (1 << 10) -#define SCR_ST (1 << 11) -#define SCR_TWI (1 << 12) -#define SCR_TWE (1 << 13) -#define SCR_TLOR (1 << 14) -#define SCR_TERR (1 << 15) -#define SCR_APK (1 << 16) -#define SCR_API (1 << 17) +#define SCR_NS (1 << 0) +#define SCR_IRQ (1 << 1) +#define SCR_FIQ (1 << 2) +#define SCR_EA (1 << 3) +#define SCR_SMD (1 << 7) +#define SCR_HCE (1 << 8) +#define SCR_SIF (1 << 9) +#define SCR_RW (1 << 10) +#define SCR_ST (1 << 11) +#define SCR_TWI (1 << 12) +#define SCR_TWE (1 << 13) +#define SCR_TLOR (1 << 
14) +#define SCR_TERR (1 << 15) +#define SCR_APK (1 << 16) +#define SCR_API (1 << 17) /* TCR - Translation Control Register */ -#define TCR_RES1 ((1 << 23) | (1 << 31)) -#define TCR_T0SZ_MSK (0x1f << 0) -#define TCR_T0SZ_OFF (0) -#define TCR_T0SZ(SZ) ((SZ << TCR_T0SZ_OFF) & TCR_T0SZ_MSK) -#define TCR_IRGN0_MSK (0x3 << 8) -#define TCR_IRGN0_NC (0 << 8) -#define TCR_IRGN0_WB_RA_WA (1 << 8) -#define TCR_IRGN0_WT_RA_NWA (2 << 8) -#define TCR_IRGN0_WB_RA_NWA (3 << 8) -#define TCR_ORGN0_MSK (0x3 << 10) -#define TCR_ORGN0_NC (0 << 10) -#define TCR_ORGN0_WB_RA_WA (1 << 10) -#define TCR_ORGN0_WT_RA_NWA (2 << 10) -#define TCR_ORGN0_WB_RA_NWA (3 << 10) -#define TCR_SH0_MSK (0x3 << 12) -#define TCR_SH0_NS (0 << 12) -#define TCR_SH0_OS (2 << 12) -#define TCR_SH0_IS (3 << 12) -#define TCR_TG0_MSK (0x3 << 14) -#define TCR_TG0_4K (0 << 14) -#define TCR_TG0_16K (2 << 14) -#define TCR_TG0_64K (1 << 14) -#define TCR_PS_OFF (16) -#define TCR_PS_MSK (0x7 << TCR_PS_OFF) -#define TCR_PS_32B (0 << 16) -#define TCR_PS_36B (1 << 16) -#define TCR_PS_40B (2 << 16) -#define TCR_PS_42B (3 << 16) -#define TCR_PS_44B (4 << 16) -#define TCR_PS_48B (5 << 16) -#define TCR_PS_52B (6 << 16) -#define TCR_TBI (1 << 20) +#define TCR_RES1 ((1 << 23) | (1 << 31)) +#define TCR_T0SZ_MSK (0x1f << 0) +#define TCR_T0SZ_OFF (0) +#define TCR_T0SZ(SZ) ((SZ << TCR_T0SZ_OFF) & TCR_T0SZ_MSK) +#define TCR_IRGN0_MSK (0x3 << 8) +#define TCR_IRGN0_NC (0 << 8) +#define TCR_IRGN0_WB_RA_WA (1 << 8) +#define TCR_IRGN0_WT_RA_NWA (2 << 8) +#define TCR_IRGN0_WB_RA_NWA (3 << 8) +#define TCR_ORGN0_MSK (0x3 << 10) +#define TCR_ORGN0_NC (0 << 10) +#define TCR_ORGN0_WB_RA_WA (1 << 10) +#define TCR_ORGN0_WT_RA_NWA (2 << 10) +#define TCR_ORGN0_WB_RA_NWA (3 << 10) +#define TCR_SH0_MSK (0x3 << 12) +#define TCR_SH0_NS (0 << 12) +#define TCR_SH0_OS (2 << 12) +#define TCR_SH0_IS (3 << 12) +#define TCR_TG0_MSK (0x3 << 14) +#define TCR_TG0_4K (0 << 14) +#define TCR_TG0_16K (2 << 14) +#define TCR_TG0_64K (1 << 14) +#define TCR_PS_OFF (16) +#define TCR_PS_MSK (0x7 << TCR_PS_OFF) +#define TCR_PS_32B (0 << 16) +#define TCR_PS_36B (1 << 16) +#define TCR_PS_40B (2 << 16) +#define TCR_PS_42B (3 << 16) +#define TCR_PS_44B (4 << 16) +#define TCR_PS_48B (5 << 16) +#define TCR_PS_52B (6 << 16) +#define TCR_TBI (1 << 20) /** - * Default hypervisor translation control - * The PS field must be filled at runtime by first reading parange + * Default hypervisor translation control. 
The PS field must be filled at runtime by first reading + * parange */ -#define TCR_EL2_DFLT \ - (TCR_RES1 | TCR_TG0_4K | TCR_PS_48B | TCR_ORGN0_WB_RA_WA | \ - TCR_IRGN0_WB_RA_WA | TCR_T0SZ(16) | TCR_SH0_IS) +#define TCR_EL2_DFLT \ + (TCR_RES1 | TCR_TG0_4K | TCR_PS_48B | TCR_ORGN0_WB_RA_WA | TCR_IRGN0_WB_RA_WA | TCR_T0SZ(16) | \ + TCR_SH0_IS) -#define HTCR_DFLT \ - (TCR_SH0_IS | TCR_ORGN0_WB_RA_WA | TCR_IRGN0_WB_RA_WA | TCR_T0SZ(0)) +#define HTCR_DFLT (TCR_SH0_IS | TCR_ORGN0_WB_RA_WA | TCR_IRGN0_WB_RA_WA | TCR_T0SZ(0)) /* TCR - Translation Control Register */ -#define VTCR_RES1 (1UL << 31) -#define VTCR_MSA (1UL << 31) -#define VTCR_T0SZ_MSK (0x1f << 0) -#define VTCR_T0SZ_OFF (0) -#define VTCR_T0SZ(SZ) (((SZ) << TCR_T0SZ_OFF) & TCR_T0SZ_MSK) -#define VTCR_SL0_OFF (6) -#define VTCR_SL0_MSK (0xc0) -#define VTCR_SL0_01 ((0x2UL << VTCR_SL0_OFF) & VTCR_SL0_MSK) -#define VTCR_SL0_12 ((0x1UL << VTCR_SL0_OFF) & VTCR_SL0_MSK) -#define VTCR_SL0_23 (0) -#define VTCR_IRGN0_MSK (0x3 << 8) -#define VTCR_IRGN0_NC (0 << 8) -#define VTCR_IRGN0_WB_RA_WA (1 << 8) +#define VTCR_RES1 (1UL << 31) +#define VTCR_MSA (1UL << 31) +#define VTCR_T0SZ_MSK (0x1f << 0) +#define VTCR_T0SZ_OFF (0) +#define VTCR_T0SZ(SZ) (((SZ) << TCR_T0SZ_OFF) & TCR_T0SZ_MSK) +#define VTCR_SL0_OFF (6) +#define VTCR_SL0_MSK (0xc0) +#define VTCR_SL0_01 ((0x2UL << VTCR_SL0_OFF) & VTCR_SL0_MSK) +#define VTCR_SL0_12 ((0x1UL << VTCR_SL0_OFF) & VTCR_SL0_MSK) +#define VTCR_SL0_23 (0) +#define VTCR_IRGN0_MSK (0x3 << 8) +#define VTCR_IRGN0_NC (0 << 8) +#define VTCR_IRGN0_WB_RA_WA (1 << 8) #define VTCR_IRGN0_WT_RA_NWA (2 << 8) #define VTCR_IRGN0_WB_RA_NWA (3 << 8) -#define VTCR_ORGN0_MSK (0x3 << 10) -#define VTCR_ORGN0_NC (0 << 10) -#define VTCR_ORGN0_WB_RA_WA (1 << 10) +#define VTCR_ORGN0_MSK (0x3 << 10) +#define VTCR_ORGN0_NC (0 << 10) +#define VTCR_ORGN0_WB_RA_WA (1 << 10) #define VTCR_ORGN0_WT_RA_NWA (2 << 10) #define VTCR_ORGN0_WB_RA_NWA (3 << 10) -#define VTCR_SH0_MSK (0x3 << 12) -#define VTCR_SH0_NS (0 << 12) -#define VTCR_SH0_OS (2 << 12) -#define VTCR_SH0_IS (3 << 12) -#define VTCR_TG0_MSK (0x3 << 14) -#define VTCR_TG0_4K (0 << 14) -#define VTCR_TG0_16K (2 << 14) -#define VTCR_TG0_64K (1 << 14) -#define VTCR_PS_OFF (16) -#define VTCR_PS_MSK (0x7 << VTCR_PS_OFF) -#define VTCR_PS_32B (0 << 16) -#define VTCR_PS_36B (1 << 16) -#define VTCR_PS_40B (2 << 16) -#define VTCR_PS_42B (3 << 16) -#define VTCR_PS_44B (4 << 16) -#define VTCR_PS_48B (5 << 16) -#define VTCR_PS_52B (6 << 16) -#define VTCR_TBI (1 << 20) +#define VTCR_SH0_MSK (0x3 << 12) +#define VTCR_SH0_NS (0 << 12) +#define VTCR_SH0_OS (2 << 12) +#define VTCR_SH0_IS (3 << 12) +#define VTCR_TG0_MSK (0x3 << 14) +#define VTCR_TG0_4K (0 << 14) +#define VTCR_TG0_16K (2 << 14) +#define VTCR_TG0_64K (1 << 14) +#define VTCR_PS_OFF (16) +#define VTCR_PS_MSK (0x7 << VTCR_PS_OFF) +#define VTCR_PS_32B (0 << 16) +#define VTCR_PS_36B (1 << 16) +#define VTCR_PS_40B (2 << 16) +#define VTCR_PS_42B (3 << 16) +#define VTCR_PS_44B (4 << 16) +#define VTCR_PS_48B (5 << 16) +#define VTCR_PS_52B (6 << 16) +#define VTCR_TBI (1 << 20) /** * Default stage-2 translation control * ... 
*/ -#define VTCR_DFLT \ - (VTCR_RES1 | VTCR_PS_40B | VTCR_TG0_4K | VTCR_ORGN0_WB_RA_WA | \ - VTCR_IRGN0_WB_RA_WA | VTCR_T0SZ(24) | VTCR_SLO_12 | VTCR_SH0_IS) +#define VTCR_DFLT \ + (VTCR_RES1 | VTCR_PS_40B | VTCR_TG0_4K | VTCR_ORGN0_WB_RA_WA | VTCR_IRGN0_WB_RA_WA | \ + VTCR_T0SZ(24) | VTCR_SLO_12 | VTCR_SH0_IS) /* MAIR - Memory Attribute Indirection Register */ #define MAIR_ATTR_WIDTH (8) -#define MAIT_ATTR_NUM (8) +#define MAIT_ATTR_NUM (8) #define MAIR_DEV_nGnRnE (0x0 << 2) -#define MAIR_DEV_nGnRE (0x1 << 2) -#define MAIR_DEV_nGRE (0x2 << 2) -#define MAIR_DEV_GRE (0x3 << 2) - -#define MAIR_OWTT (0x0 << 6) -#define MAIR_ONC (0x1 << 6) -#define MAIR_OWBT (0x1 << 6) -#define MAIR_OWTNT (0x2 << 6) -#define MAIR_OWBNT (0x3 << 6) -#define MAIR_ORA (0x1 << 5) -#define MAIR_OWA (0x1 << 4) - -#define MAIR_IWTT (0x0 << 2) -#define MAIR_INC (0x1 << 2) -#define MAIR_IWBT (0x1 << 2) -#define MAIR_IWTNT (0x2 << 2) -#define MAIR_IWBNT (0x3 << 2) -#define MAIR_IRA (0x1 << 1) -#define MAIR_IWA (0x1 << 0) +#define MAIR_DEV_nGnRE (0x1 << 2) +#define MAIR_DEV_nGRE (0x2 << 2) +#define MAIR_DEV_GRE (0x3 << 2) + +#define MAIR_OWTT (0x0 << 6) +#define MAIR_ONC (0x1 << 6) +#define MAIR_OWBT (0x1 << 6) +#define MAIR_OWTNT (0x2 << 6) +#define MAIR_OWBNT (0x3 << 6) +#define MAIR_ORA (0x1 << 5) +#define MAIR_OWA (0x1 << 4) + +#define MAIR_IWTT (0x0 << 2) +#define MAIR_INC (0x1 << 2) +#define MAIR_IWBT (0x1 << 2) +#define MAIR_IWTNT (0x2 << 2) +#define MAIR_IWBNT (0x3 << 2) +#define MAIR_IRA (0x1 << 1) +#define MAIR_IWA (0x1 << 0) /** - * Default hypervisor memory attributes - * 0 -> Device-nGnRnE - * 1 -> Normal, Inner/Outer WB/WA/RA - * 2 -> Device-nGnRE + * Default hypervisor memory attributes 0 -> Device-nGnRnE 1 -> Normal, Inner/Outer WB/WA/RA 2 -> + * Device-nGnRE */ -#define MAIR_EL2_DFLT \ - (((MAIR_OWBNT | MAIR_ORA | MAIR_OWA | MAIR_IWBNT | MAIR_IRA | MAIR_IWA) \ - << MAIR_ATTR_WIDTH) | \ - ((MAIR_DEV_nGnRE) << (MAIR_ATTR_WIDTH * 2))) +#define MAIR_EL2_DFLT \ + (((MAIR_OWBNT | MAIR_ORA | MAIR_OWA | MAIR_IWBNT | MAIR_IRA | MAIR_IWA) << MAIR_ATTR_WIDTH) | \ + ((MAIR_DEV_nGnRE) << (MAIR_ATTR_WIDTH * 2))) /* PAR - Physical Address Register */ -#define PAR_ATTR_OFF (56) -#define PAR_ATTR_MSK (0xffUL << PAR_ATTR_OFF) -#define PAR_PA_OFF (12) -#define PAR_PA_MSK (0x3ffffffUL << PAR_PA_OFF) -#define PAR_RES1 (0x1UL << 11) -#define PAR_IMPDEF (0x1UL << 10) -#define PAR_NS (0x1UL << 9) -#define PAR_SH_OFF (7) -#define PAR_SH_MSK (0x3UL << PAR_SH_OFF) -#define PAR_F (0x1UL << 0) +#define PAR_ATTR_OFF (56) +#define PAR_ATTR_MSK (0xffUL << PAR_ATTR_OFF) +#define PAR_PA_OFF (12) +#define PAR_PA_MSK (0x3ffffffUL << PAR_PA_OFF) +#define PAR_RES1 (0x1UL << 11) +#define PAR_IMPDEF (0x1UL << 10) +#define PAR_NS (0x1UL << 9) +#define PAR_SH_OFF (7) +#define PAR_SH_MSK (0x3UL << PAR_SH_OFF) +#define PAR_F (0x1UL << 0) /* SCTLR - System Control Register */ -#define SCTLR_RES1 (0x30C50830) -#define SCTLR_RES1_AARCH32 (0x30C50818) -#define SCTLR_M (1 << 0) -#define SCTLR_A (1 << 1) -#define SCTLR_C (1 << 2) -#define SCTLR_SA (1 << 3) -#define SCTLR_I (1 << 12) -#define SCTLR_BR (1 << 17) -#define SCTLR_WXN (1 << 19) -#define SCTLR_EE (1 << 25) +#define SCTLR_RES1 (0x30C50830) +#define SCTLR_RES1_AARCH32 (0x30C50818) +#define SCTLR_M (1 << 0) +#define SCTLR_A (1 << 1) +#define SCTLR_C (1 << 2) +#define SCTLR_SA (1 << 3) +#define SCTLR_I (1 << 12) +#define SCTLR_BR (1 << 17) +#define SCTLR_WXN (1 << 19) +#define SCTLR_EE (1 << 25) /* CLIDR - Cache Level ID Register */ -#define CLIDR_CTYPE_LEN (0x03) -#define CLIDR_CTYPE_NUM (0x07) 
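The CLIDR_CTYPE_* values above are what cache_arch_enumerate() walks earlier in this diff: CLIDR_EL1 carries seven 3-bit Ctype fields, one per cache level, and enumeration stops at the first level whose field reads as "no cache". A self-contained sketch of that decoding, assuming a made-up CLIDR value (bits() and names[] are illustrative helpers, not part of this change):

#include <stdint.h>
#include <stdio.h>

#define CTYPE_LEN 3 /* mirrors CLIDR_CTYPE_LEN */
#define CTYPE_NUM 7 /* mirrors CLIDR_CTYPE_NUM */

static uint64_t bits(uint64_t val, unsigned int off, unsigned int len)
{
    return (val >> off) & ((1ULL << len) - 1);
}

int main(void)
{
    static const char* const names[] = { "none", "icache", "dcache", "separate", "unified" };
    uint64_t clidr = 0x23; /* hypothetical: L1 separate, L2 unified, no L3 */
    for (unsigned int lvl = 0; lvl < CTYPE_NUM; lvl++) {
        uint64_t ctype = bits(clidr, lvl * CTYPE_LEN, CTYPE_LEN);
        if (ctype == 0) {
            break; /* first empty level terminates the hierarchy */
        }
        printf("L%u: %s\n", lvl + 1, ctype <= 4 ? names[ctype] : "reserved");
    }
    return 0;
}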
-#define CLIDR_CTYPE_NO 0 -#define CLIDR_CTYPE_IO 1 -#define CLIDR_CTYPE_DO 2 -#define CLIDR_CTYPE_SP 3 -#define CLIDR_CTYPE_UN 4 +#define CLIDR_CTYPE_LEN (0x03) +#define CLIDR_CTYPE_NUM (0x07) +#define CLIDR_CTYPE_NO 0 +#define CLIDR_CTYPE_IO 1 +#define CLIDR_CTYPE_DO 2 +#define CLIDR_CTYPE_SP 3 +#define CLIDR_CTYPE_UN 4 /* CTR_EL0 - Cache Type Register */ -#define CTR_IMINLINE_OFF 0 -#define CTR_IMINLINE_LEN 4 -#define CTR_L1LP_OFF 14 -#define CTR_L1LP_LEN 2 -#define CTR_L1LP_AIVIVT (0x1UL << CTR_L1LP_OFF) -#define CTR_L1LP_VIPT (0x2UL << CTR_L1LP_OFF) -#define CTR_L1LP_PIPT (0x3UL << CTR_L1LP_OFF) -#define CTR_DMINLINE_OFF 16 -#define CTR_DMINLINE_LEN 4 -#define CTR_ERG_OFF 20 -#define CTR_ERG_LEN 4 -#define CTR_CEG_OFF 24 -#define CTR_CEG_LEN 4 -#define CTR_RES1 (1UL << 31) +#define CTR_IMINLINE_OFF 0 +#define CTR_IMINLINE_LEN 4 +#define CTR_L1LP_OFF 14 +#define CTR_L1LP_LEN 2 +#define CTR_L1LP_AIVIVT (0x1UL << CTR_L1LP_OFF) +#define CTR_L1LP_VIPT (0x2UL << CTR_L1LP_OFF) +#define CTR_L1LP_PIPT (0x3UL << CTR_L1LP_OFF) +#define CTR_DMINLINE_OFF 16 +#define CTR_DMINLINE_LEN 4 +#define CTR_ERG_OFF 20 +#define CTR_ERG_LEN 4 +#define CTR_CEG_OFF 24 +#define CTR_CEG_LEN 4 +#define CTR_RES1 (1UL << 31) /* CSSELR_EL1 - Cache Size Selection Register */ -#define CSSELR_IND_BIT 0 -#define CSSELR_LVL_OFF 1 -#define CSSELR_LVL_LEN 3 +#define CSSELR_IND_BIT 0 +#define CSSELR_LVL_OFF 1 +#define CSSELR_LVL_LEN 3 /* CCSIDR_EL1 - Current Cache Size ID Register */ -#define CCSIDR_LINESIZE_OFF 0 -#define CCSIDR_LINESIZE_LEN 3 -#define CCSIDR_ASSOCIATIVITY_OFF 3 -#define CCSIDR_ASSOCIATIVITY_LEN 10 -#define CCSIDR_NUMSETS_OFF 13 -#define CCSIDR_NUMSETS_LEN 15 +#define CCSIDR_LINESIZE_OFF 0 +#define CCSIDR_LINESIZE_LEN 3 +#define CCSIDR_ASSOCIATIVITY_OFF 3 +#define CCSIDR_ASSOCIATIVITY_LEN 10 +#define CCSIDR_NUMSETS_OFF 13 +#define CCSIDR_NUMSETS_LEN 15 /** - * Below are platform implementation registers related to a53. - * TODO: move them to a a53 specific file. + * Below are platform implementation registers related to a53. TODO: move them to a a53 specific + * file. 
*/ /* CPUECTLR_EL1 - CPU Extended Control Register */ -#define CPUECTLR_EL1 S3_1_c15_c2_1 -#define CPUECTLR_SPEN_BIT (1UL << 6) +#define CPUECTLR_EL1 S3_1_c15_c2_1 +#define CPUECTLR_SPEN_BIT (1UL << 6) /* ACTLR - Auxiliary Control Register */ -#define ACTLR_CPUACTLR_BIT (1UL << 0) -#define ACTLR_CPUECTLR_BIT (1UL << 1) -#define ACTLR_L2CTLR_BIT (1UL << 4) -#define ACTLR_L2ECTLR_BIT (1UL << 5) -#define ACTLR_L2ACTLR_BIT (1UL << 6) +#define ACTLR_CPUACTLR_BIT (1UL << 0) +#define ACTLR_CPUECTLR_BIT (1UL << 1) +#define ACTLR_L2CTLR_BIT (1UL << 4) +#define ACTLR_L2ECTLR_BIT (1UL << 5) +#define ACTLR_L2ACTLR_BIT (1UL << 6) /* HCR_EL2 - Hypervisor Configuration Register */ -#define HCR_VM_BIT (1UL << 0) -#define HCR_SWIO_BIT (1UL << 1) -#define HCR_PTW_BIT (1UL << 2) -#define HCR_FMO_BIT (1UL << 3) -#define HCR_IMO_BIT (1UL << 4) -#define HCR_AMO_BIT (1UL << 5) -#define HCR_VF_BIT (1UL << 6) -#define HCR_VI_BIT (1UL << 7) -#define HCR_VSE_BIT (1UL << 8) -#define HCR_FB_BIT (1UL << 9) -#define HCR_BSU_BIT (1UL << 10) -#define HCR_DC_BIT (1UL << 12) -#define HCR_TWI_BIT (1UL << 13) -#define HCR_TWE_BIT (1UL << 14) -#define HCR_TID0_BIT (1UL << 15) -#define HCR_TID1_BIT (1UL << 16) -#define HCR_TID2_BIT (1UL << 17) -#define HCR_TID3_BIT (1UL << 18) -#define HCR_TSC_BIT (1UL << 19) -#define HCR_TIDCP_BIT (1UL << 20) -#define HCR_TACR_BIT (1UL << 21) -#define HCR_TSW_BIT (1UL << 22) -#define HCR_TPC_BIT (1UL << 23) -#define HCR_TPU_BIT (1UL << 24) -#define HCR_TTLB_BIT (1UL << 25) -#define HCR_TVM_BIT (1UL << 26) -#define HCR_TGE_BIT (1UL << 27) -#define HCR_TDZ_BIT (1UL << 28) -#define HCR_HCD_BIT (1UL << 29) -#define HCR_TRVM_BIT (1UL << 30) -#define HCR_RW_BIT (1UL << 31) -#define HCR_CD_BIT (1UL << 32) -#define HCR_ID_BIT (1UL << 33) -#define HCR_TERR_BIT (1UL << 36) -#define HCR_TEA_BIT (1UL << 37) -#define HCR_MIOCNCE_BIT (1UL << 38) -#define HCR_APK_BIT (1ULL << 40) -#define HCR_API_BIT (1ULL << 41) +#define HCR_VM_BIT (1UL << 0) +#define HCR_SWIO_BIT (1UL << 1) +#define HCR_PTW_BIT (1UL << 2) +#define HCR_FMO_BIT (1UL << 3) +#define HCR_IMO_BIT (1UL << 4) +#define HCR_AMO_BIT (1UL << 5) +#define HCR_VF_BIT (1UL << 6) +#define HCR_VI_BIT (1UL << 7) +#define HCR_VSE_BIT (1UL << 8) +#define HCR_FB_BIT (1UL << 9) +#define HCR_BSU_BIT (1UL << 10) +#define HCR_DC_BIT (1UL << 12) +#define HCR_TWI_BIT (1UL << 13) +#define HCR_TWE_BIT (1UL << 14) +#define HCR_TID0_BIT (1UL << 15) +#define HCR_TID1_BIT (1UL << 16) +#define HCR_TID2_BIT (1UL << 17) +#define HCR_TID3_BIT (1UL << 18) +#define HCR_TSC_BIT (1UL << 19) +#define HCR_TIDCP_BIT (1UL << 20) +#define HCR_TACR_BIT (1UL << 21) +#define HCR_TSW_BIT (1UL << 22) +#define HCR_TPC_BIT (1UL << 23) +#define HCR_TPU_BIT (1UL << 24) +#define HCR_TTLB_BIT (1UL << 25) +#define HCR_TVM_BIT (1UL << 26) +#define HCR_TGE_BIT (1UL << 27) +#define HCR_TDZ_BIT (1UL << 28) +#define HCR_HCD_BIT (1UL << 29) +#define HCR_TRVM_BIT (1UL << 30) +#define HCR_RW_BIT (1UL << 31) +#define HCR_CD_BIT (1UL << 32) +#define HCR_ID_BIT (1UL << 33) +#define HCR_TERR_BIT (1UL << 36) +#define HCR_TEA_BIT (1UL << 37) +#define HCR_MIOCNCE_BIT (1UL << 38) +#define HCR_APK_BIT (1ULL << 40) +#define HCR_API_BIT (1ULL << 41) /* ESR_ELx, Exception Syndrome Register (ELx) */ -#define ESR_ISS_OFF (0) -#define ESR_ISS_LEN (25) -#define ESR_IL_OFF (25) -#define ESR_IL_LEN (1) -#define ESR_EC_OFF (26) -#define ESR_EC_LEN (6) - -#define ESR_EC_UNKWN (0x00) -#define ESR_EC_WFIE (0x01) -#define ESR_EC_RG_32 (0x03) -#define ESR_EC_RG_64 (0x04) -#define ESR_EC_SVC32 (0x11) -#define ESR_EC_HVC32 (0x12) 
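Editor's note, not part of the diff: the ESR_ELx field defines in this hunk are easiest to sanity-check with a tiny decoder. A minimal sketch follows, assuming a hypothetical trap syndrome value; bit64_extract() is a local helper modeled on bao's bit-field utilities, and the constants are copied from the defines above.

#include <inttypes.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ESR_ISS_OFF (0)
#define ESR_ISS_LEN (25)
#define ESR_EC_OFF (26)
#define ESR_EC_LEN (6)
#define ESR_EC_DALEL (0x24) /* data abort from a lower exception level */
#define ESR_ISS_DA_WnR_BIT (1 << 6)

/* Local stand-in for bao's generic bit-field extraction helpers. */
static uint64_t bit64_extract(uint64_t word, size_t off, size_t len)
{
    return (word >> off) & ((1ULL << len) - 1);
}

int main(void)
{
    /* Hypothetical ESR_EL2 image: EC = lower-EL data abort, ISS with WnR (write) set. */
    uint64_t esr = ((uint64_t)ESR_EC_DALEL << ESR_EC_OFF) | ESR_ISS_DA_WnR_BIT;

    uint64_t ec = bit64_extract(esr, ESR_EC_OFF, ESR_EC_LEN);
    uint64_t iss = bit64_extract(esr, ESR_ISS_OFF, ESR_ISS_LEN);
    bool is_write = (iss & ESR_ISS_DA_WnR_BIT) != 0;

    printf("EC=0x%02" PRIx64 " ISS=0x%07" PRIx64 " write=%d\n", ec, iss, is_write);
    return 0;
}

This is roughly the decoding the abort and sysreg-emulation paths perform before dispatching on ESR_EC_DALEL, ESR_EC_SYSRG and friends.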
-#define ESR_EC_SMC32 (0x13) -#define ESR_EC_SVC64 (0x15) -#define ESR_EC_HVC64 (0x16) -#define ESR_EC_SMC64 (0x17) -#define ESR_EC_SYSRG (0x18) -#define ESR_EC_IALEL (0x20) -#define ESR_EC_IASEL (0x21) -#define ESR_EC_PCALG (0x22) -#define ESR_EC_DALEL (0x24) -#define ESR_EC_DASEL (0x25) -#define ESR_EC_SPALG (0x26) - -#define ESR_ISS_DA_DSFC_OFF (0) -#define ESR_ISS_DA_DSFC_LEN (6) -#define ESR_ISS_DA_WnR_OFF (6) -#define ESR_ISS_DA_WnR_LEN (1) -#define ESR_ISS_DA_WnR_BIT (1 << 6) -#define ESR_ISS_DA_S1PTW_OFF (7) -#define ESR_ISS_DA_S1PTW_LEN (1) -#define ESR_ISS_DA_CM_OFF (8) -#define ESR_ISS_DA_CM_LEN (1) -#define ESR_ISS_DA_EA_OFF (9) -#define ESR_ISS_DA_EA_LEN (1) -#define ESR_ISS_DA_FnV_OFF (10) -#define ESR_ISS_DA_FnV_LEN (1) -#define ESR_ISS_DA_FnV_BIT (1UL << 10) -#define ESR_ISS_DA_SET_OFF (11) -#define ESR_ISS_DA_SET_LEN (2) -#define ESR_ISS_DA_AR_OFF (14) -#define ESR_ISS_DA_AR_LEN (1) -#define ESR_ISS_DA_SF_OFF (15) -#define ESR_ISS_DA_SF_LEN (1) -#define ESR_ISS_DA_SRT_OFF (16) -#define ESR_ISS_DA_SRT_LEN (5) -#define ESR_ISS_DA_SSE_OFF (21) -#define ESR_ISS_DA_SSE_LEN (1) -#define ESR_ISS_DA_SAS_OFF (22) -#define ESR_ISS_DA_SAS_LEN (2) -#define ESR_ISS_DA_ISV_OFF (24) -#define ESR_ISS_DA_ISV_LEN (1) -#define ESR_ISS_DA_ISV_BIT (1UL << 24) - -#define ESR_ISS_DA_DSFC_CODE (0xf << 2) -#define ESR_ISS_DA_DSFC_ADDRSZ (0x0) -#define ESR_ISS_DA_DSFC_TRNSLT (0x4) -#define ESR_ISS_DA_DSFC_ACCESS (0x8) -#define ESR_ISS_DA_DSFC_PERMIS (0xC) - -#define ESR_ISS_SYSREG_ADDR ((0xfff << 10) | (0xf << 1)) -#define ESR_ISS_SYSREG_ADDR_32 (0xFFC1E) -#define ESR_ISS_SYSREG_ADDR_64 (0xF001E) -#define ESR_ISS_SYSREG_DIR (0x1) -#define ESR_ISS_SYSREG_REG_OFF (5) -#define ESR_ISS_SYSREG_REG_LEN (5) -#define ESR_ISS_SYSREG_REG2_OFF (10) -#define ESR_ISS_SYSREG_REG2_LEN (5) - -#define OP0_MRS_CP15 ((0x3)<<20) - -#define UNDEFINED_REG_ADDR (0xFFFFFFFFUL) +#define ESR_ISS_OFF (0) +#define ESR_ISS_LEN (25) +#define ESR_IL_OFF (25) +#define ESR_IL_LEN (1) +#define ESR_EC_OFF (26) +#define ESR_EC_LEN (6) + +#define ESR_EC_UNKWN (0x00) +#define ESR_EC_WFIE (0x01) +#define ESR_EC_RG_32 (0x03) +#define ESR_EC_RG_64 (0x04) +#define ESR_EC_SVC32 (0x11) +#define ESR_EC_HVC32 (0x12) +#define ESR_EC_SMC32 (0x13) +#define ESR_EC_SVC64 (0x15) +#define ESR_EC_HVC64 (0x16) +#define ESR_EC_SMC64 (0x17) +#define ESR_EC_SYSRG (0x18) +#define ESR_EC_IALEL (0x20) +#define ESR_EC_IASEL (0x21) +#define ESR_EC_PCALG (0x22) +#define ESR_EC_DALEL (0x24) +#define ESR_EC_DASEL (0x25) +#define ESR_EC_SPALG (0x26) + +#define ESR_ISS_DA_DSFC_OFF (0) +#define ESR_ISS_DA_DSFC_LEN (6) +#define ESR_ISS_DA_WnR_OFF (6) +#define ESR_ISS_DA_WnR_LEN (1) +#define ESR_ISS_DA_WnR_BIT (1 << 6) +#define ESR_ISS_DA_S1PTW_OFF (7) +#define ESR_ISS_DA_S1PTW_LEN (1) +#define ESR_ISS_DA_CM_OFF (8) +#define ESR_ISS_DA_CM_LEN (1) +#define ESR_ISS_DA_EA_OFF (9) +#define ESR_ISS_DA_EA_LEN (1) +#define ESR_ISS_DA_FnV_OFF (10) +#define ESR_ISS_DA_FnV_LEN (1) +#define ESR_ISS_DA_FnV_BIT (1UL << 10) +#define ESR_ISS_DA_SET_OFF (11) +#define ESR_ISS_DA_SET_LEN (2) +#define ESR_ISS_DA_AR_OFF (14) +#define ESR_ISS_DA_AR_LEN (1) +#define ESR_ISS_DA_SF_OFF (15) +#define ESR_ISS_DA_SF_LEN (1) +#define ESR_ISS_DA_SRT_OFF (16) +#define ESR_ISS_DA_SRT_LEN (5) +#define ESR_ISS_DA_SSE_OFF (21) +#define ESR_ISS_DA_SSE_LEN (1) +#define ESR_ISS_DA_SAS_OFF (22) +#define ESR_ISS_DA_SAS_LEN (2) +#define ESR_ISS_DA_ISV_OFF (24) +#define ESR_ISS_DA_ISV_LEN (1) +#define ESR_ISS_DA_ISV_BIT (1UL << 24) + +#define ESR_ISS_DA_DSFC_CODE (0xf << 2) +#define ESR_ISS_DA_DSFC_ADDRSZ 
(0x0) +#define ESR_ISS_DA_DSFC_TRNSLT (0x4) +#define ESR_ISS_DA_DSFC_ACCESS (0x8) +#define ESR_ISS_DA_DSFC_PERMIS (0xC) + +#define ESR_ISS_SYSREG_ADDR ((0xfff << 10) | (0xf << 1)) +#define ESR_ISS_SYSREG_ADDR_32 (0xFFC1E) +#define ESR_ISS_SYSREG_ADDR_64 (0xF001E) +#define ESR_ISS_SYSREG_DIR (0x1) +#define ESR_ISS_SYSREG_REG_OFF (5) +#define ESR_ISS_SYSREG_REG_LEN (5) +#define ESR_ISS_SYSREG_REG2_OFF (10) +#define ESR_ISS_SYSREG_REG2_LEN (5) + +#define OP0_MRS_CP15 ((0x3) << 20) + +#define UNDEFINED_REG_ADDR (0xFFFFFFFFUL) /* VTTBR_EL2, Virtualization Translation Table Base Register */ -#define VTTBR_VMID_OFF 48 -#define VTTBR_VMID_LEN 8 -#define VTTBR_VMID_MSK BIT64_MASK(VTTBR_VMID_OFF, VTTBR_VMID_LEN) +#define VTTBR_VMID_OFF 48 +#define VTTBR_VMID_LEN 8 +#define VTTBR_VMID_MSK BIT64_MASK(VTTBR_VMID_OFF, VTTBR_VMID_LEN) -#define CPUACTLR_EL1 S3_1_C15_C2_0 +#define CPUACTLR_EL1 S3_1_C15_C2_0 /* VSCTLR, Virtualization System Control Register */ -#define REG_LENGTH (sizeof(long)*8) -#define VSCTLR_EL2_VMID_LEN (REG_LENGTH/4) +#define REG_LENGTH (sizeof(long) * 8) +#define VSCTLR_EL2_VMID_LEN (REG_LENGTH / 4) #define VSCTLR_EL2_VMID_OFF_ADJUST (16) -#define VSCTLR_EL2_VMID_OFF (REG_LENGTH - VSCTLR_EL2_VMID_OFF_ADJUST) -#define VSCTLR_EL2_VMID_MSK BIT_MASK (VSCTLR_EL2_VMID_OFF, VSCTLR_EL2_VMID_LEN) +#define VSCTLR_EL2_VMID_OFF (REG_LENGTH - VSCTLR_EL2_VMID_OFF_ADJUST) +#define VSCTLR_EL2_VMID_MSK BIT_MASK(VSCTLR_EL2_VMID_OFF, VSCTLR_EL2_VMID_LEN) /* GICC System Register Interface Definitions */ -#define ICC_PMR_EL1 S3_0_C4_C6_0 -#define ICC_IAR0_EL1 S3_0_C12_C8_0 -#define ICC_EOIR0_EL1 S3_0_C12_C8_1 -#define ICC_HPPIR0_EL1 S3_0_C12_C8_2 -#define ICC_BPR0_EL1 S3_0_C12_C8_3 -// #define ICC_AP0R_EL1 S3_0_C12_C8_ -#define ICC_AP1R_EL1(N) S3_0_C12_C9_ ## N -#define ICC_DIR_EL1 S3_0_C12_C11_1 -#define ICC_RPR_EL1 S3_0_C12_C11_3 -#define ICC_SGI1R_EL1 S3_0_C12_C11_5 -#define ICC_ASGI1R_EL1 S3_0_C12_C11_6 -#define ICC_SGI0R_EL1 S3_0_C12_C11_7 -#define ICC_IAR1_EL1 S3_0_C12_C12_0 -#define ICC_EOIR1_EL1 S3_0_C12_C12_1 -#define ICC_HPPIR1_EL1 S3_0_C12_C12_2 -#define ICC_BPR1_EL1 S3_0_C12_C12_3 -#define ICC_CTLR_EL1 S3_0_C12_C12_4 -#define ICC_SRE_EL1 S3_0_C12_C12_5 -#define ICC_IGRPEN0_EL1 S3_0_C12_C12_6 -#define ICC_IGRPEN1_EL1 S3_0_C12_C12_7 -#define ICC_SRE_EL2 S3_4_C12_C9_5 -#define ICC_CTLR_EL3 S3_6_C12_C12_4 -#define ICC_SRE_EL3 S3_6_C12_C12_5 -#define ICC_IGRPEN1_EL3 S3_6_C12_C12_7 - -#define ICC_SGI1R_CASE (0x18) -#define ICC_SGI1R_ADDR (0x3A3016) - -// #define ICH_AP0R_EL2 S3_4_C12_C8 _0-3 -// #define ICH_AP1R_EL2 S3_4_C12_C9 _0-3 -#define ICH_HCR_EL2 S3_4_C12_C11_0 -#define ICH_VTR_EL2 S3_4_C12_C11_1 -#define ICH_MISR_EL2 S3_4_C12_C11_2 -#define ICH_EISR_EL2 S3_4_C12_C11_3 -#define ICH_ELRSR_EL2 S3_4_C12_C11_5 -#define ICH_VMCR_EL2 S3_4_C12_C11_7 -#define ICH_LR0_EL2 S3_4_C12_C12_0 -#define ICH_LR1_EL2 S3_4_C12_C12_1 -#define ICH_LR2_EL2 S3_4_C12_C12_2 -#define ICH_LR3_EL2 S3_4_C12_C12_3 -#define ICH_LR4_EL2 S3_4_C12_C12_4 -#define ICH_LR5_EL2 S3_4_C12_C12_5 -#define ICH_LR6_EL2 S3_4_C12_C12_6 -#define ICH_LR7_EL2 S3_4_C12_C12_7 -#define ICH_LR8_EL2 S3_4_C12_C13_0 -#define ICH_LR9_EL2 S3_4_C12_C13_1 -#define ICH_LR10_EL2 S3_4_C12_C13_2 -#define ICH_LR11_EL2 S3_4_C12_C13_3 -#define ICH_LR12_EL2 S3_4_C12_C13_4 -#define ICH_LR13_EL2 S3_4_C12_C13_5 -#define ICH_LR14_EL2 S3_4_C12_C13_6 -#define ICH_LR15_EL2 S3_4_C12_C13_7 +#define ICC_PMR_EL1 S3_0_C4_C6_0 +#define ICC_IAR0_EL1 S3_0_C12_C8_0 +#define ICC_EOIR0_EL1 S3_0_C12_C8_1 +#define ICC_HPPIR0_EL1 S3_0_C12_C8_2 +#define ICC_BPR0_EL1 S3_0_C12_C8_3 
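Editor's note, not part of the diff: VTTBR_VMID_MSK above is derived with bao's BIT64_MASK helper. The sketch below uses a local stand-in for that macro to show how a VMID is folded into a VTTBR_EL2 image; vttbr_make(), the base address and the VMID are illustrative, and real code must additionally respect the translation-table base alignment rules.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for bao's BIT64_MASK; offsets and lengths copied from the header above. */
#define BIT64_MASK(OFF, LEN) (((((uint64_t)1 << ((LEN) - 1)) << 1) - 1) << (OFF))
#define VTTBR_VMID_OFF 48
#define VTTBR_VMID_LEN 8
#define VTTBR_VMID_MSK BIT64_MASK(VTTBR_VMID_OFF, VTTBR_VMID_LEN)

/* Pack a stage-2 root table base address and a VMID into a VTTBR_EL2 value. */
static uint64_t vttbr_make(uint64_t s2_root_pa, uint64_t vmid)
{
    return (s2_root_pa & ~VTTBR_VMID_MSK) | ((vmid << VTTBR_VMID_OFF) & VTTBR_VMID_MSK);
}

int main(void)
{
    uint64_t vttbr = vttbr_make(0x80200000ULL, 3);
    printf("VTTBR_EL2 = 0x%016" PRIx64 "\n", vttbr); /* 0x0003000080200000 */
    return 0;
}

An 8-bit VMID field allows at most 256 concurrent stage-2 address spaces; the VSCTLR_EL2 VMID field defined just above plays an analogous role for the MPU-based configurations.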
+// #define ICC_AP0R_EL1 S3_0_C12_C8_ +#define ICC_AP1R_EL1(N) S3_0_C12_C9_##N +#define ICC_DIR_EL1 S3_0_C12_C11_1 +#define ICC_RPR_EL1 S3_0_C12_C11_3 +#define ICC_SGI1R_EL1 S3_0_C12_C11_5 +#define ICC_ASGI1R_EL1 S3_0_C12_C11_6 +#define ICC_SGI0R_EL1 S3_0_C12_C11_7 +#define ICC_IAR1_EL1 S3_0_C12_C12_0 +#define ICC_EOIR1_EL1 S3_0_C12_C12_1 +#define ICC_HPPIR1_EL1 S3_0_C12_C12_2 +#define ICC_BPR1_EL1 S3_0_C12_C12_3 +#define ICC_CTLR_EL1 S3_0_C12_C12_4 +#define ICC_SRE_EL1 S3_0_C12_C12_5 +#define ICC_IGRPEN0_EL1 S3_0_C12_C12_6 +#define ICC_IGRPEN1_EL1 S3_0_C12_C12_7 +#define ICC_SRE_EL2 S3_4_C12_C9_5 +#define ICC_CTLR_EL3 S3_6_C12_C12_4 +#define ICC_SRE_EL3 S3_6_C12_C12_5 +#define ICC_IGRPEN1_EL3 S3_6_C12_C12_7 + +#define ICC_SGI1R_CASE (0x18) +#define ICC_SGI1R_ADDR (0x3A3016) + +// #define ICH_AP0R_EL2 S3_4_C12_C8 _0-3 #define ICH_AP1R_EL2 S3_4_C12_C9 _0-3 +#define ICH_HCR_EL2 S3_4_C12_C11_0 +#define ICH_VTR_EL2 S3_4_C12_C11_1 +#define ICH_MISR_EL2 S3_4_C12_C11_2 +#define ICH_EISR_EL2 S3_4_C12_C11_3 +#define ICH_ELRSR_EL2 S3_4_C12_C11_5 +#define ICH_VMCR_EL2 S3_4_C12_C11_7 +#define ICH_LR0_EL2 S3_4_C12_C12_0 +#define ICH_LR1_EL2 S3_4_C12_C12_1 +#define ICH_LR2_EL2 S3_4_C12_C12_2 +#define ICH_LR3_EL2 S3_4_C12_C12_3 +#define ICH_LR4_EL2 S3_4_C12_C12_4 +#define ICH_LR5_EL2 S3_4_C12_C12_5 +#define ICH_LR6_EL2 S3_4_C12_C12_6 +#define ICH_LR7_EL2 S3_4_C12_C12_7 +#define ICH_LR8_EL2 S3_4_C12_C13_0 +#define ICH_LR9_EL2 S3_4_C12_C13_1 +#define ICH_LR10_EL2 S3_4_C12_C13_2 +#define ICH_LR11_EL2 S3_4_C12_C13_3 +#define ICH_LR12_EL2 S3_4_C12_C13_4 +#define ICH_LR13_EL2 S3_4_C12_C13_5 +#define ICH_LR14_EL2 S3_4_C12_C13_6 +#define ICH_LR15_EL2 S3_4_C12_C13_7 /* MPU registers */ -#define MPUIR_REGION_MSK (0xFFUL) -#define MPUIR_REGION(MPUIR) ((MPUIR) & MPUIR_REGION_MSK) +#define MPUIR_REGION_MSK (0xFFUL) +#define MPUIR_REGION(MPUIR) ((MPUIR) & MPUIR_REGION_MSK) #ifdef AARCH64 -#define PRBAR_ADJUST_SHIFT 1 +#define PRBAR_ADJUST_SHIFT 1 #else -#define PRBAR_ADJUST_SHIFT 0 +#define PRBAR_ADJUST_SHIFT 0 #endif -#define PRBAR_XN ((1UL << 0) << PRBAR_ADJUST_SHIFT) -#define PRBAR_NWR_BIT ((1UL << 2) << PRBAR_ADJUST_SHIFT) -#define PRBAR_EL1_BIT ((1UL << 1) << PRBAR_ADJUST_SHIFT) -#define PRBAR_AP_RW_EL2 ((0 << 1) << PRBAR_ADJUST_SHIFT) -#define PRBAR_AP_RW_EL1_EL2 ((1UL << 1) << PRBAR_ADJUST_SHIFT) -#define PRBAR_AP_RO_EL2 ((2UL << 1) << PRBAR_ADJUST_SHIFT) -#define PRBAR_AP_RO_EL1_EL2 ((3UL << 1) << PRBAR_ADJUST_SHIFT) -#define PRCBR_SH_OFF (3 + PRBAR_ADJUST_SHIFT) -#define PRBAR_SH_NS (0 << PRCBR_SH_OFF) -#define PRBAR_SH_OS (2UL << PRCBR_SH_OFF) -#define PRBAR_SH_IS (3UL << PRCBR_SH_OFF) -#define PRBAR_FLAGS_MSK (0x3FUL) -#define PRBAR_FLAGS(PRBAR) ((PRBAR) & PRBAR_FLAGS_MSK) -#define PRBAR_BASE_MSK (~PRBAR_FLAGS_MSK) -#define PRBAR_BASE(BASE) ((BASE) & PRBAR_BASE_MSK) -#define PRBAR_MEM_ATTR_FLAGS_MSK ((0x18UL) << PRBAR_ADJUST_SHIFT) -#define PRBAR_PERMS_FLAGS_MSK ((1 << PRCBR_SH_OFF) - 1) - -#define PRLAR_EN (0x1UL) -#define PRLAR_ATTR_OFF (1) -#define PRLAR_ATTR_MSK (0x3UL << PRLAR_ATTR_OFF) -#define PRLAR_ATTR(N) (((N) << PRLAR_ATTR_OFF) & PRLAR_ATTR_MSK) -#define PRLAR_FLAGS_MSK (0x3FUL) -#define PRLAR_FLAGS(PRLAR) ((PRLAR) & PRLAR_FLAGS_MSK) -#define PRLAR_LIMIT_MSK (~PRBAR_FLAGS_MSK) -#define PRLAR_LIMIT(LIMIT) (((LIMIT) & PRLAR_LIMIT_MSK) | 0x3FUL) -#define PRLAR_MEM_ATTR_FLAGS_MSK (0x0EUL) +#define PRBAR_XN ((1UL << 0) << PRBAR_ADJUST_SHIFT) +#define PRBAR_NWR_BIT ((1UL << 2) << PRBAR_ADJUST_SHIFT) +#define PRBAR_EL1_BIT ((1UL << 1) << PRBAR_ADJUST_SHIFT) +#define PRBAR_AP_RW_EL2 ((0 << 1) << 
PRBAR_ADJUST_SHIFT) +#define PRBAR_AP_RW_EL1_EL2 ((1UL << 1) << PRBAR_ADJUST_SHIFT) +#define PRBAR_AP_RO_EL2 ((2UL << 1) << PRBAR_ADJUST_SHIFT) +#define PRBAR_AP_RO_EL1_EL2 ((3UL << 1) << PRBAR_ADJUST_SHIFT) +#define PRCBR_SH_OFF (3 + PRBAR_ADJUST_SHIFT) +#define PRBAR_SH_NS (0 << PRCBR_SH_OFF) +#define PRBAR_SH_OS (2UL << PRCBR_SH_OFF) +#define PRBAR_SH_IS (3UL << PRCBR_SH_OFF) +#define PRBAR_FLAGS_MSK (0x3FUL) +#define PRBAR_FLAGS(PRBAR) ((PRBAR) & PRBAR_FLAGS_MSK) +#define PRBAR_BASE_MSK (~PRBAR_FLAGS_MSK) +#define PRBAR_BASE(BASE) ((BASE) & PRBAR_BASE_MSK) +#define PRBAR_MEM_ATTR_FLAGS_MSK ((0x18UL) << PRBAR_ADJUST_SHIFT) +#define PRBAR_PERMS_FLAGS_MSK ((1 << PRCBR_SH_OFF) - 1) + +#define PRLAR_EN (0x1UL) +#define PRLAR_ATTR_OFF (1) +#define PRLAR_ATTR_MSK (0x3UL << PRLAR_ATTR_OFF) +#define PRLAR_ATTR(N) (((N) << PRLAR_ATTR_OFF) & PRLAR_ATTR_MSK) +#define PRLAR_FLAGS_MSK (0x3FUL) +#define PRLAR_FLAGS(PRLAR) ((PRLAR) & PRLAR_FLAGS_MSK) +#define PRLAR_LIMIT_MSK (~PRBAR_FLAGS_MSK) +#define PRLAR_LIMIT(LIMIT) (((LIMIT) & PRLAR_LIMIT_MSK) | 0x3FUL) +#define PRLAR_MEM_ATTR_FLAGS_MSK (0x0EUL) #ifndef __ASSEMBLER__ -#define SYSREG_ENC_ADDR(Op0, Op1, CRn, CRm, Op2) \ - ((((Op0) & 0x3) << 20) | \ - (((Op2) & 0x7) << 17) | \ - (((Op1) & 0x7) << 14) | \ - (((CRn) & 0xf) << 10) | \ - (((CRm) & 0xf) << 1)) +#define SYSREG_ENC_ADDR(Op0, Op1, CRn, CRm, Op2) \ + ((((Op0) & 0x3) << 20) | (((Op2) & 0x7) << 17) | (((Op1) & 0x7) << 14) | \ + (((CRn) & 0xf) << 10) | (((CRm) & 0xf) << 1)) #endif /* |__ASSEMBLER__ */ diff --git a/src/arch/armv8/inc/arch/vgic.h b/src/arch/armv8/inc/arch/vgic.h index e8680ff26..cc623866c 100644 --- a/src/arch/armv8/inc/arch/vgic.h +++ b/src/arch/armv8/inc/arch/vgic.h @@ -19,7 +19,7 @@ struct vgic_dscrp; */ struct vgic_int { node_t node; - struct vcpu *owner; + struct vcpu* owner; #if (GIC_VERSION != GICV2) unsigned long route; union { @@ -48,7 +48,7 @@ struct vgic_int { }; struct vgicd { - struct vgic_int *interrupts; + struct vgic_int* interrupts; spinlock_t lock; size_t int_num; uint32_t CTLR; @@ -71,11 +71,11 @@ struct vgic_priv { struct vgic_int interrupts[GIC_CPU_PRIV]; }; -void vgic_init(struct vm *vm, const struct vgic_dscrp *vgic_dscrp); -void vgic_cpu_init(struct vcpu *vcpu); -void vgic_set_hw(struct vm *vm, irqid_t id); -void vgic_inject(struct vcpu *vcpu, irqid_t id, vcpuid_t source); -void vgic_inject_hw(struct vcpu *vcpu, irqid_t id); +void vgic_init(struct vm* vm, const struct vgic_dscrp* vgic_dscrp); +void vgic_cpu_init(struct vcpu* vcpu); +void vgic_set_hw(struct vm* vm, irqid_t id); +void vgic_inject(struct vcpu* vcpu, irqid_t id, vcpuid_t source); +void vgic_inject_hw(struct vcpu* vcpu, irqid_t id); /* VGIC INTERNALS */ @@ -94,39 +94,36 @@ enum vgic_reg_handler_info_id { }; struct vgic_reg_handler_info { - void (*reg_access)(struct emul_access *, struct vgic_reg_handler_info *, - bool gicr_accces, cpuid_t vgicr_id); + void (*reg_access)(struct emul_access*, struct vgic_reg_handler_info*, bool gicr_accces, + cpuid_t vgicr_id); size_t alignment; size_t regid; vaddr_t regroup_base; size_t field_width; - unsigned long (*read_field)(struct vcpu *, struct vgic_int *); - bool (*update_field)(struct vcpu *, struct vgic_int *, unsigned long data); - void (*update_hw)(struct vcpu *, struct vgic_int *); + unsigned long (*read_field)(struct vcpu*, struct vgic_int*); + bool (*update_field)(struct vcpu*, struct vgic_int*, unsigned long data); + void (*update_hw)(struct vcpu*, struct vgic_int*); }; /* interface for version agnostic vgic */ -bool vgicd_emul_handler(struct 
emul_access *); -bool vgic_check_reg_alignment(struct emul_access *acc, - struct vgic_reg_handler_info *handlers); -bool vgic_add_lr(struct vcpu *vcpu, struct vgic_int *interrupt); -bool vgic_remove_lr(struct vcpu *vcpu, struct vgic_int *interrupt); -bool vgic_get_ownership(struct vcpu *vcpu, struct vgic_int *interrupt); -void vgic_yield_ownership(struct vcpu *vcpu, struct vgic_int *interrupt); -void vgic_emul_generic_access(struct emul_access *, struct vgic_reg_handler_info *, - bool, vcpuid_t); -void vgic_send_sgi_msg(struct vcpu *vcpu, cpumap_t pcpu_mask, irqid_t int_id); -size_t vgic_get_itln(const struct vgic_dscrp *vgic_dscrp); -struct vgic_int *vgic_get_int(struct vcpu *vcpu, irqid_t int_id, - vcpuid_t vgicr_id); -void vgic_int_set_field(struct vgic_reg_handler_info *handlers, struct vcpu *vcpu, - struct vgic_int *interrupt, unsigned long data); -void vgic_emul_razwi(struct emul_access *acc, struct vgic_reg_handler_info *handlers, - bool gicr_access, cpuid_t vgicr_id); +bool vgicd_emul_handler(struct emul_access*); +bool vgic_check_reg_alignment(struct emul_access* acc, struct vgic_reg_handler_info* handlers); +bool vgic_add_lr(struct vcpu* vcpu, struct vgic_int* interrupt); +bool vgic_remove_lr(struct vcpu* vcpu, struct vgic_int* interrupt); +bool vgic_get_ownership(struct vcpu* vcpu, struct vgic_int* interrupt); +void vgic_yield_ownership(struct vcpu* vcpu, struct vgic_int* interrupt); +void vgic_emul_generic_access(struct emul_access*, struct vgic_reg_handler_info*, bool, vcpuid_t); +void vgic_send_sgi_msg(struct vcpu* vcpu, cpumap_t pcpu_mask, irqid_t int_id); +size_t vgic_get_itln(const struct vgic_dscrp* vgic_dscrp); +struct vgic_int* vgic_get_int(struct vcpu* vcpu, irqid_t int_id, vcpuid_t vgicr_id); +void vgic_int_set_field(struct vgic_reg_handler_info* handlers, struct vcpu* vcpu, + struct vgic_int* interrupt, unsigned long data); +void vgic_emul_razwi(struct emul_access* acc, struct vgic_reg_handler_info* handlers, + bool gicr_access, cpuid_t vgicr_id); /* interface for version specific vgic */ -bool vgic_int_has_other_target(struct vcpu *vcpu, struct vgic_int *interrupt); -uint8_t vgic_int_ptarget_mask(struct vcpu *vcpu, struct vgic_int *interrupt); -void vgic_inject_sgi(struct vcpu *vcpu, struct vgic_int *interrupt, vcpuid_t source); +bool vgic_int_has_other_target(struct vcpu* vcpu, struct vgic_int* interrupt); +uint8_t vgic_int_ptarget_mask(struct vcpu* vcpu, struct vgic_int* interrupt); +void vgic_inject_sgi(struct vcpu* vcpu, struct vgic_int* interrupt, vcpuid_t source); #endif /* __VGIC_H__ */ diff --git a/src/arch/armv8/inc/arch/vgicv2.h b/src/arch/armv8/inc/arch/vgicv2.h index 2d05f428a..8f1a22009 100644 --- a/src/arch/armv8/inc/arch/vgicv2.h +++ b/src/arch/armv8/inc/arch/vgicv2.h @@ -9,7 +9,7 @@ #include #include -static inline bool vgic_int_vcpu_is_target(struct vcpu *vcpu, struct vgic_int *interrupt) +static inline bool vgic_int_vcpu_is_target(struct vcpu* vcpu, struct vgic_int* interrupt) { bool priv = gic_is_priv(interrupt->id); bool target = interrupt->targets & (1 << vcpu->phys_id); diff --git a/src/arch/armv8/inc/arch/vgicv3.h b/src/arch/armv8/inc/arch/vgicv3.h index 7dd3e8dff..39b0786e6 100644 --- a/src/arch/armv8/inc/arch/vgicv3.h +++ b/src/arch/armv8/inc/arch/vgicv3.h @@ -9,12 +9,12 @@ #include #include -static inline bool vgic_broadcast(struct vcpu *vcpu, struct vgic_int *interrupt) +static inline bool vgic_broadcast(struct vcpu* vcpu, struct vgic_int* interrupt) { return (interrupt->route & GICD_IROUTER_IRM_BIT); } -static inline bool 
vgic_int_vcpu_is_target(struct vcpu *vcpu, struct vgic_int *interrupt) +static inline bool vgic_int_vcpu_is_target(struct vcpu* vcpu, struct vgic_int* interrupt) { bool priv = gic_is_priv(interrupt->id); bool local = priv && (interrupt->phys.redist == vcpu->phys_id); @@ -24,5 +24,4 @@ static inline bool vgic_int_vcpu_is_target(struct vcpu *vcpu, struct vgic_int *i return local || routed_here || any; } - #endif /* __VGICV3_H__ */ diff --git a/src/arch/armv8/inc/arch/vm.h b/src/arch/armv8/inc/arch/vm.h index 4a636351a..bbd18b7af 100644 --- a/src/arch/armv8/inc/arch/vm.h +++ b/src/arch/armv8/inc/arch/vm.h @@ -10,7 +10,7 @@ #include #include #include -#ifdef MEM_PROT_MMU +#ifdef MEM_PROT_MMU #include #endif #include @@ -23,14 +23,14 @@ struct arch_vm_platform { size_t interrupt_num; } gic; -#ifdef MEM_PROT_MMU +#ifdef MEM_PROT_MMU struct { streamid_t global_mask; size_t group_num; struct smmu_group { streamid_t mask; streamid_t id; - } *groups; + }* groups; } smmu; #endif }; diff --git a/src/arch/armv8/interrupts.c b/src/arch/armv8/interrupts.c index 68f95aca6..13a614310 100644 --- a/src/arch/armv8/interrupts.c +++ b/src/arch/armv8/interrupts.c @@ -25,7 +25,9 @@ void interrupts_arch_init() void interrupts_arch_ipi_send(cpuid_t target_cpu, irqid_t ipi_id) { - if (ipi_id < GIC_MAX_SGIS) gic_send_sgi(target_cpu, ipi_id); + if (ipi_id < GIC_MAX_SGIS) { + gic_send_sgi(target_cpu, ipi_id); + } } void interrupts_arch_enable(irqid_t int_id, bool en) @@ -55,7 +57,7 @@ void interrupts_arch_clear(irqid_t int_id) gic_set_pend(int_id, false); } -void interrupts_arch_vm_assign(struct vm *vm, irqid_t id) +void interrupts_arch_vm_assign(struct vm* vm, irqid_t id) { vgic_set_hw(vm, id); } diff --git a/src/arch/armv8/platform.c b/src/arch/armv8/platform.c index 130fc7dc3..0d751da09 100644 --- a/src/arch/armv8/platform.c +++ b/src/arch/armv8/platform.c @@ -6,11 +6,10 @@ #include #include -unsigned long platform_arch_cpuid_to_mpidr(const struct platform* plat, - cpuid_t cpuid) +unsigned long platform_arch_cpuid_to_mpidr(const struct platform* plat, cpuid_t cpuid) { if (cpuid > plat->cpu_num) { - return ~(~MPIDR_RES1 & MPIDR_RES0_MSK); //return an invlid mpidr by inverting res bits + return ~(~MPIDR_RES1 & MPIDR_RES0_MSK); // return an invalid mpidr by inverting res bits } unsigned long mpidr = 0; diff --git a/src/arch/armv8/psci.c b/src/arch/armv8/psci.c index 08a3cfe46..ccb04c4ea 100644 --- a/src/arch/armv8/psci.c +++ b/src/arch/armv8/psci.c @@ -12,21 +12,21 @@ #include #include -enum {PSCI_MSG_ON}; +enum { PSCI_MSG_ON }; /* -------------------------------- - SMC Trapping + SMC Trapping --------------------------------- */ -void psci_wake_from_off() { - - if(cpu()->vcpu == NULL){ +void psci_wake_from_off() +{ + if (cpu()->vcpu == NULL) { return; } /* update vcpu()->psci_ctx */ spin_lock(&cpu()->vcpu->arch.psci_ctx.lock); - if(cpu()->vcpu->arch.psci_ctx.state == ON_PENDING){ + if (cpu()->vcpu->arch.psci_ctx.state == ON_PENDING) { vcpu_arch_reset(cpu()->vcpu, cpu()->vcpu->arch.psci_ctx.entrypoint); cpu()->vcpu->arch.psci_ctx.state = ON; vcpu_writereg(cpu()->vcpu, 0, cpu()->vcpu->arch.psci_ctx.context_id); @@ -34,45 +34,42 @@ void psci_wake_from_off() { spin_unlock(&cpu()->vcpu->arch.psci_ctx.lock); } -void psci_cpumsg_handler(uint32_t event, uint64_t data){ - - switch(event){ +void psci_cpumsg_handler(uint32_t event, uint64_t data) +{ + switch (event) { case PSCI_MSG_ON: psci_wake_from_off(); - break; + break; } } CPU_MSG_HANDLER(psci_cpumsg_handler, PSCI_CPUMSG_ID); -int32_t psci_cpu_suspend_handler(uint32_t
power_state, unsigned long entrypoint, - unsigned long context_id) +int32_t psci_cpu_suspend_handler(uint32_t power_state, unsigned long entrypoint, + unsigned long context_id) { /** - * !! Ignoring the rest of the requested powerstate for now. - * This might be a problem howwver since powerlevel and stateid are - * implementation defined. - */ + * !! Ignoring the rest of the requested powerstate for now. This might be a problem however + * since powerlevel and stateid are implementation defined. + */ uint32_t state_type = power_state & PSCI_STATE_TYPE_BIT; int32_t ret; - if(state_type){ - //PSCI_STATE_TYPE_POWERDOWN: + if (state_type) { + // PSCI_STATE_TYPE_POWERDOWN: spin_lock(&cpu()->vcpu->arch.psci_ctx.lock); cpu()->vcpu->arch.psci_ctx.entrypoint = entrypoint; cpu()->vcpu->arch.psci_ctx.context_id = context_id; spin_unlock(&cpu()->vcpu->arch.psci_ctx.lock); ret = psci_power_down(PSCI_WAKEUP_POWERDOWN); } else { - //PSCI_STATE_TYPE_STANDBY: + // PSCI_STATE_TYPE_STANDBY: /** - * TODO: ideally we would emmit a standby request to PSCI - * (currently, ATF), but when we do, we do not wake up on interrupts - * on the current development target zcu104. - * We should understand why. To circunvent this, we directly emmit a - * wfi + * TODO: ideally we would emit a standby request to PSCI (currently, ATF), but when we + * do, we do not wake up on interrupts on the current development target zcu104. We should + * understand why. To circumvent this, we directly emit a wfi. */ - //ret = psci_standby(); + // ret = psci_standby(); asm volatile("wfi\n\r"); ret = PSCI_E_SUCCESS; } @@ -80,13 +77,11 @@ int32_t psci_cpu_suspend_handler(uint32_t power_state, unsigned long entrypoint, return ret; } - int32_t psci_cpu_off_handler(void) { /** - * Right now we only support one vcpu por cpu, so passthrough the request - * directly to the + * Right now we only support one vcpu per cpu, so pass the request through directly to the + * monitor psci implementation. Later, another vcpu will call cpu_on on this vcpu. */ spin_lock(&cpu()->vcpu->arch.psci_ctx.lock); @@ -103,26 +98,25 @@ int32_t psci_cpu_off_handler(void) } int32_t psci_cpu_on_handler(unsigned long target_cpu, unsigned long entrypoint, - unsigned long context_id) + unsigned long context_id) { int32_t ret; struct vm* vm = cpu()->vcpu->vm; struct vcpu* target_vcpu = vm_get_vcpu_by_mpidr(vm, target_cpu); - if (target_vcpu != NULL){ - + if (target_vcpu != NULL) { bool already_on = true; spin_lock(&cpu()->vcpu->arch.psci_ctx.lock); - if(target_vcpu->arch.psci_ctx.state == OFF){ + if (target_vcpu->arch.psci_ctx.state == OFF) { target_vcpu->arch.psci_ctx.state = ON_PENDING; target_vcpu->arch.psci_ctx.entrypoint = entrypoint; target_vcpu->arch.psci_ctx.context_id = context_id; fence_sync_write(); already_on = false; - } + } spin_unlock(&cpu()->vcpu->arch.psci_ctx.lock); - if(already_on){ + if (already_on) { return PSCI_E_ALREADY_ON; } @@ -130,7 +124,7 @@ int32_t psci_cpu_on_handler(unsigned long target_cpu, unsigned long entrypoint, if (pcpuid == INVALID_CPUID) { ret = PSCI_E_INVALID_PARAMS; } else { - struct cpu_msg msg = {PSCI_CPUMSG_ID, PSCI_MSG_ON}; + struct cpu_msg msg = { PSCI_CPUMSG_ID, PSCI_MSG_ON }; cpu_send_msg(pcpuid, &msg); ret = PSCI_E_SUCCESS; } @@ -142,19 +136,16 @@ int32_t psci_cpu_on_handler(unsigned long target_cpu, unsigned long entrypoint, return ret; } - -int32_t psci_affinity_info_handler(unsigned long target_affinity, - uint32_t lowest_affinity_level) +int32_t psci_affinity_info_handler(unsigned long target_affinity, uint32_t lowest_affinity_level) { - /* return ON, if at least one core in the affinity instance: has been - enabled with a call to CPU_ON, and that core has not called CPU_OFF */ + /* return ON if at least one core in the affinity instance has been enabled with a call to + CPU_ON, and that core has not called CPU_OFF */ - /* return off if all of the cores in the affinity instance have called - CPU_OFF and each of these calls has been processed by the PSCI - implementation. */ + /* return OFF if all of the cores in the affinity instance have called CPU_OFF and each of + these calls has been processed by the PSCI implementation.
*/ - /* return ON_PENDING if at least one core in the affinity instance is in - the ON_PENDING state */ + /* return ON_PENDING if at least one core in the affinity instance is in the ON_PENDING state + */ /** * TODO @@ -163,8 +154,8 @@ int32_t psci_affinity_info_handler(unsigned long target_affinity, return 0; } -int32_t psci_features_handler(uint32_t feature_id){ - +int32_t psci_features_handler(uint32_t feature_id) +{ int32_t ret = PSCI_E_NOT_SUPPORTED; switch (feature_id) { @@ -179,16 +170,14 @@ int32_t psci_features_handler(uint32_t feature_id){ case PSCI_FEATURES: ret = PSCI_E_SUCCESS; break; - } + } return ret; } - -int32_t psci_smc_handler(uint32_t smc_fid, unsigned long x1, unsigned long x2, - unsigned long x3) +int32_t psci_smc_handler(uint32_t smc_fid, unsigned long x1, unsigned long x2, unsigned long x3) { - int32_t ret = PSCI_E_NOT_SUPPORTED; + int32_t ret = PSCI_E_NOT_SUPPORTED; switch (smc_fid) { case PSCI_VERSION: diff --git a/src/arch/armv8/vgic.c b/src/arch/armv8/vgic.c index 0db52a425..b306a06b0 100644 --- a/src/arch/armv8/vgic.c +++ b/src/arch/armv8/vgic.c @@ -11,7 +11,7 @@ #elif (GIC_VERSION == GICV3) #include #include -#else +#else #error "unknown GIV version " GIC_VERSION #endif @@ -25,31 +25,29 @@ enum VGIC_EVENTS { VGIC_UPDATE_ENABLE, VGIC_ROUTE, VGIC_INJECT, VGIC_SET_REG }; extern volatile const size_t VGIC_IPI_ID; -#define GICD_IS_REG(REG, offset) \ +#define GICD_IS_REG(REG, offset) \ (((offset) >= offsetof(struct gicd_hw, REG)) && \ - (offset) < (offsetof(struct gicd_hw, REG) + sizeof(gicd->REG))) + (offset) < (offsetof(struct gicd_hw, REG) + sizeof(gicd->REG))) #define GICD_REG_GROUP(REG) ((offsetof(struct gicd_hw, REG) & 0xff80) >> 7) -#define GICD_REG_MASK(ADDR) ((ADDR)&(GIC_VERSION == GICV2 ? 0xfffUL : 0xffffUL)) -#define GICD_REG_IND(REG) (offsetof(struct gicd_hw, REG) & 0x7f) +#define GICD_REG_MASK(ADDR) ((ADDR) & (GIC_VERSION == GICV2 ? 0xfffUL : 0xffffUL)) +#define GICD_REG_IND(REG) (offsetof(struct gicd_hw, REG) & 0x7f) -#define VGIC_MSG_DATA(VM_ID, VGICRID, INT_ID, REG, VAL) \ - (((uint64_t)(VM_ID) << 48) | (((uint64_t)(VGICRID)&0xffff) << 32) | \ - (((INT_ID)&0x7fff) << 16) | (((REG)&0xff) << 8) | ((VAL)&0xff)) -#define VGIC_MSG_VM(DATA) ((DATA) >> 48) +#define VGIC_MSG_DATA(VM_ID, VGICRID, INT_ID, REG, VAL) \ + (((uint64_t)(VM_ID) << 48) | (((uint64_t)(VGICRID) & 0xffff) << 32) | \ + (((INT_ID) & 0x7fff) << 16) | (((REG) & 0xff) << 8) | ((VAL) & 0xff)) +#define VGIC_MSG_VM(DATA) ((DATA) >> 48) #define VGIC_MSG_VGICRID(DATA) (((DATA) >> 32) & 0xffff) -#define VGIC_MSG_INTID(DATA) (((DATA) >> 16) & 0x7fff) -#define VGIC_MSG_REG(DATA) (((DATA) >> 8) & 0xff) -#define VGIC_MSG_VAL(DATA) ((DATA)&0xff) +#define VGIC_MSG_INTID(DATA) (((DATA) >> 16) & 0x7fff) +#define VGIC_MSG_REG(DATA) (((DATA) >> 8) & 0xff) +#define VGIC_MSG_VAL(DATA) ((DATA) & 0xff) void vgic_ipi_handler(uint32_t event, uint64_t data); CPU_MSG_HANDLER(vgic_ipi_handler, VGIC_IPI_ID); -inline struct vgic_int *vgic_get_int(struct vcpu *vcpu, irqid_t int_id, - vcpuid_t vgicr_id) +inline struct vgic_int* vgic_get_int(struct vcpu* vcpu, irqid_t int_id, vcpuid_t vgicr_id) { if (int_id < GIC_CPU_PRIV) { - struct vcpu *target_vcpu = - vgicr_id == vcpu->id ? vcpu : vm_get_vcpu(vcpu->vm, vgicr_id); + struct vcpu* target_vcpu = vgicr_id == vcpu->id ? 
vcpu : vm_get_vcpu(vcpu->vm, vgicr_id); return &target_vcpu->arch.vgic_priv.interrupts[int_id]; } else if (int_id < vcpu->vm->arch.vgicd.int_num) { return &vcpu->vm->arch.vgicd.interrupts[int_id - GIC_CPU_PRIV]; @@ -58,28 +56,29 @@ inline struct vgic_int *vgic_get_int(struct vcpu *vcpu, irqid_t int_id, return NULL; } -static inline bool vgic_int_is_hw(struct vgic_int *interrupt) +static inline bool vgic_int_is_hw(struct vgic_int* interrupt) { return !(interrupt->id < GIC_MAX_SGIS) && interrupt->hw; } -static inline int64_t gich_get_lr(struct vgic_int *interrupt, unsigned long *lr) +static inline int64_t gich_get_lr(struct vgic_int* interrupt, unsigned long* lr) { if (!interrupt->in_lr || interrupt->owner->phys_id != cpu()->id) { return -1; } unsigned long lr_val = gich_read_lr(interrupt->lr); - if ((GICH_LR_VID(lr_val) == interrupt->id) && - (GICH_LR_STATE(lr_val) != INV)) { - if (lr != NULL) *lr = lr_val; + if ((GICH_LR_VID(lr_val) == interrupt->id) && (GICH_LR_STATE(lr_val) != INV)) { + if (lr != NULL) { + *lr = lr_val; + } return interrupt->lr; } return -1; } -static inline uint8_t vgic_get_state(struct vgic_int *interrupt) +static inline uint8_t vgic_get_state(struct vgic_int* interrupt) { uint8_t state = 0; @@ -99,7 +98,7 @@ static inline uint8_t vgic_get_state(struct vgic_int *interrupt) return state; } -bool vgic_get_ownership(struct vcpu *vcpu, struct vgic_int *interrupt) +bool vgic_get_ownership(struct vcpu* vcpu, struct vgic_int* interrupt) { bool ret = false; @@ -113,27 +112,28 @@ bool vgic_get_ownership(struct vcpu *vcpu, struct vgic_int *interrupt) return ret; } -bool vgic_owns(struct vcpu *vcpu, struct vgic_int *interrupt) +bool vgic_owns(struct vcpu* vcpu, struct vgic_int* interrupt) { return interrupt->owner == vcpu; } -void vgic_yield_ownership(struct vcpu *vcpu, struct vgic_int *interrupt) +void vgic_yield_ownership(struct vcpu* vcpu, struct vgic_int* interrupt) { - if ((GIC_VERSION == GICV2 && gic_is_priv(interrupt->id)) || - !vgic_owns(vcpu, interrupt) || interrupt->in_lr || - (vgic_get_state(interrupt) & ACT)) { + if ((GIC_VERSION == GICV2 && gic_is_priv(interrupt->id)) || !vgic_owns(vcpu, interrupt) || + interrupt->in_lr || (vgic_get_state(interrupt) & ACT)) { return; } interrupt->owner = NULL; } -void vgic_send_sgi_msg(struct vcpu *vcpu, cpumap_t pcpu_mask, irqid_t int_id) +void vgic_send_sgi_msg(struct vcpu* vcpu, cpumap_t pcpu_mask, irqid_t int_id) { struct cpu_msg msg = { - VGIC_IPI_ID, VGIC_INJECT, - VGIC_MSG_DATA(cpu()->vcpu->vm->id, 0, int_id, 0, cpu()->vcpu->id)}; + VGIC_IPI_ID, + VGIC_INJECT, + VGIC_MSG_DATA(cpu()->vcpu->vm->id, 0, int_id, 0, cpu()->vcpu->id), + }; for (size_t i = 0; i < platform.cpu_num; i++) { if (pcpu_mask & (1ull << i)) { @@ -142,7 +142,7 @@ void vgic_send_sgi_msg(struct vcpu *vcpu, cpumap_t pcpu_mask, irqid_t int_id) } } -void vgic_route(struct vcpu *vcpu, struct vgic_int *interrupt) +void vgic_route(struct vcpu* vcpu, struct vgic_int* interrupt) { if ((interrupt->state == INV) || !interrupt->enabled) { return; @@ -154,11 +154,12 @@ void vgic_route(struct vcpu *vcpu, struct vgic_int *interrupt) if (!interrupt->in_lr && vgic_int_has_other_target(vcpu, interrupt)) { struct cpu_msg msg = { - VGIC_IPI_ID, VGIC_ROUTE, - VGIC_MSG_DATA(vcpu->vm->id, vcpu->id, interrupt->id, 0, 0)}; + VGIC_IPI_ID, + VGIC_ROUTE, + VGIC_MSG_DATA(vcpu->vm->id, vcpu->id, interrupt->id, 0, 0), + }; vgic_yield_ownership(vcpu, interrupt); - cpumap_t trgtlist = - vgic_int_ptarget_mask(vcpu, interrupt) & ~(1ull << vcpu->phys_id); + cpumap_t trgtlist = 
vgic_int_ptarget_mask(vcpu, interrupt) & ~(1ull << vcpu->phys_id); for (size_t i = 0; i < platform.cpu_num; i++) { if (trgtlist & (1ull << i)) { cpu_send_msg(i, &msg); @@ -167,13 +168,12 @@ void vgic_route(struct vcpu *vcpu, struct vgic_int *interrupt) } } -static inline void vgic_write_lr(struct vcpu *vcpu, struct vgic_int *interrupt, - size_t lr_ind) +static inline void vgic_write_lr(struct vcpu* vcpu, struct vgic_int* interrupt, size_t lr_ind) { irqid_t prev_int_id = vcpu->arch.vgic_priv.curr_lrs[lr_ind]; if ((prev_int_id != interrupt->id) && !gic_is_priv(prev_int_id)) { - struct vgic_int *prev_interrupt = vgic_get_int(vcpu, prev_int_id, vcpu->id); + struct vgic_int* prev_interrupt = vgic_get_int(vcpu, prev_int_id, vcpu->id); if (prev_interrupt != NULL) { spin_lock(&prev_interrupt->lock); if (vgic_owns(vcpu, prev_interrupt) && prev_interrupt->in_lr && @@ -190,11 +190,9 @@ static inline void vgic_write_lr(struct vcpu *vcpu, struct vgic_int *interrupt, gic_lr_t lr = ((interrupt->id << GICH_LR_VID_OFF) & GICH_LR_VID_MSK); #if (GIC_VERSION == GICV2) - lr |= (((gic_lr_t)interrupt->prio >> 3) << GICH_LR_PRIO_OFF) & - GICH_LR_PRIO_MSK; + lr |= (((gic_lr_t)interrupt->prio >> 3) << GICH_LR_PRIO_OFF) & GICH_LR_PRIO_MSK; #else - lr |= (((gic_lr_t)interrupt->prio << GICH_LR_PRIO_OFF) & GICH_LR_PRIO_MSK) | - GICH_LR_GRP_BIT; + lr |= (((gic_lr_t)interrupt->prio << GICH_LR_PRIO_OFF) & GICH_LR_PRIO_MSK) | GICH_LR_GRP_BIT; #endif if (vgic_int_is_hw(interrupt)) { @@ -244,7 +242,7 @@ static inline void vgic_write_lr(struct vcpu *vcpu, struct vgic_int *interrupt, gich_write_lr(lr_ind, lr); } -bool vgic_remove_lr(struct vcpu *vcpu, struct vgic_int *interrupt) +bool vgic_remove_lr(struct vcpu* vcpu, struct vgic_int* interrupt) { bool ret = false; @@ -283,9 +281,10 @@ bool vgic_remove_lr(struct vcpu *vcpu, struct vgic_int *interrupt) return ret; } -void vgic_add_spilled(struct vcpu *vcpu, struct vgic_int* interrupt) { +void vgic_add_spilled(struct vcpu* vcpu, struct vgic_int* interrupt) +{ spin_lock(&vcpu->vm->arch.vgic_spilled_lock); - struct list *spilled_list = NULL; + struct list* spilled_list = NULL; if (gic_is_priv(interrupt->id)) { spilled_list = &vcpu->arch.vgic_spilled; } else { @@ -296,9 +295,10 @@ void vgic_add_spilled(struct vcpu *vcpu, struct vgic_int* interrupt) { gich_set_hcr(gich_get_hcr() | GICH_HCR_NPIE_BIT); } -void vgic_spill_lr(struct vcpu *vcpu, unsigned lr_ind) { +void vgic_spill_lr(struct vcpu* vcpu, unsigned lr_ind) +{ unsigned long lr = gich_read_lr(lr_ind); - struct vgic_int *spilled_int = vgic_get_int(vcpu, GICH_LR_VID(lr), vcpu->id); + struct vgic_int* spilled_int = vgic_get_int(vcpu, GICH_LR_VID(lr), vcpu->id); if (spilled_int != NULL) { spin_lock(&spilled_int->lock); @@ -309,7 +309,7 @@ void vgic_spill_lr(struct vcpu *vcpu, unsigned lr_ind) { } } -bool vgic_add_lr(struct vcpu *vcpu, struct vgic_int *interrupt) +bool vgic_add_lr(struct vcpu* vcpu, struct vgic_int* interrupt) { bool ret = false; @@ -342,16 +342,14 @@ bool vgic_add_lr(struct vcpu *vcpu, struct vgic_int *interrupt) unsigned lr_state = (lr & GICH_LR_STATE_MSK); if (lr_state & GICH_LR_STATE_ACT) { - if (lr_prio > min_prio_act || - (lr_prio == min_prio_act && lr_id > min_id_act)) { + if (lr_prio > min_prio_act || (lr_prio == min_prio_act && lr_id > min_id_act)) { min_id_act = lr_id; min_prio_act = lr_prio; act_ind = i; } act_found++; } else if (lr_state & GICH_LR_STATE_PND) { - if (lr_prio > min_prio_pend || - (lr_prio == min_prio_pend && lr_id > min_id_pend)) { + if (lr_prio > min_prio_pend || (lr_prio == 
min_prio_pend && lr_id > min_id_pend)) { min_id_pend = lr_id; min_prio_pend = lr_prio; pend_ind = i; @@ -381,10 +379,9 @@ bool vgic_add_lr(struct vcpu *vcpu, struct vgic_int *interrupt) return ret; } -#define VGIC_ENABLE_MASK \ - ((GIC_VERSION == GICV2) ? GICD_CTLR_EN_BIT : GICD_CTLR_ENA_BIT) +#define VGIC_ENABLE_MASK ((GIC_VERSION == GICV2) ? GICD_CTLR_EN_BIT : GICD_CTLR_ENA_BIT) -static inline void vgic_update_enable(struct vcpu *vcpu) +static inline void vgic_update_enable(struct vcpu* vcpu) { if (cpu()->vcpu->vm->arch.vgicd.CTLR & VGIC_ENABLE_MASK) { gich_set_hcr(gich_get_hcr() | GICH_HCR_En_BIT); @@ -393,29 +390,28 @@ static inline void vgic_update_enable(struct vcpu *vcpu) } } -void vgicd_emul_misc_access(struct emul_access *acc, - struct vgic_reg_handler_info *handlers, - bool gicr_access, cpuid_t vgicr_id) +void vgicd_emul_misc_access(struct emul_access* acc, struct vgic_reg_handler_info* handlers, + bool gicr_access, cpuid_t vgicr_id) { - struct vgicd *vgicd = &cpu()->vcpu->vm->arch.vgicd; + struct vgicd* vgicd = &cpu()->vcpu->vm->arch.vgicd; unsigned reg = acc->addr & 0x7F; switch (reg) { case GICD_REG_IND(CTLR): if (acc->write) { uint32_t prev_ctrl = vgicd->CTLR; - vgicd->CTLR = - vcpu_readreg(cpu()->vcpu, acc->reg) & VGIC_ENABLE_MASK; + vgicd->CTLR = vcpu_readreg(cpu()->vcpu, acc->reg) & VGIC_ENABLE_MASK; if (prev_ctrl ^ vgicd->CTLR) { vgic_update_enable(cpu()->vcpu); struct cpu_msg msg = { - VGIC_IPI_ID, VGIC_UPDATE_ENABLE, - VGIC_MSG_DATA(cpu()->vcpu->vm->id, 0, 0, 0, 0)}; + VGIC_IPI_ID, + VGIC_UPDATE_ENABLE, + VGIC_MSG_DATA(cpu()->vcpu->vm->id, 0, 0, 0, 0), + }; vm_msg_broadcast(cpu()->vcpu->vm, &msg); } } else { - vcpu_writereg(cpu()->vcpu, acc->reg, - vgicd->CTLR | GICD_CTLR_ARE_NS_BIT); + vcpu_writereg(cpu()->vcpu, acc->reg, vgicd->CTLR | GICD_CTLR_ARE_NS_BIT); } break; case GICD_REG_IND(TYPER): @@ -431,17 +427,15 @@ void vgicd_emul_misc_access(struct emul_access *acc, } } -void vgicd_emul_pidr_access(struct emul_access *acc, - struct vgic_reg_handler_info *handlers, - bool gicr_access, cpuid_t vgicr_id) +void vgicd_emul_pidr_access(struct emul_access* acc, struct vgic_reg_handler_info* handlers, + bool gicr_access, cpuid_t vgicr_id) { if (!acc->write) { - vcpu_writereg(cpu()->vcpu, acc->reg, - gicd->ID[((acc->addr & 0xff) - 0xd0) / 4]); + vcpu_writereg(cpu()->vcpu, acc->reg, gicd->ID[((acc->addr & 0xff) - 0xd0) / 4]); } } -bool vgic_int_update_enable(struct vcpu *vcpu, struct vgic_int *interrupt, bool enable) +bool vgic_int_update_enable(struct vcpu* vcpu, struct vgic_int* interrupt, bool enable) { if (GIC_VERSION == GICV2 && gic_is_sgi(interrupt->id)) { return false; @@ -455,12 +449,11 @@ bool vgic_int_update_enable(struct vcpu *vcpu, struct vgic_int *interrupt, bool } } -void vgic_int_enable_hw(struct vcpu *vcpu, struct vgic_int *interrupt) +void vgic_int_enable_hw(struct vcpu* vcpu, struct vgic_int* interrupt) { #if (GIC_VERSION != GICV2) if (gic_is_priv(interrupt->id)) { - gicr_set_enable(interrupt->id, interrupt->enabled, - interrupt->phys.redist); + gicr_set_enable(interrupt->id, interrupt->enabled, interrupt->phys.redist); } else { gicd_set_enable(interrupt->id, interrupt->enabled); } @@ -469,45 +462,48 @@ void vgic_int_enable_hw(struct vcpu *vcpu, struct vgic_int *interrupt) #endif } -bool vgic_int_clear_enable(struct vcpu *vcpu, struct vgic_int *interrupt, unsigned long data) +bool vgic_int_clear_enable(struct vcpu* vcpu, struct vgic_int* interrupt, unsigned long data) { - if (!data) + if (!data) { return false; - else + } else { return 
vgic_int_update_enable(vcpu, interrupt, false); + } } -bool vgic_int_set_enable(struct vcpu *vcpu, struct vgic_int *interrupt, unsigned long data) +bool vgic_int_set_enable(struct vcpu* vcpu, struct vgic_int* interrupt, unsigned long data) { - if (!data) + if (!data) { return false; - else + } else { return vgic_int_update_enable(vcpu, interrupt, true); + } } -unsigned long vgic_int_get_enable(struct vcpu *vcpu, struct vgic_int *interrupt) +unsigned long vgic_int_get_enable(struct vcpu* vcpu, struct vgic_int* interrupt) { return (unsigned long)interrupt->enabled; } -bool vgic_int_update_pend(struct vcpu *vcpu, struct vgic_int *interrupt, bool pend) +bool vgic_int_update_pend(struct vcpu* vcpu, struct vgic_int* interrupt, bool pend) { if (GIC_VERSION == GICV2 && gic_is_sgi(interrupt->id)) { return false; } if (pend ^ !!(interrupt->state & PEND)) { - if (pend) + if (pend) { interrupt->state |= PEND; - else + } else { interrupt->state &= ~PEND; + } return true; } else { return false; } } -void vgic_int_state_hw(struct vcpu *vcpu, struct vgic_int *interrupt) +void vgic_int_state_hw(struct vcpu* vcpu, struct vgic_int* interrupt) { uint8_t state = interrupt->state == PEND ? ACT : interrupt->state; bool pend = (state & PEND) != 0; @@ -526,74 +522,79 @@ void vgic_int_state_hw(struct vcpu *vcpu, struct vgic_int *interrupt) #endif } -bool vgic_int_clear_pend(struct vcpu *vcpu, struct vgic_int *interrupt, unsigned long data) +bool vgic_int_clear_pend(struct vcpu* vcpu, struct vgic_int* interrupt, unsigned long data) { - if (!data) + if (!data) { return false; - else + } else { return vgic_int_update_pend(vcpu, interrupt, false); + } } -bool vgic_int_set_pend(struct vcpu *vcpu, struct vgic_int *interrupt, unsigned long data) +bool vgic_int_set_pend(struct vcpu* vcpu, struct vgic_int* interrupt, unsigned long data) { - if (!data) + if (!data) { return false; - else + } else { return vgic_int_update_pend(vcpu, interrupt, true); + } } -unsigned long vgic_int_get_pend(struct vcpu *vcpu, struct vgic_int *interrupt) +unsigned long vgic_int_get_pend(struct vcpu* vcpu, struct vgic_int* interrupt) { return (interrupt->state & PEND) ? 1 : 0; } -bool vgic_int_update_act(struct vcpu *vcpu, struct vgic_int *interrupt, bool act) +bool vgic_int_update_act(struct vcpu* vcpu, struct vgic_int* interrupt, bool act) { if (act ^ !!(interrupt->state & ACT)) { - if (act) + if (act) { interrupt->state |= ACT; - else + } else { interrupt->state &= ~ACT; + } return true; } else { return false; } } -bool vgic_int_clear_act(struct vcpu *vcpu, struct vgic_int *interrupt, unsigned long data) +bool vgic_int_clear_act(struct vcpu* vcpu, struct vgic_int* interrupt, unsigned long data) { - if (!data) + if (!data) { return false; - else + } else { return vgic_int_update_act(vcpu, interrupt, false); + } } -bool vgic_int_set_act(struct vcpu *vcpu, struct vgic_int *interrupt, unsigned long data) +bool vgic_int_set_act(struct vcpu* vcpu, struct vgic_int* interrupt, unsigned long data) { - if (!data) + if (!data) { return false; - else + } else { return vgic_int_update_act(vcpu, interrupt, true); + } } -unsigned long vgic_int_get_act(struct vcpu *vcpu, struct vgic_int *interrupt) +unsigned long vgic_int_get_act(struct vcpu* vcpu, struct vgic_int* interrupt) { return (interrupt->state & ACT) ? 
1 : 0; } -bool vgic_int_set_cfg(struct vcpu *vcpu, struct vgic_int *interrupt, unsigned long cfg) +bool vgic_int_set_cfg(struct vcpu* vcpu, struct vgic_int* interrupt, unsigned long cfg) { uint8_t prev_cfg = interrupt->cfg; interrupt->cfg = (uint8_t)cfg; return prev_cfg != cfg; } -unsigned long vgic_int_get_cfg(struct vcpu *vcpu, struct vgic_int *interrupt) +unsigned long vgic_int_get_cfg(struct vcpu* vcpu, struct vgic_int* interrupt) { return (unsigned long)interrupt->cfg; } -void vgic_int_set_cfg_hw(struct vcpu *vcpu, struct vgic_int *interrupt) +void vgic_int_set_cfg_hw(struct vcpu* vcpu, struct vgic_int* interrupt) { #if (GIC_VERSION != GICV2) if (gic_is_priv(interrupt->id)) { @@ -606,20 +607,19 @@ void vgic_int_set_cfg_hw(struct vcpu *vcpu, struct vgic_int *interrupt) #endif } -bool vgic_int_set_prio(struct vcpu *vcpu, struct vgic_int *interrupt, unsigned long prio) +bool vgic_int_set_prio(struct vcpu* vcpu, struct vgic_int* interrupt, unsigned long prio) { uint8_t prev_prio = interrupt->prio; - interrupt->prio = (uint8_t)prio & - BIT_MASK(8-GICH_LR_PRIO_LEN, GICH_LR_PRIO_LEN); + interrupt->prio = (uint8_t)prio & BIT_MASK(8 - GICH_LR_PRIO_LEN, GICH_LR_PRIO_LEN); return prev_prio != prio; } -unsigned long vgic_int_get_prio(struct vcpu *vcpu, struct vgic_int *interrupt) +unsigned long vgic_int_get_prio(struct vcpu* vcpu, struct vgic_int* interrupt) { return (unsigned long)interrupt->prio; } -void vgic_int_set_prio_hw(struct vcpu *vcpu, struct vgic_int *interrupt) +void vgic_int_set_prio_hw(struct vcpu* vcpu, struct vgic_int* interrupt) { #if (GIC_VERSION != GICV2) if (gic_is_priv(interrupt->id)) { @@ -632,56 +632,56 @@ void vgic_int_set_prio_hw(struct vcpu *vcpu, struct vgic_int *interrupt) #endif } -void vgic_emul_razwi(struct emul_access *acc, struct vgic_reg_handler_info *handlers, - bool gicr_access, cpuid_t vgicr_id) +void vgic_emul_razwi(struct emul_access* acc, struct vgic_reg_handler_info* handlers, + bool gicr_access, cpuid_t vgicr_id) { - if (!acc->write) vcpu_writereg(cpu()->vcpu, acc->reg, 0); + if (!acc->write) { + vcpu_writereg(cpu()->vcpu, acc->reg, 0); + } } -void vgic_int_set_field(struct vgic_reg_handler_info *handlers, struct vcpu *vcpu, - struct vgic_int *interrupt, unsigned long data) +void vgic_int_set_field(struct vgic_reg_handler_info* handlers, struct vcpu* vcpu, + struct vgic_int* interrupt, unsigned long data) { spin_lock(&interrupt->lock); if (vgic_get_ownership(vcpu, interrupt)) { vgic_remove_lr(vcpu, interrupt); - if (handlers->update_field(vcpu, interrupt, data) && - vgic_int_is_hw(interrupt)) { + if (handlers->update_field(vcpu, interrupt, data) && vgic_int_is_hw(interrupt)) { handlers->update_hw(vcpu, interrupt); } vgic_route(vcpu, interrupt); vgic_yield_ownership(vcpu, interrupt); } else { - struct cpu_msg msg = {VGIC_IPI_ID, VGIC_SET_REG, - VGIC_MSG_DATA(vcpu->vm->id, 0, interrupt->id, - handlers->regid, data)}; + struct cpu_msg msg = { + VGIC_IPI_ID, + VGIC_SET_REG, + VGIC_MSG_DATA(vcpu->vm->id, 0, interrupt->id, handlers->regid, data), + }; cpu_send_msg(interrupt->owner->phys_id, &msg); } spin_unlock(&interrupt->lock); } -void vgic_emul_generic_access(struct emul_access *acc, - struct vgic_reg_handler_info *handlers, - bool gicr_access, cpuid_t vgicr_id) +void vgic_emul_generic_access(struct emul_access* acc, struct vgic_reg_handler_info* handlers, + bool gicr_access, cpuid_t vgicr_id) { size_t field_width = handlers->field_width; - size_t first_int = - (GICD_REG_MASK(acc->addr) - handlers->regroup_base) * 8 / field_width; + size_t first_int = 
(GICD_REG_MASK(acc->addr) - handlers->regroup_base) * 8 / field_width; unsigned long val = acc->write ? vcpu_readreg(cpu()->vcpu, acc->reg) : 0; unsigned long mask = (1ull << field_width) - 1; - bool valid_access = - (GIC_VERSION == GICV2) || !(gicr_access ^ gic_is_priv(first_int)); + bool valid_access = (GIC_VERSION == GICV2) || !(gicr_access ^ gic_is_priv(first_int)); if (valid_access) { for (size_t i = 0; i < ((acc->width * 8) / field_width); i++) { - struct vgic_int *interrupt = - vgic_get_int(cpu()->vcpu, first_int + i, vgicr_id); - if (interrupt == NULL) break; + struct vgic_int* interrupt = vgic_get_int(cpu()->vcpu, first_int + i, vgicr_id); + if (interrupt == NULL) { + break; + } if (acc->write) { unsigned long data = bit_extract(val, i * field_width, field_width); vgic_int_set_field(handlers, cpu()->vcpu, interrupt, data); } else { - val |= (handlers->read_field(cpu()->vcpu, interrupt) & mask) - << (i * field_width); + val |= (handlers->read_field(cpu()->vcpu, interrupt) & mask) << (i * field_width); } } } @@ -809,20 +809,20 @@ __attribute__((weak)) struct vgic_reg_handler_info irouter_info = { 0b0100, }; -struct vgic_reg_handler_info *reg_handler_info_table[VGIC_REG_HANDLER_ID_NUM] = - {[VGIC_ISENABLER_ID] = &isenabler_info, - [VGIC_ISPENDR_ID] = &ispendr_info, - [VGIC_ISACTIVER_ID] = &isactiver_info, - [VGIC_ICENABLER_ID] = &icenabler_info, - [VGIC_ICPENDR_ID] = &icpendr_info, - [VGIC_ICACTIVER_ID] = &iactiver_info, - [VGIC_ICFGR_ID] = &icfgr_info, - [VGIC_IROUTER_ID] = &irouter_info, - [VGIC_IPRIORITYR_ID] = &ipriorityr_info, - [VGIC_ITARGETSR_ID] = &itargetr_info}; - -struct vgic_reg_handler_info - *vgic_get_reg_handler_info(enum vgic_reg_handler_info_id id) +struct vgic_reg_handler_info* reg_handler_info_table[VGIC_REG_HANDLER_ID_NUM] = { + [VGIC_ISENABLER_ID] = &isenabler_info, + [VGIC_ISPENDR_ID] = &ispendr_info, + [VGIC_ISACTIVER_ID] = &isactiver_info, + [VGIC_ICENABLER_ID] = &icenabler_info, + [VGIC_ICPENDR_ID] = &icpendr_info, + [VGIC_ICACTIVER_ID] = &iactiver_info, + [VGIC_ICFGR_ID] = &icfgr_info, + [VGIC_IROUTER_ID] = &irouter_info, + [VGIC_IPRIORITYR_ID] = &ipriorityr_info, + [VGIC_ITARGETSR_ID] = &itargetr_info, +}; + +struct vgic_reg_handler_info* vgic_get_reg_handler_info(enum vgic_reg_handler_info_id id) { if (id < VGIC_REG_HANDLER_ID_NUM) { return reg_handler_info_table[id]; @@ -831,20 +831,18 @@ struct vgic_reg_handler_info } } -bool vgic_check_reg_alignment(struct emul_access *acc, - struct vgic_reg_handler_info *handlers) +bool vgic_check_reg_alignment(struct emul_access* acc, struct vgic_reg_handler_info* handlers) { - if (!(handlers->alignment & acc->width) || - ((acc->addr & (acc->width - 1)) != 0)) { + if (!(handlers->alignment & acc->width) || ((acc->addr & (acc->width - 1)) != 0)) { return false; } else { return true; } } -bool vgicd_emul_handler(struct emul_access *acc) +bool vgicd_emul_handler(struct emul_access* acc) { - struct vgic_reg_handler_info *handler_info = NULL; + struct vgic_reg_handler_info* handler_info = NULL; switch (GICD_REG_MASK(acc->addr) >> 7) { case GICD_REG_GROUP(CTLR): handler_info = &vgicd_misc_info; @@ -899,8 +897,9 @@ bool vgicd_emul_handler(struct emul_access *acc) } } -void vgic_inject_hw(struct vcpu* vcpu, irqid_t id) { - struct vgic_int *interrupt = vgic_get_int(vcpu, id, vcpu->id); +void vgic_inject_hw(struct vcpu* vcpu, irqid_t id) +{ + struct vgic_int* interrupt = vgic_get_int(vcpu, id, vcpu->id); spin_lock(&interrupt->lock); interrupt->owner = vcpu; interrupt->state = PEND; @@ -911,7 +910,7 @@ void vgic_inject_hw(struct 
vcpu* vcpu, irqid_t id) { void vgic_inject(struct vcpu* vcpu, irqid_t id, vcpuid_t source) { - struct vgic_int *interrupt = vgic_get_int(vcpu, id, vcpu->id); + struct vgic_int* interrupt = vgic_get_int(vcpu, id, vcpu->id); if (interrupt != NULL) { if (vgic_int_is_hw(interrupt)) { vgic_inject_hw(vcpu, id); @@ -932,8 +931,7 @@ void vgic_ipi_handler(uint32_t event, uint64_t data) if (vm_id != cpu()->vcpu->vm->id) { ERROR("received vgic3 msg target to another vcpu"); - // TODO: need to fetch vcpu from other vm if the taget vm for this - // is not active + // TODO: need to fetch vcpu from other vm if the taget vm for this is not active } switch (event) { @@ -942,8 +940,7 @@ void vgic_ipi_handler(uint32_t event, uint64_t data) } break; case VGIC_ROUTE: { - struct vgic_int *interrupt = - vgic_get_int(cpu()->vcpu, int_id, cpu()->vcpu->id); + struct vgic_int* interrupt = vgic_get_int(cpu()->vcpu, int_id, cpu()->vcpu->id); if (interrupt != NULL) { spin_lock(&interrupt->lock); if (vgic_get_ownership(cpu()->vcpu, interrupt)) { @@ -962,9 +959,8 @@ void vgic_ipi_handler(uint32_t event, uint64_t data) case VGIC_SET_REG: { uint64_t reg_id = VGIC_MSG_REG(data); - struct vgic_reg_handler_info *handlers = - vgic_get_reg_handler_info(reg_id); - struct vgic_int *interrupt = vgic_get_int(cpu()->vcpu, int_id, vgicr_id); + struct vgic_reg_handler_info* handlers = vgic_get_reg_handler_info(reg_id); + struct vgic_int* interrupt = vgic_get_int(cpu()->vcpu, int_id, vgicr_id); if (handlers != NULL && interrupt != NULL) { vgic_int_set_field(handlers, cpu()->vcpu, interrupt, val); } @@ -975,20 +971,21 @@ void vgic_ipi_handler(uint32_t event, uint64_t data) /** * Must be called holding the vgic_spilled_lock */ -static inline -struct vgic_int* vgic_highest_prio_spilled(struct vcpu *vcpu, - unsigned flags, - struct list** outlist) { +static inline struct vgic_int* vgic_highest_prio_spilled(struct vcpu* vcpu, unsigned flags, + struct list** outlist) +{ struct vgic_int* irq = NULL; struct list* spilled_lists[] = { &vcpu->arch.vgic_spilled, &vcpu->vm->arch.vgic_spilled, }; - size_t spilled_list_size = sizeof(spilled_lists)/sizeof(struct list*); - for(size_t i = 0; i< spilled_list_size; i++) { - struct list *list = spilled_lists[i]; - list_foreach((*list), struct vgic_int, temp_irq) { - if(!(vgic_get_state(temp_irq) & flags)) { continue; } + size_t spilled_list_size = sizeof(spilled_lists) / sizeof(struct list*); + for (size_t i = 0; i < spilled_list_size; i++) { + struct list* list = spilled_lists[i]; + list_foreach ((*list), struct vgic_int, temp_irq) { + if (!(vgic_get_state(temp_irq) & flags)) { + continue; + } bool irq_is_null = irq == NULL; uint8_t irq_prio = irq_is_null ? GIC_LOWEST_PRIO : irq->prio; irqid_t irq_id = irq_is_null ? GIC_MAX_VALID_INTERRUPTS : irq->id; @@ -1004,23 +1001,26 @@ struct vgic_int* vgic_highest_prio_spilled(struct vcpu *vcpu, return irq; } -static void vgic_refill_lrs(struct vcpu *vcpu, bool npie) { +static void vgic_refill_lrs(struct vcpu* vcpu, bool npie) +{ uint64_t elrsr = gich_get_elrsr(); - ssize_t lr_ind = bit64_ffs(elrsr & BIT64_MASK(0, NUM_LRS)); + ssize_t lr_ind = bit64_ffs(elrsr & BIT64_MASK(0, NUM_LRS)); unsigned flags = npie ? 
PEND : ACT | PEND; spin_lock(&vcpu->vm->arch.vgic_spilled_lock); - while(lr_ind >= 0) { + while (lr_ind >= 0) { struct list* list = NULL; struct vgic_int* irq = vgic_highest_prio_spilled(vcpu, flags, &list); if (irq != NULL) { spin_lock(&irq->lock); bool got_ownership = vgic_get_ownership(vcpu, irq); - if(got_ownership) { + if (got_ownership) { list_rm(list, &irq->node); vgic_write_lr(vcpu, irq, lr_ind); } spin_unlock(&irq->lock); - if(!got_ownership) { continue; } + if (!got_ownership) { + continue; + } } else { uint32_t hcr = gich_get_hcr(); gich_set_hcr(hcr & ~(GICH_HCR_NPIE_BIT | GICH_HCR_UIE_BIT)); @@ -1033,16 +1033,14 @@ static void vgic_refill_lrs(struct vcpu *vcpu, bool npie) { spin_unlock(&vcpu->vm->arch.vgic_spilled_lock); } - -static void vgic_eoir_highest_spilled_active(struct vcpu *vcpu) -{ +static void vgic_eoir_highest_spilled_active(struct vcpu* vcpu) +{ struct list* list = NULL; - struct vgic_int *interrupt = - vgic_highest_prio_spilled(vcpu, ACT, &list); + struct vgic_int* interrupt = vgic_highest_prio_spilled(vcpu, ACT, &list); if (interrupt != NULL) { spin_lock(&interrupt->lock); - if(vgic_get_ownership(vcpu, interrupt)) { + if (vgic_get_ownership(vcpu, interrupt)) { interrupt->state &= ~ACT; if (vgic_int_is_hw(interrupt)) { gic_set_act(interrupt->id, false); @@ -1056,7 +1054,7 @@ static void vgic_eoir_highest_spilled_active(struct vcpu *vcpu) } } -void vgic_handle_trapped_eoir(struct vcpu *vcpu) +void vgic_handle_trapped_eoir(struct vcpu* vcpu) { uint64_t eisr = gich_get_eisr(); int64_t lr_ind = bit64_ffs(eisr & BIT64_MASK(0, NUM_LRS)); @@ -1064,9 +1062,10 @@ void vgic_handle_trapped_eoir(struct vcpu *vcpu) unsigned long lr_val = gich_read_lr(lr_ind); gich_write_lr(lr_ind, 0); - struct vgic_int *interrupt = - vgic_get_int(vcpu, GICH_LR_VID(lr_val), vcpu->id); - if (interrupt == NULL) continue; + struct vgic_int* interrupt = vgic_get_int(vcpu, GICH_LR_VID(lr_val), vcpu->id); + if (interrupt == NULL) { + continue; + } spin_lock(&interrupt->lock); interrupt->in_lr = false; @@ -1104,35 +1103,34 @@ void gic_maintenance_handler(irqid_t irq_id) } } -size_t vgic_get_itln(const struct vgic_dscrp *vgic_dscrp) { - +size_t vgic_get_itln(const struct vgic_dscrp* vgic_dscrp) +{ /** - * By default the guest sees the real platforms interrupt line number - * in the virtual gic. However a user can control this using the - * interrupt_num in the platform description configuration which be at - * least the number of ppis and a multiple of 32. + * By default the guest sees the real platform's interrupt line number in the virtual gic. + * However, a user can control this using the interrupt_num in the platform description + * configuration, which must be at least the number of PPIs and a multiple of 32.
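+ * For example (hypothetical numbers, not taken from this patch): a configured interrupt_num of
+ * 160 yields ALIGN(160, 32)/32 - 1 = 4 in TYPER.ITLinesNumber, from which the guest derives
+ * 32 * (4 + 1) = 160 virtual interrupt lines, matching the int_num computation in vgic_init.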
*/ - size_t vtyper_itln = - bit32_extract(gicd->TYPER, GICD_TYPER_ITLN_OFF, GICD_TYPER_ITLN_LEN); + size_t vtyper_itln = bit32_extract(gicd->TYPER, GICD_TYPER_ITLN_OFF, GICD_TYPER_ITLN_LEN); - if(vgic_dscrp->interrupt_num > GIC_MAX_PPIS) { - vtyper_itln = (ALIGN(vgic_dscrp->interrupt_num, 32)/32 - 1) & - BIT32_MASK(0, GICD_TYPER_ITLN_LEN); + if (vgic_dscrp->interrupt_num > GIC_MAX_PPIS) { + vtyper_itln = + (ALIGN(vgic_dscrp->interrupt_num, 32) / 32 - 1) & BIT32_MASK(0, GICD_TYPER_ITLN_LEN); } return vtyper_itln; } -void vgic_set_hw(struct vm *vm, irqid_t id) +void vgic_set_hw(struct vm* vm, irqid_t id) { - if (id < GIC_MAX_SGIS) return; + if (id < GIC_MAX_SGIS) { + return; + } - struct vgic_int *interrupt = NULL; + struct vgic_int* interrupt = NULL; if (id < GIC_CPU_PRIV) { - for (vcpuid_t vcpuid = 0; vcpuid < vm->cpu_num; vcpuid++) - { + for (vcpuid_t vcpuid = 0; vcpuid < vm->cpu_num; vcpuid++) { interrupt = vgic_get_int(vm_get_vcpu(vm, vcpuid), id, vcpuid); if (interrupt != NULL) { spin_lock(&interrupt->lock); diff --git a/src/arch/armv8/vgicv2.c b/src/arch/armv8/vgicv2.c index bccfdcc6d..ff9461c5b 100644 --- a/src/arch/armv8/vgicv2.c +++ b/src/arch/armv8/vgicv2.c @@ -13,19 +13,19 @@ #include #include -bool vgic_int_has_other_target(struct vcpu *vcpu, struct vgic_int *interrupt) +bool vgic_int_has_other_target(struct vcpu* vcpu, struct vgic_int* interrupt) { bool priv = gic_is_priv(interrupt->id); bool has_other_targets = (interrupt->targets & ~(1 << cpu()->id)) != 0; return !priv && has_other_targets; } -uint8_t vgic_int_ptarget_mask(struct vcpu *vcpu, struct vgic_int *interrupt) +uint8_t vgic_int_ptarget_mask(struct vcpu* vcpu, struct vgic_int* interrupt) { return interrupt->targets; } -bool vgicd_set_trgt(struct vcpu *vcpu, struct vgic_int *interrupt, unsigned long targets) +bool vgicd_set_trgt(struct vcpu* vcpu, struct vgic_int* interrupt, unsigned long targets) { if (gic_is_priv(interrupt->id)) { return false; @@ -37,24 +37,22 @@ bool vgicd_set_trgt(struct vcpu *vcpu, struct vgic_int *interrupt, unsigned long return prev_targets != targets; } -void vgicd_set_trgt_hw(struct vcpu *vcpu, struct vgic_int *interrupt) +void vgicd_set_trgt_hw(struct vcpu* vcpu, struct vgic_int* interrupt) { gicd_set_trgt(interrupt->id, interrupt->targets); } -unsigned long vgicd_get_trgt(struct vcpu *vcpu, struct vgic_int *interrupt) +unsigned long vgicd_get_trgt(struct vcpu* vcpu, struct vgic_int* interrupt) { if (gic_is_priv(interrupt->id)) { return (((unsigned long)1) << vcpu->id); } else { - return vm_translate_to_vcpu_mask(vcpu->vm, interrupt->targets, - GIC_TARGET_BITS); + return vm_translate_to_vcpu_mask(vcpu->vm, interrupt->targets, GIC_TARGET_BITS); } } -void vgicd_emul_sgiregs_access(struct emul_access *acc, - struct vgic_reg_handler_info *handlers, - bool gicr_access, vcpuid_t vgicr_id) +void vgicd_emul_sgiregs_access(struct emul_access* acc, struct vgic_reg_handler_info* handlers, + bool gicr_access, vcpuid_t vgicr_id) { unsigned long val = acc->write ? 
vcpu_readreg(cpu()->vcpu, acc->reg) : 0; @@ -64,8 +62,7 @@ void vgicd_emul_sgiregs_access(struct emul_access *acc, irqid_t int_id = GICD_SGIR_SGIINTID(val); switch (GICD_SGIR_TRGLSTFLT(val)) { case 0: - trgtlist = vm_translate_to_pcpu_mask( - cpu()->vcpu->vm, GICD_SGIR_CPUTRGLST(val), + trgtlist = vm_translate_to_pcpu_mask(cpu()->vcpu->vm, GICD_SGIR_CPUTRGLST(val), GIC_TARGET_BITS); break; case 1: @@ -102,7 +99,7 @@ struct vgic_reg_handler_info sgir_info = { 0b0100, }; -void vgic_inject_sgi(struct vcpu *vcpu, struct vgic_int *interrupt, vcpuid_t source) +void vgic_inject_sgi(struct vcpu* vcpu, struct vgic_int* interrupt, vcpuid_t source) { spin_lock(&interrupt->lock); @@ -127,23 +124,21 @@ void vgic_inject_sgi(struct vcpu *vcpu, struct vgic_int *interrupt, vcpuid_t sou spin_unlock(&interrupt->lock); } -void vgic_init(struct vm *vm, const struct vgic_dscrp *vgic_dscrp) +void vgic_init(struct vm* vm, const struct vgic_dscrp* vgic_dscrp) { vm->arch.vgicd.CTLR = 0; size_t vtyper_itln = vgic_get_itln(vgic_dscrp); vm->arch.vgicd.int_num = 32 * (vtyper_itln + 1); - vm->arch.vgicd.TYPER = - ((vtyper_itln << GICD_TYPER_ITLN_OFF) & GICD_TYPER_ITLN_MSK) | + vm->arch.vgicd.TYPER = ((vtyper_itln << GICD_TYPER_ITLN_OFF) & GICD_TYPER_ITLN_MSK) | (((vm->cpu_num - 1) << GICD_TYPER_CPUNUM_OFF) & GICD_TYPER_CPUNUM_MSK); vm->arch.vgicd.IIDR = gicd->IIDR; size_t n = NUM_PAGES(sizeof(struct gicc_hw)); - mem_alloc_map_dev(&vm->as, SEC_VM_ANY,(vaddr_t)vgic_dscrp->gicc_addr, + mem_alloc_map_dev(&vm->as, SEC_VM_ANY, (vaddr_t)vgic_dscrp->gicc_addr, (vaddr_t)platform.arch.gic.gicv_addr, n); size_t vgic_int_size = vm->arch.vgicd.int_num * sizeof(struct vgic_int); - vm->arch.vgicd.interrupts = - mem_alloc_page(NUM_PAGES(vgic_int_size), SEC_HYP_VM, false); + vm->arch.vgicd.interrupts = mem_alloc_page(NUM_PAGES(vgic_int_size), SEC_HYP_VM, false); if (vm->arch.vgicd.interrupts == NULL) { ERROR("failed to alloc vgic"); } @@ -161,18 +156,16 @@ void vgic_init(struct vm *vm, const struct vgic_dscrp *vgic_dscrp) vm->arch.vgicd.interrupts[i].enabled = false; } - vm->arch.vgicd_emul = (struct emul_mem) { - .va_base = vgic_dscrp->gicd_addr, + vm->arch.vgicd_emul = (struct emul_mem){ .va_base = vgic_dscrp->gicd_addr, .size = ALIGN(sizeof(struct gicd_hw), PAGE_SIZE), - .handler = vgicd_emul_handler - }; + .handler = vgicd_emul_handler }; vm_emul_add_mem(vm, &vm->arch.vgicd_emul); list_init(&vm->arch.vgic_spilled); vm->arch.vgic_spilled_lock = SPINLOCK_INITVAL; } -void vgic_cpu_init(struct vcpu *vcpu) +void vgic_cpu_init(struct vcpu* vcpu) { for (size_t i = 0; i < GIC_CPU_PRIV; i++) { vcpu->arch.vgic_priv.interrupts[i].owner = vcpu; @@ -182,7 +175,7 @@ void vgic_cpu_init(struct vcpu *vcpu) vcpu->arch.vgic_priv.interrupts[i].prio = GIC_LOWEST_PRIO; vcpu->arch.vgic_priv.interrupts[i].cfg = 0; vcpu->arch.vgic_priv.interrupts[i].sgi.act = 0; - vcpu->arch.vgic_priv.interrupts[i].sgi.pend = 0; + vcpu->arch.vgic_priv.interrupts[i].sgi.pend = 0; vcpu->arch.vgic_priv.interrupts[i].hw = false; vcpu->arch.vgic_priv.interrupts[i].in_lr = false; vcpu->arch.vgic_priv.interrupts[i].enabled = false; diff --git a/src/arch/armv8/vgicv3.c b/src/arch/armv8/vgicv3.c index 9e7143466..8a20e8028 100644 --- a/src/arch/armv8/vgicv3.c +++ b/src/arch/armv8/vgicv3.c @@ -13,14 +13,14 @@ #include #include -#define GICR_IS_REG(REG, offset) \ +#define GICR_IS_REG(REG, offset) \ (((offset) >= offsetof(struct gicr_hw, REG)) && \ - (offset) < (offsetof(struct gicr_hw, REG) + sizeof(gicr[0].REG))) -#define GICR_REG_OFF(REG) (offsetof(struct gicr_hw, REG) & 0x1ffff) 
-#define GICR_REG_MASK(ADDR) ((ADDR)&0x1ffff) -#define GICD_REG_MASK(ADDR) ((ADDR)&(GIC_VERSION == GICV2 ? 0xfffUL : 0xffffUL)) + (offset) < (offsetof(struct gicr_hw, REG) + sizeof(gicr[0].REG))) +#define GICR_REG_OFF(REG) (offsetof(struct gicr_hw, REG) & 0x1ffff) +#define GICR_REG_MASK(ADDR) ((ADDR) & 0x1ffff) +#define GICD_REG_MASK(ADDR) ((ADDR) & (GIC_VERSION == GICV2 ? 0xfffUL : 0xffffUL)) -bool vgic_int_has_other_target(struct vcpu *vcpu, struct vgic_int *interrupt) +bool vgic_int_has_other_target(struct vcpu* vcpu, struct vgic_int* interrupt) { bool priv = gic_is_priv(interrupt->id); bool routed_here = @@ -30,7 +30,7 @@ bool vgic_int_has_other_target(struct vcpu *vcpu, struct vgic_int *interrupt) return any || (!routed_here && route_valid); } -uint8_t vgic_int_ptarget_mask(struct vcpu *vcpu, struct vgic_int *interrupt) +uint8_t vgic_int_ptarget_mask(struct vcpu* vcpu, struct vgic_int* interrupt) { if (vgic_broadcast(vcpu, interrupt)) { return cpu()->vcpu->vm->cpus & ~(1U << cpu()->vcpu->phys_id); @@ -39,19 +39,19 @@ uint8_t vgic_int_ptarget_mask(struct vcpu *vcpu, struct vgic_int *interrupt) } } -bool vgic_int_set_route(struct vcpu *vcpu, struct vgic_int *interrupt, - unsigned long route) +bool vgic_int_set_route(struct vcpu* vcpu, struct vgic_int* interrupt, unsigned long route) { unsigned long phys_route; unsigned long prev_route = interrupt->route; - if (gic_is_priv(interrupt->id)) return false; + if (gic_is_priv(interrupt->id)) { + return false; + } if (route & GICD_IROUTER_IRM_BIT) { phys_route = cpu_id_to_mpidr(vcpu->phys_id); } else { - struct vcpu *tvcpu = - vm_get_vcpu_by_mpidr(vcpu->vm, route & MPIDR_AFF_MSK); + struct vcpu* tvcpu = vm_get_vcpu_by_mpidr(vcpu->vm, route & MPIDR_AFF_MSK); if (tvcpu != NULL) { phys_route = cpu_id_to_mpidr(tvcpu->phys_id) & MPIDR_AFF_MSK; } else { @@ -64,35 +64,35 @@ bool vgic_int_set_route(struct vcpu *vcpu, struct vgic_int *interrupt, return prev_route != interrupt->route; } -unsigned long vgic_int_get_route(struct vcpu *vcpu, struct vgic_int *interrupt) +unsigned long vgic_int_get_route(struct vcpu* vcpu, struct vgic_int* interrupt) { - if (gic_is_priv(interrupt->id)) return 0; + if (gic_is_priv(interrupt->id)) { + return 0; + } return interrupt->route; } -void vgic_int_set_route_hw(struct vcpu *vcpu, struct vgic_int *interrupt) +void vgic_int_set_route_hw(struct vcpu* vcpu, struct vgic_int* interrupt) { gicd_set_route(interrupt->id, interrupt->phys.route); } -void vgicr_emul_ctrl_access(struct emul_access *acc, - struct vgic_reg_handler_info *handlers, - bool gicr_access, vcpuid_t vgicr_id) +void vgicr_emul_ctrl_access(struct emul_access* acc, struct vgic_reg_handler_info* handlers, + bool gicr_access, vcpuid_t vgicr_id) { if (!acc->write) { vcpu_writereg(cpu()->vcpu, acc->reg, 0); } } -void vgicr_emul_typer_access(struct emul_access *acc, - struct vgic_reg_handler_info *handlers, - bool gicr_access, vcpuid_t vgicr_id) +void vgicr_emul_typer_access(struct emul_access* acc, struct vgic_reg_handler_info* handlers, + bool gicr_access, vcpuid_t vgicr_id) { bool word_access = (acc->width == 4); bool top_access = word_access && ((acc->addr & 0x4) != 0); - + if (!acc->write) { - struct vcpu *vcpu = vm_get_vcpu(cpu()->vcpu->vm, vgicr_id); + struct vcpu* vcpu = vm_get_vcpu(cpu()->vcpu->vm, vgicr_id); uint64_t typer = vcpu->arch.vgic_priv.vgicr.TYPER; if (top_access) { @@ -105,34 +105,30 @@ void vgicr_emul_typer_access(struct emul_access *acc, } } -void vgicr_emul_pidr_access(struct emul_access *acc, - struct vgic_reg_handler_info *handlers, - bool 
gicr_access, vcpuid_t vgicr_id) +void vgicr_emul_pidr_access(struct emul_access* acc, struct vgic_reg_handler_info* handlers, + bool gicr_access, vcpuid_t vgicr_id) { if (!acc->write) { unsigned long val = 0; cpuid_t pgicr_id = vm_translate_to_pcpuid(cpu()->vcpu->vm, vgicr_id); - if(pgicr_id != INVALID_CPUID) { + if (pgicr_id != INVALID_CPUID) { val = gicr[pgicr_id].ID[((acc->addr & 0xff) - 0xd0) / 4]; - } + } vcpu_writereg(cpu()->vcpu, acc->reg, val); } } -void vgicd_emul_router_access(struct emul_access *acc, - struct vgic_reg_handler_info *handlers, - bool gicr_access, vcpuid_t vgicr_id) +void vgicd_emul_router_access(struct emul_access* acc, struct vgic_reg_handler_info* handlers, + bool gicr_access, vcpuid_t vgicr_id) { bool word_access = (acc->width == 4); bool top_access = word_access && ((acc->addr & 0x4) != 0); vaddr_t aligned_addr = acc->addr & ~((vaddr_t)0x7); - size_t irq_id = - (GICD_REG_MASK(aligned_addr) - offsetof(struct gicd_hw, IROUTER)) / 8; - - struct vgic_int *interrupt = - vgic_get_int(cpu()->vcpu, irq_id, cpu()->vcpu->id); + size_t irq_id = (GICD_REG_MASK(aligned_addr) - offsetof(struct gicd_hw, IROUTER)) / 8; + + struct vgic_int* interrupt = vgic_get_int(cpu()->vcpu, irq_id, cpu()->vcpu->id); - if(interrupt == NULL) { + if (interrupt == NULL) { return vgic_emul_razwi(acc, handlers, gicr_access, vgicr_id); } @@ -148,17 +144,14 @@ void vgicd_emul_router_access(struct emul_access *acc, } else { uint64_t reg_value = vcpu_readreg(cpu()->vcpu, acc->reg); if (top_access) { - route = (route & BIT64_MASK(0, 32)) | - ((reg_value & BIT64_MASK(0, 32)) << 32); + route = (route & BIT64_MASK(0, 32)) | ((reg_value & BIT64_MASK(0, 32)) << 32); } else if (word_access) { - route = (route & BIT64_MASK(32, 32)) | - (reg_value & BIT64_MASK(0, 32)); + route = (route & BIT64_MASK(32, 32)) | (reg_value & BIT64_MASK(0, 32)); } else { route = reg_value; } vgic_int_set_field(handlers, cpu()->vcpu, interrupt, route); } - } extern struct vgic_reg_handler_info isenabler_info; @@ -195,14 +188,14 @@ struct vgic_reg_handler_info vgicr_pidr_info = { 0b0100, }; -static inline vcpuid_t vgicr_get_id(struct emul_access *acc) +static inline vcpuid_t vgicr_get_id(struct emul_access* acc) { return (acc->addr - cpu()->vcpu->vm->arch.vgicr_addr) / sizeof(struct gicr_hw); } -bool vgicr_emul_handler(struct emul_access *acc) +bool vgicr_emul_handler(struct emul_access* acc) { - struct vgic_reg_handler_info *handler_info = NULL; + struct vgic_reg_handler_info* handler_info = NULL; switch (GICR_REG_MASK(acc->addr)) { case GICR_REG_OFF(CTLR): handler_info = &vgicr_ctrl_info; @@ -246,9 +239,8 @@ bool vgicr_emul_handler(struct emul_access *acc) if (vgic_check_reg_alignment(acc, handler_info)) { vcpuid_t vgicr_id = vgicr_get_id(acc); - struct vcpu *vcpu = vgicr_id == cpu()->vcpu->id - ? cpu()->vcpu - : vm_get_vcpu(cpu()->vcpu->vm, vgicr_id); + struct vcpu* vcpu = + vgicr_id == cpu()->vcpu->id ? 
cpu()->vcpu : vm_get_vcpu(cpu()->vcpu->vm, vgicr_id); spin_lock(&vcpu->arch.vgic_priv.vgicr.lock); handler_info->reg_access(acc, handler_info, true, vgicr_id); spin_unlock(&vcpu->arch.vgic_priv.vgicr.lock); @@ -258,13 +250,13 @@ bool vgicr_emul_handler(struct emul_access *acc) } } -bool vgic_icc_sgir_handler(struct emul_access *acc) +bool vgic_icc_sgir_handler(struct emul_access* acc) { if (acc->write) { uint64_t sgir = vcpu_readreg(cpu()->vcpu, acc->reg); if (acc->multi_reg) { - uint64_t sgir_high = vcpu_readreg(cpu()->vcpu, acc->reg_high); - sgir |= (sgir_high<<32); + uint64_t sgir_high = vcpu_readreg(cpu()->vcpu, acc->reg_high); + sgir |= (sgir_high << 32); } irqid_t int_id = ICC_SGIR_SGIINTID(sgir); cpumap_t trgtlist; @@ -272,11 +264,11 @@ bool vgic_icc_sgir_handler(struct emul_access *acc) trgtlist = cpu()->vcpu->vm->cpus & ~(1U << cpu()->vcpu->phys_id); } else { /** - * TODO: we are assuming the vm has a single cluster. Change this - * when adding virtual cluster support. + * TODO: we are assuming the vm has a single cluster. Change this when adding virtual + * cluster support. */ - trgtlist = vm_translate_to_pcpu_mask( - cpu()->vcpu->vm, ICC_SGIR_TRGLSTFLT(sgir), cpu()->vcpu->vm->cpu_num); + trgtlist = vm_translate_to_pcpu_mask(cpu()->vcpu->vm, ICC_SGIR_TRGLSTFLT(sgir), + cpu()->vcpu->vm->cpu_num); } vgic_send_sgi_msg(cpu()->vcpu, trgtlist, int_id); } @@ -284,7 +276,7 @@ bool vgic_icc_sgir_handler(struct emul_access *acc) return true; } -bool vgic_icc_sre_handler(struct emul_access *acc) +bool vgic_icc_sre_handler(struct emul_access* acc) { if (!acc->write) { vcpu_writereg(cpu()->vcpu, acc->reg, 0x1); @@ -292,21 +284,19 @@ bool vgic_icc_sre_handler(struct emul_access *acc) return true; } -void vgic_init(struct vm *vm, const struct vgic_dscrp *vgic_dscrp) +void vgic_init(struct vm* vm, const struct vgic_dscrp* vgic_dscrp) { vm->arch.vgicr_addr = vgic_dscrp->gicr_addr; vm->arch.vgicd.CTLR = 0; size_t vtyper_itln = vgic_get_itln(vgic_dscrp); vm->arch.vgicd.int_num = 32 * (vtyper_itln + 1); - vm->arch.vgicd.TYPER = - ((vtyper_itln << GICD_TYPER_ITLN_OFF) & GICD_TYPER_ITLN_MSK) | + vm->arch.vgicd.TYPER = ((vtyper_itln << GICD_TYPER_ITLN_OFF) & GICD_TYPER_ITLN_MSK) | (((vm->cpu_num - 1) << GICD_TYPER_CPUNUM_OFF) & GICD_TYPER_CPUNUM_MSK) | (((10 - 1) << GICD_TYPER_IDBITS_OFF) & GICD_TYPER_IDBITS_MSK); vm->arch.vgicd.IIDR = gicd->IIDR; size_t vgic_int_size = vm->arch.vgicd.int_num * sizeof(struct vgic_int); - vm->arch.vgicd.interrupts = - mem_alloc_page(NUM_PAGES(vgic_int_size), SEC_HYP_VM, false); + vm->arch.vgicd.interrupts = mem_alloc_page(NUM_PAGES(vgic_int_size), SEC_HYP_VM, false); if (vm->arch.vgicd.interrupts == NULL) { ERROR("failed to alloc vgic"); } @@ -325,15 +315,12 @@ void vgic_init(struct vm *vm, const struct vgic_dscrp *vgic_dscrp) vm->arch.vgicd.interrupts[i].enabled = false; } - vm->arch.vgicd_emul = (struct emul_mem) { - .va_base = vgic_dscrp->gicd_addr, + vm->arch.vgicd_emul = (struct emul_mem){ .va_base = vgic_dscrp->gicd_addr, .size = ALIGN(sizeof(struct gicd_hw), PAGE_SIZE), - .handler = vgicd_emul_handler - }; + .handler = vgicd_emul_handler }; vm_emul_add_mem(vm, &vm->arch.vgicd_emul); - for (vcpuid_t vcpuid = 0; vcpuid < vm->cpu_num; vcpuid++) - { + for (vcpuid_t vcpuid = 0; vcpuid < vm->cpu_num; vcpuid++) { struct vcpu* vcpu = vm_get_vcpu(vm, vcpuid); uint64_t typer = (uint64_t)vcpu->id << GICR_TYPER_PRCNUM_OFF; typer |= ((uint64_t)vcpu->arch.vmpidr & MPIDR_AFF_MSK) << GICR_TYPER_AFFVAL_OFF; @@ -343,30 +330,24 @@ void vgic_init(struct vm *vm, const struct 
vgic_dscrp *vgic_dscrp) vcpu->arch.vgic_priv.vgicr.IIDR = gicr[cpu()->id].IIDR; } - vm->arch.vgicr_emul = (struct emul_mem) { - .va_base = vgic_dscrp->gicr_addr, + vm->arch.vgicr_emul = (struct emul_mem){ .va_base = vgic_dscrp->gicr_addr, .size = ALIGN(sizeof(struct gicr_hw), PAGE_SIZE) * vm->cpu_num, - .handler = vgicr_emul_handler - }; + .handler = vgicr_emul_handler }; vm_emul_add_mem(vm, &vm->arch.vgicr_emul); - vm->arch.icc_sgir_emul = (struct emul_reg) { - .addr = SYSREG_ENC_ADDR(3, 0, 12, 11, 5), - .handler = vgic_icc_sgir_handler - }; + vm->arch.icc_sgir_emul = (struct emul_reg){ .addr = SYSREG_ENC_ADDR(3, 0, 12, 11, 5), + .handler = vgic_icc_sgir_handler }; vm_emul_add_reg(vm, &vm->arch.icc_sgir_emul); - vm->arch.icc_sre_emul = (struct emul_reg) { - .addr = SYSREG_ENC_ADDR(3, 0, 12, 12, 5), - .handler = vgic_icc_sre_handler - }; + vm->arch.icc_sre_emul = (struct emul_reg){ .addr = SYSREG_ENC_ADDR(3, 0, 12, 12, 5), + .handler = vgic_icc_sre_handler }; vm_emul_add_reg(vm, &vm->arch.icc_sre_emul); list_init(&vm->arch.vgic_spilled); vm->arch.vgic_spilled_lock = SPINLOCK_INITVAL; } -void vgic_cpu_init(struct vcpu *vcpu) +void vgic_cpu_init(struct vcpu* vcpu) { for (size_t i = 0; i < GIC_CPU_PRIV; i++) { vcpu->arch.vgic_priv.interrupts[i].owner = NULL; @@ -376,7 +357,7 @@ void vgic_cpu_init(struct vcpu *vcpu) vcpu->arch.vgic_priv.interrupts[i].prio = GIC_LOWEST_PRIO; vcpu->arch.vgic_priv.interrupts[i].cfg = 0; vcpu->arch.vgic_priv.interrupts[i].route = GICD_IROUTER_INV; - vcpu->arch.vgic_priv.interrupts[i].phys.redist = vcpu->phys_id; + vcpu->arch.vgic_priv.interrupts[i].phys.redist = vcpu->phys_id; vcpu->arch.vgic_priv.interrupts[i].hw = false; vcpu->arch.vgic_priv.interrupts[i].in_lr = false; vcpu->arch.vgic_priv.interrupts[i].enabled = false; diff --git a/src/arch/armv8/vm.c b/src/arch/armv8/vm.c index 00ddb58c7..317a876b4 100644 --- a/src/arch/armv8/vm.c +++ b/src/arch/armv8/vm.c @@ -19,10 +19,9 @@ void vm_arch_init(struct vm* vm, const struct vm_config* config) struct vcpu* vm_get_vcpu_by_mpidr(struct vm* vm, unsigned long mpidr) { - for (cpuid_t vcpuid = 0; vcpuid < vm->cpu_num; vcpuid++) - { - struct vcpu *vcpu = vm_get_vcpu(vm, vcpuid); - if ((vcpu->arch.vmpidr & MPIDR_AFF_MSK) == (mpidr & MPIDR_AFF_MSK)) { + for (cpuid_t vcpuid = 0; vcpuid < vm->cpu_num; vcpuid++) { + struct vcpu* vcpu = vm_get_vcpu(vm, vcpuid); + if ((vcpu->arch.vmpidr & MPIDR_AFF_MSK) == (mpidr & MPIDR_AFF_MSK)) { return vcpu; } } @@ -33,7 +32,8 @@ struct vcpu* vm_get_vcpu_by_mpidr(struct vm* vm, unsigned long mpidr) static unsigned long vm_cpuid_to_mpidr(struct vm* vm, vcpuid_t cpuid) { if (cpuid > vm->cpu_num) { - return ~(~MPIDR_RES1 & MPIDR_RES0_MSK); //return an invlid mpidr by inverting res bits + return ~(~MPIDR_RES1 & MPIDR_RES0_MSK); // return an invlid mpidr by + // inverting res bits } unsigned long mpidr = cpuid | MPIDR_RES1; @@ -68,20 +68,20 @@ void vcpu_arch_reset(struct vcpu* vcpu, vaddr_t entry) sysreg_cntvoff_el2_write(0); /** - * See ARMv8-A ARM section D1.9.1 for registers that must be in a known - * state at reset. + * See ARMv8-A ARM section D1.9.1 for registers that must be in a known state at reset. */ sysreg_sctlr_el1_write(SCTLR_RES1); sysreg_cntkctl_el1_write(0); sysreg_pmcr_el0_write(0); /** - * TODO: ARMv8-A ARM mentions another implementation optional registers - * that reset to a known value. + * TODO: ARMv8-A ARM mentions another implementation optional registers that reset to a known + * value. 
*/ } -static inline bool vcpu_psci_state_on(struct vcpu* vcpu) { +static inline bool vcpu_psci_state_on(struct vcpu* vcpu) +{ return vcpu->arch.psci_ctx.state == ON; } @@ -91,5 +91,5 @@ void vcpu_arch_run(struct vcpu* vcpu) vcpu_arch_entry(); } else { cpu_idle(); - } + } } diff --git a/src/arch/armv8/vmm.c b/src/arch/armv8/vmm.c index 2b55c576b..f2b3d4bad 100644 --- a/src/arch/armv8/vmm.c +++ b/src/arch/armv8/vmm.c @@ -11,8 +11,8 @@ void vmm_arch_init() { vmm_arch_profile_init(); - uint64_t hcr = HCR_VM_BIT | HCR_RW_BIT | HCR_IMO_BIT | HCR_FMO_BIT | - HCR_TSC_BIT | HCR_APK_BIT | HCR_API_BIT; + uint64_t hcr = HCR_VM_BIT | HCR_RW_BIT | HCR_IMO_BIT | HCR_FMO_BIT | HCR_TSC_BIT | HCR_APK_BIT | + HCR_API_BIT; sysreg_hcr_el2_write(hcr); diff --git a/src/arch/riscv/boot.S b/src/arch/riscv/boot.S index b486d4a6a..0dbdb505f 100644 --- a/src/arch/riscv/boot.S +++ b/src/arch/riscv/boot.S @@ -14,8 +14,8 @@ #define PTE_INDEX_SHIFT(LEVEL) ((9 * (PT_LVLS - 1 - (LEVEL))) + 12) /** - * Calculates the index or offset of a page table entry for given virtual - * address(addr) at a given level of page table. + * Calculates the index or offset of a page table entry for given virtual address(addr) at a given + * level of page table. */ .macro PTE_INDEX_ASM index, addr, level srl \index, \addr, PTE_INDEX_SHIFT(\level) @@ -25,8 +25,8 @@ .endm /** - * Calculates the pointer to a pte given the page table pointer(pt), - * the page table level (levle) and the target virtual address (va) + * Calculates the pointer to a pte given the page table pointer(pt), the page table level (levle) + * and the target virtual address (va) */ .macro PTE_PTR pte, pt, level, va PTE_INDEX_ASM s1, \va, \level @@ -34,8 +34,7 @@ .endm /** - * Creates a page table entry (pte) for a given physical address (pa) - * and set of flags. + * Creates a page table entry (pte) for a given physical address (pa) and set of flags. */ .macro PTE_FILL pte, pa, flags srl \pte, \pa, 2 @@ -61,15 +60,14 @@ _bss_end_sym: .8byte _bss_end .data .align 3 /** - * barrier is used to minimal synchronization in boot - other cores wait for - * bsp to set it. + * barrier is used to minimal synchronization in boot - other cores wait for bsp to set it. */ _barrier: .8byte 0 /** - * The following code MUST be at the base of the image, as this is bao's entry - * point. Therefore .boot section must also be the first in the linker script. - * DO NOT implement any code before the _reset_handler in this section. + * The following code MUST be at the base of the image, as this is bao's entrypoint. Therefore + * .boot section must also be the first in the linker script. DO NOT implement any code before the + * _reset_handler in this section. */ .section ".boot", "ax" .globl _reset_handler @@ -77,19 +75,17 @@ _barrier: .8byte 0 _reset_handler: /** - * The previous boot stage should pass the following arguments: + * The previous boot stage should pass the following arguments: * a0 -> hart id * a1 -> config binary load addr - * The following registers are reserved to be passed to init function - * as arguments: + * The following registers are reserved to be passed to init function as arguments: * a0 -> hart id * a1 -> contains image base load address * a2 -> config binary load address (originally passed in a1) * - * The remaining code must use t0-t6 as scratchpad registers in the main - * flow and s0-s5 in auxiliary routines. s6-s11 are used to hold constants - * a3-a7 are used as arguments and return values (can be also corrputed in - * auxiliary routines). 
+ * The remaining code must use t0-t6 as scratchpad registers in the main flow and s0-s5 in + * auxiliary routines. s6-s11 are used to hold constants; a3-a7 are used as arguments and return + * values (can also be corrupted in auxiliary routines). */ mv a2, a1 @@ -97,8 +93,8 @@ _reset_handler: la s6, extra_allocated_phys_mem /** - * Setup stvec early. In case of we cause an exception in this boot code - * we end up at a known place. + * Set up stvec early so that, in case we cause an exception in this boot code, we end up at a + * known place. */ la t0, _hyp_trap_vector and t0, t0, ~STVEC_MODE_MSK or t0, t0, STVEC_MODE_DIRECT csrw stvec, t0 /** - * Bring processor to known supervisor state: make sure interrupts - * and memory translation are disabled. + * Bring the processor to a known supervisor state: make sure interrupts and memory translation + * are disabled. */ csrw sstatus, zero diff --git a/src/arch/riscv/cache.c b/src/arch/riscv/cache.c index ae6492aff..621e99129 100644 --- a/src/arch/riscv/cache.c +++ b/src/arch/riscv/cache.c @@ -1,5 +1,5 @@ /** - * SPDX-License-Identifier: Apache-2.0 + * SPDX-License-Identifier: Apache-2.0 * Copyright (c) Bao Project and Contributors. All rights reserved. */ @@ -7,18 +7,16 @@ #include /** - * The riscv spec does not include cache maintenance. There are current - * efforts to define and standardize a set of cache management instructions, - * but for now this is platform dependent. + * The riscv spec does not include cache maintenance. There are current efforts to define and + * standardize a set of cache management instructions, but for now this is platform dependent. */ void cache_arch_enumerate(struct cache* dscrp) { /** - * Currently the typical of way for system software to discover cache - * topology is to read it of a dtb passed by the bootloader. As we are not - * implementing an fdt parser, a platform port must add it to the platform - * description. + * Currently the typical way for system software to discover cache topology is to read it from + * a dtb passed by the bootloader. As we are not implementing an fdt parser, a platform port + * must add it to the platform description. */ *dscrp = platform.cache; } @@ -26,9 +24,8 @@ void cache_arch_enumerate(struct cache* dscrp) __attribute__((weak)) void cache_flush_range(vaddr_t base, size_t size) { /** - * A platform must define its custom cache flush operation, otherwise - * certain mechanisms such as coloring and hypervisor relocation will - * most probably fail. + * A platform must define its custom cache flush operation, otherwise certain mechanisms such + * as coloring and hypervisor relocation will most probably fail. */ WARNING("trying to flush caches but the operation is not defined for this " "platform"); diff --git a/src/arch/riscv/cpu.c b/src/arch/riscv/cpu.c index ce0fd692d..5e396cfa5 100644 --- a/src/arch/riscv/cpu.c +++ b/src/arch/riscv/cpu.c @@ -1,5 +1,5 @@ /** - * SPDX-License-Identifier: Apache-2.0 + * SPDX-License-Identifier: Apache-2.0 * Copyright (c) Bao Project and Contributors. All rights reserved.
*/ @@ -15,10 +15,12 @@ void cpu_arch_init(cpuid_t cpuid, paddr_t load_addr) { if (cpuid == CPU_MASTER) { sbi_init(); - for(size_t hartid = 0; hartid < platform.cpu_num; hartid++){ - if(hartid == cpuid) continue; + for (size_t hartid = 0; hartid < platform.cpu_num; hartid++) { + if (hartid == cpuid) { + continue; + } struct sbiret ret = sbi_hart_start(hartid, load_addr, 0); - if(ret.error < 0) { + if (ret.error < 0) { WARNING("failed to wake up hart %d", hartid); } } diff --git a/src/arch/riscv/inc/arch/bao.h b/src/arch/riscv/inc/arch/bao.h index 77111e0ad..06443f9f6 100644 --- a/src/arch/riscv/inc/arch/bao.h +++ b/src/arch/riscv/inc/arch/bao.h @@ -10,7 +10,7 @@ #define RV64 (__riscv_xlen == 64) #define RV32 (__riscv_xlen == 32) #else -#define RV64 1 +#define RV64 1 #endif #if (!(RV64)) @@ -18,28 +18,28 @@ #endif #if (RV64) -#define LOAD ld -#define STORE sd +#define LOAD ld +#define STORE sd #define REGLEN (8) #elif (RV32) -#define LOAD lw -#define STORE sw +#define LOAD lw +#define STORE sw #define REGLEN (4) #endif #if (RV64) #define BAO_VAS_BASE (0xffffffc000000000) #define BAO_CPU_BASE (0xffffffc040000000) -#define BAO_VM_BASE (0xffffffe000000000) -#define BAO_VAS_TOP (0xfffffff000000000) +#define BAO_VM_BASE (0xffffffe000000000) +#define BAO_VAS_TOP (0xfffffff000000000) #elif (RV32) #define BAO_VAS_BASE (0xc0000000) #define BAO_CPU_BASE (0x00000000) -#define BAO_VM_BASE (0x00000000) -#define BAO_VAS_TOP (0xffffffff) +#define BAO_VM_BASE (0x00000000) +#define BAO_VAS_TOP (0xffffffff) #endif -#define PAGE_SIZE (0x1000) +#define PAGE_SIZE (0x1000) #define STACK_SIZE (PAGE_SIZE) #ifndef __ASSEMBLER__ diff --git a/src/arch/riscv/inc/arch/cache.h b/src/arch/riscv/inc/arch/cache.h index efe2ed26b..096a19c72 100644 --- a/src/arch/riscv/inc/arch/cache.h +++ b/src/arch/riscv/inc/arch/cache.h @@ -8,6 +8,6 @@ #include -#define CACHE_MAX_LVL 8 // Does this make sense in all architectures? +#define CACHE_MAX_LVL 8 // Does this make sense in all architectures? 
-#endif /* __ARCH_CACHE_H__ */ +#endif /* __ARCH_CACHE_H__ */ diff --git a/src/arch/riscv/inc/arch/cpu.h b/src/arch/riscv/inc/arch/cpu.h index b63cd00f6..3a5ca07c4 100644 --- a/src/arch/riscv/inc/arch/cpu.h +++ b/src/arch/riscv/inc/arch/cpu.h @@ -17,7 +17,8 @@ struct cpu_arch { unsigned plic_cntxt; }; -static inline struct cpu* cpu() { +static inline struct cpu* cpu() +{ return (struct cpu*)BAO_CPU_BASE; } diff --git a/src/arch/riscv/inc/arch/csrs.h b/src/arch/riscv/inc/arch/csrs.h index 53bab6cc6..e4c99ff50 100644 --- a/src/arch/riscv/inc/arch/csrs.h +++ b/src/arch/riscv/inc/arch/csrs.h @@ -8,229 +8,225 @@ #include -#define CSR_VSSTATUS 0x200 -#define CSR_VSIE 0x204 -#define CSR_VSTVEC 0x205 -#define CSR_VSSCRATCH 0x240 -#define CSR_VSEPC 0x241 -#define CSR_VSCAUSE 0x242 -#define CSR_VSTVAL 0x243 -#define CSR_VSIP 0x244 -#define CSR_VSATP 0x280 +#define CSR_VSSTATUS 0x200 +#define CSR_VSIE 0x204 +#define CSR_VSTVEC 0x205 +#define CSR_VSSCRATCH 0x240 +#define CSR_VSEPC 0x241 +#define CSR_VSCAUSE 0x242 +#define CSR_VSTVAL 0x243 +#define CSR_VSIP 0x244 +#define CSR_VSATP 0x280 /* Sstc Extension */ -#define CSR_VSTIMECMP 0x24D -#define CSR_VSTIMECMPH 0x25D - -#define CSR_HSTATUS 0x600 -#define CSR_HEDELEG 0x602 -#define CSR_HIDELEG 0x603 -#define CSR_HIE 0x604 -#define CSR_HTIMEDELTA 0x605 -#define CSR_HTIMEDELTAH 0x615 -#define CSR_HCOUNTEREN 0x606 -#define CSR_HGEIE 0x607 -#define CSR_HTVAL 0x643 -#define CSR_HIP 0x644 -#define CSR_HVIP 0x645 -#define CSR_HTINST 0x64A -#define CSR_HGATP 0x680 -#define CSR_HGEIP 0xE07 +#define CSR_VSTIMECMP 0x24D +#define CSR_VSTIMECMPH 0x25D + +#define CSR_HSTATUS 0x600 +#define CSR_HEDELEG 0x602 +#define CSR_HIDELEG 0x603 +#define CSR_HIE 0x604 +#define CSR_HTIMEDELTA 0x605 +#define CSR_HTIMEDELTAH 0x615 +#define CSR_HCOUNTEREN 0x606 +#define CSR_HGEIE 0x607 +#define CSR_HTVAL 0x643 +#define CSR_HIP 0x644 +#define CSR_HVIP 0x645 +#define CSR_HTINST 0x64A +#define CSR_HGATP 0x680 +#define CSR_HGEIP 0xE07 /* Hypervisor Configuration */ -#define CSR_HENVCFG 0x60A -#define CSR_HENVCFGH 0x61A +#define CSR_HENVCFG 0x60A +#define CSR_HENVCFGH 0x61A /* Sstc Extension */ -#define CSR_STIMECMP 0x14D -#define CSR_STIMECMPH 0x15D +#define CSR_STIMECMP 0x14D +#define CSR_STIMECMPH 0x15D -#define STVEC_MODE_OFF (0) -#define STVEC_MODE_LEN (2) -#define STVEC_MODE_MSK BIT_MASK(STVEC_MODE_OFF, STVEC_MODE_LEN) +#define STVEC_MODE_OFF (0) +#define STVEC_MODE_LEN (2) +#define STVEC_MODE_MSK BIT_MASK(STVEC_MODE_OFF, STVEC_MODE_LEN) #define STVEC_MODE_DIRECT (0) #define STVEC_MODE_VECTRD (1) #if (RV64) -#define SATP_MODE_OFF (60) +#define SATP_MODE_OFF (60) #define SATP_MODE_DFLT SATP_MODE_39 -#define SATP_ASID_OFF (44) -#define SATP_ASID_LEN (16) +#define SATP_ASID_OFF (44) +#define SATP_ASID_LEN (16) #define HGATP_VMID_OFF (44) #define HGATP_VMID_LEN (14) #elif (RV32) -#define SATP_MODE_OFF (31) +#define SATP_MODE_OFF (31) #define SATP_MODE_DFLT SATP_MODE_32 -#define SATP_ASID_OFF (22) -#define SATP_ASID_LEN (9) +#define SATP_ASID_OFF (22) +#define SATP_ASID_LEN (9) #define HGATP_VMID_OFF (22) #define HGATP_VMID_LEN (7) #endif -#define SATP_MODE_BARE (0ULL << SATP_MODE_OFF) -#define SATP_MODE_32 (1ULL << SATP_MODE_OFF) -#define SATP_MODE_39 (8ULL << SATP_MODE_OFF) -#define SATP_MODE_48 (9ULL << SATP_MODE_OFF) -#define SATP_ASID_MSK BIT_MASK(SATP_ASID_OFF, SATP_ASID_LEN) - -#define HGATP_MODE_OFF SATP_MODE_OFF -#define HGATP_MODE_DFLT SATP_MODE_DFLT -#define HGATP_VMID_MSK BIT_MASK(HGATP_VMID_OFF, HGATP_VMID_LEN) - -#define SSTATUS_UIE_BIT (1ULL << 0) -#define SSTATUS_SIE_BIT 
(1ULL << 1) -#define SSTATUS_UPIE_BIT (1ULL << 4) -#define SSTATUS_SPIE_BIT (1ULL << 5) -#define SSTATUS_SPP_BIT (1ULL << 8) -#define SSTATUS_FS_OFF (13) -#define SSTATUS_FS_LEN (2) -#define SSTATUS_FS_MSK BIT_MASK(SSTATUS_FS_OFF, SSTATUS_FS_LEN) -#define SSTATUS_FS_AOFF (0) -#define SSTATUS_FS_INITIAL (1ULL << SSTATUS_FS_OFF) -#define SSTATUS_FS_CLEAN (2ULL << SSTATUS_FS_OFF) -#define SSTATUS_FS_DIRTY (3ULL << SSTATUS_FS_OFF) -#define SSTATUS_XS_OFF (15) -#define SSTATUS_XS_LEN (2) -#define SSTATUS_XS_MSK BIT_MASK(SSTATUS_XS_OFF, SSTATUS_XS_LEN) -#define SSTATUS_XS_AOFF (0) -#define SSTATUS_XS_INITIAL (1ULL << SSTATUS_XS_OFF) -#define SSTATUS_XS_CLEAN (2ULL << SSTATUS_XS_OFF) -#define SSTATUS_XS_DIRTY (3ULL << SSTATUS_XS_OFF) -#define SSTATUS_SUM (1ULL << 18) -#define SSTATUS_MXR (1ULL << 19) -#define SSTATUS_SD (1ULL << 63) - -#define SIE_USIE (1ULL << 0) -#define SIE_SSIE (1ULL << 1) -#define SIE_UTIE (1ULL << 4) -#define SIE_STIE (1ULL << 5) -#define SIE_UEIE (1ULL << 8) -#define SIE_SEIE (1ULL << 9) - -#define SIP_USIP SIE_USIE -#define SIP_SSIP SIE_SSIE -#define SIP_UTIP SIE_UTIE -#define SIP_STIP SIE_STIE -#define SIP_UEIP SIE_UEIE -#define SIP_SEIP SIE_SEIE - -#define HIE_VSSIE (1ULL << 2) -#define HIE_VSTIE (1ULL << 6) -#define HIE_VSEIE (1ULL << 10) -#define HIE_SGEIE (1ULL << 12) - -#define HIP_VSSIP HIE_VSSIE -#define HIP_VSTIP HIE_VSTIE -#define HIP_VSEIP HIE_VSEIE -#define HIP_SGEIP HIE_SGEIE - -#define SCAUSE_INT_BIT (1ULL << ((REGLEN * 8) - 1)) -#define SCAUSE_CODE_MSK (SCAUSE_INT_BIT - 1) -#define SCAUSE_CODE_USI (0 | SCAUSE_INT_BIT) -#define SCAUSE_CODE_SSI (1 | SCAUSE_INT_BIT) -#define SCAUSE_CODE_VSSI (2 | SCAUSE_INT_BIT) -#define SCAUSE_CODE_UTI (4 | SCAUSE_INT_BIT) -#define SCAUSE_CODE_STI (5 | SCAUSE_INT_BIT) -#define SCAUSE_CODE_VSTI (6 | SCAUSE_INT_BIT) -#define SCAUSE_CODE_UEI (8 | SCAUSE_INT_BIT) -#define SCAUSE_CODE_SEI (9 | SCAUSE_INT_BIT) -#define SCAUSE_CODE_VSEI (10 | SCAUSE_INT_BIT) -#define SCAUSE_CODE_IAM (0) -#define SCAUSE_CODE_IAF (1) -#define SCAUSE_CODE_ILI (2) -#define SCAUSE_CODE_BKP (3) -#define SCAUSE_CODE_LAM (4) -#define SCAUSE_CODE_LAF (5) -#define SCAUSE_CODE_SAM (6) -#define SCAUSE_CODE_SAF (7) -#define SCAUSE_CODE_ECU (8) -#define SCAUSE_CODE_ECS (9) -#define SCAUSE_CODE_ECV (10) -#define SCAUSE_CODE_IPF (12) -#define SCAUSE_CODE_LPF (13) -#define SCAUSE_CODE_SPF (15) -#define SCAUSE_CODE_IGPF (20) -#define SCAUSE_CODE_LGPF (21) -#define SCAUSE_CODE_VRTI (22) -#define SCAUSE_CODE_SGPF (23) - -#define HIDELEG_USI SIP_USIP -#define HIDELEG_SSI SIP_SSIP -#define HIDELEG_UTI SIP_UTIP -#define HIDELEG_STI SIP_STIP -#define HIDELEG_UEI SIP_UEIP -#define HIDELEG_SEI SIP_SEIP -#define HIDELEG_VSSI HIP_VSSIP -#define HIDELEG_VSTI HIP_VSTIP -#define HIDELEG_VSEI HIP_VSEIP -#define HIDELEG_SGEI HIP_SGEIP - -#define HEDELEG_IAM (1ULL << 0) -#define HEDELEG_IAF (1ULL << 1) -#define HEDELEG_ILI (1ULL << 2) -#define HEDELEG_BKP (1ULL << 3) -#define HEDELEG_LAM (1ULL << 4) -#define HEDELEG_LAF (1ULL << 5) -#define HEDELEG_SAM (1ULL << 6) -#define HEDELEG_SAF (1ULL << 7) -#define HEDELEG_ECU (1ULL << 8) -#define HEDELEG_ECS (1ULL << 9) -#define HEDELEG_ECV (1ULL << 10) -#define HEDELEG_IPF (1ULL << 12) -#define HEDELEG_LPF (1ULL << 13) -#define HEDELEG_SPF (1ULL << 15) - -#define MISA_H (1ULL << 7) - -#define HSTATUS_VSBE (1ULL << 5) -#define HSTATUS_GVA (1ULL << 6) -#define HSTATUS_SPV (1ULL << 7) -#define HSTATUS_SPVP (1ULL << 8) -#define HSTATUS_HU (1ULL << 9) -#define HSTATUS_VGEIN_OFF (12) -#define HSTATUS_VGEIN_LEN (12) -#define HSTATUS_VGEIN_MSK 
(BIT_MASK(HSTATUS_VGEIN_OFF, HSTATUS_VGEIN_LEN)) -#define HSTATUS_VTVM (1ULL << 20) -#define HSTATUS_VTW (1ULL << 21) -#define HSTATUS_VTSR (1ULL << 22) -#define HSTATUS_VSXL_OFF (32) -#define HSTATUS_VSXL_LEN (2) -#define HSTATUS_VSXL_MSK (BIT_MASK(HSTATUS_VSXL_OFF, HSTATUS_VSXL_LEN)) -#define HSTATUS_VSXL_32 (1ULL << HSTATUS_VSXL_OFF) -#define HSTATUS_VSXL_64 (2ULL << HSTATUS_VSXL_OFF) - -#define HENVCFG_FIOM (1ULL << 0) -#define HENVCFG_CBIE_OFF (4) -#define HSTATUS_CBIE_LEN (2) -#define HSTATUS_CBIE_MSK (BIT_MASK(HSTATUS_CBIE_OFF, HSTATUS_CBIE_LEN)) -#define HENVCFG_CBCFE (1ULL << 6) -#define HENVCFG_CBZE (1ULL << 7) -#define HENVCFG_PBMTE (1ULL << 62) -#define HENVCFG_STCE (1ULL << 63) - -#define HCOUNTEREN_CY (1ULL << 0) -#define HCOUNTEREN_TM (1ULL << 1) -#define HCOUNTEREN_IR (1ULL << 2) - -#define TINST_PSEUDO_STORE (0x3020) -#define TINST_PSEUDO_LOAD (0x3000) +#define SATP_MODE_BARE (0ULL << SATP_MODE_OFF) +#define SATP_MODE_32 (1ULL << SATP_MODE_OFF) +#define SATP_MODE_39 (8ULL << SATP_MODE_OFF) +#define SATP_MODE_48 (9ULL << SATP_MODE_OFF) +#define SATP_ASID_MSK BIT_MASK(SATP_ASID_OFF, SATP_ASID_LEN) + +#define HGATP_MODE_OFF SATP_MODE_OFF +#define HGATP_MODE_DFLT SATP_MODE_DFLT +#define HGATP_VMID_MSK BIT_MASK(HGATP_VMID_OFF, HGATP_VMID_LEN) + +#define SSTATUS_UIE_BIT (1ULL << 0) +#define SSTATUS_SIE_BIT (1ULL << 1) +#define SSTATUS_UPIE_BIT (1ULL << 4) +#define SSTATUS_SPIE_BIT (1ULL << 5) +#define SSTATUS_SPP_BIT (1ULL << 8) +#define SSTATUS_FS_OFF (13) +#define SSTATUS_FS_LEN (2) +#define SSTATUS_FS_MSK BIT_MASK(SSTATUS_FS_OFF, SSTATUS_FS_LEN) +#define SSTATUS_FS_AOFF (0) +#define SSTATUS_FS_INITIAL (1ULL << SSTATUS_FS_OFF) +#define SSTATUS_FS_CLEAN (2ULL << SSTATUS_FS_OFF) +#define SSTATUS_FS_DIRTY (3ULL << SSTATUS_FS_OFF) +#define SSTATUS_XS_OFF (15) +#define SSTATUS_XS_LEN (2) +#define SSTATUS_XS_MSK BIT_MASK(SSTATUS_XS_OFF, SSTATUS_XS_LEN) +#define SSTATUS_XS_AOFF (0) +#define SSTATUS_XS_INITIAL (1ULL << SSTATUS_XS_OFF) +#define SSTATUS_XS_CLEAN (2ULL << SSTATUS_XS_OFF) +#define SSTATUS_XS_DIRTY (3ULL << SSTATUS_XS_OFF) +#define SSTATUS_SUM (1ULL << 18) +#define SSTATUS_MXR (1ULL << 19) +#define SSTATUS_SD (1ULL << 63) + +#define SIE_USIE (1ULL << 0) +#define SIE_SSIE (1ULL << 1) +#define SIE_UTIE (1ULL << 4) +#define SIE_STIE (1ULL << 5) +#define SIE_UEIE (1ULL << 8) +#define SIE_SEIE (1ULL << 9) + +#define SIP_USIP SIE_USIE +#define SIP_SSIP SIE_SSIE +#define SIP_UTIP SIE_UTIE +#define SIP_STIP SIE_STIE +#define SIP_UEIP SIE_UEIE +#define SIP_SEIP SIE_SEIE + +#define HIE_VSSIE (1ULL << 2) +#define HIE_VSTIE (1ULL << 6) +#define HIE_VSEIE (1ULL << 10) +#define HIE_SGEIE (1ULL << 12) + +#define HIP_VSSIP HIE_VSSIE +#define HIP_VSTIP HIE_VSTIE +#define HIP_VSEIP HIE_VSEIE +#define HIP_SGEIP HIE_SGEIE + +#define SCAUSE_INT_BIT (1ULL << ((REGLEN * 8) - 1)) +#define SCAUSE_CODE_MSK (SCAUSE_INT_BIT - 1) +#define SCAUSE_CODE_USI (0 | SCAUSE_INT_BIT) +#define SCAUSE_CODE_SSI (1 | SCAUSE_INT_BIT) +#define SCAUSE_CODE_VSSI (2 | SCAUSE_INT_BIT) +#define SCAUSE_CODE_UTI (4 | SCAUSE_INT_BIT) +#define SCAUSE_CODE_STI (5 | SCAUSE_INT_BIT) +#define SCAUSE_CODE_VSTI (6 | SCAUSE_INT_BIT) +#define SCAUSE_CODE_UEI (8 | SCAUSE_INT_BIT) +#define SCAUSE_CODE_SEI (9 | SCAUSE_INT_BIT) +#define SCAUSE_CODE_VSEI (10 | SCAUSE_INT_BIT) +#define SCAUSE_CODE_IAM (0) +#define SCAUSE_CODE_IAF (1) +#define SCAUSE_CODE_ILI (2) +#define SCAUSE_CODE_BKP (3) +#define SCAUSE_CODE_LAM (4) +#define SCAUSE_CODE_LAF (5) +#define SCAUSE_CODE_SAM (6) +#define SCAUSE_CODE_SAF (7) +#define SCAUSE_CODE_ECU (8) 
+#define SCAUSE_CODE_ECS (9) +#define SCAUSE_CODE_ECV (10) +#define SCAUSE_CODE_IPF (12) +#define SCAUSE_CODE_LPF (13) +#define SCAUSE_CODE_SPF (15) +#define SCAUSE_CODE_IGPF (20) +#define SCAUSE_CODE_LGPF (21) +#define SCAUSE_CODE_VRTI (22) +#define SCAUSE_CODE_SGPF (23) + +#define HIDELEG_USI SIP_USIP +#define HIDELEG_SSI SIP_SSIP +#define HIDELEG_UTI SIP_UTIP +#define HIDELEG_STI SIP_STIP +#define HIDELEG_UEI SIP_UEIP +#define HIDELEG_SEI SIP_SEIP +#define HIDELEG_VSSI HIP_VSSIP +#define HIDELEG_VSTI HIP_VSTIP +#define HIDELEG_VSEI HIP_VSEIP +#define HIDELEG_SGEI HIP_SGEIP + +#define HEDELEG_IAM (1ULL << 0) +#define HEDELEG_IAF (1ULL << 1) +#define HEDELEG_ILI (1ULL << 2) +#define HEDELEG_BKP (1ULL << 3) +#define HEDELEG_LAM (1ULL << 4) +#define HEDELEG_LAF (1ULL << 5) +#define HEDELEG_SAM (1ULL << 6) +#define HEDELEG_SAF (1ULL << 7) +#define HEDELEG_ECU (1ULL << 8) +#define HEDELEG_ECS (1ULL << 9) +#define HEDELEG_ECV (1ULL << 10) +#define HEDELEG_IPF (1ULL << 12) +#define HEDELEG_LPF (1ULL << 13) +#define HEDELEG_SPF (1ULL << 15) + +#define MISA_H (1ULL << 7) + +#define HSTATUS_VSBE (1ULL << 5) +#define HSTATUS_GVA (1ULL << 6) +#define HSTATUS_SPV (1ULL << 7) +#define HSTATUS_SPVP (1ULL << 8) +#define HSTATUS_HU (1ULL << 9) +#define HSTATUS_VGEIN_OFF (12) +#define HSTATUS_VGEIN_LEN (12) +#define HSTATUS_VGEIN_MSK (BIT_MASK(HSTATUS_VGEIN_OFF, HSTATUS_VGEIN_LEN)) +#define HSTATUS_VTVM (1ULL << 20) +#define HSTATUS_VTW (1ULL << 21) +#define HSTATUS_VTSR (1ULL << 22) +#define HSTATUS_VSXL_OFF (32) +#define HSTATUS_VSXL_LEN (2) +#define HSTATUS_VSXL_MSK (BIT_MASK(HSTATUS_VSXL_OFF, HSTATUS_VSXL_LEN)) +#define HSTATUS_VSXL_32 (1ULL << HSTATUS_VSXL_OFF) +#define HSTATUS_VSXL_64 (2ULL << HSTATUS_VSXL_OFF) + +#define HENVCFG_FIOM (1ULL << 0) +#define HENVCFG_CBIE_OFF (4) +#define HSTATUS_CBIE_LEN (2) +#define HSTATUS_CBIE_MSK (BIT_MASK(HSTATUS_CBIE_OFF, HSTATUS_CBIE_LEN)) +#define HENVCFG_CBCFE (1ULL << 6) +#define HENVCFG_CBZE (1ULL << 7) +#define HENVCFG_PBMTE (1ULL << 62) +#define HENVCFG_STCE (1ULL << 63) + +#define HCOUNTEREN_CY (1ULL << 0) +#define HCOUNTEREN_TM (1ULL << 1) +#define HCOUNTEREN_IR (1ULL << 2) + +#define TINST_PSEUDO_STORE (0x3020) +#define TINST_PSEUDO_LOAD (0x3000) #define TINST_INS_COMPRESSED(tinst) (!((tinst) & 0x2)) -#define TINST_INS_SIZE(tinst) (TINST_INS_COMPRESSED((tinst)) ? 2 : 4) +#define TINST_INS_SIZE(tinst) (TINST_INS_COMPRESSED(tinst) ? 
2 : 4) #ifndef __ASSEMBLER__ -#define CSRR(csr) \ - ({ \ - unsigned long _temp; \ - asm volatile("csrr %0, " XSTR(csr) "\n\r" \ - : "=r"(_temp)::"memory"); \ - _temp; \ +#define CSRR(csr) \ + ({ \ + unsigned long _temp; \ + asm volatile("csrr %0, " XSTR(csr) "\n\r" : "=r"(_temp)::"memory"); \ + _temp; \ }) -#define CSRW(csr, rs) \ - asm volatile("csrw " XSTR(csr) ", %0\n\r" ::"rK"(rs) : "memory") -#define CSRS(csr, rs) \ - asm volatile("csrs " XSTR(csr) ", %0\n\r" ::"rK"(rs) : "memory") -#define CSRC(csr, rs) \ - asm volatile("csrc " XSTR(csr) ", %0\n\r" ::"rK"(rs) : "memory") +#define CSRW(csr, rs) asm volatile("csrw " XSTR(csr) ", %0\n\r" ::"rK"(rs) : "memory") +#define CSRS(csr, rs) asm volatile("csrs " XSTR(csr) ", %0\n\r" ::"rK"(rs) : "memory") +#define CSRC(csr, rs) asm volatile("csrc " XSTR(csr) ", %0\n\r" ::"rK"(rs) : "memory") #endif /* __ASSEMBLER__ */ diff --git a/src/arch/riscv/inc/arch/encoding.h b/src/arch/riscv/inc/arch/encoding.h index 73b5289e5..cd02b645b 100644 --- a/src/arch/riscv/inc/arch/encoding.h +++ b/src/arch/riscv/inc/arch/encoding.h @@ -9,19 +9,19 @@ #include #define INS_COMPRESSED(ins) (!((ins & 3) == 3)) -#define INS_SIZE(ins) ((ins & 3) == 3 ? 4 : 2) +#define INS_SIZE(ins) ((ins & 3) == 3 ? 4 : 2) -#define INS_OPCODE(ins) ((ins)&0x7f) -#define INS_FUNCT3(ins) ((ins >> 12) & 0x7) -#define INS_RD(ins) (((ins) >> 7) & 0x1f) -#define INS_RS1(ins) (((ins) >> 15) & 0x1f) -#define INS_RS2(ins) (((ins) >> 20) & 0x1f) -#define MATCH_LOAD (0x03) -#define MATCH_STORE (0x23) +#define INS_OPCODE(ins) ((ins) & 0x7f) +#define INS_FUNCT3(ins) ((ins >> 12) & 0x7) +#define INS_RD(ins) (((ins) >> 7) & 0x1f) +#define INS_RS1(ins) (((ins) >> 15) & 0x1f) +#define INS_RS2(ins) (((ins) >> 20) & 0x1f) +#define MATCH_LOAD (0x03) +#define MATCH_STORE (0x23) -#define INS_C_OPCODE(ins) ((ins)&0xe003) -#define INS_C_RD_RS2(ins) ((ins >> 2) & 0x7) -#define MATCH_C_LOAD (0x4000) -#define MATCH_C_STORE (0xc000) +#define INS_C_OPCODE(ins) ((ins) & 0xe003) +#define INS_C_RD_RS2(ins) ((ins >> 2) & 0x7) +#define MATCH_C_LOAD (0x4000) +#define MATCH_C_STORE (0xc000) #endif /* __RISCV_ENCODING_H__ */ diff --git a/src/arch/riscv/inc/arch/hypercall.h b/src/arch/riscv/inc/arch/hypercall.h index 3342d12e8..314de9768 100644 --- a/src/arch/riscv/inc/arch/hypercall.h +++ b/src/arch/riscv/inc/arch/hypercall.h @@ -6,6 +6,6 @@ #ifndef ARCH_HYPERCALL_H #define ARCH_HYPERCALL_H -#define HYPCALL_ARG_REG(ARG) ((ARG) + REG_A0) +#define HYPCALL_ARG_REG(ARG) ((ARG) + REG_A0) #endif /* ARCH_HYPERCALL_H */ diff --git a/src/arch/riscv/inc/arch/instructions.h b/src/arch/riscv/inc/arch/instructions.h index 3034ffbc0..a77bb5536 100644 --- a/src/arch/riscv/inc/arch/instructions.h +++ b/src/arch/riscv/inc/arch/instructions.h @@ -6,11 +6,10 @@ #ifndef ARCH_INSTRUCTIONS_H #define ARCH_INSTRUCTIONS_H -static inline uint64_t hlvxhu(uintptr_t addr){ +static inline uint64_t hlvxhu(uintptr_t addr) +{ uint64_t value; - asm volatile( - ".insn r 0x73, 0x4, 0x32, %0, %1, x3\n\t" - : "=r"(value): "r"(addr) : "memory"); + asm volatile(".insn r 0x73, 0x4, 0x32, %0, %1, x3\n\t" : "=r"(value) : "r"(addr) : "memory"); return value; } diff --git a/src/arch/riscv/inc/arch/interrupts.h b/src/arch/riscv/inc/arch/interrupts.h index 2fc2b3f1b..aba7d60ed 100644 --- a/src/arch/riscv/inc/arch/interrupts.h +++ b/src/arch/riscv/inc/arch/interrupts.h @@ -9,19 +9,18 @@ #include #include -#define PLIC (1) -#define APLIC (2) +#define PLIC (1) +#define APLIC (2) /** - * In riscv, the ipi (software interrupt) and timer interrupts dont actually - * have an 
ID as their are treated differently from external interrupts - * routed by the external interrupt controller, the PLIC. - * Will define their ids as the ids after the maximum possible in the PLIC. + * In riscv, the ipi (software interrupt) and timer interrupts dont actually have an ID as their + * are treated differently from external interrupts routed by the external interrupt controller, + * the PLIC. Will define their ids as the ids after the maximum possible in the PLIC. */ -#define SOFT_INT_ID (IRQC_MAX_INTERRUPTS + 1) -#define TIMR_INT_ID (IRQC_MAX_INTERRUPTS + 2) +#define SOFT_INT_ID (IRQC_MAX_INTERRUPTS + 1) +#define TIMR_INT_ID (IRQC_MAX_INTERRUPTS + 2) #define MAX_INTERRUPTS (TIMR_INT_ID + 1) -#define IPI_CPU_MSG SOFT_INT_ID +#define IPI_CPU_MSG SOFT_INT_ID #endif /* __ARCH_INTERRUPTS_H__ */ diff --git a/src/arch/riscv/inc/arch/iommu.h b/src/arch/riscv/inc/arch/iommu.h index e33644b5c..0c330eb51 100644 --- a/src/arch/riscv/inc/arch/iommu.h +++ b/src/arch/riscv/inc/arch/iommu.h @@ -9,8 +9,6 @@ #include // VM-specific IOMMU data -struct iommu_vm_arch { +struct iommu_vm_arch { }; -}; - -#endif /* __IOMMU_ARCH_H__ */ +#endif /* __IOMMU_ARCH_H__ */ diff --git a/src/arch/riscv/inc/arch/opcodes.h b/src/arch/riscv/inc/arch/opcodes.h index 1bc81eacc..f4fa4ed7e 100644 --- a/src/arch/riscv/inc/arch/opcodes.h +++ b/src/arch/riscv/inc/arch/opcodes.h @@ -6,735 +6,735 @@ /* Automatically generated by parse_opcodes. */ #ifndef RISCV_ENCODING_H #define RISCV_ENCODING_H -#define MATCH_BEQ 0x63 -#define MASK_BEQ 0x707f -#define MATCH_BNE 0x1063 -#define MASK_BNE 0x707f -#define MATCH_BLT 0x4063 -#define MASK_BLT 0x707f -#define MATCH_BGE 0x5063 -#define MASK_BGE 0x707f -#define MATCH_BLTU 0x6063 -#define MASK_BLTU 0x707f -#define MATCH_BGEU 0x7063 -#define MASK_BGEU 0x707f -#define MATCH_JALR 0x67 -#define MASK_JALR 0x707f -#define MATCH_JAL 0x6f -#define MASK_JAL 0x7f -#define MATCH_LUI 0x37 -#define MASK_LUI 0x7f -#define MATCH_AUIPC 0x17 -#define MASK_AUIPC 0x7f -#define MATCH_ADDI 0x13 -#define MASK_ADDI 0x707f -#define MATCH_SLLI 0x1013 -#define MASK_SLLI 0xfc00707f -#define MATCH_SLTI 0x2013 -#define MASK_SLTI 0x707f -#define MATCH_SLTIU 0x3013 -#define MASK_SLTIU 0x707f -#define MATCH_XORI 0x4013 -#define MASK_XORI 0x707f -#define MATCH_SRLI 0x5013 -#define MASK_SRLI 0xfc00707f -#define MATCH_SRAI 0x40005013 -#define MASK_SRAI 0xfc00707f -#define MATCH_ORI 0x6013 -#define MASK_ORI 0x707f -#define MATCH_ANDI 0x7013 -#define MASK_ANDI 0x707f -#define MATCH_ADD 0x33 -#define MASK_ADD 0xfe00707f -#define MATCH_SUB 0x40000033 -#define MASK_SUB 0xfe00707f -#define MATCH_SLL 0x1033 -#define MASK_SLL 0xfe00707f -#define MATCH_SLT 0x2033 -#define MASK_SLT 0xfe00707f -#define MATCH_SLTU 0x3033 -#define MASK_SLTU 0xfe00707f -#define MATCH_XOR 0x4033 -#define MASK_XOR 0xfe00707f -#define MATCH_SRL 0x5033 -#define MASK_SRL 0xfe00707f -#define MATCH_SRA 0x40005033 -#define MASK_SRA 0xfe00707f -#define MATCH_OR 0x6033 -#define MASK_OR 0xfe00707f -#define MATCH_AND 0x7033 -#define MASK_AND 0xfe00707f -#define MATCH_ADDIW 0x1b -#define MASK_ADDIW 0x707f -#define MATCH_SLLIW 0x101b -#define MASK_SLLIW 0xfe00707f -#define MATCH_SRLIW 0x501b -#define MASK_SRLIW 0xfe00707f -#define MATCH_SRAIW 0x4000501b -#define MASK_SRAIW 0xfe00707f -#define MATCH_ADDW 0x3b -#define MASK_ADDW 0xfe00707f -#define MATCH_SUBW 0x4000003b -#define MASK_SUBW 0xfe00707f -#define MATCH_SLLW 0x103b -#define MASK_SLLW 0xfe00707f -#define MATCH_SRLW 0x503b -#define MASK_SRLW 0xfe00707f -#define MATCH_SRAW 0x4000503b -#define MASK_SRAW 
0xfe00707f -#define MATCH_LB 0x3 -#define MASK_LB 0x707f -#define MATCH_LH 0x1003 -#define MASK_LH 0x707f -#define MATCH_LW 0x2003 -#define MASK_LW 0x707f -#define MATCH_LD 0x3003 -#define MASK_LD 0x707f -#define MATCH_LBU 0x4003 -#define MASK_LBU 0x707f -#define MATCH_LHU 0x5003 -#define MASK_LHU 0x707f -#define MATCH_LWU 0x6003 -#define MASK_LWU 0x707f -#define MATCH_SB 0x23 -#define MASK_SB 0x707f -#define MATCH_SH 0x1023 -#define MASK_SH 0x707f -#define MATCH_SW 0x2023 -#define MASK_SW 0x707f -#define MATCH_SD 0x3023 -#define MASK_SD 0x707f -#define MATCH_FENCE 0xf -#define MASK_FENCE 0x707f -#define MATCH_FENCE_I 0x100f -#define MASK_FENCE_I 0x707f -#define MATCH_MUL 0x2000033 -#define MASK_MUL 0xfe00707f -#define MATCH_MULH 0x2001033 -#define MASK_MULH 0xfe00707f -#define MATCH_MULHSU 0x2002033 -#define MASK_MULHSU 0xfe00707f -#define MATCH_MULHU 0x2003033 -#define MASK_MULHU 0xfe00707f -#define MATCH_DIV 0x2004033 -#define MASK_DIV 0xfe00707f -#define MATCH_DIVU 0x2005033 -#define MASK_DIVU 0xfe00707f -#define MATCH_REM 0x2006033 -#define MASK_REM 0xfe00707f -#define MATCH_REMU 0x2007033 -#define MASK_REMU 0xfe00707f -#define MATCH_MULW 0x200003b -#define MASK_MULW 0xfe00707f -#define MATCH_DIVW 0x200403b -#define MASK_DIVW 0xfe00707f -#define MATCH_DIVUW 0x200503b -#define MASK_DIVUW 0xfe00707f -#define MATCH_REMW 0x200603b -#define MASK_REMW 0xfe00707f -#define MATCH_REMUW 0x200703b -#define MASK_REMUW 0xfe00707f -#define MATCH_AMOADD_W 0x202f -#define MASK_AMOADD_W 0xf800707f -#define MATCH_AMOXOR_W 0x2000202f -#define MASK_AMOXOR_W 0xf800707f -#define MATCH_AMOOR_W 0x4000202f -#define MASK_AMOOR_W 0xf800707f -#define MATCH_AMOAND_W 0x6000202f -#define MASK_AMOAND_W 0xf800707f -#define MATCH_AMOMIN_W 0x8000202f -#define MASK_AMOMIN_W 0xf800707f -#define MATCH_AMOMAX_W 0xa000202f -#define MASK_AMOMAX_W 0xf800707f -#define MATCH_AMOMINU_W 0xc000202f -#define MASK_AMOMINU_W 0xf800707f -#define MATCH_AMOMAXU_W 0xe000202f -#define MASK_AMOMAXU_W 0xf800707f -#define MATCH_AMOSWAP_W 0x800202f -#define MASK_AMOSWAP_W 0xf800707f -#define MATCH_LR_W 0x1000202f -#define MASK_LR_W 0xf9f0707f -#define MATCH_SC_W 0x1800202f -#define MASK_SC_W 0xf800707f -#define MATCH_AMOADD_D 0x302f -#define MASK_AMOADD_D 0xf800707f -#define MATCH_AMOXOR_D 0x2000302f -#define MASK_AMOXOR_D 0xf800707f -#define MATCH_AMOOR_D 0x4000302f -#define MASK_AMOOR_D 0xf800707f -#define MATCH_AMOAND_D 0x6000302f -#define MASK_AMOAND_D 0xf800707f -#define MATCH_AMOMIN_D 0x8000302f -#define MASK_AMOMIN_D 0xf800707f -#define MATCH_AMOMAX_D 0xa000302f -#define MASK_AMOMAX_D 0xf800707f -#define MATCH_AMOMINU_D 0xc000302f -#define MASK_AMOMINU_D 0xf800707f -#define MATCH_AMOMAXU_D 0xe000302f -#define MASK_AMOMAXU_D 0xf800707f -#define MATCH_AMOSWAP_D 0x800302f -#define MASK_AMOSWAP_D 0xf800707f -#define MATCH_LR_D 0x1000302f -#define MASK_LR_D 0xf9f0707f -#define MATCH_SC_D 0x1800302f -#define MASK_SC_D 0xf800707f -#define MATCH_ECALL 0x73 -#define MASK_ECALL 0xffffffff -#define MATCH_EBREAK 0x100073 -#define MASK_EBREAK 0xffffffff -#define MATCH_URET 0x200073 -#define MASK_URET 0xffffffff -#define MATCH_SRET 0x10200073 -#define MASK_SRET 0xffffffff -#define MATCH_MRET 0x30200073 -#define MASK_MRET 0xffffffff -#define MATCH_DRET 0x7b200073 -#define MASK_DRET 0xffffffff -#define MATCH_SFENCE_VMA 0x12000073 -#define MASK_SFENCE_VMA 0xfe007fff -#define MATCH_WFI 0x10500073 -#define MASK_WFI 0xffffffff -#define MATCH_CSRRW 0x1073 -#define MASK_CSRRW 0x707f -#define MATCH_CSRRS 0x2073 -#define MASK_CSRRS 0x707f -#define MATCH_CSRRC 
0x3073 -#define MASK_CSRRC 0x707f -#define MATCH_CSRRWI 0x5073 -#define MASK_CSRRWI 0x707f -#define MATCH_CSRRSI 0x6073 -#define MASK_CSRRSI 0x707f -#define MATCH_CSRRCI 0x7073 -#define MASK_CSRRCI 0x707f -#define MATCH_HFENCE_VVMA 0x22000073 -#define MASK_HFENCE_VVMA 0xfe007fff -#define MATCH_HFENCE_GVMA 0x62000073 -#define MASK_HFENCE_GVMA 0xfe007fff -#define MATCH_FADD_S 0x53 -#define MASK_FADD_S 0xfe00007f -#define MATCH_FSUB_S 0x8000053 -#define MASK_FSUB_S 0xfe00007f -#define MATCH_FMUL_S 0x10000053 -#define MASK_FMUL_S 0xfe00007f -#define MATCH_FDIV_S 0x18000053 -#define MASK_FDIV_S 0xfe00007f -#define MATCH_FSGNJ_S 0x20000053 -#define MASK_FSGNJ_S 0xfe00707f -#define MATCH_FSGNJN_S 0x20001053 -#define MASK_FSGNJN_S 0xfe00707f -#define MATCH_FSGNJX_S 0x20002053 -#define MASK_FSGNJX_S 0xfe00707f -#define MATCH_FMIN_S 0x28000053 -#define MASK_FMIN_S 0xfe00707f -#define MATCH_FMAX_S 0x28001053 -#define MASK_FMAX_S 0xfe00707f -#define MATCH_FSQRT_S 0x58000053 -#define MASK_FSQRT_S 0xfff0007f -#define MATCH_FADD_D 0x2000053 -#define MASK_FADD_D 0xfe00007f -#define MATCH_FSUB_D 0xa000053 -#define MASK_FSUB_D 0xfe00007f -#define MATCH_FMUL_D 0x12000053 -#define MASK_FMUL_D 0xfe00007f -#define MATCH_FDIV_D 0x1a000053 -#define MASK_FDIV_D 0xfe00007f -#define MATCH_FSGNJ_D 0x22000053 -#define MASK_FSGNJ_D 0xfe00707f -#define MATCH_FSGNJN_D 0x22001053 -#define MASK_FSGNJN_D 0xfe00707f -#define MATCH_FSGNJX_D 0x22002053 -#define MASK_FSGNJX_D 0xfe00707f -#define MATCH_FMIN_D 0x2a000053 -#define MASK_FMIN_D 0xfe00707f -#define MATCH_FMAX_D 0x2a001053 -#define MASK_FMAX_D 0xfe00707f -#define MATCH_FCVT_S_D 0x40100053 -#define MASK_FCVT_S_D 0xfff0007f -#define MATCH_FCVT_D_S 0x42000053 -#define MASK_FCVT_D_S 0xfff0007f -#define MATCH_FSQRT_D 0x5a000053 -#define MASK_FSQRT_D 0xfff0007f -#define MATCH_FADD_Q 0x6000053 -#define MASK_FADD_Q 0xfe00007f -#define MATCH_FSUB_Q 0xe000053 -#define MASK_FSUB_Q 0xfe00007f -#define MATCH_FMUL_Q 0x16000053 -#define MASK_FMUL_Q 0xfe00007f -#define MATCH_FDIV_Q 0x1e000053 -#define MASK_FDIV_Q 0xfe00007f -#define MATCH_FSGNJ_Q 0x26000053 -#define MASK_FSGNJ_Q 0xfe00707f -#define MATCH_FSGNJN_Q 0x26001053 -#define MASK_FSGNJN_Q 0xfe00707f -#define MATCH_FSGNJX_Q 0x26002053 -#define MASK_FSGNJX_Q 0xfe00707f -#define MATCH_FMIN_Q 0x2e000053 -#define MASK_FMIN_Q 0xfe00707f -#define MATCH_FMAX_Q 0x2e001053 -#define MASK_FMAX_Q 0xfe00707f -#define MATCH_FCVT_S_Q 0x40300053 -#define MASK_FCVT_S_Q 0xfff0007f -#define MATCH_FCVT_Q_S 0x46000053 -#define MASK_FCVT_Q_S 0xfff0007f -#define MATCH_FCVT_D_Q 0x42300053 -#define MASK_FCVT_D_Q 0xfff0007f -#define MATCH_FCVT_Q_D 0x46100053 -#define MASK_FCVT_Q_D 0xfff0007f -#define MATCH_FSQRT_Q 0x5e000053 -#define MASK_FSQRT_Q 0xfff0007f -#define MATCH_FLE_S 0xa0000053 -#define MASK_FLE_S 0xfe00707f -#define MATCH_FLT_S 0xa0001053 -#define MASK_FLT_S 0xfe00707f -#define MATCH_FEQ_S 0xa0002053 -#define MASK_FEQ_S 0xfe00707f -#define MATCH_FLE_D 0xa2000053 -#define MASK_FLE_D 0xfe00707f -#define MATCH_FLT_D 0xa2001053 -#define MASK_FLT_D 0xfe00707f -#define MATCH_FEQ_D 0xa2002053 -#define MASK_FEQ_D 0xfe00707f -#define MATCH_FLE_Q 0xa6000053 -#define MASK_FLE_Q 0xfe00707f -#define MATCH_FLT_Q 0xa6001053 -#define MASK_FLT_Q 0xfe00707f -#define MATCH_FEQ_Q 0xa6002053 -#define MASK_FEQ_Q 0xfe00707f -#define MATCH_FCVT_W_S 0xc0000053 -#define MASK_FCVT_W_S 0xfff0007f -#define MATCH_FCVT_WU_S 0xc0100053 -#define MASK_FCVT_WU_S 0xfff0007f -#define MATCH_FCVT_L_S 0xc0200053 -#define MASK_FCVT_L_S 0xfff0007f -#define MATCH_FCVT_LU_S 
0xc0300053 -#define MASK_FCVT_LU_S 0xfff0007f -#define MATCH_FMV_X_W 0xe0000053 -#define MASK_FMV_X_W 0xfff0707f -#define MATCH_FCLASS_S 0xe0001053 -#define MASK_FCLASS_S 0xfff0707f -#define MATCH_FCVT_W_D 0xc2000053 -#define MASK_FCVT_W_D 0xfff0007f -#define MATCH_FCVT_WU_D 0xc2100053 -#define MASK_FCVT_WU_D 0xfff0007f -#define MATCH_FCVT_L_D 0xc2200053 -#define MASK_FCVT_L_D 0xfff0007f -#define MATCH_FCVT_LU_D 0xc2300053 -#define MASK_FCVT_LU_D 0xfff0007f -#define MATCH_FMV_X_D 0xe2000053 -#define MASK_FMV_X_D 0xfff0707f -#define MATCH_FCLASS_D 0xe2001053 -#define MASK_FCLASS_D 0xfff0707f -#define MATCH_FCVT_W_Q 0xc6000053 -#define MASK_FCVT_W_Q 0xfff0007f -#define MATCH_FCVT_WU_Q 0xc6100053 -#define MASK_FCVT_WU_Q 0xfff0007f -#define MATCH_FCVT_L_Q 0xc6200053 -#define MASK_FCVT_L_Q 0xfff0007f -#define MATCH_FCVT_LU_Q 0xc6300053 -#define MASK_FCVT_LU_Q 0xfff0007f -#define MATCH_FMV_X_Q 0xe6000053 -#define MASK_FMV_X_Q 0xfff0707f -#define MATCH_FCLASS_Q 0xe6001053 -#define MASK_FCLASS_Q 0xfff0707f -#define MATCH_FCVT_S_W 0xd0000053 -#define MASK_FCVT_S_W 0xfff0007f -#define MATCH_FCVT_S_WU 0xd0100053 -#define MASK_FCVT_S_WU 0xfff0007f -#define MATCH_FCVT_S_L 0xd0200053 -#define MASK_FCVT_S_L 0xfff0007f -#define MATCH_FCVT_S_LU 0xd0300053 -#define MASK_FCVT_S_LU 0xfff0007f -#define MATCH_FMV_W_X 0xf0000053 -#define MASK_FMV_W_X 0xfff0707f -#define MATCH_FCVT_D_W 0xd2000053 -#define MASK_FCVT_D_W 0xfff0007f -#define MATCH_FCVT_D_WU 0xd2100053 -#define MASK_FCVT_D_WU 0xfff0007f -#define MATCH_FCVT_D_L 0xd2200053 -#define MASK_FCVT_D_L 0xfff0007f -#define MATCH_FCVT_D_LU 0xd2300053 -#define MASK_FCVT_D_LU 0xfff0007f -#define MATCH_FMV_D_X 0xf2000053 -#define MASK_FMV_D_X 0xfff0707f -#define MATCH_FCVT_Q_W 0xd6000053 -#define MASK_FCVT_Q_W 0xfff0007f -#define MATCH_FCVT_Q_WU 0xd6100053 -#define MASK_FCVT_Q_WU 0xfff0007f -#define MATCH_FCVT_Q_L 0xd6200053 -#define MASK_FCVT_Q_L 0xfff0007f -#define MATCH_FCVT_Q_LU 0xd6300053 -#define MASK_FCVT_Q_LU 0xfff0007f -#define MATCH_FMV_Q_X 0xf6000053 -#define MASK_FMV_Q_X 0xfff0707f -#define MATCH_FLW 0x2007 -#define MASK_FLW 0x707f -#define MATCH_FLD 0x3007 -#define MASK_FLD 0x707f -#define MATCH_FLQ 0x4007 -#define MASK_FLQ 0x707f -#define MATCH_FSW 0x2027 -#define MASK_FSW 0x707f -#define MATCH_FSD 0x3027 -#define MASK_FSD 0x707f -#define MATCH_FSQ 0x4027 -#define MASK_FSQ 0x707f -#define MATCH_FMADD_S 0x43 -#define MASK_FMADD_S 0x600007f -#define MATCH_FMSUB_S 0x47 -#define MASK_FMSUB_S 0x600007f -#define MATCH_FNMSUB_S 0x4b -#define MASK_FNMSUB_S 0x600007f -#define MATCH_FNMADD_S 0x4f -#define MASK_FNMADD_S 0x600007f -#define MATCH_FMADD_D 0x2000043 -#define MASK_FMADD_D 0x600007f -#define MATCH_FMSUB_D 0x2000047 -#define MASK_FMSUB_D 0x600007f -#define MATCH_FNMSUB_D 0x200004b -#define MASK_FNMSUB_D 0x600007f -#define MATCH_FNMADD_D 0x200004f -#define MASK_FNMADD_D 0x600007f -#define MATCH_FMADD_Q 0x6000043 -#define MASK_FMADD_Q 0x600007f -#define MATCH_FMSUB_Q 0x6000047 -#define MASK_FMSUB_Q 0x600007f -#define MATCH_FNMSUB_Q 0x600004b -#define MASK_FNMSUB_Q 0x600007f -#define MATCH_FNMADD_Q 0x600004f -#define MASK_FNMADD_Q 0x600007f -#define MATCH_C_ADDI4SPN 0x0 -#define MASK_C_ADDI4SPN 0xe003 -#define MATCH_C_FLD 0x2000 -#define MASK_C_FLD 0xe003 -#define MATCH_C_LW 0x4000 -#define MASK_C_LW 0xe003 -#define MATCH_C_FLW 0x6000 -#define MASK_C_FLW 0xe003 -#define MATCH_C_FSD 0xa000 -#define MASK_C_FSD 0xe003 -#define MATCH_C_SW 0xc000 -#define MASK_C_SW 0xe003 -#define MATCH_C_FSW 0xe000 -#define MASK_C_FSW 0xe003 -#define MATCH_C_ADDI 0x1 
-#define MASK_C_ADDI 0xe003 -#define MATCH_C_JAL 0x2001 -#define MASK_C_JAL 0xe003 -#define MATCH_C_LI 0x4001 -#define MASK_C_LI 0xe003 -#define MATCH_C_LUI 0x6001 -#define MASK_C_LUI 0xe003 -#define MATCH_C_SRLI 0x8001 -#define MASK_C_SRLI 0xec03 -#define MATCH_C_SRAI 0x8401 -#define MASK_C_SRAI 0xec03 -#define MATCH_C_ANDI 0x8801 -#define MASK_C_ANDI 0xec03 -#define MATCH_C_SUB 0x8c01 -#define MASK_C_SUB 0xfc63 -#define MATCH_C_XOR 0x8c21 -#define MASK_C_XOR 0xfc63 -#define MATCH_C_OR 0x8c41 -#define MASK_C_OR 0xfc63 -#define MATCH_C_AND 0x8c61 -#define MASK_C_AND 0xfc63 -#define MATCH_C_SUBW 0x9c01 -#define MASK_C_SUBW 0xfc63 -#define MATCH_C_ADDW 0x9c21 -#define MASK_C_ADDW 0xfc63 -#define MATCH_C_J 0xa001 -#define MASK_C_J 0xe003 -#define MATCH_C_BEQZ 0xc001 -#define MASK_C_BEQZ 0xe003 -#define MATCH_C_BNEZ 0xe001 -#define MASK_C_BNEZ 0xe003 -#define MATCH_C_SLLI 0x2 -#define MASK_C_SLLI 0xe003 -#define MATCH_C_FLDSP 0x2002 -#define MASK_C_FLDSP 0xe003 -#define MATCH_C_LWSP 0x4002 -#define MASK_C_LWSP 0xe003 -#define MATCH_C_FLWSP 0x6002 -#define MASK_C_FLWSP 0xe003 -#define MATCH_C_MV 0x8002 -#define MASK_C_MV 0xf003 -#define MATCH_C_ADD 0x9002 -#define MASK_C_ADD 0xf003 -#define MATCH_C_FSDSP 0xa002 -#define MASK_C_FSDSP 0xe003 -#define MATCH_C_SWSP 0xc002 -#define MASK_C_SWSP 0xe003 -#define MATCH_C_FSWSP 0xe002 -#define MASK_C_FSWSP 0xe003 -#define CSR_FFLAGS 0x1 -#define CSR_FRM 0x2 -#define CSR_FCSR 0x3 -#define CSR_USTATUS 0x0 -#define CSR_UIE 0x4 -#define CSR_UTVEC 0x5 -#define CSR_VSTART 0x8 -#define CSR_VXSAT 0x9 -#define CSR_VXRM 0xa -#define CSR_USCRATCH 0x40 -#define CSR_UEPC 0x41 -#define CSR_UCAUSE 0x42 -#define CSR_UTVAL 0x43 -#define CSR_UIP 0x44 -#define CSR_CYCLE 0xc00 -#define CSR_TIME 0xc01 -#define CSR_INSTRET 0xc02 -#define CSR_HPMCOUNTER3 0xc03 -#define CSR_HPMCOUNTER4 0xc04 -#define CSR_HPMCOUNTER5 0xc05 -#define CSR_HPMCOUNTER6 0xc06 -#define CSR_HPMCOUNTER7 0xc07 -#define CSR_HPMCOUNTER8 0xc08 -#define CSR_HPMCOUNTER9 0xc09 -#define CSR_HPMCOUNTER10 0xc0a -#define CSR_HPMCOUNTER11 0xc0b -#define CSR_HPMCOUNTER12 0xc0c -#define CSR_HPMCOUNTER13 0xc0d -#define CSR_HPMCOUNTER14 0xc0e -#define CSR_HPMCOUNTER15 0xc0f -#define CSR_HPMCOUNTER16 0xc10 -#define CSR_HPMCOUNTER17 0xc11 -#define CSR_HPMCOUNTER18 0xc12 -#define CSR_HPMCOUNTER19 0xc13 -#define CSR_HPMCOUNTER20 0xc14 -#define CSR_HPMCOUNTER21 0xc15 -#define CSR_HPMCOUNTER22 0xc16 -#define CSR_HPMCOUNTER23 0xc17 -#define CSR_HPMCOUNTER24 0xc18 -#define CSR_HPMCOUNTER25 0xc19 -#define CSR_HPMCOUNTER26 0xc1a -#define CSR_HPMCOUNTER27 0xc1b -#define CSR_HPMCOUNTER28 0xc1c -#define CSR_HPMCOUNTER29 0xc1d -#define CSR_HPMCOUNTER30 0xc1e -#define CSR_HPMCOUNTER31 0xc1f -#define CSR_VL 0xc20 -#define CSR_VTYPE 0xc21 -#define CSR_VLENB 0xc22 -#define CSR_SSTATUS 0x100 -#define CSR_SIE 0x104 -#define CSR_STVEC 0x105 -#define CSR_SCOUNTEREN 0x106 -#define CSR_SSCRATCH 0x140 -#define CSR_SEPC 0x141 -#define CSR_SCAUSE 0x142 -#define CSR_STVAL 0x143 -#define CSR_SIP 0x144 -#define CSR_SATP 0x180 -#define CSR_VSSTATUS 0x200 -#define CSR_VSIE 0x204 -#define CSR_VSTVEC 0x205 -#define CSR_VSSCRATCH 0x240 -#define CSR_VSEPC 0x241 -#define CSR_VSCAUSE 0x242 -#define CSR_VSTVAL 0x243 -#define CSR_VSIP 0x244 -#define CSR_VSATP 0x280 -#define CSR_HSTATUS 0x600 -#define CSR_HEDELEG 0x602 -#define CSR_HIDELEG 0x603 -#define CSR_HCOUNTEREN 0x606 -#define CSR_HGATP 0x680 -#define CSR_UTVT 0x7 -#define CSR_UNXTI 0x45 -#define CSR_UINTSTATUS 0x46 -#define CSR_USCRATCHCSW 0x48 -#define CSR_USCRATCHCSWL 0x49 -#define CSR_STVT 0x107 
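
A note for readers of the CSR renumbering above and below: every CSR_* constant is a raw 12-bit CSR address, consumed by the csrr/csrw instruction forms rather than by name. A minimal sketch of a numeric-address read accessor, assuming GCC-style inline assembly (CSRR_RAW and read_scause are illustrative names, not this codebase's CSR macros):

    /* Read a CSR by numeric address; the "i" constraint folds the CSR_*
     * constant directly into the csrr encoding at compile time. */
    #define CSRR_RAW(csr_addr)                                         \
        ({                                                             \
            unsigned long _val;                                        \
            asm volatile("csrr %0, %1" : "=r"(_val) : "i"(csr_addr));  \
            _val;                                                      \
        })

    static inline unsigned long read_scause(void)
    {
        return CSRR_RAW(CSR_SCAUSE); /* scause lives at address 0x142 */
    }
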
-#define CSR_SNXTI 0x145 -#define CSR_SINTSTATUS 0x146 -#define CSR_SSCRATCHCSW 0x148 -#define CSR_SSCRATCHCSWL 0x149 -#define CSR_MTVT 0x307 -#define CSR_MNXTI 0x345 -#define CSR_MINTSTATUS 0x346 -#define CSR_MSCRATCHCSW 0x348 -#define CSR_MSCRATCHCSWL 0x349 -#define CSR_MSTATUS 0x300 -#define CSR_MISA 0x301 -#define CSR_MEDELEG 0x302 -#define CSR_MIDELEG 0x303 -#define CSR_MIE 0x304 -#define CSR_MTVEC 0x305 -#define CSR_MCOUNTEREN 0x306 -#define CSR_MSCRATCH 0x340 -#define CSR_MEPC 0x341 -#define CSR_MCAUSE 0x342 -#define CSR_MTVAL 0x343 -#define CSR_MIP 0x344 -#define CSR_PMPCFG0 0x3a0 -#define CSR_PMPCFG1 0x3a1 -#define CSR_PMPCFG2 0x3a2 -#define CSR_PMPCFG3 0x3a3 -#define CSR_PMPADDR0 0x3b0 -#define CSR_PMPADDR1 0x3b1 -#define CSR_PMPADDR2 0x3b2 -#define CSR_PMPADDR3 0x3b3 -#define CSR_PMPADDR4 0x3b4 -#define CSR_PMPADDR5 0x3b5 -#define CSR_PMPADDR6 0x3b6 -#define CSR_PMPADDR7 0x3b7 -#define CSR_PMPADDR8 0x3b8 -#define CSR_PMPADDR9 0x3b9 -#define CSR_PMPADDR10 0x3ba -#define CSR_PMPADDR11 0x3bb -#define CSR_PMPADDR12 0x3bc -#define CSR_PMPADDR13 0x3bd -#define CSR_PMPADDR14 0x3be -#define CSR_PMPADDR15 0x3bf -#define CSR_TSELECT 0x7a0 -#define CSR_TDATA1 0x7a1 -#define CSR_TDATA2 0x7a2 -#define CSR_TDATA3 0x7a3 -#define CSR_DCSR 0x7b0 -#define CSR_DPC 0x7b1 -#define CSR_DSCRATCH 0x7b2 -#define CSR_MCYCLE 0xb00 -#define CSR_MINSTRET 0xb02 -#define CSR_MHPMCOUNTER3 0xb03 -#define CSR_MHPMCOUNTER4 0xb04 -#define CSR_MHPMCOUNTER5 0xb05 -#define CSR_MHPMCOUNTER6 0xb06 -#define CSR_MHPMCOUNTER7 0xb07 -#define CSR_MHPMCOUNTER8 0xb08 -#define CSR_MHPMCOUNTER9 0xb09 -#define CSR_MHPMCOUNTER10 0xb0a -#define CSR_MHPMCOUNTER11 0xb0b -#define CSR_MHPMCOUNTER12 0xb0c -#define CSR_MHPMCOUNTER13 0xb0d -#define CSR_MHPMCOUNTER14 0xb0e -#define CSR_MHPMCOUNTER15 0xb0f -#define CSR_MHPMCOUNTER16 0xb10 -#define CSR_MHPMCOUNTER17 0xb11 -#define CSR_MHPMCOUNTER18 0xb12 -#define CSR_MHPMCOUNTER19 0xb13 -#define CSR_MHPMCOUNTER20 0xb14 -#define CSR_MHPMCOUNTER21 0xb15 -#define CSR_MHPMCOUNTER22 0xb16 -#define CSR_MHPMCOUNTER23 0xb17 -#define CSR_MHPMCOUNTER24 0xb18 -#define CSR_MHPMCOUNTER25 0xb19 -#define CSR_MHPMCOUNTER26 0xb1a -#define CSR_MHPMCOUNTER27 0xb1b -#define CSR_MHPMCOUNTER28 0xb1c -#define CSR_MHPMCOUNTER29 0xb1d -#define CSR_MHPMCOUNTER30 0xb1e -#define CSR_MHPMCOUNTER31 0xb1f -#define CSR_MHPMEVENT3 0x323 -#define CSR_MHPMEVENT4 0x324 -#define CSR_MHPMEVENT5 0x325 -#define CSR_MHPMEVENT6 0x326 -#define CSR_MHPMEVENT7 0x327 -#define CSR_MHPMEVENT8 0x328 -#define CSR_MHPMEVENT9 0x329 -#define CSR_MHPMEVENT10 0x32a -#define CSR_MHPMEVENT11 0x32b -#define CSR_MHPMEVENT12 0x32c -#define CSR_MHPMEVENT13 0x32d -#define CSR_MHPMEVENT14 0x32e -#define CSR_MHPMEVENT15 0x32f -#define CSR_MHPMEVENT16 0x330 -#define CSR_MHPMEVENT17 0x331 -#define CSR_MHPMEVENT18 0x332 -#define CSR_MHPMEVENT19 0x333 -#define CSR_MHPMEVENT20 0x334 -#define CSR_MHPMEVENT21 0x335 -#define CSR_MHPMEVENT22 0x336 -#define CSR_MHPMEVENT23 0x337 -#define CSR_MHPMEVENT24 0x338 -#define CSR_MHPMEVENT25 0x339 -#define CSR_MHPMEVENT26 0x33a -#define CSR_MHPMEVENT27 0x33b -#define CSR_MHPMEVENT28 0x33c -#define CSR_MHPMEVENT29 0x33d -#define CSR_MHPMEVENT30 0x33e -#define CSR_MHPMEVENT31 0x33f -#define CSR_MVENDORID 0xf11 -#define CSR_MARCHID 0xf12 -#define CSR_MIMPID 0xf13 -#define CSR_MHARTID 0xf14 -#define CSR_CYCLEH 0xc80 -#define CSR_TIMEH 0xc81 -#define CSR_INSTRETH 0xc82 -#define CSR_HPMCOUNTER3H 0xc83 -#define CSR_HPMCOUNTER4H 0xc84 -#define CSR_HPMCOUNTER5H 0xc85 -#define CSR_HPMCOUNTER6H 0xc86 -#define CSR_HPMCOUNTER7H 0xc87 
-#define CSR_HPMCOUNTER8H 0xc88 -#define CSR_HPMCOUNTER9H 0xc89 -#define CSR_HPMCOUNTER10H 0xc8a -#define CSR_HPMCOUNTER11H 0xc8b -#define CSR_HPMCOUNTER12H 0xc8c -#define CSR_HPMCOUNTER13H 0xc8d -#define CSR_HPMCOUNTER14H 0xc8e -#define CSR_HPMCOUNTER15H 0xc8f -#define CSR_HPMCOUNTER16H 0xc90 -#define CSR_HPMCOUNTER17H 0xc91 -#define CSR_HPMCOUNTER18H 0xc92 -#define CSR_HPMCOUNTER19H 0xc93 -#define CSR_HPMCOUNTER20H 0xc94 -#define CSR_HPMCOUNTER21H 0xc95 -#define CSR_HPMCOUNTER22H 0xc96 -#define CSR_HPMCOUNTER23H 0xc97 -#define CSR_HPMCOUNTER24H 0xc98 -#define CSR_HPMCOUNTER25H 0xc99 -#define CSR_HPMCOUNTER26H 0xc9a -#define CSR_HPMCOUNTER27H 0xc9b -#define CSR_HPMCOUNTER28H 0xc9c -#define CSR_HPMCOUNTER29H 0xc9d -#define CSR_HPMCOUNTER30H 0xc9e -#define CSR_HPMCOUNTER31H 0xc9f -#define CSR_MCYCLEH 0xb80 -#define CSR_MINSTRETH 0xb82 -#define CSR_MHPMCOUNTER3H 0xb83 -#define CSR_MHPMCOUNTER4H 0xb84 -#define CSR_MHPMCOUNTER5H 0xb85 -#define CSR_MHPMCOUNTER6H 0xb86 -#define CSR_MHPMCOUNTER7H 0xb87 -#define CSR_MHPMCOUNTER8H 0xb88 -#define CSR_MHPMCOUNTER9H 0xb89 -#define CSR_MHPMCOUNTER10H 0xb8a -#define CSR_MHPMCOUNTER11H 0xb8b -#define CSR_MHPMCOUNTER12H 0xb8c -#define CSR_MHPMCOUNTER13H 0xb8d -#define CSR_MHPMCOUNTER14H 0xb8e -#define CSR_MHPMCOUNTER15H 0xb8f -#define CSR_MHPMCOUNTER16H 0xb90 -#define CSR_MHPMCOUNTER17H 0xb91 -#define CSR_MHPMCOUNTER18H 0xb92 -#define CSR_MHPMCOUNTER19H 0xb93 -#define CSR_MHPMCOUNTER20H 0xb94 -#define CSR_MHPMCOUNTER21H 0xb95 -#define CSR_MHPMCOUNTER22H 0xb96 -#define CSR_MHPMCOUNTER23H 0xb97 -#define CSR_MHPMCOUNTER24H 0xb98 -#define CSR_MHPMCOUNTER25H 0xb99 -#define CSR_MHPMCOUNTER26H 0xb9a -#define CSR_MHPMCOUNTER27H 0xb9b -#define CSR_MHPMCOUNTER28H 0xb9c -#define CSR_MHPMCOUNTER29H 0xb9d -#define CSR_MHPMCOUNTER30H 0xb9e -#define CSR_MHPMCOUNTER31H 0xb9f -#define CAUSE_MISALIGNED_FETCH 0x0 -#define CAUSE_FETCH_ACCESS 0x1 +#define MATCH_BEQ 0x63 +#define MASK_BEQ 0x707f +#define MATCH_BNE 0x1063 +#define MASK_BNE 0x707f +#define MATCH_BLT 0x4063 +#define MASK_BLT 0x707f +#define MATCH_BGE 0x5063 +#define MASK_BGE 0x707f +#define MATCH_BLTU 0x6063 +#define MASK_BLTU 0x707f +#define MATCH_BGEU 0x7063 +#define MASK_BGEU 0x707f +#define MATCH_JALR 0x67 +#define MASK_JALR 0x707f +#define MATCH_JAL 0x6f +#define MASK_JAL 0x7f +#define MATCH_LUI 0x37 +#define MASK_LUI 0x7f +#define MATCH_AUIPC 0x17 +#define MASK_AUIPC 0x7f +#define MATCH_ADDI 0x13 +#define MASK_ADDI 0x707f +#define MATCH_SLLI 0x1013 +#define MASK_SLLI 0xfc00707f +#define MATCH_SLTI 0x2013 +#define MASK_SLTI 0x707f +#define MATCH_SLTIU 0x3013 +#define MASK_SLTIU 0x707f +#define MATCH_XORI 0x4013 +#define MASK_XORI 0x707f +#define MATCH_SRLI 0x5013 +#define MASK_SRLI 0xfc00707f +#define MATCH_SRAI 0x40005013 +#define MASK_SRAI 0xfc00707f +#define MATCH_ORI 0x6013 +#define MASK_ORI 0x707f +#define MATCH_ANDI 0x7013 +#define MASK_ANDI 0x707f +#define MATCH_ADD 0x33 +#define MASK_ADD 0xfe00707f +#define MATCH_SUB 0x40000033 +#define MASK_SUB 0xfe00707f +#define MATCH_SLL 0x1033 +#define MASK_SLL 0xfe00707f +#define MATCH_SLT 0x2033 +#define MASK_SLT 0xfe00707f +#define MATCH_SLTU 0x3033 +#define MASK_SLTU 0xfe00707f +#define MATCH_XOR 0x4033 +#define MASK_XOR 0xfe00707f +#define MATCH_SRL 0x5033 +#define MASK_SRL 0xfe00707f +#define MATCH_SRA 0x40005033 +#define MASK_SRA 0xfe00707f +#define MATCH_OR 0x6033 +#define MASK_OR 0xfe00707f +#define MATCH_AND 0x7033 +#define MASK_AND 0xfe00707f +#define MATCH_ADDIW 0x1b +#define MASK_ADDIW 0x707f +#define MATCH_SLLIW 0x101b +#define MASK_SLLIW 
0xfe00707f +#define MATCH_SRLIW 0x501b +#define MASK_SRLIW 0xfe00707f +#define MATCH_SRAIW 0x4000501b +#define MASK_SRAIW 0xfe00707f +#define MATCH_ADDW 0x3b +#define MASK_ADDW 0xfe00707f +#define MATCH_SUBW 0x4000003b +#define MASK_SUBW 0xfe00707f +#define MATCH_SLLW 0x103b +#define MASK_SLLW 0xfe00707f +#define MATCH_SRLW 0x503b +#define MASK_SRLW 0xfe00707f +#define MATCH_SRAW 0x4000503b +#define MASK_SRAW 0xfe00707f +#define MATCH_LB 0x3 +#define MASK_LB 0x707f +#define MATCH_LH 0x1003 +#define MASK_LH 0x707f +#define MATCH_LW 0x2003 +#define MASK_LW 0x707f +#define MATCH_LD 0x3003 +#define MASK_LD 0x707f +#define MATCH_LBU 0x4003 +#define MASK_LBU 0x707f +#define MATCH_LHU 0x5003 +#define MASK_LHU 0x707f +#define MATCH_LWU 0x6003 +#define MASK_LWU 0x707f +#define MATCH_SB 0x23 +#define MASK_SB 0x707f +#define MATCH_SH 0x1023 +#define MASK_SH 0x707f +#define MATCH_SW 0x2023 +#define MASK_SW 0x707f +#define MATCH_SD 0x3023 +#define MASK_SD 0x707f +#define MATCH_FENCE 0xf +#define MASK_FENCE 0x707f +#define MATCH_FENCE_I 0x100f +#define MASK_FENCE_I 0x707f +#define MATCH_MUL 0x2000033 +#define MASK_MUL 0xfe00707f +#define MATCH_MULH 0x2001033 +#define MASK_MULH 0xfe00707f +#define MATCH_MULHSU 0x2002033 +#define MASK_MULHSU 0xfe00707f +#define MATCH_MULHU 0x2003033 +#define MASK_MULHU 0xfe00707f +#define MATCH_DIV 0x2004033 +#define MASK_DIV 0xfe00707f +#define MATCH_DIVU 0x2005033 +#define MASK_DIVU 0xfe00707f +#define MATCH_REM 0x2006033 +#define MASK_REM 0xfe00707f +#define MATCH_REMU 0x2007033 +#define MASK_REMU 0xfe00707f +#define MATCH_MULW 0x200003b +#define MASK_MULW 0xfe00707f +#define MATCH_DIVW 0x200403b +#define MASK_DIVW 0xfe00707f +#define MATCH_DIVUW 0x200503b +#define MASK_DIVUW 0xfe00707f +#define MATCH_REMW 0x200603b +#define MASK_REMW 0xfe00707f +#define MATCH_REMUW 0x200703b +#define MASK_REMUW 0xfe00707f +#define MATCH_AMOADD_W 0x202f +#define MASK_AMOADD_W 0xf800707f +#define MATCH_AMOXOR_W 0x2000202f +#define MASK_AMOXOR_W 0xf800707f +#define MATCH_AMOOR_W 0x4000202f +#define MASK_AMOOR_W 0xf800707f +#define MATCH_AMOAND_W 0x6000202f +#define MASK_AMOAND_W 0xf800707f +#define MATCH_AMOMIN_W 0x8000202f +#define MASK_AMOMIN_W 0xf800707f +#define MATCH_AMOMAX_W 0xa000202f +#define MASK_AMOMAX_W 0xf800707f +#define MATCH_AMOMINU_W 0xc000202f +#define MASK_AMOMINU_W 0xf800707f +#define MATCH_AMOMAXU_W 0xe000202f +#define MASK_AMOMAXU_W 0xf800707f +#define MATCH_AMOSWAP_W 0x800202f +#define MASK_AMOSWAP_W 0xf800707f +#define MATCH_LR_W 0x1000202f +#define MASK_LR_W 0xf9f0707f +#define MATCH_SC_W 0x1800202f +#define MASK_SC_W 0xf800707f +#define MATCH_AMOADD_D 0x302f +#define MASK_AMOADD_D 0xf800707f +#define MATCH_AMOXOR_D 0x2000302f +#define MASK_AMOXOR_D 0xf800707f +#define MATCH_AMOOR_D 0x4000302f +#define MASK_AMOOR_D 0xf800707f +#define MATCH_AMOAND_D 0x6000302f +#define MASK_AMOAND_D 0xf800707f +#define MATCH_AMOMIN_D 0x8000302f +#define MASK_AMOMIN_D 0xf800707f +#define MATCH_AMOMAX_D 0xa000302f +#define MASK_AMOMAX_D 0xf800707f +#define MATCH_AMOMINU_D 0xc000302f +#define MASK_AMOMINU_D 0xf800707f +#define MATCH_AMOMAXU_D 0xe000302f +#define MASK_AMOMAXU_D 0xf800707f +#define MATCH_AMOSWAP_D 0x800302f +#define MASK_AMOSWAP_D 0xf800707f +#define MATCH_LR_D 0x1000302f +#define MASK_LR_D 0xf9f0707f +#define MATCH_SC_D 0x1800302f +#define MASK_SC_D 0xf800707f +#define MATCH_ECALL 0x73 +#define MASK_ECALL 0xffffffff +#define MATCH_EBREAK 0x100073 +#define MASK_EBREAK 0xffffffff +#define MATCH_URET 0x200073 +#define MASK_URET 0xffffffff +#define MATCH_SRET 0x10200073 
+#define MASK_SRET 0xffffffff +#define MATCH_MRET 0x30200073 +#define MASK_MRET 0xffffffff +#define MATCH_DRET 0x7b200073 +#define MASK_DRET 0xffffffff +#define MATCH_SFENCE_VMA 0x12000073 +#define MASK_SFENCE_VMA 0xfe007fff +#define MATCH_WFI 0x10500073 +#define MASK_WFI 0xffffffff +#define MATCH_CSRRW 0x1073 +#define MASK_CSRRW 0x707f +#define MATCH_CSRRS 0x2073 +#define MASK_CSRRS 0x707f +#define MATCH_CSRRC 0x3073 +#define MASK_CSRRC 0x707f +#define MATCH_CSRRWI 0x5073 +#define MASK_CSRRWI 0x707f +#define MATCH_CSRRSI 0x6073 +#define MASK_CSRRSI 0x707f +#define MATCH_CSRRCI 0x7073 +#define MASK_CSRRCI 0x707f +#define MATCH_HFENCE_VVMA 0x22000073 +#define MASK_HFENCE_VVMA 0xfe007fff +#define MATCH_HFENCE_GVMA 0x62000073 +#define MASK_HFENCE_GVMA 0xfe007fff +#define MATCH_FADD_S 0x53 +#define MASK_FADD_S 0xfe00007f +#define MATCH_FSUB_S 0x8000053 +#define MASK_FSUB_S 0xfe00007f +#define MATCH_FMUL_S 0x10000053 +#define MASK_FMUL_S 0xfe00007f +#define MATCH_FDIV_S 0x18000053 +#define MASK_FDIV_S 0xfe00007f +#define MATCH_FSGNJ_S 0x20000053 +#define MASK_FSGNJ_S 0xfe00707f +#define MATCH_FSGNJN_S 0x20001053 +#define MASK_FSGNJN_S 0xfe00707f +#define MATCH_FSGNJX_S 0x20002053 +#define MASK_FSGNJX_S 0xfe00707f +#define MATCH_FMIN_S 0x28000053 +#define MASK_FMIN_S 0xfe00707f +#define MATCH_FMAX_S 0x28001053 +#define MASK_FMAX_S 0xfe00707f +#define MATCH_FSQRT_S 0x58000053 +#define MASK_FSQRT_S 0xfff0007f +#define MATCH_FADD_D 0x2000053 +#define MASK_FADD_D 0xfe00007f +#define MATCH_FSUB_D 0xa000053 +#define MASK_FSUB_D 0xfe00007f +#define MATCH_FMUL_D 0x12000053 +#define MASK_FMUL_D 0xfe00007f +#define MATCH_FDIV_D 0x1a000053 +#define MASK_FDIV_D 0xfe00007f +#define MATCH_FSGNJ_D 0x22000053 +#define MASK_FSGNJ_D 0xfe00707f +#define MATCH_FSGNJN_D 0x22001053 +#define MASK_FSGNJN_D 0xfe00707f +#define MATCH_FSGNJX_D 0x22002053 +#define MASK_FSGNJX_D 0xfe00707f +#define MATCH_FMIN_D 0x2a000053 +#define MASK_FMIN_D 0xfe00707f +#define MATCH_FMAX_D 0x2a001053 +#define MASK_FMAX_D 0xfe00707f +#define MATCH_FCVT_S_D 0x40100053 +#define MASK_FCVT_S_D 0xfff0007f +#define MATCH_FCVT_D_S 0x42000053 +#define MASK_FCVT_D_S 0xfff0007f +#define MATCH_FSQRT_D 0x5a000053 +#define MASK_FSQRT_D 0xfff0007f +#define MATCH_FADD_Q 0x6000053 +#define MASK_FADD_Q 0xfe00007f +#define MATCH_FSUB_Q 0xe000053 +#define MASK_FSUB_Q 0xfe00007f +#define MATCH_FMUL_Q 0x16000053 +#define MASK_FMUL_Q 0xfe00007f +#define MATCH_FDIV_Q 0x1e000053 +#define MASK_FDIV_Q 0xfe00007f +#define MATCH_FSGNJ_Q 0x26000053 +#define MASK_FSGNJ_Q 0xfe00707f +#define MATCH_FSGNJN_Q 0x26001053 +#define MASK_FSGNJN_Q 0xfe00707f +#define MATCH_FSGNJX_Q 0x26002053 +#define MASK_FSGNJX_Q 0xfe00707f +#define MATCH_FMIN_Q 0x2e000053 +#define MASK_FMIN_Q 0xfe00707f +#define MATCH_FMAX_Q 0x2e001053 +#define MASK_FMAX_Q 0xfe00707f +#define MATCH_FCVT_S_Q 0x40300053 +#define MASK_FCVT_S_Q 0xfff0007f +#define MATCH_FCVT_Q_S 0x46000053 +#define MASK_FCVT_Q_S 0xfff0007f +#define MATCH_FCVT_D_Q 0x42300053 +#define MASK_FCVT_D_Q 0xfff0007f +#define MATCH_FCVT_Q_D 0x46100053 +#define MASK_FCVT_Q_D 0xfff0007f +#define MATCH_FSQRT_Q 0x5e000053 +#define MASK_FSQRT_Q 0xfff0007f +#define MATCH_FLE_S 0xa0000053 +#define MASK_FLE_S 0xfe00707f +#define MATCH_FLT_S 0xa0001053 +#define MASK_FLT_S 0xfe00707f +#define MATCH_FEQ_S 0xa0002053 +#define MASK_FEQ_S 0xfe00707f +#define MATCH_FLE_D 0xa2000053 +#define MASK_FLE_D 0xfe00707f +#define MATCH_FLT_D 0xa2001053 +#define MASK_FLT_D 0xfe00707f +#define MATCH_FEQ_D 0xa2002053 +#define MASK_FEQ_D 0xfe00707f +#define 
MATCH_FLE_Q 0xa6000053 +#define MASK_FLE_Q 0xfe00707f +#define MATCH_FLT_Q 0xa6001053 +#define MASK_FLT_Q 0xfe00707f +#define MATCH_FEQ_Q 0xa6002053 +#define MASK_FEQ_Q 0xfe00707f +#define MATCH_FCVT_W_S 0xc0000053 +#define MASK_FCVT_W_S 0xfff0007f +#define MATCH_FCVT_WU_S 0xc0100053 +#define MASK_FCVT_WU_S 0xfff0007f +#define MATCH_FCVT_L_S 0xc0200053 +#define MASK_FCVT_L_S 0xfff0007f +#define MATCH_FCVT_LU_S 0xc0300053 +#define MASK_FCVT_LU_S 0xfff0007f +#define MATCH_FMV_X_W 0xe0000053 +#define MASK_FMV_X_W 0xfff0707f +#define MATCH_FCLASS_S 0xe0001053 +#define MASK_FCLASS_S 0xfff0707f +#define MATCH_FCVT_W_D 0xc2000053 +#define MASK_FCVT_W_D 0xfff0007f +#define MATCH_FCVT_WU_D 0xc2100053 +#define MASK_FCVT_WU_D 0xfff0007f +#define MATCH_FCVT_L_D 0xc2200053 +#define MASK_FCVT_L_D 0xfff0007f +#define MATCH_FCVT_LU_D 0xc2300053 +#define MASK_FCVT_LU_D 0xfff0007f +#define MATCH_FMV_X_D 0xe2000053 +#define MASK_FMV_X_D 0xfff0707f +#define MATCH_FCLASS_D 0xe2001053 +#define MASK_FCLASS_D 0xfff0707f +#define MATCH_FCVT_W_Q 0xc6000053 +#define MASK_FCVT_W_Q 0xfff0007f +#define MATCH_FCVT_WU_Q 0xc6100053 +#define MASK_FCVT_WU_Q 0xfff0007f +#define MATCH_FCVT_L_Q 0xc6200053 +#define MASK_FCVT_L_Q 0xfff0007f +#define MATCH_FCVT_LU_Q 0xc6300053 +#define MASK_FCVT_LU_Q 0xfff0007f +#define MATCH_FMV_X_Q 0xe6000053 +#define MASK_FMV_X_Q 0xfff0707f +#define MATCH_FCLASS_Q 0xe6001053 +#define MASK_FCLASS_Q 0xfff0707f +#define MATCH_FCVT_S_W 0xd0000053 +#define MASK_FCVT_S_W 0xfff0007f +#define MATCH_FCVT_S_WU 0xd0100053 +#define MASK_FCVT_S_WU 0xfff0007f +#define MATCH_FCVT_S_L 0xd0200053 +#define MASK_FCVT_S_L 0xfff0007f +#define MATCH_FCVT_S_LU 0xd0300053 +#define MASK_FCVT_S_LU 0xfff0007f +#define MATCH_FMV_W_X 0xf0000053 +#define MASK_FMV_W_X 0xfff0707f +#define MATCH_FCVT_D_W 0xd2000053 +#define MASK_FCVT_D_W 0xfff0007f +#define MATCH_FCVT_D_WU 0xd2100053 +#define MASK_FCVT_D_WU 0xfff0007f +#define MATCH_FCVT_D_L 0xd2200053 +#define MASK_FCVT_D_L 0xfff0007f +#define MATCH_FCVT_D_LU 0xd2300053 +#define MASK_FCVT_D_LU 0xfff0007f +#define MATCH_FMV_D_X 0xf2000053 +#define MASK_FMV_D_X 0xfff0707f +#define MATCH_FCVT_Q_W 0xd6000053 +#define MASK_FCVT_Q_W 0xfff0007f +#define MATCH_FCVT_Q_WU 0xd6100053 +#define MASK_FCVT_Q_WU 0xfff0007f +#define MATCH_FCVT_Q_L 0xd6200053 +#define MASK_FCVT_Q_L 0xfff0007f +#define MATCH_FCVT_Q_LU 0xd6300053 +#define MASK_FCVT_Q_LU 0xfff0007f +#define MATCH_FMV_Q_X 0xf6000053 +#define MASK_FMV_Q_X 0xfff0707f +#define MATCH_FLW 0x2007 +#define MASK_FLW 0x707f +#define MATCH_FLD 0x3007 +#define MASK_FLD 0x707f +#define MATCH_FLQ 0x4007 +#define MASK_FLQ 0x707f +#define MATCH_FSW 0x2027 +#define MASK_FSW 0x707f +#define MATCH_FSD 0x3027 +#define MASK_FSD 0x707f +#define MATCH_FSQ 0x4027 +#define MASK_FSQ 0x707f +#define MATCH_FMADD_S 0x43 +#define MASK_FMADD_S 0x600007f +#define MATCH_FMSUB_S 0x47 +#define MASK_FMSUB_S 0x600007f +#define MATCH_FNMSUB_S 0x4b +#define MASK_FNMSUB_S 0x600007f +#define MATCH_FNMADD_S 0x4f +#define MASK_FNMADD_S 0x600007f +#define MATCH_FMADD_D 0x2000043 +#define MASK_FMADD_D 0x600007f +#define MATCH_FMSUB_D 0x2000047 +#define MASK_FMSUB_D 0x600007f +#define MATCH_FNMSUB_D 0x200004b +#define MASK_FNMSUB_D 0x600007f +#define MATCH_FNMADD_D 0x200004f +#define MASK_FNMADD_D 0x600007f +#define MATCH_FMADD_Q 0x6000043 +#define MASK_FMADD_Q 0x600007f +#define MATCH_FMSUB_Q 0x6000047 +#define MASK_FMSUB_Q 0x600007f +#define MATCH_FNMSUB_Q 0x600004b +#define MASK_FNMSUB_Q 0x600007f +#define MATCH_FNMADD_Q 0x600004f +#define MASK_FNMADD_Q 0x600007f 
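
As a reading aid for these encoding tables: a 32-bit opcode belongs to a given instruction exactly when its fixed bits, selected by the MASK_* constant, equal the MATCH_* constant; the variable fields (rd, rs1, rs2, immediates) are masked away first. A hedged sketch of how a trap handler might classify a faulting instruction during MMIO emulation (ins_matches and is_word_load are illustrative helpers, not part of this patch):

    #include <stdbool.h>
    #include <stdint.h>

    static inline bool ins_matches(uint32_t insn, uint32_t match, uint32_t mask)
    {
        /* Keep only the fixed opcode/funct bits, then compare. */
        return (insn & mask) == match;
    }

    static inline bool is_word_load(uint32_t insn)
    {
        return ins_matches(insn, MATCH_LW, MASK_LW) ||
            ins_matches(insn, MATCH_LWU, MASK_LWU);
    }
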
+#define MATCH_C_ADDI4SPN 0x0 +#define MASK_C_ADDI4SPN 0xe003 +#define MATCH_C_FLD 0x2000 +#define MASK_C_FLD 0xe003 +#define MATCH_C_LW 0x4000 +#define MASK_C_LW 0xe003 +#define MATCH_C_FLW 0x6000 +#define MASK_C_FLW 0xe003 +#define MATCH_C_FSD 0xa000 +#define MASK_C_FSD 0xe003 +#define MATCH_C_SW 0xc000 +#define MASK_C_SW 0xe003 +#define MATCH_C_FSW 0xe000 +#define MASK_C_FSW 0xe003 +#define MATCH_C_ADDI 0x1 +#define MASK_C_ADDI 0xe003 +#define MATCH_C_JAL 0x2001 +#define MASK_C_JAL 0xe003 +#define MATCH_C_LI 0x4001 +#define MASK_C_LI 0xe003 +#define MATCH_C_LUI 0x6001 +#define MASK_C_LUI 0xe003 +#define MATCH_C_SRLI 0x8001 +#define MASK_C_SRLI 0xec03 +#define MATCH_C_SRAI 0x8401 +#define MASK_C_SRAI 0xec03 +#define MATCH_C_ANDI 0x8801 +#define MASK_C_ANDI 0xec03 +#define MATCH_C_SUB 0x8c01 +#define MASK_C_SUB 0xfc63 +#define MATCH_C_XOR 0x8c21 +#define MASK_C_XOR 0xfc63 +#define MATCH_C_OR 0x8c41 +#define MASK_C_OR 0xfc63 +#define MATCH_C_AND 0x8c61 +#define MASK_C_AND 0xfc63 +#define MATCH_C_SUBW 0x9c01 +#define MASK_C_SUBW 0xfc63 +#define MATCH_C_ADDW 0x9c21 +#define MASK_C_ADDW 0xfc63 +#define MATCH_C_J 0xa001 +#define MASK_C_J 0xe003 +#define MATCH_C_BEQZ 0xc001 +#define MASK_C_BEQZ 0xe003 +#define MATCH_C_BNEZ 0xe001 +#define MASK_C_BNEZ 0xe003 +#define MATCH_C_SLLI 0x2 +#define MASK_C_SLLI 0xe003 +#define MATCH_C_FLDSP 0x2002 +#define MASK_C_FLDSP 0xe003 +#define MATCH_C_LWSP 0x4002 +#define MASK_C_LWSP 0xe003 +#define MATCH_C_FLWSP 0x6002 +#define MASK_C_FLWSP 0xe003 +#define MATCH_C_MV 0x8002 +#define MASK_C_MV 0xf003 +#define MATCH_C_ADD 0x9002 +#define MASK_C_ADD 0xf003 +#define MATCH_C_FSDSP 0xa002 +#define MASK_C_FSDSP 0xe003 +#define MATCH_C_SWSP 0xc002 +#define MASK_C_SWSP 0xe003 +#define MATCH_C_FSWSP 0xe002 +#define MASK_C_FSWSP 0xe003 +#define CSR_FFLAGS 0x1 +#define CSR_FRM 0x2 +#define CSR_FCSR 0x3 +#define CSR_USTATUS 0x0 +#define CSR_UIE 0x4 +#define CSR_UTVEC 0x5 +#define CSR_VSTART 0x8 +#define CSR_VXSAT 0x9 +#define CSR_VXRM 0xa +#define CSR_USCRATCH 0x40 +#define CSR_UEPC 0x41 +#define CSR_UCAUSE 0x42 +#define CSR_UTVAL 0x43 +#define CSR_UIP 0x44 +#define CSR_CYCLE 0xc00 +#define CSR_TIME 0xc01 +#define CSR_INSTRET 0xc02 +#define CSR_HPMCOUNTER3 0xc03 +#define CSR_HPMCOUNTER4 0xc04 +#define CSR_HPMCOUNTER5 0xc05 +#define CSR_HPMCOUNTER6 0xc06 +#define CSR_HPMCOUNTER7 0xc07 +#define CSR_HPMCOUNTER8 0xc08 +#define CSR_HPMCOUNTER9 0xc09 +#define CSR_HPMCOUNTER10 0xc0a +#define CSR_HPMCOUNTER11 0xc0b +#define CSR_HPMCOUNTER12 0xc0c +#define CSR_HPMCOUNTER13 0xc0d +#define CSR_HPMCOUNTER14 0xc0e +#define CSR_HPMCOUNTER15 0xc0f +#define CSR_HPMCOUNTER16 0xc10 +#define CSR_HPMCOUNTER17 0xc11 +#define CSR_HPMCOUNTER18 0xc12 +#define CSR_HPMCOUNTER19 0xc13 +#define CSR_HPMCOUNTER20 0xc14 +#define CSR_HPMCOUNTER21 0xc15 +#define CSR_HPMCOUNTER22 0xc16 +#define CSR_HPMCOUNTER23 0xc17 +#define CSR_HPMCOUNTER24 0xc18 +#define CSR_HPMCOUNTER25 0xc19 +#define CSR_HPMCOUNTER26 0xc1a +#define CSR_HPMCOUNTER27 0xc1b +#define CSR_HPMCOUNTER28 0xc1c +#define CSR_HPMCOUNTER29 0xc1d +#define CSR_HPMCOUNTER30 0xc1e +#define CSR_HPMCOUNTER31 0xc1f +#define CSR_VL 0xc20 +#define CSR_VTYPE 0xc21 +#define CSR_VLENB 0xc22 +#define CSR_SSTATUS 0x100 +#define CSR_SIE 0x104 +#define CSR_STVEC 0x105 +#define CSR_SCOUNTEREN 0x106 +#define CSR_SSCRATCH 0x140 +#define CSR_SEPC 0x141 +#define CSR_SCAUSE 0x142 +#define CSR_STVAL 0x143 +#define CSR_SIP 0x144 +#define CSR_SATP 0x180 +#define CSR_VSSTATUS 0x200 +#define CSR_VSIE 0x204 +#define CSR_VSTVEC 0x205 +#define CSR_VSSCRATCH 0x240 +#define 
CSR_VSEPC 0x241 +#define CSR_VSCAUSE 0x242 +#define CSR_VSTVAL 0x243 +#define CSR_VSIP 0x244 +#define CSR_VSATP 0x280 +#define CSR_HSTATUS 0x600 +#define CSR_HEDELEG 0x602 +#define CSR_HIDELEG 0x603 +#define CSR_HCOUNTEREN 0x606 +#define CSR_HGATP 0x680 +#define CSR_UTVT 0x7 +#define CSR_UNXTI 0x45 +#define CSR_UINTSTATUS 0x46 +#define CSR_USCRATCHCSW 0x48 +#define CSR_USCRATCHCSWL 0x49 +#define CSR_STVT 0x107 +#define CSR_SNXTI 0x145 +#define CSR_SINTSTATUS 0x146 +#define CSR_SSCRATCHCSW 0x148 +#define CSR_SSCRATCHCSWL 0x149 +#define CSR_MTVT 0x307 +#define CSR_MNXTI 0x345 +#define CSR_MINTSTATUS 0x346 +#define CSR_MSCRATCHCSW 0x348 +#define CSR_MSCRATCHCSWL 0x349 +#define CSR_MSTATUS 0x300 +#define CSR_MISA 0x301 +#define CSR_MEDELEG 0x302 +#define CSR_MIDELEG 0x303 +#define CSR_MIE 0x304 +#define CSR_MTVEC 0x305 +#define CSR_MCOUNTEREN 0x306 +#define CSR_MSCRATCH 0x340 +#define CSR_MEPC 0x341 +#define CSR_MCAUSE 0x342 +#define CSR_MTVAL 0x343 +#define CSR_MIP 0x344 +#define CSR_PMPCFG0 0x3a0 +#define CSR_PMPCFG1 0x3a1 +#define CSR_PMPCFG2 0x3a2 +#define CSR_PMPCFG3 0x3a3 +#define CSR_PMPADDR0 0x3b0 +#define CSR_PMPADDR1 0x3b1 +#define CSR_PMPADDR2 0x3b2 +#define CSR_PMPADDR3 0x3b3 +#define CSR_PMPADDR4 0x3b4 +#define CSR_PMPADDR5 0x3b5 +#define CSR_PMPADDR6 0x3b6 +#define CSR_PMPADDR7 0x3b7 +#define CSR_PMPADDR8 0x3b8 +#define CSR_PMPADDR9 0x3b9 +#define CSR_PMPADDR10 0x3ba +#define CSR_PMPADDR11 0x3bb +#define CSR_PMPADDR12 0x3bc +#define CSR_PMPADDR13 0x3bd +#define CSR_PMPADDR14 0x3be +#define CSR_PMPADDR15 0x3bf +#define CSR_TSELECT 0x7a0 +#define CSR_TDATA1 0x7a1 +#define CSR_TDATA2 0x7a2 +#define CSR_TDATA3 0x7a3 +#define CSR_DCSR 0x7b0 +#define CSR_DPC 0x7b1 +#define CSR_DSCRATCH 0x7b2 +#define CSR_MCYCLE 0xb00 +#define CSR_MINSTRET 0xb02 +#define CSR_MHPMCOUNTER3 0xb03 +#define CSR_MHPMCOUNTER4 0xb04 +#define CSR_MHPMCOUNTER5 0xb05 +#define CSR_MHPMCOUNTER6 0xb06 +#define CSR_MHPMCOUNTER7 0xb07 +#define CSR_MHPMCOUNTER8 0xb08 +#define CSR_MHPMCOUNTER9 0xb09 +#define CSR_MHPMCOUNTER10 0xb0a +#define CSR_MHPMCOUNTER11 0xb0b +#define CSR_MHPMCOUNTER12 0xb0c +#define CSR_MHPMCOUNTER13 0xb0d +#define CSR_MHPMCOUNTER14 0xb0e +#define CSR_MHPMCOUNTER15 0xb0f +#define CSR_MHPMCOUNTER16 0xb10 +#define CSR_MHPMCOUNTER17 0xb11 +#define CSR_MHPMCOUNTER18 0xb12 +#define CSR_MHPMCOUNTER19 0xb13 +#define CSR_MHPMCOUNTER20 0xb14 +#define CSR_MHPMCOUNTER21 0xb15 +#define CSR_MHPMCOUNTER22 0xb16 +#define CSR_MHPMCOUNTER23 0xb17 +#define CSR_MHPMCOUNTER24 0xb18 +#define CSR_MHPMCOUNTER25 0xb19 +#define CSR_MHPMCOUNTER26 0xb1a +#define CSR_MHPMCOUNTER27 0xb1b +#define CSR_MHPMCOUNTER28 0xb1c +#define CSR_MHPMCOUNTER29 0xb1d +#define CSR_MHPMCOUNTER30 0xb1e +#define CSR_MHPMCOUNTER31 0xb1f +#define CSR_MHPMEVENT3 0x323 +#define CSR_MHPMEVENT4 0x324 +#define CSR_MHPMEVENT5 0x325 +#define CSR_MHPMEVENT6 0x326 +#define CSR_MHPMEVENT7 0x327 +#define CSR_MHPMEVENT8 0x328 +#define CSR_MHPMEVENT9 0x329 +#define CSR_MHPMEVENT10 0x32a +#define CSR_MHPMEVENT11 0x32b +#define CSR_MHPMEVENT12 0x32c +#define CSR_MHPMEVENT13 0x32d +#define CSR_MHPMEVENT14 0x32e +#define CSR_MHPMEVENT15 0x32f +#define CSR_MHPMEVENT16 0x330 +#define CSR_MHPMEVENT17 0x331 +#define CSR_MHPMEVENT18 0x332 +#define CSR_MHPMEVENT19 0x333 +#define CSR_MHPMEVENT20 0x334 +#define CSR_MHPMEVENT21 0x335 +#define CSR_MHPMEVENT22 0x336 +#define CSR_MHPMEVENT23 0x337 +#define CSR_MHPMEVENT24 0x338 +#define CSR_MHPMEVENT25 0x339 +#define CSR_MHPMEVENT26 0x33a +#define CSR_MHPMEVENT27 0x33b +#define CSR_MHPMEVENT28 0x33c +#define CSR_MHPMEVENT29 
0x33d +#define CSR_MHPMEVENT30 0x33e +#define CSR_MHPMEVENT31 0x33f +#define CSR_MVENDORID 0xf11 +#define CSR_MARCHID 0xf12 +#define CSR_MIMPID 0xf13 +#define CSR_MHARTID 0xf14 +#define CSR_CYCLEH 0xc80 +#define CSR_TIMEH 0xc81 +#define CSR_INSTRETH 0xc82 +#define CSR_HPMCOUNTER3H 0xc83 +#define CSR_HPMCOUNTER4H 0xc84 +#define CSR_HPMCOUNTER5H 0xc85 +#define CSR_HPMCOUNTER6H 0xc86 +#define CSR_HPMCOUNTER7H 0xc87 +#define CSR_HPMCOUNTER8H 0xc88 +#define CSR_HPMCOUNTER9H 0xc89 +#define CSR_HPMCOUNTER10H 0xc8a +#define CSR_HPMCOUNTER11H 0xc8b +#define CSR_HPMCOUNTER12H 0xc8c +#define CSR_HPMCOUNTER13H 0xc8d +#define CSR_HPMCOUNTER14H 0xc8e +#define CSR_HPMCOUNTER15H 0xc8f +#define CSR_HPMCOUNTER16H 0xc90 +#define CSR_HPMCOUNTER17H 0xc91 +#define CSR_HPMCOUNTER18H 0xc92 +#define CSR_HPMCOUNTER19H 0xc93 +#define CSR_HPMCOUNTER20H 0xc94 +#define CSR_HPMCOUNTER21H 0xc95 +#define CSR_HPMCOUNTER22H 0xc96 +#define CSR_HPMCOUNTER23H 0xc97 +#define CSR_HPMCOUNTER24H 0xc98 +#define CSR_HPMCOUNTER25H 0xc99 +#define CSR_HPMCOUNTER26H 0xc9a +#define CSR_HPMCOUNTER27H 0xc9b +#define CSR_HPMCOUNTER28H 0xc9c +#define CSR_HPMCOUNTER29H 0xc9d +#define CSR_HPMCOUNTER30H 0xc9e +#define CSR_HPMCOUNTER31H 0xc9f +#define CSR_MCYCLEH 0xb80 +#define CSR_MINSTRETH 0xb82 +#define CSR_MHPMCOUNTER3H 0xb83 +#define CSR_MHPMCOUNTER4H 0xb84 +#define CSR_MHPMCOUNTER5H 0xb85 +#define CSR_MHPMCOUNTER6H 0xb86 +#define CSR_MHPMCOUNTER7H 0xb87 +#define CSR_MHPMCOUNTER8H 0xb88 +#define CSR_MHPMCOUNTER9H 0xb89 +#define CSR_MHPMCOUNTER10H 0xb8a +#define CSR_MHPMCOUNTER11H 0xb8b +#define CSR_MHPMCOUNTER12H 0xb8c +#define CSR_MHPMCOUNTER13H 0xb8d +#define CSR_MHPMCOUNTER14H 0xb8e +#define CSR_MHPMCOUNTER15H 0xb8f +#define CSR_MHPMCOUNTER16H 0xb90 +#define CSR_MHPMCOUNTER17H 0xb91 +#define CSR_MHPMCOUNTER18H 0xb92 +#define CSR_MHPMCOUNTER19H 0xb93 +#define CSR_MHPMCOUNTER20H 0xb94 +#define CSR_MHPMCOUNTER21H 0xb95 +#define CSR_MHPMCOUNTER22H 0xb96 +#define CSR_MHPMCOUNTER23H 0xb97 +#define CSR_MHPMCOUNTER24H 0xb98 +#define CSR_MHPMCOUNTER25H 0xb99 +#define CSR_MHPMCOUNTER26H 0xb9a +#define CSR_MHPMCOUNTER27H 0xb9b +#define CSR_MHPMCOUNTER28H 0xb9c +#define CSR_MHPMCOUNTER29H 0xb9d +#define CSR_MHPMCOUNTER30H 0xb9e +#define CSR_MHPMCOUNTER31H 0xb9f +#define CAUSE_MISALIGNED_FETCH 0x0 +#define CAUSE_FETCH_ACCESS 0x1 #define CAUSE_ILLEGAL_INSTRUCTION 0x2 -#define CAUSE_BREAKPOINT 0x3 -#define CAUSE_MISALIGNED_LOAD 0x4 -#define CAUSE_LOAD_ACCESS 0x5 -#define CAUSE_MISALIGNED_STORE 0x6 -#define CAUSE_STORE_ACCESS 0x7 -#define CAUSE_USER_ECALL 0x8 -#define CAUSE_SUPERVISOR_ECALL 0x9 -#define CAUSE_HYPERVISOR_ECALL 0xa -#define CAUSE_MACHINE_ECALL 0xb -#define CAUSE_FETCH_PAGE_FAULT 0xc -#define CAUSE_LOAD_PAGE_FAULT 0xd -#define CAUSE_STORE_PAGE_FAULT 0xf +#define CAUSE_BREAKPOINT 0x3 +#define CAUSE_MISALIGNED_LOAD 0x4 +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_MISALIGNED_STORE 0x6 +#define CAUSE_STORE_ACCESS 0x7 +#define CAUSE_USER_ECALL 0x8 +#define CAUSE_SUPERVISOR_ECALL 0x9 +#define CAUSE_HYPERVISOR_ECALL 0xa +#define CAUSE_MACHINE_ECALL 0xb +#define CAUSE_FETCH_PAGE_FAULT 0xc +#define CAUSE_LOAD_PAGE_FAULT 0xd +#define CAUSE_STORE_PAGE_FAULT 0xf #endif #ifdef DECLARE_INSN DECLARE_INSN(beq, MATCH_BEQ, MASK_BEQ) @@ -761,7 +761,7 @@ DECLARE_INSN(sub, MATCH_SUB, MASK_SUB) DECLARE_INSN(sll, MATCH_SLL, MASK_SLL) DECLARE_INSN(slt, MATCH_SLT, MASK_SLT) DECLARE_INSN(sltu, MATCH_SLTU, MASK_SLTU) -DECLARE_INSN (xor, MATCH_XOR, MASK_XOR) +DECLARE_INSN(xor, MATCH_XOR, MASK_XOR) DECLARE_INSN(srl, MATCH_SRL, MASK_SRL) DECLARE_INSN(sra, MATCH_SRA, 
MASK_SRA) DECLARE_INSN(or, MATCH_OR, MASK_OR) diff --git a/src/arch/riscv/inc/arch/page_table.h b/src/arch/riscv/inc/arch/page_table.h index 0be3fd4b2..80fa6fd2c 100644 --- a/src/arch/riscv/inc/arch/page_table.h +++ b/src/arch/riscv/inc/arch/page_table.h @@ -10,64 +10,63 @@ #include #define HYP_ROOT_PT_SIZE (PAGE_SIZE) -#define PAGE_SHIFT (12) +#define PAGE_SHIFT (12) -#define PT_SHARED_LVL (0) +#define PT_SHARED_LVL (0) #if (RV64) -#define PTE_MASK BIT64_MASK +#define PTE_MASK BIT64_MASK #define PTE_ADDR_MSK PTE_MASK(12, 44) #elif (RV32) -#define PTE_MASK BIT32_MASK +#define PTE_MASK BIT32_MASK #define PTE_ADDR_MSK PTE_MASK(12, 22) #endif -#define PTE_FLAGS_MSK PTE_MASK(0, 8) +#define PTE_FLAGS_MSK PTE_MASK(0, 8) -#define PTE_VALID (1ULL << 0) -#define PTE_READ (1ULL << 1) -#define PTE_WRITE (1ULL << 2) -#define PTE_EXECUTE (1ULL << 3) -#define PTE_USER (1ULL << 4) -#define PTE_GLOBAL (1ULL << 5) -#define PTE_ACCESS (1ULL << 6) -#define PTE_DIRTY (1ULL << 7) +#define PTE_VALID (1ULL << 0) +#define PTE_READ (1ULL << 1) +#define PTE_WRITE (1ULL << 2) +#define PTE_EXECUTE (1ULL << 3) +#define PTE_USER (1ULL << 4) +#define PTE_GLOBAL (1ULL << 5) +#define PTE_ACCESS (1ULL << 6) +#define PTE_DIRTY (1ULL << 7) -#define PTE_RO (PTE_READ) -#define PTE_RW (PTE_READ | PTE_WRITE) -#define PTE_XO (PTE_EXECUTE) -#define PTE_RX (PTE_READ | PTE_EXECUTE) -#define PTE_RWX (PTE_READ | PTE_WRITE | PTE_EXECUTE) +#define PTE_RO (PTE_READ) +#define PTE_RW (PTE_READ | PTE_WRITE) +#define PTE_XO (PTE_EXECUTE) +#define PTE_RX (PTE_READ | PTE_EXECUTE) +#define PTE_RWX (PTE_READ | PTE_WRITE | PTE_EXECUTE) -#define PTE_RSW_OFF 8 -#define PTE_RSW_LEN 2 -#define PTE_RSW_MSK PTE_MASK(PTE_RSW_OFF, PTE_RSW_LEN) +#define PTE_RSW_OFF 8 +#define PTE_RSW_LEN 2 +#define PTE_RSW_MSK PTE_MASK(PTE_RSW_OFF, PTE_RSW_LEN) -#define PTE_TABLE (PTE_VALID) -#define PTE_PAGE (PTE_RWX | PTE_VALID) -#define PTE_SUPERPAGE (PTE_PAGE) +#define PTE_TABLE (PTE_VALID) +#define PTE_PAGE (PTE_RWX | PTE_VALID) +#define PTE_SUPERPAGE (PTE_PAGE) /* ------------------------------------------------------------- */ -#define PTE_RSW_EMPT (0x0LL << PTE_RSW_OFF) -#define PTE_RSW_OPEN (0x1LL << PTE_RSW_OFF) -#define PTE_RSW_FULL (0x2LL << PTE_RSW_OFF) -#define PTE_RSW_RSRV (0x3LL << PTE_RSW_OFF) +#define PTE_RSW_EMPT (0x0LL << PTE_RSW_OFF) +#define PTE_RSW_OPEN (0x1LL << PTE_RSW_OFF) +#define PTE_RSW_FULL (0x2LL << PTE_RSW_OFF) +#define PTE_RSW_RSRV (0x3LL << PTE_RSW_OFF) #define PT_ROOT_FLAGS_REC_IND_OFF ? ? ? #define PT_ROOT_FLAGS_REC_IND_LEN ? ? ? 
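
One layout detail behind PTE_ADDR_MSK and the pte_set helper further down this header: in the Sv32/Sv39 PTE formats the PPN field starts at bit 10 while the page offset occupies the low 12 address bits, so a masked physical address drops into place with a right shift of 2. A small sketch of composing a leaf entry under those definitions (make_leaf_pte is an illustrative name):

    static inline pte_t make_leaf_pte(paddr_t pa, pte_flags_t flags)
    {
        /* PA bits [55:12] (Sv39) land in PTE bits [53:10]: mask, then >> 2 */
        return (pte_t)(((pa & PTE_ADDR_MSK) >> 2) |
            ((PTE_PAGE | flags) & PTE_FLAGS_MSK));
    }
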
-#define PT_ROOT_FLAGS_REC_IND_MSK \ - PTE_MASK(PT_ROOT_FLAGS_REC_IND_OFF, PT_ROOT_FLAGS_REC_IND_LEN) +#define PT_ROOT_FLAGS_REC_IND_MSK PTE_MASK(PT_ROOT_FLAGS_REC_IND_OFF, PT_ROOT_FLAGS_REC_IND_LEN) -#define PT_CPU_REC_IND (pt_nentries(&cpu()->as.pt, 0) - 1) -#define PT_VM_REC_IND (pt_nentries(&cpu()->as.pt, 0) - 2) +#define PT_CPU_REC_IND (pt_nentries(&cpu()->as.pt, 0) - 1) +#define PT_VM_REC_IND (pt_nentries(&cpu()->as.pt, 0) - 2) -#define PTE_INVALID (0) -#define PTE_HYP_FLAGS (PTE_GLOBAL | PTE_ACCESS | PTE_DIRTY) -#define PTE_HYP_DEV_FLAGS PTE_HYP_FLAGS +#define PTE_INVALID (0) +#define PTE_HYP_FLAGS (PTE_GLOBAL | PTE_ACCESS | PTE_DIRTY) +#define PTE_HYP_DEV_FLAGS PTE_HYP_FLAGS -#define PTE_VM_FLAGS (PTE_ACCESS | PTE_DIRTY | PTE_USER) -#define PTE_VM_DEV_FLAGS PTE_VM_FLAGS +#define PTE_VM_FLAGS (PTE_ACCESS | PTE_DIRTY | PTE_USER) +#define PTE_VM_DEV_FLAGS PTE_VM_FLAGS #ifndef __ASSEMBLER__ @@ -77,13 +76,11 @@ typedef pte_t pte_flags_t; struct page_table; -struct page_table_arch { - -}; +struct page_table_arch { }; static inline void pte_set(pte_t* pte, paddr_t addr, pte_type_t type, pte_flags_t flags) { - *pte = ((addr & PTE_ADDR_MSK) >> 2) | + *pte = ((addr & PTE_ADDR_MSK) >> 2) | (((type == PTE_TABLE) ? type : (type | flags)) & PTE_FLAGS_MSK); } diff --git a/src/arch/riscv/inc/arch/platform.h b/src/arch/riscv/inc/arch/platform.h index e96638377..87c4e86e1 100644 --- a/src/arch/riscv/inc/arch/platform.h +++ b/src/arch/riscv/inc/arch/platform.h @@ -22,9 +22,9 @@ struct arch_platform { } irqc; struct { - paddr_t base; // Base address of the IOMMU mmapped IF - unsigned mode; // Overall IOMMU mode (Off, Bypass, DDT-lvl) - irqid_t fq_irq_id; // Fault Queue IRQ ID (wired) + paddr_t base; // Base address of the IOMMU mmapped IF + unsigned mode; // Overall IOMMU mode (Off, Bypass, DDT-lvl) + irqid_t fq_irq_id; // Fault Queue IRQ ID (wired) } iommu; }; diff --git a/src/arch/riscv/inc/arch/sbi.h b/src/arch/riscv/inc/arch/sbi.h index e9809c78d..4011fed6a 100644 --- a/src/arch/riscv/inc/arch/sbi.h +++ b/src/arch/riscv/inc/arch/sbi.h @@ -13,12 +13,12 @@ * From https://github.com/riscv/riscv-sbi-doc */ -#define SBI_SUCCESS (0) -#define SBI_ERR_FAILURE (-1) -#define SBI_ERR_NOT_SUPPORTED (-2) -#define SBI_ERR_INVALID_PARAM (-3) -#define SBI_ERR_DENIED (-4) -#define SBI_ERR_INVALID_ADDRESS (-5) +#define SBI_SUCCESS (0) +#define SBI_ERR_FAILURE (-1) +#define SBI_ERR_NOT_SUPPORTED (-2) +#define SBI_ERR_INVALID_PARAM (-3) +#define SBI_ERR_DENIED (-4) +#define SBI_ERR_INVALID_ADDRESS (-5) #define SBI_ERR_ALREADY_AVAILABLE (-6) struct sbiret { @@ -45,49 +45,31 @@ struct sbiret sbi_get_mvendorid(void); struct sbiret sbi_get_marchid(void); struct sbiret sbi_get_mimpid(void); -struct sbiret sbi_send_ipi(const unsigned long hart_mask, - unsigned long hart_mask_base); +struct sbiret sbi_send_ipi(const unsigned long hart_mask, unsigned long hart_mask_base); struct sbiret sbi_set_timer(uint64_t stime_value); -struct sbiret sbi_remote_fence_i(const unsigned long hart_mask, - unsigned long hart_mask_base); +struct sbiret sbi_remote_fence_i(const unsigned long hart_mask, unsigned long hart_mask_base); -struct sbiret sbi_remote_sfence_vma(const unsigned long hart_mask, - unsigned long hart_mask_base, - unsigned long start_addr, - unsigned long size); +struct sbiret sbi_remote_sfence_vma(const unsigned long hart_mask, unsigned long hart_mask_base, + unsigned long start_addr, unsigned long size); struct sbiret sbi_remote_sfence_vma_asid(const unsigned long hart_mask, - unsigned long hart_mask_base, - unsigned long 
start_addr, - unsigned long size, - unsigned long asid); + unsigned long hart_mask_base, unsigned long start_addr, unsigned long size, unsigned long asid); struct sbiret sbi_remote_hfence_gvma_vmid(const unsigned long hart_mask, - unsigned long hart_mask_base, - unsigned long start_addr, - unsigned long size, - unsigned long vmid); + unsigned long hart_mask_base, unsigned long start_addr, unsigned long size, unsigned long vmid); -struct sbiret sbi_remote_hfence_gvma(const unsigned long hart_mask, - unsigned long hart_mask_base, - unsigned long start_addr, - unsigned long size); +struct sbiret sbi_remote_hfence_gvma(const unsigned long hart_mask, unsigned long hart_mask_base, + unsigned long start_addr, unsigned long size); struct sbiret sbi_remote_hfence_vvma_asid(const unsigned long hart_mask, - unsigned long hart_mask_base, - unsigned long start_addr, - unsigned long size, - unsigned long asid); - -struct sbiret sbi_remote_hfence_vvma(const unsigned long hart_mask, - unsigned long hart_mask_base, - unsigned long start_addr, - unsigned long size); - -struct sbiret sbi_hart_start(unsigned long hartid, unsigned long start_addr, - unsigned long priv); + unsigned long hart_mask_base, unsigned long start_addr, unsigned long size, unsigned long asid); + +struct sbiret sbi_remote_hfence_vvma(const unsigned long hart_mask, unsigned long hart_mask_base, + unsigned long start_addr, unsigned long size); + +struct sbiret sbi_hart_start(unsigned long hartid, unsigned long start_addr, unsigned long priv); struct sbiret sbi_hart_stop(); struct sbiret sbi_hart_status(unsigned long hartid); diff --git a/src/arch/riscv/inc/arch/spinlock.h b/src/arch/riscv/inc/arch/spinlock.h index 4109e9d26..6407decd5 100644 --- a/src/arch/riscv/inc/arch/spinlock.h +++ b/src/arch/riscv/inc/arch/spinlock.h @@ -8,12 +8,12 @@ #include - typedef struct { +typedef struct { uint32_t ticket; uint32_t next; } spinlock_t; -#define SPINLOCK_INITVAL ((spinlock_t){0,0}) +#define SPINLOCK_INITVAL ((spinlock_t){ 0, 0 }) static inline void spinlock_init(spinlock_t* lock) { @@ -28,28 +28,22 @@ static inline void spin_lock(spinlock_t* lock) uint32_t serving; asm volatile( - /* Increment next ticket */ - "amoadd.w.aqrl %0, %3, %2 \n\t" - "1:\n\t" - "lw %1, %4 \n\t" - /* Acquire barrier */ - "fence r , rw \n\t" - /* Spin on lock if not serving*/ - "bne %0, %1, 1b \n\t" - : "=&r"(ticket), "=&r"(serving), "+A"(lock->next) - : "r"(INCR), "A"(lock->ticket) - : "memory" - ); + /* Increment next ticket */ + "amoadd.w.aqrl %0, %3, %2 \n\t" + "1:\n\t" + "lw %1, %4 \n\t" + /* Acquire barrier */ + "fence r , rw \n\t" + /* Spin on lock if not serving*/ + "bne %0, %1, 1b \n\t" : "=&r"(ticket), "=&r"(serving), "+A"(lock->next) + : "r"(INCR), "A"(lock->ticket) : "memory"); } static inline void spin_unlock(spinlock_t* lock) { uint32_t update_lock = lock->ticket + 1; asm volatile("fence rw, rw\n\t" - "sw %1, %0 \n\t" - :"=A"(lock->ticket) - : "r"(update_lock) - : "memory"); + "sw %1, %0 \n\t" : "=A"(lock->ticket) : "r"(update_lock) : "memory"); } #endif /* __ARCH_SPINLOCK__ */ diff --git a/src/arch/riscv/inc/arch/tlb.h b/src/arch/riscv/inc/arch/tlb.h index 56f5e5588..52f671ad8 100644 --- a/src/arch/riscv/inc/arch/tlb.h +++ b/src/arch/riscv/inc/arch/tlb.h @@ -16,8 +16,7 @@ static inline void tlb_hyp_inv_va(vaddr_t va) { - sbi_remote_sfence_vma((1 << platform.cpu_num) - 1, 0, (unsigned long)va, - PAGE_SIZE); + sbi_remote_sfence_vma((1 << platform.cpu_num) - 1, 0, (unsigned long)va, PAGE_SIZE); } static inline void tlb_hyp_inv_all() @@ -31,8 +30,7 @@ static 
inline void tlb_hyp_inv_all() static inline void tlb_vm_inv_va(asid_t vmid, vaddr_t va) { - sbi_remote_hfence_gvma_vmid((1 << platform.cpu_num)- 1, 0, (unsigned long)va, - PAGE_SIZE, vmid); + sbi_remote_hfence_gvma_vmid((1 << platform.cpu_num) - 1, 0, (unsigned long)va, PAGE_SIZE, vmid); } static inline void tlb_vm_inv_all(asid_t vmid) diff --git a/src/arch/riscv/inc/arch/vm.h b/src/arch/riscv/inc/arch/vm.h index ba5440b8f..3c1d63ec1 100644 --- a/src/arch/riscv/inc/arch/vm.h +++ b/src/arch/riscv/inc/arch/vm.h @@ -11,37 +11,37 @@ #include #include -#define REG_RA (1) -#define REG_SP (2) -#define REG_GP (3) -#define REG_TP (4) -#define REG_T0 (5) -#define REG_T1 (6) -#define REG_T2 (7) -#define REG_S0 (8) -#define REG_S1 (9) -#define REG_A0 (10) -#define REG_A1 (11) -#define REG_A2 (12) -#define REG_A3 (13) -#define REG_A4 (14) -#define REG_A5 (15) -#define REG_A6 (16) -#define REG_A7 (17) -#define REG_S2 (18) -#define REG_S3 (19) -#define REG_S4 (20) -#define REG_S5 (21) -#define REG_S6 (22) -#define REG_S7 (23) -#define REG_S8 (24) -#define REG_S9 (25) +#define REG_RA (1) +#define REG_SP (2) +#define REG_GP (3) +#define REG_TP (4) +#define REG_T0 (5) +#define REG_T1 (6) +#define REG_T2 (7) +#define REG_S0 (8) +#define REG_S1 (9) +#define REG_A0 (10) +#define REG_A1 (11) +#define REG_A2 (12) +#define REG_A3 (13) +#define REG_A4 (14) +#define REG_A5 (15) +#define REG_A6 (16) +#define REG_A7 (17) +#define REG_S2 (18) +#define REG_S3 (19) +#define REG_S4 (20) +#define REG_S5 (21) +#define REG_S6 (22) +#define REG_S7 (23) +#define REG_S8 (24) +#define REG_S9 (25) #define REG_S10 (26) #define REG_S11 (27) -#define REG_T3 (28) -#define REG_T4 (29) -#define REG_T5 (30) -#define REG_T6 (31) +#define REG_T3 (28) +#define REG_T4 (29) +#define REG_T5 (30) +#define REG_T6 (31) struct arch_vm_platform { union vm_irqc_dscrp { @@ -57,13 +57,13 @@ struct arch_vm_platform { }; struct vm_arch { - #if (IRQC == PLIC) - struct vplic vplic; - #elif ((IRQC == APLIC) || (IRQC == AIA)) +#if (IRQC == PLIC) + struct vplic vplic; +#elif ((IRQC == APLIC) || (IRQC == AIA)) struct vaplic vaplic; - #else - #error "unknown IRQC type " IRQC - #endif +#else +#error "unknown IRQC type " IRQC +#endif }; struct vcpu_arch { @@ -127,12 +127,12 @@ struct arch_regs { void vcpu_arch_entry(); -static inline void vcpu_arch_inject_hw_irq(struct vcpu *vcpu, irqid_t id) +static inline void vcpu_arch_inject_hw_irq(struct vcpu* vcpu, irqid_t id) { virqc_inject(vcpu, id); } -static inline void vcpu_arch_inject_irq(struct vcpu *vcpu, irqid_t id) +static inline void vcpu_arch_inject_irq(struct vcpu* vcpu, irqid_t id) { virqc_inject(vcpu, id); } diff --git a/src/arch/riscv/interrupts.c b/src/arch/riscv/interrupts.c index f5435fd9c..dffb1364a 100644 --- a/src/arch/riscv/interrupts.c +++ b/src/arch/riscv/interrupts.c @@ -1,5 +1,5 @@ /** - * SPDX-License-Identifier: Apache-2.0 + * SPDX-License-Identifier: Apache-2.0 * Copyright (c) Bao Project and Contributors. All rights reserved. 
*/ @@ -17,7 +17,7 @@ void interrupts_arch_init() { - if (cpu()->id == CPU_MASTER) { + if (cpu()->id == CPU_MASTER) { irqc_init(); } @@ -49,15 +49,17 @@ void interrupts_arch_cpu_enable(bool en) void interrupts_arch_enable(irqid_t int_id, bool en) { if (int_id == SOFT_INT_ID) { - if (en) + if (en) { CSRS(sie, SIE_SSIE); - else + } else { CSRC(sie, SIE_SSIE); + } } else if (int_id == TIMR_INT_ID) { - if (en) + if (en) { CSRS(sie, SIE_STIE); - else + } else { CSRC(sie, SIE_STIE); + } } else { irqc_config_irq(int_id, en); } @@ -75,13 +77,11 @@ void interrupts_arch_handle() case SCAUSE_CODE_STI: interrupts_handle(TIMR_INT_ID); /** - * Clearing the timer pending bit actually has no effect. We could - * re-program the timer to "infinity" but we don't know if the - * handler itself re-programed the timer with a new event. - * Therefore, at this point, we must trust the handler either - * correctly re-programms the timer or disables the interrupt so the - * cpu is not starved by continously triggering the timer interrupt - * (spoiler alert, it does!) + * Clearing the timer pending bit actually has no effect. We could re-program the timer + * to "infinity" but we don't know if the handler itself re-programmed the timer with a + * new event. Therefore, at this point, we must trust the handler either correctly + * re-programs the timer or disables the interrupt so the cpu is not starved by + * continuously triggering the timer interrupt (spoiler alert, it does!) */ break; case SCAUSE_CODE_SEI: @@ -123,7 +123,7 @@ inline bool interrupts_arch_conflict(bitmap_t* interrupt_bitmap, irqid_t int_id) return bitmap_get(interrupt_bitmap, int_id); } -void interrupts_arch_vm_assign(struct vm *vm, irqid_t id) +void interrupts_arch_vm_assign(struct vm* vm, irqid_t id) { virqc_set_hw(vm, id); -} \ No newline at end of file +} diff --git a/src/arch/riscv/iommu.c b/src/arch/riscv/iommu.c index 494fa9c9c..085233f30 100644 --- a/src/arch/riscv/iommu.c +++ b/src/arch/riscv/iommu.c @@ -12,80 +12,78 @@ // We initially use a 1-LVL DDT with DC in extended format // N entries = 4kiB / 64 B p/ entry = 64 Entries -#define DDT_N_ENTRIES (64) +#define DDT_N_ENTRIES (64) -#define FQ_N_ENTRIES (64) -#define FQ_LOG2SZ_1 (5ULL) -#define FQ_INDEX_MASK BIT32_MASK(0, FQ_LOG2SZ_1 + 1) +#define FQ_N_ENTRIES (64) +#define FQ_LOG2SZ_1 (5ULL) +#define FQ_INDEX_MASK BIT32_MASK(0, FQ_LOG2SZ_1 + 1) -#define RV_IOMMU_SUPPORTED_VERSION (0x10) +#define RV_IOMMU_SUPPORTED_VERSION (0x10) -//# Memory-mapped Register Interface -// Capabilities register fields -#define RV_IOMMU_CAPS_VERSION_OFF (0) -#define RV_IOMMU_CAPS_VERSION_LEN (8) +// # Memory-mapped Register Interface +// Capabilities register fields +#define RV_IOMMU_CAPS_VERSION_OFF (0) +#define RV_IOMMU_CAPS_VERSION_LEN (8) -#define RV_IOMMU_CAPS_SV39X4_BIT (0x1ULL << 17) -#define RV_IOMMU_CAPS_MSI_FLAT_BIT (0x1ULL << 22) -#define RV_IOMMU_CAPS_IGS_OFF (28) -#define RV_IOMMU_CAPS_IGS_LEN (2) +#define RV_IOMMU_CAPS_SV39X4_BIT (0x1ULL << 17) +#define RV_IOMMU_CAPS_MSI_FLAT_BIT (0x1ULL << 22) +#define RV_IOMMU_CAPS_IGS_OFF (28) +#define RV_IOMMU_CAPS_IGS_LEN (2) // Features control register fields -#define RV_IOMMU_FCTL_WSI_BIT (0x1UL << 1) -#define RV_IOMMU_FCTL_DEFAULT (RV_IOMMU_FCTL_WSI_BIT) +#define RV_IOMMU_FCTL_WSI_BIT (0x1UL << 1) +#define RV_IOMMU_FCTL_DEFAULT (RV_IOMMU_FCTL_WSI_BIT) // Device Directory Table Pointer register -#define RV_IOMMU_DDTP_MODE_OFF (0ULL) -#define RV_IOMMU_DDTP_MODE_BARE (1ULL) -#define RV_IOMMU_DDTP_MODE_1LVL (2ULL) -#define RV_IOMMU_DDTP_MODE_2LVL (3ULL)
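
The supervisor-timer comment above is worth pairing with what a registered handler actually has to do: because the pending bit cannot be cleared by software, the handler must either re-arm the timer via SBI or mask the interrupt outright. A hedged sketch built on the sbi_set_timer declaration from earlier in this patch (the handler shape and next_deadline are illustrative; UINT64_MAX comes from <stdint.h>):

    static void example_timer_handler(uint64_t next_deadline, bool more_events)
    {
        if (more_events) {
            /* Moving the compare value forward clears the pending condition */
            sbi_set_timer(next_deadline);
        } else {
            /* Nothing scheduled: park the timer "at infinity" instead */
            sbi_set_timer(UINT64_MAX);
        }
    }
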
-#define RV_IOMMU_DDTP_MODE_3LVL (4ULL) +#define RV_IOMMU_DDTP_MODE_OFF (0ULL) +#define RV_IOMMU_DDTP_MODE_BARE (1ULL) +#define RV_IOMMU_DDTP_MODE_1LVL (2ULL) +#define RV_IOMMU_DDTP_MODE_2LVL (3ULL) +#define RV_IOMMU_DDTP_MODE_3LVL (4ULL) -#define RV_IOMMU_DDTP_BUSY_BIT (0x1ULL << 4) +#define RV_IOMMU_DDTP_BUSY_BIT (0x1ULL << 4) -#define RV_IOMMU_DDTP_PPN_OFF (10) -#define RV_IOMMU_DDTP_PPN_LEN (44) -#define RV_IOMMU_DDTP_PPN_MASK BIT64_MASK(RV_IOMMU_DDTP_PPN_OFF, RV_IOMMU_DDTP_PPN_LEN) +#define RV_IOMMU_DDTP_PPN_OFF (10) +#define RV_IOMMU_DDTP_PPN_LEN (44) +#define RV_IOMMU_DDTP_PPN_MASK BIT64_MASK(RV_IOMMU_DDTP_PPN_OFF, RV_IOMMU_DDTP_PPN_LEN) // Queue management -#define RV_IOMMU_XQB_PPN_OFF (10) -#define RV_IOMMU_XQB_PPN_LEN (44) -#define RV_IOMMU_XQB_PPN_MASK BIT64_MASK(RV_IOMMU_XQB_PPN_OFF, RV_IOMMU_XQB_PPN_LEN) +#define RV_IOMMU_XQB_PPN_OFF (10) +#define RV_IOMMU_XQB_PPN_LEN (44) +#define RV_IOMMU_XQB_PPN_MASK BIT64_MASK(RV_IOMMU_XQB_PPN_OFF, RV_IOMMU_XQB_PPN_LEN) -#define RV_IOMMU_XQCSR_EN_BIT (1ULL << 0) -#define RV_IOMMU_XQCSR_IE_BIT (1ULL << 1) -#define RV_IOMMU_XQCSR_MF_BIT (1ULL << 8) -#define RV_IOMMU_XQCSR_ON_BIT (1ULL << 16) -#define RV_IOMMU_XQCSR_BUSY_BIT (1ULL << 17) +#define RV_IOMMU_XQCSR_EN_BIT (1ULL << 0) +#define RV_IOMMU_XQCSR_IE_BIT (1ULL << 1) +#define RV_IOMMU_XQCSR_MF_BIT (1ULL << 8) +#define RV_IOMMU_XQCSR_ON_BIT (1ULL << 16) +#define RV_IOMMU_XQCSR_BUSY_BIT (1ULL << 17) // FQ CSR -#define RV_IOMMU_FQCSR_OF_BIT (1ULL << 9) -#define RV_IOMMU_FQCSR_DEFAULT (RV_IOMMU_XQCSR_EN_BIT | \ - RV_IOMMU_XQCSR_IE_BIT | \ - RV_IOMMU_XQCSR_MF_BIT | \ - RV_IOMMU_FQCSR_OF_BIT) -#define RV_IOMMU_FQCSR_CLEAR_ERR (RV_IOMMU_XQCSR_MF_BIT | RV_IOMMU_FQCSR_OF_BIT) +#define RV_IOMMU_FQCSR_OF_BIT (1ULL << 9) +#define RV_IOMMU_FQCSR_DEFAULT \ + (RV_IOMMU_XQCSR_EN_BIT | RV_IOMMU_XQCSR_IE_BIT | RV_IOMMU_XQCSR_MF_BIT | RV_IOMMU_FQCSR_OF_BIT) +#define RV_IOMMU_FQCSR_CLEAR_ERR (RV_IOMMU_XQCSR_MF_BIT | RV_IOMMU_FQCSR_OF_BIT) // Interrupt pending register -#define RV_IOMMU_IPSR_FIP_BIT (1UL << 1) -#define RV_IOMMU_IPSR_CLEAR (0x0FUL) +#define RV_IOMMU_IPSR_FIP_BIT (1UL << 1) +#define RV_IOMMU_IPSR_CLEAR (0x0FUL) // Interrupt Vectors -#define RV_IOMMU_ICVEC_CIV_NUM (0ULL) -#define RV_IOMMU_ICVEC_FIV_NUM (1ULL) +#define RV_IOMMU_ICVEC_CIV_NUM (0ULL) +#define RV_IOMMU_ICVEC_FIV_NUM (1ULL) -#define RV_IOMMU_ICVEC_CIV_OFF (0) -#define RV_IOMMU_ICVEC_FIV_OFF (4) +#define RV_IOMMU_ICVEC_CIV_OFF (0) +#define RV_IOMMU_ICVEC_FIV_OFF (4) -#define RV_IOMMU_ICVEC_CIV (RV_IOMMU_ICVEC_CIV_NUM << RV_IOMMU_ICVEC_CIV_OFF) -#define RV_IOMMU_ICVEC_FIV (RV_IOMMU_ICVEC_FIV_NUM << RV_IOMMU_ICVEC_FIV_OFF) -#define RV_IOMMU_ICVEC_DEFAULT (RV_IOMMU_ICVEC_CIV | RV_IOMMU_ICVEC_FIV) +#define RV_IOMMU_ICVEC_CIV (RV_IOMMU_ICVEC_CIV_NUM << RV_IOMMU_ICVEC_CIV_OFF) +#define RV_IOMMU_ICVEC_FIV (RV_IOMMU_ICVEC_FIV_NUM << RV_IOMMU_ICVEC_FIV_OFF) +#define RV_IOMMU_ICVEC_DEFAULT (RV_IOMMU_ICVEC_CIV | RV_IOMMU_ICVEC_FIV) -//# RISC-V IOMMU Memory-Mapped Register Interface +// # RISC-V IOMMU Memory-Mapped Register Interface struct riscv_iommu_regmap { uint64_t caps; uint32_t fctl; - uint8_t __custom1[4]; + uint8_t __custom1[4]; uint64_t ddtp; uint64_t cqb; uint32_t cqh; @@ -108,55 +106,61 @@ struct riscv_iommu_regmap { uint64_t tr_req_iova; uint64_t tr_req_ctl; uint64_t tr_response; - uint8_t __rsv1[64]; - uint8_t __custom2[72]; + uint8_t __rsv1[64]; + uint8_t __custom2[72]; uint64_t icvec; struct { uint64_t addr; uint32_t data; uint32_t vctl; } __attribute__((__packed__)) msi_cfg_tbl[16]; - uint8_t __rsv2[3072]; + uint8_t 
__rsv2[3072]; } __attribute__((__packed__, __aligned__(PAGE_SIZE))); -//# RISC-V IOMMU Device Directory Table -#define RV_IOMMU_DC_VALID_BIT (1ULL << 0) -#define RV_IOMMU_DC_DTF_BIT (1ULL << 4) - -#define RV_IOMMU_DC_IOHGATP_PPN_OFF (0) -#define RV_IOMMU_DC_IOHGATP_PPN_LEN (44) -#define RV_IOMMU_DC_IOHGATP_PPN_MASK BIT64_MASK(RV_IOMMU_DC_IOHGATP_PPN_OFF, RV_IOMMU_DC_IOHGATP_PPN_LEN) -#define RV_IOMMU_DC_IOHGATP_GSCID_OFF (44) -#define RV_IOMMU_DC_IOHGATP_GSCID_LEN (16) -#define RV_IOMMU_DC_IOHGATP_GSCID_MASK BIT64_MASK(RV_IOMMU_DC_IOHGATP_GSCID_OFF, RV_IOMMU_DC_IOHGATP_GSCID_LEN) -#define RV_IOMMU_DC_IOHGATP_MODE_OFF (60) -#define RV_IOMMU_DC_IOHGATP_MODE_LEN (4) -#define RV_IOMMU_DC_IOHGATP_MODE_MASK BIT64_MASK(RV_IOMMU_DC_IOHGATP_MODE_OFF, RV_IOMMU_DC_IOHGATP_MODE_LEN) -#define RV_IOMMU_IOHGATP_SV39X4 (8ULL << RV_IOMMU_DC_IOHGATP_MODE_OFF) -#define RV_IOMMU_IOHGATP_BARE (0ULL << RV_IOMMU_DC_IOHGATP_MODE_OFF) - -#define RV_IOMMU_DC_IOHGATP_PSCID_OFF (12) -#define RV_IOMMU_DC_IOHGATP_PSCID_LEN (20) -#define RV_IOMMU_DC_IOHGATP_PSCID_MASK BIT64_MASK(RV_IOMMU_DC_IOHGATP_PSCID_OFF, RV_IOMMU_DC_IOHGATP_PSCID_LEN) - -#define RV_IOMMU_DC_FSC_PPN_OFF (0) -#define RV_IOMMU_DC_FSC_PPN_LEN (44) -#define RV_IOMMU_DC_FSC_PPN_MASK BIT64_MASK(RV_IOMMU_DC_FSC_PPN_OFF, RV_IOMMU_DC_FSC_PPN_LEN) -#define RV_IOMMU_DC_FSC_MODE_OFF (60) -#define RV_IOMMU_DC_FSC_MODE_LEN (4) -#define RV_IOMMU_DC_FSC_MODE_MASK BIT64_MASK(RV_IOMMU_DC_FSC_MODE_OFF, RV_IOMMU_DC_FSC_MODE_LEN) - -#define RV_IOMMU_DC_MSIPTP_PPN_OFF (0) -#define RV_IOMMU_DC_MSIPTP_PPN_LEN (44) -#define RV_IOMMU_DC_MSIPTP_PPN_MASK BIT64_MASK(RV_IOMMU_DC_MSIPTP_PPN_OFF, RV_IOMMU_DC_MSIPTP_PPN_LEN) -#define RV_IOMMU_DC_MSIPTP_MODE_OFF (60) -#define RV_IOMMU_DC_MSIPTP_MODE_LEN (4) -#define RV_IOMMU_DC_MSIPTP_MODE_MASK BIT64_MASK(RV_IOMMU_DC_MSIPTP_MODE_OFF, RV_IOMMU_DC_MSIPTP_MODE_LEN) - -#define RV_IOMMU_DC_MSIMASK_OFF (0) -#define RV_IOMMU_DC_MSIMASK_LEN (52) -#define RV_IOMMU_DC_MSIMASK_MASK BIT64_MASK(RV_IOMMU_DC_MSIMASK_OFF, RV_IOMMU_DC_MSIMASK_LEN) +// # RISC-V IOMMU Device Directory Table +#define RV_IOMMU_DC_VALID_BIT (1ULL << 0) +#define RV_IOMMU_DC_DTF_BIT (1ULL << 4) + +#define RV_IOMMU_DC_IOHGATP_PPN_OFF (0) +#define RV_IOMMU_DC_IOHGATP_PPN_LEN (44) +#define RV_IOMMU_DC_IOHGATP_PPN_MASK \ + BIT64_MASK(RV_IOMMU_DC_IOHGATP_PPN_OFF, RV_IOMMU_DC_IOHGATP_PPN_LEN) +#define RV_IOMMU_DC_IOHGATP_GSCID_OFF (44) +#define RV_IOMMU_DC_IOHGATP_GSCID_LEN (16) +#define RV_IOMMU_DC_IOHGATP_GSCID_MASK \ + BIT64_MASK(RV_IOMMU_DC_IOHGATP_GSCID_OFF, RV_IOMMU_DC_IOHGATP_GSCID_LEN) +#define RV_IOMMU_DC_IOHGATP_MODE_OFF (60) +#define RV_IOMMU_DC_IOHGATP_MODE_LEN (4) +#define RV_IOMMU_DC_IOHGATP_MODE_MASK \ + BIT64_MASK(RV_IOMMU_DC_IOHGATP_MODE_OFF, RV_IOMMU_DC_IOHGATP_MODE_LEN) +#define RV_IOMMU_IOHGATP_SV39X4 (8ULL << RV_IOMMU_DC_IOHGATP_MODE_OFF) +#define RV_IOMMU_IOHGATP_BARE (0ULL << RV_IOMMU_DC_IOHGATP_MODE_OFF) + +#define RV_IOMMU_DC_IOHGATP_PSCID_OFF (12) +#define RV_IOMMU_DC_IOHGATP_PSCID_LEN (20) +#define RV_IOMMU_DC_IOHGATP_PSCID_MASK \ + BIT64_MASK(RV_IOMMU_DC_IOHGATP_PSCID_OFF, RV_IOMMU_DC_IOHGATP_PSCID_LEN) + +#define RV_IOMMU_DC_FSC_PPN_OFF (0) +#define RV_IOMMU_DC_FSC_PPN_LEN (44) +#define RV_IOMMU_DC_FSC_PPN_MASK BIT64_MASK(RV_IOMMU_DC_FSC_PPN_OFF, RV_IOMMU_DC_FSC_PPN_LEN) +#define RV_IOMMU_DC_FSC_MODE_OFF (60) +#define RV_IOMMU_DC_FSC_MODE_LEN (4) +#define RV_IOMMU_DC_FSC_MODE_MASK BIT64_MASK(RV_IOMMU_DC_FSC_MODE_OFF, RV_IOMMU_DC_FSC_MODE_LEN) + +#define RV_IOMMU_DC_MSIPTP_PPN_OFF (0) +#define RV_IOMMU_DC_MSIPTP_PPN_LEN (44) +#define 
RV_IOMMU_DC_MSIPTP_PPN_MASK \ + BIT64_MASK(RV_IOMMU_DC_MSIPTP_PPN_OFF, RV_IOMMU_DC_MSIPTP_PPN_LEN) +#define RV_IOMMU_DC_MSIPTP_MODE_OFF (60) +#define RV_IOMMU_DC_MSIPTP_MODE_LEN (4) +#define RV_IOMMU_DC_MSIPTP_MODE_MASK \ + BIT64_MASK(RV_IOMMU_DC_MSIPTP_MODE_OFF, RV_IOMMU_DC_MSIPTP_MODE_LEN) + +#define RV_IOMMU_DC_MSIMASK_OFF (0) +#define RV_IOMMU_DC_MSIMASK_LEN (52) +#define RV_IOMMU_DC_MSIMASK_MASK BIT64_MASK(RV_IOMMU_DC_MSIMASK_OFF, RV_IOMMU_DC_MSIMASK_LEN) struct ddt_entry { uint64_t tc; @@ -169,16 +173,15 @@ struct ddt_entry { uint64_t __rsv; } __attribute__((__packed__)); - -//# Fault Queue Record -#define RV_IOMMU_FQ_CAUSE_OFF (0) -#define RV_IOMMU_FQ_CAUSE_LEN (12) -#define RV_IOMMU_FQ_PID_OFF (12) -#define RV_IOMMU_FQ_PID_LEN (20) -#define RV_IOMMU_FQ_TTYP_OFF (34) -#define RV_IOMMU_FQ_TTYP_LEN (6) -#define RV_IOMMU_FQ_DID_OFF (40) -#define RV_IOMMU_FQ_DID_LEN (24) +// # Fault Queue Record +#define RV_IOMMU_FQ_CAUSE_OFF (0) +#define RV_IOMMU_FQ_CAUSE_LEN (12) +#define RV_IOMMU_FQ_PID_OFF (12) +#define RV_IOMMU_FQ_PID_LEN (20) +#define RV_IOMMU_FQ_TTYP_OFF (34) +#define RV_IOMMU_FQ_TTYP_LEN (6) +#define RV_IOMMU_FQ_DID_OFF (40) +#define RV_IOMMU_FQ_DID_LEN (24) struct fq_entry { uint64_t tags; @@ -187,12 +190,12 @@ struct fq_entry { uint64_t iotval2; } __attribute__((__packed__)); -//# Memory-mapped and in-memory structures -// TODO: Add CQ +// # Memory-mapped and in-memory structures +// TODO: Add CQ struct riscv_iommu_hw { - volatile struct riscv_iommu_regmap *reg_ptr; - volatile struct ddt_entry *ddt; - volatile struct fq_entry *fq; + volatile struct riscv_iommu_regmap* reg_ptr; + volatile struct ddt_entry* ddt; + volatile struct fq_entry* fq; }; struct riscv_iommu_priv { @@ -212,8 +215,7 @@ struct riscv_iommu_priv rv_iommu; static void rv_iommu_check_features(void) { unsigned long long caps = rv_iommu.hw.reg_ptr->caps; - unsigned version = bit64_extract(caps, - RV_IOMMU_CAPS_VERSION_OFF, RV_IOMMU_CAPS_VERSION_LEN); + unsigned version = bit64_extract(caps, RV_IOMMU_CAPS_VERSION_OFF, RV_IOMMU_CAPS_VERSION_LEN); if (version != RV_IOMMU_SUPPORTED_VERSION) { ERROR("RISC-V IOMMU unsupported version: %d", version); @@ -224,11 +226,12 @@ static void rv_iommu_check_features(void) } if (!(caps & RV_IOMMU_CAPS_MSI_FLAT_BIT)) { - WARNING("RISC-V IOMMU HW does not support MSI Address Translation (basic-translate mode)"); + WARNING("RISC-V IOMMU HW does not support MSI Address Translation " + "(basic-translate mode)"); } unsigned igs = bit64_extract(caps, RV_IOMMU_CAPS_IGS_OFF, RV_IOMMU_CAPS_IGS_LEN); - if ((!igs)) { + if (!igs) { ERROR("RISC-V IOMMU HW does not support WSI generation"); } } @@ -242,21 +245,19 @@ void rv_iommu_fq_irq_handler(irqid_t irq_id) uint32_t ipsr = rv_iommu.hw.reg_ptr->ipsr; // Signal error if fip not set - if (!(ipsr & RV_IOMMU_IPSR_FIP_BIT)) + if (!(ipsr & RV_IOMMU_IPSR_FIP_BIT)) { ERROR("FQ IRQ handler triggered due to non-FQ interrupt"); + } // Read fqcsr error bits and report if any is set uint32_t fqcsr = rv_iommu.hw.reg_ptr->fqcsr; - if (fqcsr & (RV_IOMMU_XQCSR_MF_BIT | RV_IOMMU_FQCSR_OF_BIT)) - { - if (fqcsr & RV_IOMMU_XQCSR_MF_BIT) - { + if (fqcsr & (RV_IOMMU_XQCSR_MF_BIT | RV_IOMMU_FQCSR_OF_BIT)) { + if (fqcsr & RV_IOMMU_XQCSR_MF_BIT) { WARNING("RV IOMMU: FQ Memory Fault error!"); // TODO: MF management } - if (fqcsr & RV_IOMMU_FQCSR_OF_BIT) - { + if (fqcsr & RV_IOMMU_FQCSR_OF_BIT) { WARNING("RV IOMMU: FQ Full!"); // TODO: OF Management } @@ -272,85 +273,78 @@ void rv_iommu_fq_irq_handler(irqid_t irq_id) uint32_t fqh = rv_iommu.hw.reg_ptr->fqh; uint32_t fqt 
= rv_iommu.hw.reg_ptr->fqt; - while (fqh != fqt) - { + while (fqh != fqt) { struct fq_entry record = rv_iommu.hw.fq[fqh]; WARNING("RV IOMMU FQ: CAUSE: %d | DID: %d | iotval: %x | iotval2: %x", - bit64_extract(record.tags, RV_IOMMU_FQ_CAUSE_OFF, RV_IOMMU_FQ_CAUSE_LEN), - bit64_extract(record.tags, RV_IOMMU_FQ_DID_OFF, RV_IOMMU_FQ_DID_LEN), - record.iotval, - record.iotval2); + bit64_extract(record.tags, RV_IOMMU_FQ_CAUSE_OFF, RV_IOMMU_FQ_CAUSE_LEN), + bit64_extract(record.tags, RV_IOMMU_FQ_DID_OFF, RV_IOMMU_FQ_DID_LEN), record.iotval, + record.iotval2); fqh = (fqh + 1) & FQ_INDEX_MASK; // TODO: Translation faults management } - + // Update fqh rv_iommu.hw.reg_ptr->fqh = fqh; } - /** - * Init and enable RISC-V IOMMU. + * Init and enable RISC-V IOMMU. */ void rv_iommu_init(void) { // Map register IF (4k) vaddr_t reg_ptr = mem_alloc_map_dev(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, - platform.arch.iommu.base, NUM_PAGES(sizeof(struct riscv_iommu_regmap))); + platform.arch.iommu.base, NUM_PAGES(sizeof(struct riscv_iommu_regmap))); rv_iommu.hw.reg_ptr = (struct riscv_iommu_regmap*)reg_ptr; // Read and check caps rv_iommu_check_features(); - // Set fctl.WSI - // We will be first using WSI as IOMMU interrupt mechanism. Then MSIs will be included + // Set fctl.WSI. We will be first using WSI as the IOMMU interrupt mechanism. Then MSIs will be + // included rv_iommu.hw.reg_ptr->fctl = RV_IOMMU_FCTL_DEFAULT; - // Configure interrupt vectors (icvec) - // We use a different vector for each interrupt source CQ and FQ by now + // Configure interrupt vectors (icvec). We use a different vector for each interrupt source (CQ + // and FQ) for now rv_iommu.hw.reg_ptr->icvec = RV_IOMMU_ICVEC_DEFAULT; // Clear all IP flags (ipsr) rv_iommu.hw.reg_ptr->ipsr = RV_IOMMU_IPSR_CLEAR; - - // TODO - // Allocate memory for CQ - // Configure cqb with queue size and base address. Clear cqt - // Allocate IRQ for CQ - // Enable CQ (cqcsr) + + // TODO: Allocate memory for CQ; configure cqb with queue size and base address; clear cqt; + // allocate IRQ for CQ; enable CQ (cqcsr) // Allocate memory for FQ (aligned to 4kiB) vaddr_t fq_vaddr = (vaddr_t)mem_alloc_page(NUM_PAGES(sizeof(struct fq_entry) * FQ_N_ENTRIES), - SEC_HYP_GLOBAL, true); + SEC_HYP_GLOBAL, true); memset((void*)fq_vaddr, 0, sizeof(struct fq_entry) * FQ_N_ENTRIES); rv_iommu.hw.fq = (struct fq_entry*)fq_vaddr; // Configure fqb with queue size and base address.
Clear fqh paddr_t fq_paddr; mem_translate(&cpu()->as, fq_vaddr, &fq_paddr); - rv_iommu.hw.reg_ptr->fqb = FQ_LOG2SZ_1 | - ((fq_paddr >> 2) & RV_IOMMU_XQB_PPN_MASK); + rv_iommu.hw.reg_ptr->fqb = FQ_LOG2SZ_1 | ((fq_paddr >> 2) & RV_IOMMU_XQB_PPN_MASK); rv_iommu.hw.reg_ptr->fqh = 0; // Allocate IRQ for FQ - if(!interrupts_reserve(platform.arch.iommu.fq_irq_id, rv_iommu_fq_irq_handler)) { + if (!interrupts_reserve(platform.arch.iommu.fq_irq_id, rv_iommu_fq_irq_handler)) { ERROR("Failed to reserve IOMMU FQ interrupt"); } - + interrupts_cpu_enable(platform.arch.iommu.fq_irq_id, true); // Enable FQ (fqcsr) rv_iommu.hw.reg_ptr->fqcsr = RV_IOMMU_FQCSR_DEFAULT; - // TODO: poll fqcsr.busy + // TODO: poll fqcsr.busy // Init DDT bitmap rv_iommu.ddt_lock = SPINLOCK_INITVAL; bitmap_clear_consecutive(rv_iommu.ddt_bitmap, 0, DDT_N_ENTRIES); // Allocate a page of memory (aligned) for the DDT - vaddr_t ddt_vaddr = (vaddr_t)mem_alloc_page(NUM_PAGES(sizeof(struct ddt_entry) * DDT_N_ENTRIES), - SEC_HYP_GLOBAL, true); + vaddr_t ddt_vaddr = (vaddr_t)mem_alloc_page(NUM_PAGES(sizeof(struct ddt_entry) * DDT_N_ENTRIES), + SEC_HYP_GLOBAL, true); // Clear entries memset((void*)ddt_vaddr, 0, sizeof(struct ddt_entry) * DDT_N_ENTRIES); rv_iommu.hw.ddt = (struct ddt_entry*)ddt_vaddr; @@ -358,16 +352,16 @@ void rv_iommu_init(void) // Configure ddtp with DDT base address and IOMMU mode paddr_t ddt_paddr; mem_translate(&cpu()->as, ddt_vaddr, &ddt_paddr); - rv_iommu.hw.reg_ptr->ddtp = (unsigned long long)platform.arch.iommu.mode | - ((ddt_paddr >> 2) & RV_IOMMU_DDTP_PPN_MASK); + rv_iommu.hw.reg_ptr->ddtp = + (unsigned long long)platform.arch.iommu.mode | ((ddt_paddr >> 2) & RV_IOMMU_DDTP_PPN_MASK); // TODO: poll ddtp.busy } /** * Set DDT N bit in the DDT bitmap. - * + * * @dev_id: device_id to be allocated - * + * * @returns true on success, false on error */ bool rv_iommu_alloc_did(deviceid_t dev_id) @@ -376,14 +370,11 @@ bool rv_iommu_alloc_did(deviceid_t dev_id) spin_lock(&rv_iommu.ddt_lock); // Check if DC already exists - if (!bitmap_get(rv_iommu.ddt_bitmap, dev_id)) - { + if (!bitmap_get(rv_iommu.ddt_bitmap, dev_id)) { bitmap_set(rv_iommu.ddt_bitmap, dev_id); allocated = true; - } - else - { - allocated = false; // device_id already exists + } else { + allocated = false; // device_id already exists } spin_unlock(&rv_iommu.ddt_lock); @@ -391,21 +382,19 @@ bool rv_iommu_alloc_did(deviceid_t dev_id) } /** - * Program DDT entry with base address of the root PT, VMID and translation configuration. Enable DC. - * + * Program DDT entry with base address of the root PT, VMID and translation configuration. Enable + * DC. 
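As an aside on the ddtp write in rv_iommu_init above: the PPN field occupies bits [53:10], so shifting a page-aligned physical address right by 2 is equivalent to (paddr >> 12) << 10. A minimal sketch of that encoding, using a hypothetical helper name that is not part of this patch:

    // Sketch only: compose the ddtp value written by rv_iommu_init.
    // mode comes from platform.arch.iommu.mode; ddt_paddr must be 4 KiB-aligned.
    static inline uint64_t rv_iommu_ddtp_encode(uint64_t mode, paddr_t ddt_paddr)
    {
        return mode | ((ddt_paddr >> 2) & RV_IOMMU_DDTP_PPN_MASK);
    }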
+ * * @dev_id: device_id to index DDT * @vm: VM to which the device is being assigned * @root_pt: Base physical address of the root second-stage PT */ -void rv_iommu_write_ddt(deviceid_t dev_id, struct vm *vm, paddr_t root_pt) +void rv_iommu_write_ddt(deviceid_t dev_id, struct vm* vm, paddr_t root_pt) { spin_lock(&rv_iommu.ddt_lock); - if (!bitmap_get(rv_iommu.ddt_bitmap, dev_id)) - { + if (!bitmap_get(rv_iommu.ddt_bitmap, dev_id)) { ERROR("IOMMU DC %d is not allocated", dev_id); - } - else - { + } else { // Configure DC uint64_t tc = 0; tc |= RV_IOMMU_DC_VALID_BIT; @@ -413,14 +402,12 @@ void rv_iommu_write_ddt(deviceid_t dev_id, struct vm *vm, paddr_t root_pt) uint64_t iohgatp = 0; iohgatp |= ((root_pt >> 12) & RV_IOMMU_DC_IOHGATP_PPN_MASK); - iohgatp |= ((vm->id << RV_IOMMU_DC_IOHGATP_GSCID_OFF) - & RV_IOMMU_DC_IOHGATP_GSCID_MASK); + iohgatp |= ((vm->id << RV_IOMMU_DC_IOHGATP_GSCID_OFF) & RV_IOMMU_DC_IOHGATP_GSCID_MASK); iohgatp |= RV_IOMMU_IOHGATP_SV39X4; rv_iommu.hw.ddt[dev_id].iohgatp = iohgatp; - // TODO: - // Configure first-stage translation. Second-stage only by now - // Configure MSI translation + // TODO: Configure first-stage translation (second-stage only for now) and MSI + // translation } spin_unlock(&rv_iommu.ddt_lock); } @@ -429,14 +416,13 @@ void rv_iommu_write_ddt(deviceid_t dev_id, struct vm *vm, paddr_t root_pt) /** * IOMMU HW Initialization. - * + * * @returns true on success, false on error. */ bool iommu_arch_init() -{ - +{ // By checking platform.arch.iommu.base we verify if an IOMMU is present in the platform - if(cpu()->id == CPU_MASTER && platform.arch.iommu.base){ + if (cpu()->id == CPU_MASTER && platform.arch.iommu.base) { rv_iommu_init(); return true; } @@ -445,35 +431,29 @@ bool iommu_arch_init() } /** - * Initialize the DDT entry indexed by device_id for the given VM - * Configure corresponding DDT entry with root PT base addr, VMID (GSCID) and device config - * + * Initialize the DDT entry indexed by device_id for the given VM. Configure the corresponding DDT + * entry with root PT base addr, VMID (GSCID) and device config + * * @vm: VM struct to which the device will be assigned. * @dev_id: device_id of the device to be added. - * + * * @returns true on success, false on error. */ -static bool iommu_vm_arch_add(struct vm *vm, deviceid_t dev_id) +static bool iommu_vm_arch_add(struct vm* vm, deviceid_t dev_id) { - if (dev_id > 0) - { + if (dev_id > 0) { // Check if device was already added to a VM - if(rv_iommu_alloc_did(dev_id)) - { + if (rv_iommu_alloc_did(dev_id)) { paddr_t rootpt; // Translate root PT base address mem_translate(&cpu()->as, (vaddr_t)vm->as.pt.root, &rootpt); // Set DDT entry with root PT base address, VMID and configuration rv_iommu_write_ddt(dev_id, vm, rootpt); - } - else - { + } else { INFO("RV IOMMU: Cannot add one device ID (%d) twice", dev_id); return false; } - } - else - { + } else { INFO("RV IOMMU: Invalid device ID: %d", dev_id); return false; } @@ -483,26 +463,26 @@ static bool iommu_vm_arch_add(struct vm *vm, deviceid_t dev_id) /** * Add device to the VM specified. - * + * * @vm: VM struct to which the device will be assigned. * @dev_id: device_id of the device to be added. - * + * * @returns true on success, false on error. */ -inline bool iommu_arch_vm_add_device(struct vm *vm, deviceid_t dev_id) +inline bool iommu_arch_vm_add_device(struct vm* vm, deviceid_t dev_id) { return iommu_vm_arch_add(vm, dev_id); } /** * Initialize VM-specific, arch-specific IOMMU data. - * + * * @vm: VM under consideration. * @config: VM config.
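For reference, the iohgatp value built in rv_iommu_write_ddt above packs three fields: the PPN at bits [43:0] (root_pt >> 12), the GSCID at bits [59:44] (the VM id), and the mode at bits [63:60] (Sv39x4). An equivalent standalone sketch, with an illustrative helper name not present in the patch:

    // Sketch only: second-stage iohgatp for a VM's root page table.
    static inline uint64_t rv_iommu_iohgatp_encode(paddr_t root_pt, uint64_t vmid)
    {
        uint64_t iohgatp = (root_pt >> 12) & RV_IOMMU_DC_IOHGATP_PPN_MASK;
        iohgatp |= (vmid << RV_IOMMU_DC_IOHGATP_GSCID_OFF) & RV_IOMMU_DC_IOHGATP_GSCID_MASK;
        return iohgatp | RV_IOMMU_IOHGATP_SV39X4;
    }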
- * + * * @returns true on success, false on error. */ -bool iommu_arch_vm_init(struct vm *vm, const struct vm_config *config) +bool iommu_arch_vm_init(struct vm* vm, const struct vm_config* config) { // For now there is no data to initialize return true; diff --git a/src/arch/riscv/irqc/aia/aplic.c b/src/arch/riscv/irqc/aia/aplic.c index 6f20558bc..dd2f17768 100644 --- a/src/arch/riscv/irqc/aia/aplic.c +++ b/src/arch/riscv/irqc/aia/aplic.c @@ -9,35 +9,35 @@ #include /** APLIC fields and masks defines */ -#define APLIC_DOMAINCFG_CTRL_MASK (0x1FF) +#define APLIC_DOMAINCFG_CTRL_MASK (0x1FF) -#define DOMAINCFG_DM (1U << 2) +#define DOMAINCFG_DM (1U << 2) -#define INTP_IDENTITY (16) -#define INTP_IDENTITY_MASK (0x3FF) +#define INTP_IDENTITY (16) +#define INTP_IDENTITY_MASK (0x3FF) -#define APLIC_DISABLE_IDELIVERY (0) -#define APLIC_ENABLE_IDELIVERY (1) -#define APLIC_DISABLE_IFORCE (0) -#define APLIC_ENABLE_IFORCE (1) -#define APLIC_IDC_ITHRESHOLD_EN_ALL (0) -#define APLIC_IDC_ITHRESHOLD_DISBL_ALL (1) +#define APLIC_DISABLE_IDELIVERY (0) +#define APLIC_ENABLE_IDELIVERY (1) +#define APLIC_DISABLE_IFORCE (0) +#define APLIC_ENABLE_IFORCE (1) +#define APLIC_IDC_ITHRESHOLD_EN_ALL (0) +#define APLIC_IDC_ITHRESHOLD_DISBL_ALL (1) /** APLIC public data */ -volatile struct aplic_control_hw *aplic_control; -volatile struct aplic_idc_hw *aplic_idc; +volatile struct aplic_control_hw* aplic_control; +volatile struct aplic_idc_hw* aplic_idc; uint8_t APLIC_IPRIO_MASK = 0; void aplic_init(void) { /** Maps APLIC device */ - aplic_control = (void*) mem_alloc_map_dev(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, - platform.arch.irqc.aia.aplic.base, NUM_PAGES(sizeof(struct aplic_control_hw))); - - aplic_idc = (void*) mem_alloc_map_dev(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, + aplic_control = (void*)mem_alloc_map_dev(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, + platform.arch.irqc.aia.aplic.base, NUM_PAGES(sizeof(struct aplic_control_hw))); + + aplic_idc = (void*)mem_alloc_map_dev(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, platform.arch.irqc.aia.aplic.base + HART_REG_OFF, - NUM_PAGES(sizeof(struct aplic_idc_hw)*IRQC_HART_INST)); - + NUM_PAGES(sizeof(struct aplic_idc_hw) * IRQC_HART_INST)); + /** Ensure that instructions after fence have the APLIC fully mapped */ fence_sync(); @@ -50,17 +50,18 @@ void aplic_init(void) } /** Sets the default value of target and sourcecfg */ - for (size_t i = 0; i < APLIC_NUM_TARGET_REGS; i++){ + for (size_t i = 0; i < APLIC_NUM_TARGET_REGS; i++) { aplic_control->sourcecfg[i] = APLIC_SOURCECFG_SM_INACTIVE; aplic_control->target[i] = APLIC_TARGET_MIN_PRIO; } - APLIC_IPRIO_MASK = aplic_control->target[0] & APLIC_TARGET_IPRIO_MASK; + APLIC_IPRIO_MASK = aplic_control->target[0] & APLIC_TARGET_IPRIO_MASK; aplic_control->domaincfg |= APLIC_DOMAINCFG_IE; } -void aplic_idc_init(void){ +void aplic_idc_init(void) +{ uint32_t idc_index = cpu()->id; - aplic_idc[idc_index].ithreshold = APLIC_IDC_ITHRESHOLD_EN_ALL; + aplic_idc[idc_index].ithreshold = APLIC_IDC_ITHRESHOLD_EN_ALL; aplic_idc[idc_index].iforce = APLIC_DISABLE_IFORCE; aplic_idc[idc_index].idelivery = APLIC_ENABLE_IDELIVERY; } @@ -123,7 +124,8 @@ void aplic_set_enbl_reg(size_t reg_indx, uint32_t reg_val) aplic_control->setie[reg_indx] = reg_val; } -bool aplic_get_enbl(irqid_t intp_id){ +bool aplic_get_enbl(irqid_t intp_id) +{ uint32_t reg_indx = intp_id / 32; uint32_t intp_to_pend_mask = (1U << (intp_id % 32)); @@ -148,8 +150,8 @@ void aplic_set_target_prio(irqid_t intp_id, uint8_t prio) void aplic_set_target_hart(irqid_t intp_id, cpuid_t hart) { - 
aplic_control->target[intp_id - 1] &= ~(APLIC_TARGET_HART_IDX_MASK << - APLIC_TARGET_HART_IDX_SHIFT); + aplic_control->target[intp_id - 1] &= + ~(APLIC_TARGET_HART_IDX_MASK << APLIC_TARGET_HART_IDX_SHIFT); aplic_control->target[intp_id - 1] |= hart << APLIC_TARGET_HART_IDX_SHIFT; } @@ -161,20 +163,20 @@ uint8_t aplic_get_target_prio(irqid_t intp_id) cpuid_t aplic_get_target_hart(irqid_t intp_id) { return (aplic_control->target[intp_id - 1] >> APLIC_TARGET_HART_IDX_SHIFT) & - APLIC_TARGET_HART_IDX_MASK; + APLIC_TARGET_HART_IDX_MASK; } irqid_t aplic_idc_get_claimi_intpid(idcid_t idc_id) { - return (aplic_idc[idc_id].claimi >> IDC_CLAIMI_INTP_ID_SHIFT) - & IDC_CLAIMI_INTP_ID_MASK; + return (aplic_idc[idc_id].claimi >> IDC_CLAIMI_INTP_ID_SHIFT) & IDC_CLAIMI_INTP_ID_MASK; } -void aplic_handle(void){ +void aplic_handle(void) +{ idcid_t idc_id = cpu()->id; irqid_t intp_identity = aplic_idc_get_claimi_intpid(idc_id); - if(intp_identity != 0){ + if (intp_identity != 0) { interrupts_handle(intp_identity); } -} \ No newline at end of file +} diff --git a/src/arch/riscv/irqc/aia/inc/aplic.h b/src/arch/riscv/irqc/aia/inc/aplic.h index a9f64ad73..618ab197f 100644 --- a/src/arch/riscv/irqc/aia/inc/aplic.h +++ b/src/arch/riscv/irqc/aia/inc/aplic.h @@ -9,75 +9,75 @@ #include #include -#define APLIC_DOMAIN_NUM_HARTS (PLAT_CPU_NUM) -#define APLIC_MAX_NUM_HARTS_MAKS (0x3FFF) +#define APLIC_DOMAIN_NUM_HARTS (PLAT_CPU_NUM) +#define APLIC_MAX_NUM_HARTS_MAKS (0x3FFF) /** APLIC Specific types */ typedef cpuid_t idcid_t; /** APLIC Addresses defines */ -#define APLIC_IDC_OFF (0x4000) -#define APLIC_IDC_SIZE (32) +#define APLIC_IDC_OFF (0x4000) +#define APLIC_IDC_SIZE (32) -#define APLIC_MAX_INTERRUPTS (1024) -#define APLIC_NUM_SRCCFG_REGS (APLIC_MAX_INTERRUPTS - 1) -#define APLIC_NUM_TARGET_REGS (APLIC_MAX_INTERRUPTS - 1) +#define APLIC_MAX_INTERRUPTS (1024) +#define APLIC_NUM_SRCCFG_REGS (APLIC_MAX_INTERRUPTS - 1) +#define APLIC_NUM_TARGET_REGS (APLIC_MAX_INTERRUPTS - 1) /** where x = E or P*/ -#define APLIC_NUM_CLRIx_REGS (APLIC_MAX_INTERRUPTS / 32) -#define APLIC_NUM_SETIx_REGS (APLIC_MAX_INTERRUPTS / 32) -#define APLIC_NUM_INTP_PER_REG (APLIC_MAX_INTERRUPTS / APLIC_NUM_SETIx_REGS) +#define APLIC_NUM_CLRIx_REGS (APLIC_MAX_INTERRUPTS / 32) +#define APLIC_NUM_SETIx_REGS (APLIC_MAX_INTERRUPTS / 32) +#define APLIC_NUM_INTP_PER_REG (APLIC_MAX_INTERRUPTS / APLIC_NUM_SETIx_REGS) /** Source Mode defines */ -#define APLIC_SOURCECFG_SM_MASK (0x00000007) -#define APLIC_SOURCECFG_SM_INACTIVE (0x0) -#define APLIC_SOURCECFG_SM_DETACH (0x1) -#define APLIC_SOURCECFG_SM_EDGE_RISE (0x4) -#define APLIC_SOURCECFG_SM_EDGE_FALL (0x5) -#define APLIC_SOURCECFG_SM_LEVEL_HIGH (0x6) -#define APLIC_SOURCECFG_SM_LEVEL_LOW (0x7) +#define APLIC_SOURCECFG_SM_MASK (0x00000007) +#define APLIC_SOURCECFG_SM_INACTIVE (0x0) +#define APLIC_SOURCECFG_SM_DETACH (0x1) +#define APLIC_SOURCECFG_SM_EDGE_RISE (0x4) +#define APLIC_SOURCECFG_SM_EDGE_FALL (0x5) +#define APLIC_SOURCECFG_SM_LEVEL_HIGH (0x6) +#define APLIC_SOURCECFG_SM_LEVEL_LOW (0x7) /** APLIC fields and masks defines */ -#define APLIC_DOMAINCFG_DM (1U << 2) -#define APLIC_DOMAINCFG_IE (1U << 8) -#define APLIC_DOMAINCFG_RO80 (0x80 << 24) - -#define APLIC_SRCCFG_D (1U << 10) -#define APLIC_SRCCFG_SM ((1U << 0) | (1U << 1) | (1U << 2)) - -#define APLIC_TARGET_HART_IDX_SHIFT (18) -#define APLIC_TARGET_GUEST_IDX_SHIFT (12) -#define APLIC_TARGET_HART_IDX_MASK (APLIC_MAX_NUM_HARTS_MAKS) -#define APLIC_TARGET_IPRIO_MASK (0xFF) -#define APLIC_TARGET_EEID_MASK (0x7FF) -#define 
APLIC_TARGET_GUEST_INDEX_MASK (0x3F) -#define APLIC_TARGET_MIN_PRIO (0xFF) -#define APLIC_TARGET_MAX_PRIO (0x01) -#define APLIC_TARGET_DIRECT_MASK (0xFFFC0000 | APLIC_IPRIO_MASK) -#define APLIC_TARGET_MSI_MASK (0xFFFFF7FF) - -#define IDC_CLAIMI_INTP_ID_SHIFT (16) -#define IDC_CLAIMI_INTP_ID_MASK (0x3FF) +#define APLIC_DOMAINCFG_DM (1U << 2) +#define APLIC_DOMAINCFG_IE (1U << 8) +#define APLIC_DOMAINCFG_RO80 (0x80 << 24) + +#define APLIC_SRCCFG_D (1U << 10) +#define APLIC_SRCCFG_SM ((1U << 0) | (1U << 1) | (1U << 2)) + +#define APLIC_TARGET_HART_IDX_SHIFT (18) +#define APLIC_TARGET_GUEST_IDX_SHIFT (12) +#define APLIC_TARGET_HART_IDX_MASK (APLIC_MAX_NUM_HARTS_MAKS) +#define APLIC_TARGET_IPRIO_MASK (0xFF) +#define APLIC_TARGET_EEID_MASK (0x7FF) +#define APLIC_TARGET_GUEST_INDEX_MASK (0x3F) +#define APLIC_TARGET_MIN_PRIO (0xFF) +#define APLIC_TARGET_MAX_PRIO (0x01) +#define APLIC_TARGET_DIRECT_MASK (0xFFFC0000 | APLIC_IPRIO_MASK) +#define APLIC_TARGET_MSI_MASK (0xFFFFF7FF) + +#define IDC_CLAIMI_INTP_ID_SHIFT (16) +#define IDC_CLAIMI_INTP_ID_MASK (0x3FF) /** Data structures for APLIC devices */ struct aplic_control_hw { uint32_t domaincfg; uint32_t sourcecfg[APLIC_NUM_SRCCFG_REGS]; - uint8_t reserved1[0x1C00 - 0x1000]; + uint8_t reserved1[0x1C00 - 0x1000]; uint32_t setip[APLIC_NUM_SETIx_REGS]; - uint8_t reserved2[0x1CDC - 0x1C80]; + uint8_t reserved2[0x1CDC - 0x1C80]; uint32_t setipnum; - uint8_t reserved3[0x1D00 - 0x1CE0]; + uint8_t reserved3[0x1D00 - 0x1CE0]; uint32_t in_clrip[APLIC_NUM_CLRIx_REGS]; - uint8_t reserved4[0x1DDC - 0x1D80]; + uint8_t reserved4[0x1DDC - 0x1D80]; uint32_t clripnum; - uint8_t reserved5[0x1E00 - 0x1DE0]; + uint8_t reserved5[0x1E00 - 0x1DE0]; uint32_t setie[APLIC_NUM_SETIx_REGS]; - uint8_t reserved6[0x1EDC - 0x1E80]; + uint8_t reserved6[0x1EDC - 0x1E80]; uint32_t setienum; - uint8_t reserved7[0x1F00 - 0x1EE0]; + uint8_t reserved7[0x1F00 - 0x1EE0]; uint32_t clrie[APLIC_NUM_CLRIx_REGS]; - uint8_t reserved8[0x1FDC - 0x1F80]; + uint8_t reserved8[0x1FDC - 0x1F80]; uint32_t clrienum; - uint8_t reserved9[0x2000 - 0x1FE0]; + uint8_t reserved9[0x2000 - 0x1FE0]; uint32_t setipnum_le; uint32_t setipnum_be; uint8_t reserved10[0x3000 - 0x2008]; @@ -89,30 +89,31 @@ struct aplic_idc_hw { uint32_t idelivery; uint32_t iforce; uint32_t ithreshold; - uint8_t reserved[0x18-0x0C]; + uint8_t reserved[0x18 - 0x0C]; uint32_t topi; uint32_t claimi; -}__attribute__((__packed__, aligned(APLIC_IDC_SIZE))); // IDC structure CANNOT be page aligned. +} __attribute__((__packed__, aligned(APLIC_IDC_SIZE))); // IDC structure CANNOT + // be page aligned. -extern volatile struct aplic_control_hw *aplic_control; -extern volatile struct aplic_idc_hw *aplic_idc; +extern volatile struct aplic_control_hw* aplic_control; +extern volatile struct aplic_idc_hw* aplic_idc; extern uint8_t APLIC_IPRIO_MASK; /** * @brief Initialize the APLIC domain. - * + * */ void aplic_init(void); /** - * @brief Initialize the APLIC IDCs. - * + * @brief Initialize the APLIC IDCs. + * */ void aplic_idc_init(void); /** * @brief Write to APLIC's sourcecfg register - * + * * @param intp_id interruption ID identifies the interrupt to be configured. * @param val Value to be written into sourcecfg */ @@ -120,23 +121,23 @@ void aplic_set_sourcecfg(irqid_t intp_id, uint32_t val); /** * @brief Read from APLIC's sourcecfg register - * + * * @param intp_id interruption ID identifies the interrupt to be read. * @return a 32 bit value containing interrupt sourcecfg configuration. 
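A short usage sketch for this accessor pair (source number 5 is an arbitrary example; the set-then-read-back pattern matches how vaplic.c uses these calls, since the APLIC may rectify a written sourcecfg value):

    // Sketch only: program a source as rising-edge, then read the value back.
    aplic_set_sourcecfg(5, APLIC_SOURCECFG_SM_EDGE_RISE);
    uint32_t cfg = aplic_get_sourcecfg(5); // may differ from the value written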
*/ uint32_t aplic_get_sourcecfg(irqid_t intp_id); /** - * @brief Set a given interrupt as pending. - + * @brief Set a given interrupt as pending. + * * @param intp_id Interrupt to be set as pending */ void aplic_set_pend(irqid_t intp_id); /** - * @brief Potentially modifies the pending bits for interrupt - * sources reg_indx × 32 through reg_indx × 32 + 31. - + * @brief Potentially modifies the pending bits for interrupt sources reg_indx × 32 through + * reg_indx × 32 + 31. + * * @param reg_indx register index * @param reg_val register value to be written. */ @@ -144,7 +145,7 @@ void aplic_set_pend_reg(size_t reg_indx, uint32_t reg_val); /** * @brief Read the pending value of a given interrupt - * + * * @param intp_id interrupt to read from * @return true if interrupt is pending * @return false if interrupt is NOT pending @@ -152,9 +153,8 @@ void aplic_set_pend_reg(size_t reg_indx, uint32_t reg_val); bool aplic_get_pend(irqid_t intp_id); /** - * @brief Reads the pending bits for interrupt sources - * reg_indx × 32 through reg_indx × 32 + 31. - + * @brief Reads the pending bits for interrupt sources reg_indx × 32 through reg_indx × 32 + 31. + * * @param reg_indx register index * @return a 32 bit value containing interrupts pending state for reg_indx. */ @@ -162,24 +162,23 @@ uint32_t aplic_get_pend_reg(size_t reg_indx); /** * @brief Clear a pending bit from an interrupt by writing to in_clripnum. - * + * * @param intp_id interrupt to clear the pending bit from */ void aplic_clr_pend(irqid_t intp_id); /** - * @brief Modifies the pending bits for interrupt - * sources reg_indx × 32 through reg_indx × 32 + 31. - + * @brief Modifies the pending bits for interrupt sources reg_indx × 32 through reg_indx × 32 + 31. + * * @param reg_indx register index * @param reg_val register value to be written. */ void aplic_clr_pend_reg(size_t reg_indx, uint32_t reg_val); /** - * @brief Read the current rectified value for interrupt sources - * reg_indx × 32 through reg_indx × 32 + 31. - + * @brief Read the current rectified value for interrupt sources reg_indx × 32 through reg_indx × + * 32 + 31. + * * @param reg_indx register index * @return a 32 bit value containing interrupts rectified state for reg_indx. */ @@ -187,15 +186,14 @@ uint32_t aplic_get_inclrip_reg(size_t reg_indx); /** * @brief Enable a given interrupt - * + * * @param intp_id interrupt to be enabled */ void aplic_set_enbl(irqid_t intp_id); /** - * @brief Modifies the enable bits for interrupt - * sources reg_indx × 32 through reg_indx × 32 + 31. - + * @brief Modifies the enable bits for interrupt sources reg_indx × 32 through reg_indx × 32 + 31. + * * @param reg_indx register index * @param reg_val register value to be written. */ @@ -203,7 +201,7 @@ void aplic_set_enbl_reg(size_t reg_indx, uint32_t reg_val); /** * @brief Read the enable value of a given interrupt - * + * * @param intp_id interrupt to read from * @return true if interrupt is enabled * @return false if interrupt is NOT enabled @@ -211,16 +209,15 @@ void aplic_set_enbl_reg(size_t reg_indx, uint32_t reg_val); bool aplic_get_enbl(irqid_t intp_id); /** - * @brief Disable a given interrupt - + * @brief Disable a given interrupt + * * @param intp_id Interrupt to disable */ void aplic_clr_enbl(irqid_t intp_id); /** - * @brief Modifies the enable bits for interrupt - * sources reg_indx × 32 through reg_indx × 32 + 31. - + * @brief Modifies the enable bits for interrupt sources reg_indx × 32 through reg_indx × 32 + 31.
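The reg_indx × 32 convention used by these register-window accessors maps an interrupt id onto one 32-bit word; a small sketch of the decomposition, mirroring the arithmetic used in aplic_get_enbl in aplic.c:

    // Sketch only: locate interrupt intp_id inside the 32-bit register windows.
    size_t reg_indx = intp_id / 32;           // which setip/setie/clrie word
    uint32_t bit_mask = 1U << (intp_id % 32); // bit within that word
    aplic_clr_enbl_reg(reg_indx, bit_mask);   // disables exactly this source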
+ * * @param reg_indx register index * @param reg_val register value to be written. */ @@ -228,7 +225,7 @@ void aplic_clr_enbl_reg(size_t reg_indx, uint32_t reg_val); /** * @brief Write the priority of an interrupt to a given interrupt - * + * * @param intp_id interrupt ID * @param prio priority */ @@ -236,15 +233,15 @@ void aplic_set_target_prio(irqid_t intp_id, uint8_t prio); /** * @brief Write the target hart of an interrupt to a given interrupt - * + * * @param intp_id interrupt ID * @param hart hart index */ void aplic_set_target_hart(irqid_t intp_id, cpuid_t hart); /** - * @brief Return the priority of a given interrupt - + * @brief Return the priority of a given interrupt + * * @param intp_id interrupt ID * @return uint8_t the interrupt priority */ @@ -252,7 +249,7 @@ uint8_t aplic_get_target_prio(irqid_t intp_id); /** * @brief Return the target hart of a given interrupt - * + * * @param intp_id interrupt ID * @return cpuid_t the interrupt hart index */ @@ -260,10 +257,10 @@ cpuid_t aplic_get_target_hart(irqid_t intp_id); /** * @brief Returns the highest pending and enabled interrupt id. - * - * Claimi has the same value as topi. However, reading claimi has the side - * effect of clearing the pending bit for the reported interrupt identity. - + * + * Claimi has the same value as topi. However, reading claimi has the side effect of clearing the + * pending bit for the reported interrupt identity. + * * @param idc_id IDC from which to read and clear the highest-priority pending bit * @return uint32_t returns the interrupt identity and interrupt priority. */ @@ -271,8 +268,8 @@ irqid_t aplic_idc_get_claimi_intpid(idcid_t idc_id); /** * @brief Handles an incoming interrupt in irq controller. - * + * */ void aplic_handle(void); -#endif //APLIC_H \ No newline at end of file +#endif // APLIC_H diff --git a/src/arch/riscv/irqc/aia/inc/irqc.h b/src/arch/riscv/irqc/aia/inc/irqc.h index 22e374ea8..adbb5f0f6 100644 --- a/src/arch/riscv/irqc/aia/inc/irqc.h +++ b/src/arch/riscv/irqc/aia/inc/irqc.h @@ -11,13 +11,13 @@ #include #include -#define IRQC_MAX_INTERRUPTS (APLIC_MAX_INTERRUPTS) +#define IRQC_MAX_INTERRUPTS (APLIC_MAX_INTERRUPTS) -#define HART_REG_OFF APLIC_IDC_OFF -#define IRQC_HART_INST APLIC_DOMAIN_NUM_HARTS +#define HART_REG_OFF APLIC_IDC_OFF +#define IRQC_HART_INST APLIC_DOMAIN_NUM_HARTS #define HYP_IRQ_SM_EDGE_RISE APLIC_SOURCECFG_SM_EDGE_RISE -#define HYP_IRQ_SM_INACTIVE APLIC_SOURCECFG_SM_INACTIVE -#define HYP_IRQ_PRIO APLIC_TARGET_MAX_PRIO +#define HYP_IRQ_SM_INACTIVE APLIC_SOURCECFG_SM_INACTIVE +#define HYP_IRQ_PRIO APLIC_TARGET_MAX_PRIO static inline void irqc_init() { @@ -31,7 +31,7 @@ static inline void irqc_cpu_init() static inline void irqc_config_irq(irqid_t int_id, bool en) { - if (en){ + if (en) { aplic_set_sourcecfg(int_id, HYP_IRQ_SM_EDGE_RISE); aplic_set_enbl(int_id); aplic_set_target_hart(int_id, cpu()->id); @@ -56,9 +56,9 @@ static inline void irqc_clr_pend(irqid_t int_id) { aplic_clr_pend(int_id); } -static inline void virqc_set_hw(struct vm *vm, irqid_t id) +static inline void virqc_set_hw(struct vm* vm, irqid_t id) { vaplic_set_hw(vm, id); } -#endif //IRQC_H \ No newline at end of file +#endif // IRQC_H diff --git a/src/arch/riscv/irqc/aia/inc/vaplic.h b/src/arch/riscv/irqc/aia/inc/vaplic.h index f045d1806..f8083f30b 100644 --- a/src/arch/riscv/irqc/aia/inc/vaplic.h +++ b/src/arch/riscv/irqc/aia/inc/vaplic.h @@ -17,10 +17,10 @@ struct vaplic { size_t idc_num; uint32_t domaincfg; uint32_t srccfg[APLIC_MAX_INTERRUPTS]; - uint32_t hw[APLIC_MAX_INTERRUPTS/32]; - uint32_t
active[APLIC_MAX_INTERRUPTS/32]; - uint32_t ip[APLIC_MAX_INTERRUPTS/32]; - uint32_t ie[APLIC_MAX_INTERRUPTS/32]; + uint32_t hw[APLIC_MAX_INTERRUPTS / 32]; + uint32_t active[APLIC_MAX_INTERRUPTS / 32]; + uint32_t ip[APLIC_MAX_INTERRUPTS / 32]; + uint32_t ie[APLIC_MAX_INTERRUPTS / 32]; uint32_t target[APLIC_MAX_INTERRUPTS]; BITMAP_ALLOC(idelivery, APLIC_DOMAIN_NUM_HARTS); BITMAP_ALLOC(iforce, APLIC_DOMAIN_NUM_HARTS); @@ -36,55 +36,54 @@ union vm_irqc_dscrp; /** * @brief Initialize the virtual APLIC for a given virtual machine. - * + * * @param vm Virtual machine associated to the virtual APLIC * @param vm_irqc_dscrp virtual irqc platform configuration - * + * */ -void vaplic_init(struct vm *vm, const union vm_irqc_dscrp *vm_irqc_dscrp); +void vaplic_init(struct vm* vm, const union vm_irqc_dscrp* vm_irqc_dscrp); /** * @brief Inject an interrupt into a vm. - * + * * @param vcpu Virtual CPU that will inject the interrupt * @param id interrupt identification - * - * The virtual CPU passed by arg. may not be the CPU to which the interrupt - * is associated. In that case the vcpu will send a msg to the target cpu. + * + * The virtual CPU passed by arg. may not be the CPU to which the interrupt is associated. In that + * case the vcpu will send a msg to the target cpu. */ -void vaplic_inject(struct vcpu *vcpu, irqid_t id); +void vaplic_inject(struct vcpu* vcpu, irqid_t id); /** - * @brief For a given virtual machine and an interrupt, associate this - * interrupt with the physical one. Thus, interrupt id is mapped - * to the physical id source. - * + * @brief For a given virtual machine and an interrupt, associate this interrupt with the physical + * one. Thus, interrupt id is mapped to the physical id source. + * * @param vm Virtual machine to associate the 1-1 virt/phys interrupt * @param id interrupt identification to associate. */ -void vaplic_set_hw(struct vm *vm, irqid_t id); +void vaplic_set_hw(struct vm* vm, irqid_t id); /** * @brief Wrapper for the virtual irqc initialization function - * - * @param vm Virtual Machine + * + * @param vm Virtual Machine * @param vm_irqc_dscrp virtual irqc platform configuration */ -static inline void virqc_init(struct vm *vm, const union vm_irqc_dscrp *vm_irqc_dscrp) +static inline void virqc_init(struct vm* vm, const union vm_irqc_dscrp* vm_irqc_dscrp) { vaplic_init(vm, vm_irqc_dscrp); } /** * @brief Injects a given interrupt into a virtual cpu - * + * * @param vcpu target virtual cpu * @param id interrupt id to be injected */ typedef struct vcpu vcpu_t; -static inline void virqc_inject(vcpu_t *vcpu, irqid_t id) +static inline void virqc_inject(vcpu_t* vcpu, irqid_t id) { vaplic_inject(vcpu, id); } -#endif //VAPLIC_H \ No newline at end of file +#endif // VAPLIC_H diff --git a/src/arch/riscv/irqc/aia/vaplic.c b/src/arch/riscv/irqc/aia/vaplic.c index 346d3b402..321d84640 100644 --- a/src/arch/riscv/irqc/aia/vaplic.c +++ b/src/arch/riscv/irqc/aia/vaplic.c @@ -11,107 +11,113 @@ #include #include -#define APLIC_MIN_PRIO (0xFF) -#define UPDATE_ALL_HARTS (-1) -#define MASK_INTP_ZERO (0xFFFFFFFE) +#define APLIC_MIN_PRIO (0xFF) +#define UPDATE_ALL_HARTS (-1) +#define MASK_INTP_ZERO (0xFFFFFFFE) -#define SET_INTP_REG(reg, intp_id) (reg[intp_id/32] =\ - bit32_set(reg[intp_id/32], intp_id%32)) -#define GET_INTP_REG(reg, intp_id) ((bit32_get(reg[intp_id/32], intp_id%32)\ - != 0) ? 
1U : 0U) -#define CLR_INTP_REG(reg, intp_id) (reg[intp_id/32] =\ - bit32_clear(reg[intp_id/32], intp_id%32)) +#define SET_INTP_REG(reg, intp_id) (reg[intp_id / 32] = bit32_set(reg[intp_id / 32], intp_id % 32)) +#define GET_INTP_REG(reg, intp_id) ((bit32_get(reg[intp_id / 32], intp_id % 32) != 0) ? 1U : 0U) +#define CLR_INTP_REG(reg, intp_id) \ + (reg[intp_id / 32] = bit32_clear(reg[intp_id / 32], intp_id % 32)) /** * @brief Returns if a given interrupt is valid - * + * * @param intp_id interrupt ID * @return true if the interrupt is valid * @return false if the interrupt is NOT valid */ -static inline bool vaplic_intp_valid(irqid_t intp_id){ +static inline bool vaplic_intp_valid(irqid_t intp_id) +{ return intp_id != 0 && intp_id < APLIC_MAX_INTERRUPTS; } /** * @brief Converts a virtual cpu id into the physical one - * + * * @param vcpu Virtual cpu to convert * @return int The physical cpu id; or INVALID_CPUID in case of error. */ -static inline cpuid_t vaplic_vcpuid_to_pcpuid(struct vcpu *vcpu, vcpuid_t vhart){ +static inline cpuid_t vaplic_vcpuid_to_pcpuid(struct vcpu* vcpu, vcpuid_t vhart) +{ return vm_translate_to_pcpuid(vcpu->vm, vhart); } -static uint32_t vaplic_get_domaincfg(struct vcpu *vcpu); -static uint32_t vaplic_get_target(struct vcpu *vcpu, irqid_t intp_id); -static uint32_t vaplic_get_idelivery(struct vcpu *vcpu, idcid_t idc_id); -static uint32_t vaplic_get_iforce(struct vcpu *vcpu, idcid_t idc_id); -static uint32_t vaplic_get_ithreshold(struct vcpu *vcpu, idcid_t idc_id); +static uint32_t vaplic_get_domaincfg(struct vcpu* vcpu); +static uint32_t vaplic_get_target(struct vcpu* vcpu, irqid_t intp_id); +static uint32_t vaplic_get_idelivery(struct vcpu* vcpu, idcid_t idc_id); +static uint32_t vaplic_get_iforce(struct vcpu* vcpu, idcid_t idc_id); +static uint32_t vaplic_get_ithreshold(struct vcpu* vcpu, idcid_t idc_id); -void vaplic_set_hw(struct vm *vm, irqid_t intp_id) +void vaplic_set_hw(struct vm* vm, irqid_t intp_id) { if (intp_id < APLIC_MAX_INTERRUPTS) { - bitmap_set(vm->arch.vaplic.hw,intp_id); + bitmap_set(vm->arch.vaplic.hw, intp_id); } } /** * @brief Returns the target hart index of a given interrupt - * + * * @param vcpu virtual cpu * @param intp_id interrupt ID * @return vcpuid_t target hart index of the given interrupt */ -static inline vcpuid_t vaplic_get_hart_index(struct vcpu *vcpu, irqid_t intp_id){ +static inline vcpuid_t vaplic_get_hart_index(struct vcpu* vcpu, irqid_t intp_id) +{ return (vaplic_get_target(vcpu, intp_id) >> APLIC_TARGET_HART_IDX_SHIFT) & - APLIC_TARGET_HART_IDX_MASK; + APLIC_TARGET_HART_IDX_MASK; } /** * @brief Returns if a given interrupt is associated to the physical source - * + * * @param vcpu virtual cpu running * @param intp_id interrupt to evaluate * @return true if is a physical intp * @return false if is NOT a physical intp */ -static bool vaplic_get_hw(struct vcpu* vcpu, irqid_t intp_id){ +static bool vaplic_get_hw(struct vcpu* vcpu, irqid_t intp_id) +{ bool ret = false; - struct vaplic * vaplic = &vcpu->vm->arch.vaplic; - if (vaplic_intp_valid(intp_id)) ret = bitmap_get(vaplic->hw, intp_id); + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; + if (vaplic_intp_valid(intp_id)) { + ret = bitmap_get(vaplic->hw, intp_id); + } return ret; } /** * @brief Returns if a given interrupt is pending - * + * * @param vcpu virtual cpu * @param intp_id interrupt ID * @return true if the interrupt is pending * @return false if the interrupt is NOT pending */ -static bool vaplic_get_pend(struct vcpu *vcpu, irqid_t intp_id){ +static bool 
vaplic_get_pend(struct vcpu* vcpu, irqid_t intp_id) +{ uint32_t ret = 0; - struct vaplic * vaplic = &vcpu->vm->arch.vaplic; - if (vaplic_intp_valid(intp_id)){ + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; + if (vaplic_intp_valid(intp_id)) { ret = !!GET_INTP_REG(vaplic->ip, intp_id); } return ret; } /** - * @brief Returns if a given interrupt is enbaled - + * @brief Returns if a given interrupt is enabled + * * @param vcpu virtual cpu * @param intp_id interrupt ID * @return true if the interrupt is enabled * @return false if the interrupt is NOT enabled */ -static bool vaplic_get_enbl(struct vcpu *vcpu, irqid_t intp_id){ +static bool vaplic_get_enbl(struct vcpu* vcpu, irqid_t intp_id) +{ uint32_t ret = 0; - struct vaplic * vaplic = &vcpu->vm->arch.vaplic; - if (vaplic_intp_valid(intp_id)){ + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; + if (vaplic_intp_valid(intp_id)) { ret = !!GET_INTP_REG(vaplic->ie, intp_id); } return ret; @@ -119,16 +125,17 @@ static bool vaplic_get_enbl(struct vcpu *vcpu, irqid_t intp_id){ /** * @brief Returns if a given interrupt is active for this domain. - * + * * @param vcpu virtual cpu * @param intp_id interrupt id * @return true if the interrupt is active * @return false if the interrupt is NOT active */ -static bool vaplic_get_active(struct vcpu *vcpu, irqid_t intp_id){ - struct vaplic * vaplic = &vcpu->vm->arch.vaplic; +static bool vaplic_get_active(struct vcpu* vcpu, irqid_t intp_id) +{ + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; bool ret = false; - if (vaplic_intp_valid(intp_id)){ + if (vaplic_intp_valid(intp_id)) { ret = !!GET_INTP_REG(vaplic->active, intp_id); } return ret; @@ -136,22 +143,21 @@ static bool vaplic_get_active(struct vcpu *vcpu, irqid_t intp_id){ /** * @brief Set a given interrupt as pending - * - * @pre This function should only be called by a function that - * has taken the lock. - + * + * @pre This function should only be called by a function that has taken the lock.
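These getters all reduce to the word-and-bit macros defined at the top of this file; expanded by hand, a query such as vaplic_get_pend performs roughly the following (sketch only):

    // Sketch only: GET_INTP_REG(vaplic->ip, intp_id) unfolds to this word/bit access.
    uint32_t word = vaplic->ip[intp_id / 32];            // 32 sources per word
    bool pending = (bit32_get(word, intp_id % 32) != 0); // test one bit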
+ * * @param vcpu virtual cpu * @param intp_id interrupt id * @return true if interrupt was set pending * @return false if interrupt was NOT set pending */ -static bool vaplic_set_pend(struct vcpu *vcpu, irqid_t intp_id){ - struct vaplic * vaplic = &vcpu->vm->arch.vaplic; +static bool vaplic_set_pend(struct vcpu* vcpu, irqid_t intp_id) +{ + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; bool ret = false; - if (vaplic_intp_valid(intp_id) && - !vaplic_get_pend(vcpu, intp_id) && - vaplic_get_active(vcpu, intp_id)){ + if (vaplic_intp_valid(intp_id) && !vaplic_get_pend(vcpu, intp_id) && + vaplic_get_active(vcpu, intp_id)) { SET_INTP_REG(vaplic->ip, intp_id); ret = true; } @@ -159,15 +165,15 @@ static bool vaplic_set_pend(struct vcpu *vcpu, irqid_t intp_id){ } /** - * @brief Updates the topi register with with the - * highest pend & en interrupt id - + * @brief Updates the topi register with the highest pend & en interrupt id + * * @param vcpu virtual cpu * @return true if topi was updated, requiring the handling of the interrupt * @return false if there is no new interrupt to handle */ -static bool vaplic_update_topi(struct vcpu* vcpu){ - struct vaplic * vaplic = &vcpu->vm->arch.vaplic; +static bool vaplic_update_topi(struct vcpu* vcpu) +{ + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; bool ret = false; uint32_t intp_prio = APLIC_MIN_PRIO; irqid_t intp_id = APLIC_MAX_INTERRUPTS; @@ -175,19 +181,19 @@ static bool vaplic_update_topi(struct vcpu* vcpu){ uint32_t idc_threshold = 0; bool domain_enbl = false; bool idc_enbl = false; - bool idc_force = false; + bool idc_force = false; uint32_t update_topi = 0; /** Find highest pending and enabled interrupt */ for (size_t i = 1; i < APLIC_MAX_INTERRUPTS; i++) { if (vaplic_get_hart_index(vcpu, i) == vcpu->id) { if (vaplic_get_pend(vcpu, i) && vaplic_get_enbl(vcpu, i)) { - prio = vaplic_get_target(vcpu, i) & APLIC_TARGET_IPRIO_MASK; + prio = vaplic_get_target(vcpu, i) & APLIC_TARGET_IPRIO_MASK; if (prio < intp_prio) { intp_prio = prio; intp_id = i; } - } + } } } @@ -196,12 +202,11 @@ static bool vaplic_update_topi(struct vcpu* vcpu){ domain_enbl = !!(vaplic_get_domaincfg(vcpu) & APLIC_DOMAINCFG_IE); idc_enbl = !!(vaplic_get_idelivery(vcpu, vcpu->id)); idc_force = !!(vaplic_get_iforce(vcpu, vcpu->id)); - - if ((intp_id != APLIC_MAX_INTERRUPTS) && - (intp_prio < idc_threshold || idc_threshold == 0) && - idc_enbl && domain_enbl) { - update_topi = (intp_id << 16) | intp_prio; - ret = true; + + if ((intp_id != APLIC_MAX_INTERRUPTS) && (intp_prio < idc_threshold || idc_threshold == 0) && + idc_enbl && domain_enbl) { + update_topi = (intp_id << 16) | intp_prio; + ret = true; } else if (idc_force && idc_enbl && domain_enbl) { ret = true; } @@ -209,66 +214,66 @@ static bool vaplic_update_topi(struct vcpu* vcpu){ return ret; } -enum {UPDATE_HART_LINE}; +enum { UPDATE_HART_LINE }; static void vaplic_ipi_handler(uint32_t event, uint64_t data); CPU_MSG_HANDLER(vaplic_ipi_handler, VPLIC_IPI_ID); /** * @brief Updates the interrupt line for a single hart - * + * * @param vcpu virtual cpu * @param vhart_index hart id to update */ -static void vaplic_update_hart_line(struct vcpu* vcpu, vcpuid_t vhart_index){ +static void vaplic_update_hart_line(struct vcpu* vcpu, vcpuid_t vhart_index) +{ cpuid_t pcpu_id = vaplic_vcpuid_to_pcpuid(vcpu, vhart_index); - /** - * If the current cpu is the targeting cpu, signal the intp - * to the hart - * Else, send a mensage to the targeting cpu + /** + * If the current cpu is the target cpu, signal the intp to the hart.
Else, send a message + * to the target cpu */ - if(pcpu_id == cpu()->id) { - if(vaplic_update_topi(vcpu)){ + if (pcpu_id == cpu()->id) { + if (vaplic_update_topi(vcpu)) { CSRS(CSR_HVIP, HIP_VSEIP); - } else { + } else { CSRC(CSR_HVIP, HIP_VSEIP); } } else { - struct cpu_msg msg = {VPLIC_IPI_ID, UPDATE_HART_LINE, vhart_index}; - cpu_send_msg(pcpu_id, &msg); + struct cpu_msg msg = { VPLIC_IPI_ID, UPDATE_HART_LINE, vhart_index }; + cpu_send_msg(pcpu_id, &msg); } } /** * @brief Triggers the hart/harts interrupt line update. - * + * * @param vcpu virtual cpu - * @param vhart_index virtual hart to update the interrupt line. - * If UPDATE_ALL_HARTS were passed, this function will trigger - * the interrupt line update to all virtual harts running in this vm. + * @param vhart_index virtual hart to update the interrupt line. If UPDATE_ALL_HARTS is passed, + * this function will trigger the interrupt line update to all virtual harts running in this + * vm. */ -static void vaplic_update_hart(struct vcpu* vcpu, int16_t vhart_index) +static void vaplic_update_hart(struct vcpu* vcpu, int16_t vhart_index) { - struct vaplic *vaplic = &vcpu->vm->arch.vaplic; + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; - if (vhart_index == UPDATE_ALL_HARTS){ - for(size_t i = 0; i < vaplic->idc_num; i++){ + if (vhart_index == UPDATE_ALL_HARTS) { + for (size_t i = 0; i < vaplic->idc_num; i++) { vaplic_update_hart_line(vcpu, (vcpuid_t)i); } - } else if (vhart_index < vaplic->idc_num){ + } else if (vhart_index < vaplic->idc_num) { vaplic_update_hart_line(vcpu, (vcpuid_t)vhart_index); } } /** * @brief Processes an incoming event. - * + * * @param event the event id * @param data */ -static void vaplic_ipi_handler(uint32_t event, uint64_t data) +static void vaplic_ipi_handler(uint32_t event, uint64_t data) { - switch(event) { + switch (event) { case UPDATE_HART_LINE: vaplic_update_hart(cpu()->vcpu, (int16_t)data); break; @@ -277,12 +282,13 @@ static void vaplic_ipi_handler(uint32_t event, uint64_t data) /** * @brief Write to domaincfg register a new value.
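Stepping back to vaplic_update_hart_line above, the cross-hart path rides on the cpu_msg machinery registered with CPU_MSG_HANDLER; a condensed sketch of the round-trip, using only names visible in this file:

    // Sketch only: sender side, taken when the target vhart runs on another pcpu.
    struct cpu_msg msg = { VPLIC_IPI_ID, UPDATE_HART_LINE, (uint64_t)vhart_index };
    cpu_send_msg(pcpu_id, &msg);
    // Receiver side: vaplic_ipi_handler() dispatches UPDATE_HART_LINE and calls
    // vaplic_update_hart(cpu()->vcpu, (int16_t)data), which then sets or clears
    // HIP_VSEIP in CSR_HVIP for the local vcpu.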
- * - * @param vcpu + * + * @param vcpu * @param new_val The new value to write to domaincfg */ -static void vaplic_set_domaincfg(struct vcpu *vcpu, uint32_t new_val){ - struct vaplic * vaplic = &vcpu->vm->arch.vaplic; +static void vaplic_set_domaincfg(struct vcpu* vcpu, uint32_t new_val) +{ + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; spin_lock(&vaplic->lock); /** Update only the virtual domaincfg */ /** Only Interrupt Enable is configurable */ @@ -295,27 +301,29 @@ static void vaplic_set_domaincfg(struct vcpu *vcpu, uint32_t new_val){ /** * @brief Read from domaincfg - * + * * @param vcpu virtual hart - * @return uint32_t domaincfg value + * @return uint32_t domaincfg value */ -static uint32_t vaplic_get_domaincfg(struct vcpu *vcpu){ - struct vaplic * vaplic = &vcpu->vm->arch.vaplic; +static uint32_t vaplic_get_domaincfg(struct vcpu* vcpu) +{ + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; return vaplic->domaincfg; } /** * @brief Read the sourcecfg register of a given interrupt - * + * * @param vcpu virtual hart * @param intp_id interrupt ID * @return uint32_t value with the interrupt sourcecfg value */ -static uint32_t vaplic_get_sourcecfg(struct vcpu *vcpu, irqid_t intp_id){ - struct vaplic * vaplic = &vcpu->vm->arch.vaplic; +static uint32_t vaplic_get_sourcecfg(struct vcpu* vcpu, irqid_t intp_id) +{ + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; uint32_t ret = 0; - if(vaplic_intp_valid(intp_id)){ + if (vaplic_intp_valid(intp_id)) { ret = vaplic->srccfg[intp_id]; } return ret; @@ -323,39 +331,41 @@ static uint32_t vaplic_get_sourcecfg(struct vcpu *vcpu, irqid_t intp_id){ /** * @brief Write the sourcecfg register of a given interrupt - * + * * @param vcpu virtual hart * @param intp_id interrupt ID * @param new_val value to write to sourcecfg */ -static void vaplic_set_sourcecfg(struct vcpu *vcpu, irqid_t intp_id, uint32_t new_val){ - struct vaplic *vaplic = &vcpu->vm->arch.vaplic; +static void vaplic_set_sourcecfg(struct vcpu* vcpu, irqid_t intp_id, uint32_t new_val) +{ + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; spin_lock(&vaplic->lock); - if (intp_id > 0 && intp_id < APLIC_MAX_INTERRUPTS && + if (intp_id > 0 && intp_id < APLIC_MAX_INTERRUPTS && vaplic_get_sourcecfg(vcpu, intp_id) != new_val) { - /** If intp is being delegated make whole reg 0. - * This happens because a S domain is always a leaf. */ + /** If intp is being delegated make whole reg 0. This happens because a S domain is always + * a leaf. */ new_val &= (new_val & APLIC_SRCCFG_D) ? 
0 : APLIC_SRCCFG_SM; /** If SM is reserved make intp inactive */ - if(new_val == 2 || new_val == 3) + if (new_val == 2 || new_val == 3) { new_val = APLIC_SOURCECFG_SM_INACTIVE; - + } + /** Only edge sense can be virtualized for now */ - if(new_val == APLIC_SOURCECFG_SM_LEVEL_HIGH){ + if (new_val == APLIC_SOURCECFG_SM_LEVEL_HIGH) { new_val = APLIC_SOURCECFG_SM_EDGE_RISE; - } else if (new_val == APLIC_SOURCECFG_SM_LEVEL_LOW){ + } else if (new_val == APLIC_SOURCECFG_SM_LEVEL_LOW) { new_val = APLIC_SOURCECFG_SM_EDGE_FALL; } - if(vaplic_get_hw(vcpu, intp_id)){ + if (vaplic_get_hw(vcpu, intp_id)) { aplic_set_sourcecfg(intp_id, new_val); - new_val = aplic_get_sourcecfg(intp_id); + new_val = aplic_get_sourcecfg(intp_id); } vaplic->srccfg[intp_id] = new_val; - if (new_val == APLIC_SOURCECFG_SM_INACTIVE){ + if (new_val == APLIC_SOURCECFG_SM_INACTIVE) { CLR_INTP_REG(vaplic->active, intp_id); /** Zero pend, en and target registers if intp is now inactive */ CLR_INTP_REG(vaplic->ip, intp_id); @@ -371,31 +381,33 @@ static void vaplic_set_sourcecfg(struct vcpu *vcpu, irqid_t intp_id, uint32_t ne /** * @brief Get the pending bits for interrupts [32*reg:(32*reg)+31] - * + * * @param vcpu virtual cpu * @param reg regiter index * @return uint32_t value with pending values bit-mapped */ -static uint32_t vaplic_get_setip(struct vcpu *vcpu, size_t reg){ - struct vaplic * vaplic = &vcpu->vm->arch.vaplic; +static uint32_t vaplic_get_setip(struct vcpu* vcpu, size_t reg) +{ + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; uint32_t ret = 0; - if (reg < APLIC_NUM_SETIx_REGS){ + if (reg < APLIC_NUM_SETIx_REGS) { ret = vaplic->ip[reg]; - ret |= (aplic_get_pend_reg(reg) & vaplic->hw[reg]); + ret |= (aplic_get_pend_reg(reg) & vaplic->hw[reg]); } return ret; } /** * @brief Set the pending bits for interrupts [32*reg:(32*reg)+31] - * + * * @param vcpu virtual cpu * @param reg regiter index * @param new_val value with pending interrupts bit-mapped */ -static void vaplic_set_setip(struct vcpu *vcpu, size_t reg, uint32_t new_val){ - struct vaplic *vaplic = &vcpu->vm->arch.vaplic; +static void vaplic_set_setip(struct vcpu* vcpu, size_t reg, uint32_t new_val) +{ + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; uint32_t update_intps = 0; spin_lock(&vaplic->lock); @@ -403,9 +415,9 @@ static void vaplic_set_setip(struct vcpu *vcpu, size_t reg, uint32_t new_val){ new_val &= vaplic->active[reg]; update_intps = (~vaplic->ip[reg]) & new_val; vaplic->ip[reg] |= new_val; - for(size_t i = (reg*APLIC_NUM_INTP_PER_REG); - i < (reg*APLIC_NUM_INTP_PER_REG) + APLIC_NUM_INTP_PER_REG; i++){ - if (!!bit32_get(update_intps, i%32)){ + for (size_t i = (reg * APLIC_NUM_INTP_PER_REG); + i < (reg * APLIC_NUM_INTP_PER_REG) + APLIC_NUM_INTP_PER_REG; i++) { + if (!!bit32_get(update_intps, i % 32)) { vaplic_update_hart(vcpu, vaplic_get_hart_index(vcpu, i)); } } @@ -415,15 +427,16 @@ static void vaplic_set_setip(struct vcpu *vcpu, size_t reg, uint32_t new_val){ /** * @brief Set the pending bit for a given interrupt - * + * * @param vcpu virtual cpu * @param new_val interrupt to set the pending bit */ -static void vaplic_set_setipnum(struct vcpu *vcpu, uint32_t new_val){ - struct vaplic *vaplic = &vcpu->vm->arch.vaplic; +static void vaplic_set_setipnum(struct vcpu* vcpu, uint32_t new_val) +{ + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; spin_lock(&vaplic->lock); - if(vaplic_set_pend(vcpu, new_val)){ + if (vaplic_set_pend(vcpu, new_val)) { vaplic_update_hart(vcpu, vaplic_get_hart_index(vcpu, new_val)); } spin_unlock(&vaplic->lock); @@ -431,13 +444,14 
@@ static void vaplic_set_setipnum(struct vcpu *vcpu, uint32_t new_val){ /** * @brief Clear the pending bits for interrupts [32*reg:(32*reg)+31] - * - * @param vcpu virtual cpu + * + * @param vcpu virtual cpu * @param reg regiter index * @param new_val value with interrupts to be cleared per bit */ -static void vaplic_set_in_clrip(struct vcpu *vcpu, size_t reg, uint32_t new_val){ - struct vaplic *vaplic = &vcpu->vm->arch.vaplic; +static void vaplic_set_in_clrip(struct vcpu* vcpu, size_t reg, uint32_t new_val) +{ + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; uint32_t update_intps = 0; spin_lock(&vaplic->lock); @@ -448,10 +462,10 @@ static void vaplic_set_in_clrip(struct vcpu *vcpu, size_t reg, uint32_t new_val) new_val &= vaplic->hw[reg]; aplic_clr_pend_reg(reg, new_val); vaplic->ip[reg] |= aplic_get_pend_reg(reg); - update_intps &= ~(vaplic->ip[reg]); - for(size_t i = (reg*APLIC_NUM_INTP_PER_REG); - i < (reg*APLIC_NUM_INTP_PER_REG) + APLIC_NUM_INTP_PER_REG; i++){ - if (!!bit32_get(update_intps, i%32)){ + update_intps &= ~(vaplic->ip[reg]); + for (size_t i = (reg * APLIC_NUM_INTP_PER_REG); + i < (reg * APLIC_NUM_INTP_PER_REG) + APLIC_NUM_INTP_PER_REG; i++) { + if (!!bit32_get(update_intps, i % 32)) { vaplic_update_hart(vcpu, vaplic_get_hart_index(vcpu, i)); } } @@ -461,33 +475,35 @@ static void vaplic_set_in_clrip(struct vcpu *vcpu, size_t reg, uint32_t new_val) /** * @brief Get the rectified input values per source - * - * @param vcpu virtual cpu + * + * @param vcpu virtual cpu * @param reg regiter index * @return uint32_t value with rectified intp per bit */ -static uint32_t vaplic_get_in_clrip(struct vcpu *vcpu, size_t reg){ - struct vaplic *vaplic = &vcpu->vm->arch.vaplic; +static uint32_t vaplic_get_in_clrip(struct vcpu* vcpu, size_t reg) +{ + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; uint32_t ret = 0; - if (reg < APLIC_NUM_CLRIx_REGS) ret = (aplic_get_inclrip_reg(reg) & - vaplic->hw[reg]); + if (reg < APLIC_NUM_CLRIx_REGS) { + ret = (aplic_get_inclrip_reg(reg) & vaplic->hw[reg]); + } return ret; } /** * @brief Clear the pending bit for a given interrupt - * + * * @param vcpu virtual cpu * @param new_val interrupt to clear the pending bit */ -static void vaplic_set_clripnum(struct vcpu *vcpu, uint32_t new_val){ - struct vaplic *vaplic = &vcpu->vm->arch.vaplic; +static void vaplic_set_clripnum(struct vcpu* vcpu, uint32_t new_val) +{ + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; spin_lock(&vaplic->lock); - if (vaplic_get_active(vcpu, new_val) && - vaplic_get_pend(vcpu, new_val)){ - if(vaplic_get_hw(vcpu,new_val)){ + if (vaplic_get_active(vcpu, new_val) && vaplic_get_pend(vcpu, new_val)) { + if (vaplic_get_hw(vcpu, new_val)) { aplic_clr_pend(new_val); - if (!aplic_get_pend(new_val)){ + if (!aplic_get_pend(new_val)) { CLR_INTP_REG(vaplic->ip, new_val); } } else { @@ -500,16 +516,17 @@ static void vaplic_set_clripnum(struct vcpu *vcpu, uint32_t new_val){ /** * @brief Get the enabled bits for interrupts [32*reg:(32*reg)+31] - * + * * @param vcpu virtual cpu * @param reg regiter index * @return uint32_t value with enabled value bit-mapped */ -static uint32_t vaplic_get_setie(struct vcpu *vcpu, uint32_t reg){ - struct vaplic * vaplic = &vcpu->vm->arch.vaplic; +static uint32_t vaplic_get_setie(struct vcpu* vcpu, uint32_t reg) +{ + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; uint32_t ret = 0; - if (reg < APLIC_NUM_SETIx_REGS){ + if (reg < APLIC_NUM_SETIx_REGS) { ret = vaplic->ie[reg]; } return ret; @@ -517,26 +534,26 @@ static uint32_t vaplic_get_setie(struct vcpu *vcpu, 
uint32_t reg){ /** * @brief Set the enabled bits for interrupts [32*reg:(32*reg)+31] - * + * * @param vcpu virtual cpu * @param reg regiter index * @param new_val value with interrupts to be enabled per bit */ -static void vaplic_set_setie(struct vcpu *vcpu, size_t reg, uint32_t new_val){ - struct vaplic *vaplic = &vcpu->vm->arch.vaplic; +static void vaplic_set_setie(struct vcpu* vcpu, size_t reg, uint32_t new_val) +{ + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; uint32_t update_intps = 0; spin_lock(&vaplic->lock); - if (reg < APLIC_NUM_SETIx_REGS && - vaplic_get_setie(vcpu, reg) != new_val) { + if (reg < APLIC_NUM_SETIx_REGS && vaplic_get_setie(vcpu, reg) != new_val) { new_val &= vaplic->active[reg]; update_intps = ~(vaplic->ie[reg]) & new_val; vaplic->ie[reg] |= new_val; new_val &= vaplic->hw[reg]; aplic_set_enbl_reg(reg, new_val); - for(size_t i = (reg*APLIC_NUM_INTP_PER_REG); - i < (reg*APLIC_NUM_INTP_PER_REG) + APLIC_NUM_INTP_PER_REG; i++){ - if (!!bit32_get(update_intps, i%32)){ + for (size_t i = (reg * APLIC_NUM_INTP_PER_REG); + i < (reg * APLIC_NUM_INTP_PER_REG) + APLIC_NUM_INTP_PER_REG; i++) { + if (!!bit32_get(update_intps, i % 32)) { vaplic_update_hart(vcpu, vaplic_get_hart_index(vcpu, i)); } } @@ -546,19 +563,19 @@ static void vaplic_set_setie(struct vcpu *vcpu, size_t reg, uint32_t new_val){ /** * @brief Set the enabled bit for a given interrupt - * + * * @param vcpu virtual cpu * @param new_val interrupt to set the enable bit */ -static void vaplic_set_setienum(struct vcpu *vcpu, uint32_t new_val){ - struct vaplic *vaplic = &vcpu->vm->arch.vaplic; - +static void vaplic_set_setienum(struct vcpu* vcpu, uint32_t new_val) +{ + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; + spin_lock(&vaplic->lock); - if (vaplic_get_active(vcpu, new_val) && - !vaplic_get_enbl(vcpu, new_val)) { - if(vaplic_get_hw(vcpu, new_val)){ + if (vaplic_get_active(vcpu, new_val) && !vaplic_get_enbl(vcpu, new_val)) { + if (vaplic_get_hw(vcpu, new_val)) { aplic_set_enbl(new_val); - } + } SET_INTP_REG(vaplic->ie, new_val); vaplic_update_hart(vcpu, vaplic_get_hart_index(vcpu, new_val)); } @@ -567,25 +584,26 @@ static void vaplic_set_setienum(struct vcpu *vcpu, uint32_t new_val){ /** * @brief Clear the enabled bits for interrupts [32*reg:(32*reg)+31] - * - * @param vcpu virtual cpu + * + * @param vcpu virtual cpu * @param reg regiter index * @param new_val value with interrupts to be cleared per bit */ -static void vaplic_set_clrie(struct vcpu *vcpu, size_t reg, uint32_t new_val){ - struct vaplic *vaplic = &vcpu->vm->arch.vaplic; +static void vaplic_set_clrie(struct vcpu* vcpu, size_t reg, uint32_t new_val) +{ + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; uint32_t update_intps = 0; spin_lock(&vaplic->lock); - if (reg < APLIC_NUM_SETIx_REGS){ + if (reg < APLIC_NUM_SETIx_REGS) { new_val &= vaplic->active[reg]; update_intps = vaplic->ip[reg] & ~new_val; vaplic->ie[reg] &= ~(new_val); new_val &= vaplic->hw[reg]; aplic_clr_enbl_reg(reg, new_val); - for(size_t i = (reg*APLIC_NUM_INTP_PER_REG); - i < (reg*APLIC_NUM_INTP_PER_REG) + APLIC_NUM_INTP_PER_REG; i++){ - if (!!bit32_get(update_intps, i%32)){ + for (size_t i = (reg * APLIC_NUM_INTP_PER_REG); + i < (reg * APLIC_NUM_INTP_PER_REG) + APLIC_NUM_INTP_PER_REG; i++) { + if (!!bit32_get(update_intps, i % 32)) { vaplic_update_hart(vcpu, vaplic_get_hart_index(vcpu, i)); } } @@ -595,17 +613,17 @@ static void vaplic_set_clrie(struct vcpu *vcpu, size_t reg, uint32_t new_val){ /** * @brief Clear the enabled bit for a given interrupt - * + * * @param vcpu virtual cpu * 
@param new_val interrupt to clear the enable bit */ -static void vaplic_set_clrienum(struct vcpu *vcpu, uint32_t new_val){ - struct vaplic *vaplic = &vcpu->vm->arch.vaplic; +static void vaplic_set_clrienum(struct vcpu* vcpu, uint32_t new_val) +{ + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; spin_lock(&vaplic->lock); - if (vaplic_get_active(vcpu, new_val) && - vaplic_get_enbl(vcpu, new_val)) { - if(vaplic_get_hw(vcpu, new_val)){ + if (vaplic_get_active(vcpu, new_val) && vaplic_get_enbl(vcpu, new_val)) { + if (vaplic_get_hw(vcpu, new_val)) { aplic_clr_enbl(new_val); } CLR_INTP_REG(vaplic->ie, new_val); @@ -616,44 +634,41 @@ static void vaplic_set_clrienum(struct vcpu *vcpu, uint32_t new_val){ /** * @brief Write to target register of a given interrupt - * + * * @param vcpu virtual cpu * @param intp_id interrupt ID * @param new_val value to write to target */ -static void vaplic_set_target(struct vcpu *vcpu, irqid_t intp_id, uint32_t new_val){ - struct vaplic *vaplic = &vcpu->vm->arch.vaplic; +static void vaplic_set_target(struct vcpu* vcpu, irqid_t intp_id, uint32_t new_val) +{ + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; vcpuid_t hart_index = (new_val >> APLIC_TARGET_HART_IDX_SHIFT) & APLIC_TARGET_HART_IDX_MASK; uint8_t priority = new_val & APLIC_IPRIO_MASK; cpuid_t pcpu_id = vm_translate_to_pcpuid(vcpu->vm, hart_index); - vcpuid_t prev_hart_index = 0; + vcpuid_t prev_hart_index = 0; spin_lock(&vaplic->lock); - if(pcpu_id == INVALID_CPUID){ - /** If the hart index is invalid, make it vcpu = 0 - * and read the new pcpu. - * Software should not write anything other than legal - * values to such a field */ + if (pcpu_id == INVALID_CPUID) { + /** If the hart index is invalid, make it vcpu = 0 and read the new pcpu. Software should + * not write anything other than legal values to such a field */ hart_index = 0; pcpu_id = vm_translate_to_pcpuid(vcpu->vm, hart_index); } - + new_val &= APLIC_TARGET_DIRECT_MASK; - if (priority == 0){ + if (priority == 0) { new_val |= APLIC_TARGET_MAX_PRIO; priority = APLIC_TARGET_MAX_PRIO; } - if (vaplic_get_active(vcpu, intp_id) && - vaplic_get_target(vcpu, intp_id) != new_val) { + if (vaplic_get_active(vcpu, intp_id) && vaplic_get_target(vcpu, intp_id) != new_val) { prev_hart_index = vaplic_get_hart_index(vcpu, intp_id); - if(vaplic_get_hw(vcpu, intp_id)){ + if (vaplic_get_hw(vcpu, intp_id)) { aplic_set_target_hart(intp_id, pcpu_id); aplic_set_target_prio(intp_id, priority); - priority = aplic_get_target_prio(intp_id); + priority = aplic_get_target_prio(intp_id); } - vaplic->target[intp_id] = (hart_index << APLIC_TARGET_HART_IDX_SHIFT) | - priority; - if(prev_hart_index != hart_index){ + vaplic->target[intp_id] = (hart_index << APLIC_TARGET_HART_IDX_SHIFT) | priority; + if (prev_hart_index != hart_index) { vaplic_update_hart(vcpu, prev_hart_index); } vaplic_update_hart(vcpu, vaplic_get_hart_index(vcpu, intp_id)); @@ -663,36 +678,39 @@ static void vaplic_set_target(struct vcpu *vcpu, irqid_t intp_id, uint32_t new_v /** * @brief Read target register from a given interrupt - * + * * @param vcpu virtual cpu * @param intp_id interrupt ID * @return uint32_t value with target value */ -static uint32_t vaplic_get_target(struct vcpu *vcpu, irqid_t intp_id){ - struct vaplic * vaplic = &vcpu->vm->arch.vaplic; +static uint32_t vaplic_get_target(struct vcpu* vcpu, irqid_t intp_id) +{ + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; uint32_t ret = 0; - - if (vaplic_intp_valid(intp_id)){ + + if (vaplic_intp_valid(intp_id)) { ret = vaplic->target[intp_id]; } return ret; 
} /** - * @brief Set idelivery register for a given idc. - * + * @brief Set idelivery register for a given idc. + * * @param vcpu virtual CPU * @param idc_id idc identifier * @param new_val new value to write in idelivery. Only 0 and 1 are allowed. */ -static void vaplic_set_idelivery(struct vcpu *vcpu, idcid_t idc_id, uint32_t new_val){ - struct vaplic * vaplic = &vcpu->vm->arch.vaplic; +static void vaplic_set_idelivery(struct vcpu* vcpu, idcid_t idc_id, uint32_t new_val) +{ + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; spin_lock(&vaplic->lock); - if (idc_id < vaplic->idc_num){ - if ((new_val & 0x1) != 0) + if (idc_id < vaplic->idc_num) { + if ((new_val & 0x1) != 0) { bitmap_set(vaplic->idelivery, idc_id); - else + } else { bitmap_clear(vaplic->idelivery, idc_id); + } } vaplic_update_hart(vcpu, idc_id); spin_unlock(&vaplic->lock); @@ -700,33 +718,38 @@ static void vaplic_set_idelivery(struct vcpu *vcpu, idcid_t idc_id, uint32_t new /** * @brief Read idelivery register from a given idc. - * + * * @param vcpu virtual CPU * @param idc_id idc identifier * @return uint32_t value read from idelivery */ -static uint32_t vaplic_get_idelivery(struct vcpu *vcpu, idcid_t idc_id){ +static uint32_t vaplic_get_idelivery(struct vcpu* vcpu, idcid_t idc_id) +{ uint32_t ret = 0; - struct vaplic * vaplic = &vcpu->vm->arch.vaplic; - if (idc_id < vaplic->idc_num) ret = bitmap_get( vaplic->idelivery, idc_id); + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; + if (idc_id < vaplic->idc_num) { + ret = bitmap_get(vaplic->idelivery, idc_id); + } return ret; } /** - * @brief Set iforce register for a given idc. - * + * @brief Set iforce register for a given idc. + * * @param vcpu virtual CPU * @param idc_id idc identifier * @param new_val new value to write in iforce. Only 0 and 1 are allowed. */ -static void vaplic_set_iforce(struct vcpu *vcpu, idcid_t idc_id, uint32_t new_val){ - struct vaplic * vaplic = &vcpu->vm->arch.vaplic; +static void vaplic_set_iforce(struct vcpu* vcpu, idcid_t idc_id, uint32_t new_val) +{ + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; spin_lock(&vaplic->lock); - if (idc_id < vaplic->idc_num){ - if ((new_val & 0x1) != 0) + if (idc_id < vaplic->idc_num) { + if ((new_val & 0x1) != 0) { bitmap_set(vaplic->iforce, idc_id); - else + } else { bitmap_clear(vaplic->iforce, idc_id); + } } vaplic_update_hart(vcpu, idc_id); spin_unlock(&vaplic->lock); @@ -734,29 +757,33 @@ static void vaplic_set_iforce(struct vcpu *vcpu, idcid_t idc_id, uint32_t new_va /** * @brief Read iforce register from a given idc. - * + * * @param vcpu virtual CPU * @param idc_id idc identifier * @return uint32_t value read from iforce */ -static uint32_t vaplic_get_iforce(struct vcpu *vcpu, idcid_t idc_id){ +static uint32_t vaplic_get_iforce(struct vcpu* vcpu, idcid_t idc_id) +{ uint32_t ret = 0; - struct vaplic * vaplic = &vcpu->vm->arch.vaplic; - if (idc_id < vaplic->idc_num) ret = bitmap_get(vaplic->iforce, idc_id); + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; + if (idc_id < vaplic->idc_num) { + ret = bitmap_get(vaplic->iforce, idc_id); + } return ret; } /** * @brief Set ithreshold register for a given idc. 
- * + * * @param vcpu virtual CPU * @param idc_id idc identifier * @param new_val new value to write in ithreshold */ -static void vaplic_set_ithreshold(struct vcpu *vcpu, idcid_t idc_id, uint32_t new_val){ - struct vaplic * vaplic = &vcpu->vm->arch.vaplic; +static void vaplic_set_ithreshold(struct vcpu* vcpu, idcid_t idc_id, uint32_t new_val) +{ + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; spin_lock(&vaplic->lock); - if (idc_id < vaplic->idc_num){ + if (idc_id < vaplic->idc_num) { vaplic->ithreshold[idc_id] = new_val & APLIC_IPRIO_MASK; } vaplic_update_hart(vcpu, idc_id); @@ -765,51 +792,58 @@ /** * @brief Read ithreshold register from a given idc. - * + * * @param vcpu virtual CPU * @param idc_id idc identifier * @return uint32_t value read from ithreshold */ -static uint32_t vaplic_get_ithreshold(struct vcpu *vcpu, idcid_t idc_id){ +static uint32_t vaplic_get_ithreshold(struct vcpu* vcpu, idcid_t idc_id) +{ uint32_t ret = 0; - struct vaplic * vaplic = &vcpu->vm->arch.vaplic; - if (idc_id < vaplic->idc_num) ret = vaplic->ithreshold[idc_id]; + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; + if (idc_id < vaplic->idc_num) { + ret = vaplic->ithreshold[idc_id]; + } return ret; } /** * @brief Read topi register from a given idc. - * + * * @param vcpu virtual CPU * @param idc_id idc identifier * @return uint32_t value read from topi */ -static uint32_t vaplic_get_topi(struct vcpu *vcpu, idcid_t idc_id){ +static uint32_t vaplic_get_topi(struct vcpu* vcpu, idcid_t idc_id) +{ uint32_t ret = 0; - struct vaplic * vaplic = &vcpu->vm->arch.vaplic; - if (idc_id < vaplic->idc_num) ret = vaplic->topi_claimi[idc_id]; + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; + if (idc_id < vaplic->idc_num) { + ret = vaplic->topi_claimi[idc_id]; + } return ret; } /** * @brief Returns the highest pending and enabled interrupt. - * - * Claimi has the same value as topi. However, reading claimi has the side - * effect of clearing the pending bit for the reported interrupt identity. - * + * + * Claimi has the same value as topi. However, reading claimi has the side effect of clearing the + * pending bit for the reported interrupt identity. + * * @param vcpu virtual CPU * @param idc_id idc identifier * @return 32 bit value read from virt claimi */ -static uint32_t vaplic_get_claimi(struct vcpu *vcpu, idcid_t idc_id){ +static uint32_t vaplic_get_claimi(struct vcpu* vcpu, idcid_t idc_id) +{ uint32_t ret = 0; struct vaplic* vaplic = &vcpu->vm->arch.vaplic; spin_lock(&vaplic->lock); - if (idc_id < vaplic->idc_num){ + if (idc_id < vaplic->idc_num) { ret = vaplic->topi_claimi[idc_id]; CLR_INTP_REG(vaplic->ip, (ret >> IDC_CLAIMI_INTP_ID_SHIFT)); /** Spurious intp*/ - if (ret == 0){ + if (ret == 0) { bitmap_clear(vaplic->iforce, idc_id); } vaplic_update_hart(vcpu, idc_id); @@ -819,14 +853,14 @@ static uint32_t vaplic_get_claimi(struct vcpu *vcpu, idcid_t idc_id){ } /** - * @brief domaincfg register access emulation function - * + * @brief domaincfg register access emulation function + * * @param acc access information - * - * It determines whether it needs to call the write or read funcion - * for the choosen register. + * + * It determines whether it needs to call the write or read function for the chosen register. 
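+ *
+ * As a reading aid, a sketch of the register being emulated (field layout paraphrased from the
+ * RISC-V AIA spec rather than defined by this patch): domaincfg packs the big-endian bit at
+ * bit 0, the delivery mode at bit 2 and the global interrupt enable at bit 8, while the top
+ * byte reads back as 0x80:
+ *
+ *     uint32_t domaincfg = (0x80U << 24) | (ie << 8) | (dm << 2) | be;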
*/ -static void vaplic_emul_domaincfg_access(struct emul_access *acc){ +static void vaplic_emul_domaincfg_access(struct emul_access* acc) +{ if (acc->write) { vaplic_set_domaincfg(cpu()->vcpu, vcpu_readreg(cpu()->vcpu, acc->reg)); } else { @@ -835,15 +869,15 @@ } /** - * @brief sourcecfg register access emulation function - * + * @brief sourcecfg register access emulation function + * * @param acc access information - * - * It determines whether it needs to call the write or read funcion - * for the choosen register. + * + * It determines whether it needs to call the write or read function for the chosen register. */ -static void vaplic_emul_srccfg_access(struct emul_access *acc){ - int intp = (acc->addr & 0xFFF)/4; +static void vaplic_emul_srccfg_access(struct emul_access* acc) +{ + int intp = (acc->addr & 0xFFF) / 4; if (acc->write) { vaplic_set_sourcecfg(cpu()->vcpu, intp, vcpu_readreg(cpu()->vcpu, acc->reg)); } else { @@ -852,15 +886,15 @@ } /** - * @brief setip register access emulation function - * + * @brief setip register access emulation function + * * @param acc access information - * - * It determines whether it needs to call the write or read funcion - * for the choosen register. + * + * It determines whether it needs to call the write or read function for the chosen register. */ -static void vaplic_emul_setip_access(struct emul_access *acc){ - int reg = (acc->addr & 0x7F)/4; +static void vaplic_emul_setip_access(struct emul_access* acc) +{ + int reg = (acc->addr & 0x7F) / 4; if (acc->write) { vaplic_set_setip(cpu()->vcpu, reg, vcpu_readreg(cpu()->vcpu, acc->reg)); } else { @@ -869,29 +903,29 @@ } /** - * @brief setipnum register access emulation function - * + * @brief setipnum register access emulation function + * * @param acc access information - * - * It determines whether it needs to call the write or read funcion - * for the choosen register. + * + * It determines whether it needs to call the write or read function for the chosen register. */ -static void vaplic_emul_setipnum_access(struct emul_access *acc){ +static void vaplic_emul_setipnum_access(struct emul_access* acc) +{ if (acc->write) { vaplic_set_setipnum(cpu()->vcpu, vcpu_readreg(cpu()->vcpu, acc->reg)); } } /** - * @brief clrip register access emulation function - * + * @brief clrip register access emulation function + * * @param acc access information - * - * It determines whether it needs to call the write or read funcion - * for the choosen register. + * + * It determines whether it needs to call the write or read function for the chosen register. */ -static void vaplic_emul_in_clrip_access(struct emul_access *acc){ - int reg = (acc->addr & 0x7F)/4; +static void vaplic_emul_in_clrip_access(struct emul_access* acc) +{ + int reg = (acc->addr & 0x7F) / 4; if (acc->write) { vaplic_set_in_clrip(cpu()->vcpu, reg, vcpu_readreg(cpu()->vcpu, acc->reg)); } else { @@ -900,29 +934,29 @@ } /** - * @brief clripnum register access emulation function - * + * @brief clripnum register access emulation function + * * @param acc access information - * - * It determines whether it needs to call the write or read funcion - * for the choosen register. + * + * It determines whether it needs to call the write or read function for the chosen register. 
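+ *
+ * A sketch of the indexing shared by the setip/in_clrip handlers above (constants as already
+ * used in this file; the *ipnum registers take the raw interrupt number instead of a bitmap
+ * word):
+ *
+ *     size_t reg = (acc->addr & 0x7F) / 4; // which 32-bit ip[]/ie[] word is addressed
+ *     uint32_t bit = intp_id % 32;         // interrupt i lives in word i / 32, bit i % 32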
*/ -static void vaplic_emul_clripnum_access(struct emul_access *acc){ +static void vaplic_emul_clripnum_access(struct emul_access* acc) +{ if (acc->write) { vaplic_set_clripnum(cpu()->vcpu, vcpu_readreg(cpu()->vcpu, acc->reg)); } } /** - * @brief setie register access emulation function - * + * @brief setie register access emulation function + * * @param acc access information - * - * It determines whether it needs to call the write or read funcion - * for the choosen register. + * + * It determines whether it needs to call the write or read function for the chosen register. */ -static void vaplic_emul_setie_access(struct emul_access *acc){ - int reg = (acc->addr & 0x7F)/4; +static void vaplic_emul_setie_access(struct emul_access* acc) +{ + int reg = (acc->addr & 0x7F) / 4; if (acc->write) { vaplic_set_setie(cpu()->vcpu, reg, vcpu_readreg(cpu()->vcpu, acc->reg)); } else { @@ -931,58 +965,58 @@ } /** - * @brief setienum register access emulation function - * + * @brief setienum register access emulation function + * * @param acc access information - * - * It determines whether it needs to call the write or read funcion - * for the choosen register. + * + * It determines whether it needs to call the write or read function for the chosen register. */ -static void vaplic_emul_setienum_access(struct emul_access *acc){ +static void vaplic_emul_setienum_access(struct emul_access* acc) +{ if (acc->write) { vaplic_set_setienum(cpu()->vcpu, vcpu_readreg(cpu()->vcpu, acc->reg)); } } /** - * @brief clrie register access emulation function - * + * @brief clrie register access emulation function + * * @param acc access information - * - * It determines whether it needs to call the write or read funcion - * for the choosen register. + * + * It determines whether it needs to call the write or read function for the chosen register. */ -static void vaplic_emul_clrie_access(struct emul_access *acc){ - int reg = (acc->addr & 0x7F)/4; +static void vaplic_emul_clrie_access(struct emul_access* acc) +{ + int reg = (acc->addr & 0x7F) / 4; if (acc->write) { vaplic_set_clrie(cpu()->vcpu, reg, vcpu_readreg(cpu()->vcpu, acc->reg)); } } /** - * @brief clrienum register access emulation function - * + * @brief clrienum register access emulation function + * * @param acc access information - * - * It determines whether it needs to call the write or read funcion - * for the choosen register. + * + * It determines whether it needs to call the write or read function for the chosen register. */ -static void vaplic_emul_clrienum_access(struct emul_access *acc){ +static void vaplic_emul_clrienum_access(struct emul_access* acc) +{ if (acc->write) { vaplic_set_clrienum(cpu()->vcpu, vcpu_readreg(cpu()->vcpu, acc->reg)); } } /** - * @brief target register access emulation function - * + * @brief target register access emulation function + * * @param acc access information - * - * It determines whether it needs to call the write or read funcion - * for the choosen register. + * + * It determines whether it needs to call the write or read function for the chosen register. 
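+ *
+ * A sketch of the direct-mode target value decoded by vaplic_set_target above (a guest composes
+ * it the same way; names as used elsewhere in this file):
+ *
+ *     uint32_t target = ((uint32_t)hart_index << APLIC_TARGET_HART_IDX_SHIFT)
+ *                     | (prio & APLIC_IPRIO_MASK);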
*/ -static void vaplic_emul_target_access(struct emul_access *acc){ - int intp = (acc->addr & 0xFFF)/4; +static void vaplic_emul_target_access(struct emul_access* acc) +{ + int intp = (acc->addr & 0xFFF) / 4; if (acc->write) { vaplic_set_target(cpu()->vcpu, intp, vcpu_readreg(cpu()->vcpu, acc->reg)); } else { @@ -991,14 +1025,14 @@ } /** - * @brief idelivery register access emulation function - * + * @brief idelivery register access emulation function + * * @param acc access information - * - * It determines whether it needs to call the write or read funcion - * for the choosen register. + * + * It determines whether it needs to call the write or read function for the chosen register. */ -static void vaplic_emul_idelivery_access(struct emul_access *acc, idcid_t idc_id){ +static void vaplic_emul_idelivery_access(struct emul_access* acc, idcid_t idc_id) +{ if (acc->write) { vaplic_set_idelivery(cpu()->vcpu, idc_id, vcpu_readreg(cpu()->vcpu, acc->reg)); } else { @@ -1007,14 +1041,14 @@ } /** - * @brief iforce register access emulation function - * + * @brief iforce register access emulation function + * * @param acc access information - * - * It determines whether it needs to call the write or read funcion - * for the choosen register. + * + * It determines whether it needs to call the write or read function for the chosen register. */ -static void vaplic_emul_iforce_access(struct emul_access *acc, idcid_t idc_id){ +static void vaplic_emul_iforce_access(struct emul_access* acc, idcid_t idc_id) +{ if (acc->write) { vaplic_set_iforce(cpu()->vcpu, idc_id, vcpu_readreg(cpu()->vcpu, acc->reg)); } else { @@ -1023,14 +1057,14 @@ } /** - * @brief ithreshold register access emulation function - * + * @brief ithreshold register access emulation function + * * @param acc access information - * - * It determines whether it needs to call the write or read funcion - * for the choosen register. + * + * It determines whether it needs to call the write or read function for the chosen register. */ -static void vaplic_emul_ithreshold_access(struct emul_access *acc, idcid_t idc_id){ +static void vaplic_emul_ithreshold_access(struct emul_access* acc, idcid_t idc_id) +{ if (acc->write) { vaplic_set_ithreshold(cpu()->vcpu, idc_id, vcpu_readreg(cpu()->vcpu, acc->reg)); } else { @@ -1039,45 +1073,46 @@ } /** - * @brief topi register access emulation function - * + * @brief topi register access emulation function + * * @param acc access information - * - * It determines whether it needs to call the write or read funcion - * for the choosen register. + * + * It determines whether it needs to call the write or read function for the chosen register. */ -static void vaplic_emul_topi_access(struct emul_access *acc, idcid_t idc_id){ - if (!acc->write){ +static void vaplic_emul_topi_access(struct emul_access* acc, idcid_t idc_id) +{ + if (!acc->write) { vcpu_writereg(cpu()->vcpu, acc->reg, vaplic_get_topi(cpu()->vcpu, idc_id)); } } /** - * @brief claimi register access emulation function - * + * @brief claimi register access emulation function + * * @param acc access information - * - * It determines whether it needs to call the write or read funcion - * for the choosen register. 
+ * + * It determines whether it needs to call the write or read function for the chosen register. */ -static void vaplic_emul_claimi_access(struct emul_access *acc, idcid_t idc_id){ - if (!acc->write){ +static void vaplic_emul_claimi_access(struct emul_access* acc, idcid_t idc_id) +{ + if (!acc->write) { vcpu_writereg(cpu()->vcpu, acc->reg, vaplic_get_claimi(cpu()->vcpu, idc_id)); } } /** - * @brief Injects a given interrupt into a given vcpu - * + * @brief Injects a given interrupt into a given vcpu + * * @param vcpu vcpu to inject the interrupt * @param intp_id interrupt unique id */ -void vaplic_inject(struct vcpu *vcpu, irqid_t intp_id){ - struct vaplic * vaplic = &vcpu->vm->arch.vaplic; +void vaplic_inject(struct vcpu* vcpu, irqid_t intp_id) +{ + struct vaplic* vaplic = &vcpu->vm->arch.vaplic; spin_lock(&vaplic->lock); /** If the intp was successfully injected, update the hart line. */ - if (vaplic_set_pend(vcpu, intp_id)){ + if (vaplic_set_pend(vcpu, intp_id)) { vaplic_update_hart(vcpu, vaplic_get_hart_index(vcpu, intp_id)); } spin_unlock(&vaplic->lock); @@ -1085,33 +1120,34 @@ void vaplic_inject(struct vcpu *vcpu, irqid_t intp_id){ /** * @brief Given an address, this function returns whether it is reserved - * + * * @param addr address to check * @return true if the address is reserved * @return false if the address is NOT reserved */ -static bool vaplic_domain_emul_reserved (uint16_t addr) { +static bool vaplic_domain_emul_reserved(uint16_t addr) +{ bool ret = false; - if (in_range(addr, offsetof(struct aplic_control_hw, reserved1), - sizeof(aplic_control->reserved1) -4) || - in_range(addr, offsetof(struct aplic_control_hw, reserved2), - sizeof(aplic_control->reserved2) -4) || - in_range(addr, offsetof(struct aplic_control_hw, reserved3), - sizeof(aplic_control->reserved3) -4) || - in_range(addr, offsetof(struct aplic_control_hw, reserved4), - sizeof(aplic_control->reserved4) -4) || - in_range(addr, offsetof(struct aplic_control_hw, reserved5), - sizeof(aplic_control->reserved5) -4) || - in_range(addr, offsetof(struct aplic_control_hw, reserved6), - sizeof(aplic_control->reserved6) -4) || - in_range(addr, offsetof(struct aplic_control_hw, reserved7), - sizeof(aplic_control->reserved7) -4) || - in_range(addr, offsetof(struct aplic_control_hw, reserved8), - sizeof(aplic_control->reserved8) -4) || - in_range(addr, offsetof(struct aplic_control_hw, reserved9), - sizeof(aplic_control->reserved9) -4) || - in_range(addr, offsetof(struct aplic_control_hw, reserved10), - sizeof(aplic_control->reserved10)-4)){ + if (in_range(addr, offsetof(struct aplic_control_hw, reserved1), + sizeof(aplic_control->reserved1) - 4) || + in_range(addr, offsetof(struct aplic_control_hw, reserved2), + sizeof(aplic_control->reserved2) - 4) || + in_range(addr, offsetof(struct aplic_control_hw, reserved3), + sizeof(aplic_control->reserved3) - 4) || + in_range(addr, offsetof(struct aplic_control_hw, reserved4), + sizeof(aplic_control->reserved4) - 4) || + in_range(addr, offsetof(struct aplic_control_hw, reserved5), + sizeof(aplic_control->reserved5) - 4) || + in_range(addr, offsetof(struct aplic_control_hw, reserved6), + sizeof(aplic_control->reserved6) - 4) || + in_range(addr, offsetof(struct aplic_control_hw, reserved7), + sizeof(aplic_control->reserved7) - 4) || + in_range(addr, offsetof(struct aplic_control_hw, reserved8), + sizeof(aplic_control->reserved8) - 4) || + in_range(addr, offsetof(struct aplic_control_hw, reserved9), + sizeof(aplic_control->reserved9) - 4) || + in_range(addr, offsetof(struct 
aplic_control_hw, reserved10), + sizeof(aplic_control->reserved10) - 4)) { ret = true; } return ret; @@ -1119,27 +1155,27 @@ static bool vaplic_domain_emul_reserved (uint16_t addr) { /** * @brief Function to handle writes (or reads) to (from) domain structure. - * + * * @param acc emulated access * @return true if conclude without errors. * @return false if the access is not aligned. */ -static bool vaplic_domain_emul_handler(struct emul_access *acc) +static bool vaplic_domain_emul_handler(struct emul_access* acc) { uint16_t emul_addr = 0; bool read_only_zero = false; // only allow aligned word accesses - if (acc->width != 4 || acc->addr & 0x3) return false; + if (acc->width != 4 || acc->addr & 0x3) { + return false; + } - emul_addr = (acc->addr - - cpu()->vcpu->vm->arch.vaplic.aplic_domain_emul.va_base) & - 0x3fff; + emul_addr = (acc->addr - cpu()->vcpu->vm->arch.vaplic.aplic_domain_emul.va_base) & 0x3fff; - if (vaplic_domain_emul_reserved(emul_addr)){ + if (vaplic_domain_emul_reserved(emul_addr)) { read_only_zero = true; } else { - switch (emul_addr >> 12){ + switch (emul_addr >> 12) { case 0: if (emul_addr == offsetof(struct aplic_control_hw, domaincfg)) { vaplic_emul_domaincfg_access(acc); @@ -1148,34 +1184,34 @@ static bool vaplic_domain_emul_handler(struct emul_access *acc) } break; case 1: - switch (emul_addr >> 7){ - case offsetof(struct aplic_control_hw, setip) >> 7: - vaplic_emul_setip_access(acc); - break; - case offsetof(struct aplic_control_hw, setipnum) >> 7: - vaplic_emul_setipnum_access(acc); - break; - case offsetof(struct aplic_control_hw, in_clrip) >> 7: - vaplic_emul_in_clrip_access(acc); - break; - case offsetof(struct aplic_control_hw, clripnum) >> 7: - vaplic_emul_clripnum_access(acc); - break; - case offsetof(struct aplic_control_hw, setie) >> 7: - vaplic_emul_setie_access(acc); - break; - case offsetof(struct aplic_control_hw, setienum) >> 7: - vaplic_emul_setienum_access(acc); - break; - case offsetof(struct aplic_control_hw, clrie) >> 7: - vaplic_emul_clrie_access(acc); - break; - case offsetof(struct aplic_control_hw, clrienum) >> 7: - vaplic_emul_clrienum_access(acc); - break; - default: - read_only_zero = true; - break; + switch (emul_addr >> 7) { + case offsetof(struct aplic_control_hw, setip) >> 7: + vaplic_emul_setip_access(acc); + break; + case offsetof(struct aplic_control_hw, setipnum) >> 7: + vaplic_emul_setipnum_access(acc); + break; + case offsetof(struct aplic_control_hw, in_clrip) >> 7: + vaplic_emul_in_clrip_access(acc); + break; + case offsetof(struct aplic_control_hw, clripnum) >> 7: + vaplic_emul_clripnum_access(acc); + break; + case offsetof(struct aplic_control_hw, setie) >> 7: + vaplic_emul_setie_access(acc); + break; + case offsetof(struct aplic_control_hw, setienum) >> 7: + vaplic_emul_setienum_access(acc); + break; + case offsetof(struct aplic_control_hw, clrie) >> 7: + vaplic_emul_clrie_access(acc); + break; + case offsetof(struct aplic_control_hw, clrienum) >> 7: + vaplic_emul_clrienum_access(acc); + break; + default: + read_only_zero = true; + break; } break; case 3: @@ -1191,8 +1227,8 @@ static bool vaplic_domain_emul_handler(struct emul_access *acc) } } - if (read_only_zero){ - if(!acc->write) { + if (read_only_zero) { + if (!acc->write) { vcpu_writereg(cpu()->vcpu, acc->reg, 0); } } @@ -1201,20 +1237,21 @@ static bool vaplic_domain_emul_handler(struct emul_access *acc) /** * @brief Function to handle writes (or reads) to (from) IDC structure. - * + * * @param acc emulated access * @return true if conclude without errors. 
* @return false if the access is not aligned. */ -static bool vaplic_idc_emul_handler(struct emul_access *acc) +static bool vaplic_idc_emul_handler(struct emul_access* acc) { // only allow aligned word accesses - if (acc->width != 4 || acc->addr & 0x3) return false; + if (acc->width != 4 || acc->addr & 0x3) { + return false; + } uint32_t addr = acc->addr; - idcid_t idc_id = ((acc->addr - - cpu()->vcpu->vm->arch.vaplic.aplic_idc_emul.va_base) >> 5) - & APLIC_MAX_NUM_HARTS_MAKS; + idcid_t idc_id = ((acc->addr - cpu()->vcpu->vm->arch.vaplic.aplic_idc_emul.va_base) >> 5) & + APLIC_MAX_NUM_HARTS_MAKS; switch (addr & 0x1F) { case offsetof(struct aplic_idc_hw, idelivery): @@ -1233,7 +1270,7 @@ static bool vaplic_idc_emul_handler(struct emul_access *acc) vaplic_emul_claimi_access(acc, idc_id); break; default: - if(!acc->write) { + if (!acc->write) { vcpu_writereg(cpu()->vcpu, acc->reg, 0); } break; @@ -1241,25 +1278,24 @@ static bool vaplic_idc_emul_handler(struct emul_access *acc) return true; } -void vaplic_init(struct vm *vm, const union vm_irqc_dscrp *vm_irqc_dscrp){ +void vaplic_init(struct vm* vm, const union vm_irqc_dscrp* vm_irqc_dscrp) +{ if (cpu()->id == vm->master) { /* 1 IDC per hart */ vm->arch.vaplic.idc_num = vm->cpu_num; - vm->arch.vaplic.aplic_domain_emul = (struct emul_mem) { - .va_base = vm_irqc_dscrp->aia.aplic.base, - .size = sizeof(struct aplic_control_hw), - .handler = vaplic_domain_emul_handler - }; + vm->arch.vaplic.aplic_domain_emul = + (struct emul_mem){ .va_base = vm_irqc_dscrp->aia.aplic.base, + .size = sizeof(struct aplic_control_hw), + .handler = vaplic_domain_emul_handler }; vm_emul_add_mem(vm, &vm->arch.vaplic.aplic_domain_emul); - vm->arch.vaplic.aplic_idc_emul = (struct emul_mem) { - .va_base = vm_irqc_dscrp->aia.aplic.base + APLIC_IDC_OFF, - .size = sizeof(struct aplic_idc_hw)*vm->arch.vaplic.idc_num, - .handler = vaplic_idc_emul_handler - }; + vm->arch.vaplic.aplic_idc_emul = + (struct emul_mem){ .va_base = vm_irqc_dscrp->aia.aplic.base + APLIC_IDC_OFF, + .size = sizeof(struct aplic_idc_hw) * vm->arch.vaplic.idc_num, + .handler = vaplic_idc_emul_handler }; vm_emul_add_mem(vm, &vm->arch.vaplic.aplic_idc_emul); } -} \ No newline at end of file +} diff --git a/src/arch/riscv/irqc/plic/inc/irqc.h b/src/arch/riscv/irqc/plic/inc/irqc.h index 430e8eeb1..1c022ec00 100644 --- a/src/arch/riscv/irqc/plic/inc/irqc.h +++ b/src/arch/riscv/irqc/plic/inc/irqc.h @@ -13,8 +13,8 @@ #define IRQC_MAX_INTERRUPTS (PLIC_MAX_INTERRUPTS) -#define HART_REG_OFF PLIC_THRESHOLD_OFF -#define IRQC_HART_INST PLIC_PLAT_CNTXT_NUM +#define HART_REG_OFF PLIC_THRESHOLD_OFF +#define IRQC_HART_INST PLIC_PLAT_CNTXT_NUM static inline void irqc_init() { @@ -47,9 +47,9 @@ static inline void irqc_clr_pend(irqid_t int_id) WARNING("trying to clear external interrupt"); } -static inline void virqc_set_hw(struct vm *vm, irqid_t id) +static inline void virqc_set_hw(struct vm* vm, irqid_t id) { vplic_set_hw(vm, id); } -#endif //IRQC_H \ No newline at end of file +#endif // IRQC_H diff --git a/src/arch/riscv/irqc/plic/inc/plic.h b/src/arch/riscv/irqc/plic/inc/plic.h index 66b4c337c..e08b7a909 100644 --- a/src/arch/riscv/irqc/plic/inc/plic.h +++ b/src/arch/riscv/irqc/plic/inc/plic.h @@ -10,12 +10,12 @@ #include #define PLIC_MAX_INTERRUPTS (1024) -#define PLIC_NUM_PRIO_REGS (PLIC_MAX_INTERRUPTS) -#define PLIC_NUM_PEND_REGS (PLIC_MAX_INTERRUPTS) -#define PLIC_NUM_ENBL_REGS (PLIC_MAX_INTERRUPTS / 32) +#define PLIC_NUM_PRIO_REGS (PLIC_MAX_INTERRUPTS) +#define PLIC_NUM_PEND_REGS (PLIC_MAX_INTERRUPTS) +#define 
PLIC_NUM_ENBL_REGS (PLIC_MAX_INTERRUPTS / 32) -#define PLIC_ENBL_OFF (0x002000) -#define PLIC_THRESHOLD_OFF (0x200000) +#define PLIC_ENBL_OFF (0x002000) +#define PLIC_THRESHOLD_OFF (0x200000) #ifndef PLAT_PLIC_CNTXT_PER_HART #define PLAT_PLIC_CNTXT_PER_HART 2 @@ -38,8 +38,8 @@ struct plic_hart_hw { uint8_t res[0x1000 - 0x0008]; } __attribute__((__packed__, aligned(PAGE_SIZE))); -extern volatile struct plic_global_hw *plic_global; -extern volatile struct plic_hart_hw *plic_hart; +extern volatile struct plic_global_hw* plic_global; +extern volatile struct plic_hart_hw* plic_hart; extern size_t PLIC_IMPL_INTERRUPTS; void plic_init(); diff --git a/src/arch/riscv/irqc/plic/inc/vplic.h b/src/arch/riscv/irqc/plic/inc/vplic.h index fcccba7e4..0d3325984 100644 --- a/src/arch/riscv/irqc/plic/inc/vplic.h +++ b/src/arch/riscv/irqc/plic/inc/vplic.h @@ -28,17 +28,17 @@ struct vplic { struct vm; struct vcpu; union vm_irqc_dscrp; -void vplic_init(struct vm *vm, const union vm_irqc_dscrp *vm_irqc_dscrp); -void vplic_inject(struct vcpu *vcpu, irqid_t id); -void vplic_set_hw(struct vm *vm, irqid_t id); +void vplic_init(struct vm* vm, const union vm_irqc_dscrp* vm_irqc_dscrp); +void vplic_inject(struct vcpu* vcpu, irqid_t id); +void vplic_set_hw(struct vm* vm, irqid_t id); -static inline void virqc_init(struct vm *vm, const union vm_irqc_dscrp *vm_irqc_dscrp) +static inline void virqc_init(struct vm* vm, const union vm_irqc_dscrp* vm_irqc_dscrp) { vplic_init(vm, vm_irqc_dscrp); } typedef struct vcpu vcpu_t; -static inline void virqc_inject(vcpu_t *vcpu, irqid_t id) +static inline void virqc_inject(vcpu_t* vcpu, irqid_t id) { vplic_inject(vcpu, id); } diff --git a/src/arch/riscv/irqc/plic/plic.c b/src/arch/riscv/irqc/plic/plic.c index 97148d4be..2b41cc3a5 100644 --- a/src/arch/riscv/irqc/plic/plic.c +++ b/src/arch/riscv/irqc/plic/plic.c @@ -1,5 +1,5 @@ /** - * SPDX-License-Identifier: Apache-2.0 + * SPDX-License-Identifier: Apache-2.0 * Copyright (c) Bao Project and Contributors. All rights reserved. 
*/ @@ -10,9 +10,9 @@ size_t PLIC_IMPL_INTERRUPTS; -volatile struct plic_global_hw *plic_global; +volatile struct plic_global_hw* plic_global; -volatile struct plic_hart_hw *plic_hart; +volatile struct plic_hart_hw* plic_hart; static size_t plic_scan_max_int() { @@ -31,13 +31,13 @@ static size_t plic_scan_max_int() void plic_init() { /** Maps PLIC device */ - plic_global = (void*) mem_alloc_map_dev(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, - platform.arch.irqc.plic.base, NUM_PAGES(sizeof(struct plic_global_hw))); - - plic_hart = (void*) mem_alloc_map_dev(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, - platform.arch.irqc.plic.base + HART_REG_OFF, - NUM_PAGES(sizeof(struct plic_hart_hw)*IRQC_HART_INST)); - + plic_global = (void*)mem_alloc_map_dev(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, + platform.arch.irqc.plic.base, NUM_PAGES(sizeof(struct plic_global_hw))); + + plic_hart = (void*)mem_alloc_map_dev(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, + platform.arch.irqc.plic.base + HART_REG_OFF, + NUM_PAGES(sizeof(struct plic_hart_hw) * IRQC_HART_INST)); + /** Ensure that instructions after fence have the PLIC fully mapped */ fence_sync(); @@ -56,11 +56,12 @@ void plic_init() void plic_cpu_init() { - cpu()->arch.plic_cntxt = plic_plat_cntxt_to_id((struct plic_cntxt){cpu()->id, PRIV_S}); + cpu()->arch.plic_cntxt = plic_plat_cntxt_to_id((struct plic_cntxt){ cpu()->id, PRIV_S }); plic_hart[cpu()->arch.plic_cntxt].threshold = 0; } -bool plic_cntxt_valid(unsigned cntxt_id) { +bool plic_cntxt_valid(unsigned cntxt_id) +{ struct plic_cntxt cntxt = plic_plat_id_to_cntxt(cntxt_id); return (cntxt_id < PLIC_PLAT_CNTXT_NUM) && (cntxt.mode <= PRIV_S); } @@ -70,8 +71,7 @@ void plic_set_enbl(unsigned cntxt, irqid_t int_id, bool en) int reg_ind = int_id / (sizeof(uint32_t) * 8); uint32_t mask = 1U << (int_id % (sizeof(uint32_t) * 8)); - - if (int_id <= PLIC_IMPL_INTERRUPTS && plic_cntxt_valid(cntxt)) { + if (int_id <= PLIC_IMPL_INTERRUPTS && plic_cntxt_valid(cntxt)) { if (en) { plic_global->enbl[cntxt][reg_ind] |= mask; } else { @@ -85,10 +85,11 @@ bool plic_get_enbl(unsigned cntxt, irqid_t int_id) int reg_ind = int_id / (sizeof(uint32_t) * 8); uint32_t mask = 1U << (int_id % (sizeof(uint32_t) * 8)); - if (int_id <= PLIC_IMPL_INTERRUPTS && plic_cntxt_valid(cntxt)) + if (int_id <= PLIC_IMPL_INTERRUPTS && plic_cntxt_valid(cntxt)) { return plic_global->enbl[cntxt][reg_ind] & mask; - else + } else { return false; + } } void plic_set_prio(irqid_t int_id, uint32_t prio) @@ -100,10 +101,11 @@ void plic_set_prio(irqid_t int_id, uint32_t prio) uint32_t plic_get_prio(irqid_t int_id) { - if (int_id <= PLIC_IMPL_INTERRUPTS) + if (int_id <= PLIC_IMPL_INTERRUPTS) { return plic_global->prio[int_id]; - else + } else { return 0; + } } bool plic_get_pend(irqid_t int_id) @@ -111,15 +113,16 @@ bool plic_get_pend(irqid_t int_id) int reg_ind = int_id / 32; int mask = (1U << (int_id % 32)); - if (int_id <= PLIC_IMPL_INTERRUPTS) + if (int_id <= PLIC_IMPL_INTERRUPTS) { return plic_global->pend[reg_ind] & mask; - else + } else { return false; + } } void plic_set_threshold(unsigned cntxt, uint32_t threshold) { - if(plic_cntxt_valid(cntxt)) { + if (plic_cntxt_valid(cntxt)) { plic_hart[cntxt].threshold = threshold; } } @@ -127,7 +130,7 @@ void plic_set_threshold(unsigned cntxt, uint32_t threshold) uint32_t plic_get_threshold(unsigned cntxt) { uint32_t threshold = 0; - if(plic_cntxt_valid(cntxt)) { + if (plic_cntxt_valid(cntxt)) { threshold = plic_hart[cntxt].threshold; } return threshold; @@ -139,29 +142,33 @@ void plic_handle() if (id != 0) { enum irq_res 
res = interrupts_handle(id); - if (res == HANDLED_BY_HYP) plic_hart[cpu()->arch.plic_cntxt].complete = id; + if (res == HANDLED_BY_HYP) { + plic_hart[cpu()->arch.plic_cntxt].complete = id; + } } } /** - * Context organization is spec-out by the vendor, this is the default - * mapping found in sifive's plic. + * Context organization is specified by the vendor; this is the default mapping found in SiFive's + * PLIC. */ -__attribute__((weak)) -int plic_plat_cntxt_to_id(struct plic_cntxt cntxt){ - if(cntxt.mode != PRIV_M && cntxt.mode != PRIV_S) return -1; - return (cntxt.hart_id*2) + (cntxt.mode == PRIV_M ? 0 : 1); +__attribute__((weak)) int plic_plat_cntxt_to_id(struct plic_cntxt cntxt) +{ + if (cntxt.mode != PRIV_M && cntxt.mode != PRIV_S) { + return -1; + } + return (cntxt.hart_id * 2) + (cntxt.mode == PRIV_M ? 0 : 1); } -__attribute__((weak)) -struct plic_cntxt plic_plat_id_to_cntxt(int id){ +__attribute__((weak)) struct plic_cntxt plic_plat_id_to_cntxt(int id) +{ struct plic_cntxt cntxt; - if(id < PLIC_PLAT_CNTXT_NUM){ - cntxt.hart_id = id/2; - cntxt.mode = (id%2) == 0 ? PRIV_M : PRIV_S; + if (id < PLIC_PLAT_CNTXT_NUM) { + cntxt.hart_id = id / 2; + cntxt.mode = (id % 2) == 0 ? PRIV_M : PRIV_S; } else { - return (struct plic_cntxt){-1}; + return (struct plic_cntxt){ -1 }; } return cntxt; } diff --git a/src/arch/riscv/irqc/plic/vplic.c b/src/arch/riscv/irqc/plic/vplic.c index 3ae93686c..3bc5dcb25 100644 --- a/src/arch/riscv/irqc/plic/vplic.c +++ b/src/arch/riscv/irqc/plic/vplic.c @@ -1,5 +1,5 @@ /** - * SPDX-License-Identifier: Apache-2.0 + * SPDX-License-Identifier: Apache-2.0 * Copyright (c) Bao Project and Contributors. All rights reserved. */ @@ -11,7 +11,7 @@ #include #include -static int vplic_vcntxt_to_pcntxt(struct vcpu *vcpu, int vcntxt_id) +static int vplic_vcntxt_to_pcntxt(struct vcpu* vcpu, int vcntxt_id) { struct plic_cntxt vcntxt = plic_plat_id_to_cntxt(vcntxt_id); struct plic_cntxt pcntxt; @@ -26,76 +26,83 @@ return pcntxt_id; } -static bool vplic_vcntxt_valid(struct vcpu *vcpu, int vcntxt_id) { +static bool vplic_vcntxt_valid(struct vcpu* vcpu, int vcntxt_id) +{ struct plic_cntxt vcntxt = plic_plat_id_to_cntxt(vcntxt_id); - return vcntxt_id < vcpu->vm->arch.vplic.cntxt_num && vcntxt.mode <= PRIV_S ; + return vcntxt_id < vcpu->vm->arch.vplic.cntxt_num && vcntxt.mode <= PRIV_S; } static bool vplic_get_pend(struct vcpu* vcpu, irqid_t id) { bool ret = false; - struct vplic * vplic = &vcpu->vm->arch.vplic; - if (id < PLIC_MAX_INTERRUPTS) ret = bitmap_get(vplic->pend, id); + struct vplic* vplic = &vcpu->vm->arch.vplic; + if (id < PLIC_MAX_INTERRUPTS) { + ret = bitmap_get(vplic->pend, id); + } return ret; } static bool vplic_get_act(struct vcpu* vcpu, irqid_t id) { bool ret = false; - struct vplic * vplic = &vcpu->vm->arch.vplic; - if (id < PLIC_MAX_INTERRUPTS) ret = bitmap_get(vplic->act, id); + struct vplic* vplic = &vcpu->vm->arch.vplic; + if (id < PLIC_MAX_INTERRUPTS) { + ret = bitmap_get(vplic->act, id); + } return ret; } static bool vplic_get_enbl(struct vcpu* vcpu, int vcntxt, irqid_t id) { bool ret = false; - struct vplic * vplic = &vcpu->vm->arch.vplic; - if (id < PLIC_MAX_INTERRUPTS) ret = !!bitmap_get(vplic->enbl[vcntxt], id); + struct vplic* vplic = &vcpu->vm->arch.vplic; + if (id < PLIC_MAX_INTERRUPTS) { + ret = !!bitmap_get(vplic->enbl[vcntxt], id); + } return ret; } - -static uint32_t vplic_get_prio(struct vcpu *vcpu, irqid_t id) +static uint32_t vplic_get_prio(struct vcpu* vcpu, irqid_t id) { 
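+    /* A permissive read: ids beyond PLIC_MAX_INTERRUPTS fall through and read
+       back as priority 0, the PLIC's "never interrupt" value. */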
uint32_t ret = 0; - struct vplic * vplic = &vcpu->vm->arch.vplic; - if (id < PLIC_MAX_INTERRUPTS) ret = vplic->prio[id]; + struct vplic* vplic = &vcpu->vm->arch.vplic; + if (id < PLIC_MAX_INTERRUPTS) { + ret = vplic->prio[id]; + } return ret; } - -void vplic_set_hw(struct vm *vm, irqid_t id) +void vplic_set_hw(struct vm* vm, irqid_t id) { if (id < PLIC_MAX_INTERRUPTS) { - bitmap_set(vm->arch.vplic.hw,id); + bitmap_set(vm->arch.vplic.hw, id); } } static bool vplic_get_hw(struct vcpu* vcpu, irqid_t id) { bool ret = false; - struct vplic * vplic = &vcpu->vm->arch.vplic; - if (id < PLIC_MAX_INTERRUPTS) ret = bitmap_get(vplic->hw, id); + struct vplic* vplic = &vcpu->vm->arch.vplic; + if (id < PLIC_MAX_INTERRUPTS) { + ret = bitmap_get(vplic->hw, id); + } return ret; } static uint32_t vplic_get_threshold(struct vcpu* vcpu, int vcntxt) { - struct vplic * vplic = &vcpu->vm->arch.vplic; + struct vplic* vplic = &vcpu->vm->arch.vplic; return vplic->threshold[vcntxt]; } -static irqid_t vplic_next_pending(struct vcpu *vcpu, int vcntxt) +static irqid_t vplic_next_pending(struct vcpu* vcpu, int vcntxt) { uint32_t max_prio = 0; irqid_t int_id = 0; for (size_t i = 0; i < PLIC_MAX_INTERRUPTS; i++) { - if (vplic_get_pend(vcpu, i) && !vplic_get_act(vcpu, i) && - vplic_get_enbl(vcpu, vcntxt, i)) { - - uint32_t prio = vplic_get_prio(vcpu,i); + if (vplic_get_pend(vcpu, i) && !vplic_get_act(vcpu, i) && vplic_get_enbl(vcpu, vcntxt, i)) { + uint32_t prio = vplic_get_prio(vcpu, i); if (prio > max_prio) { max_prio = prio; int_id = i; @@ -103,45 +110,46 @@ static irqid_t vplic_next_pending(struct vcpu *vcpu, int vcntxt) } } - if (max_prio > vplic_get_threshold(vcpu, vcntxt)) + if (max_prio > vplic_get_threshold(vcpu, vcntxt)) { return int_id; - else + } else { return 0; + } } -enum {UPDATE_HART_LINE}; +enum { UPDATE_HART_LINE }; static void vplic_ipi_handler(uint32_t event, uint64_t data); CPU_MSG_HANDLER(vplic_ipi_handler, VPLIC_IPI_ID); -void vplic_update_hart_line(struct vcpu* vcpu, int vcntxt) +void vplic_update_hart_line(struct vcpu* vcpu, int vcntxt) { int pcntxt_id = vplic_vcntxt_to_pcntxt(vcpu, vcntxt); struct plic_cntxt pcntxt = plic_plat_id_to_cntxt(pcntxt_id); - if(pcntxt.hart_id == cpu()->id) { + if (pcntxt.hart_id == cpu()->id) { int id = vplic_next_pending(vcpu, vcntxt); - if(id != 0){ + if (id != 0) { CSRS(CSR_HVIP, HIP_VSEIP); - } else { + } else { CSRC(CSR_HVIP, HIP_VSEIP); } } else { - struct cpu_msg msg = {VPLIC_IPI_ID, UPDATE_HART_LINE, vcntxt}; - cpu_send_msg(pcntxt.hart_id, &msg); + struct cpu_msg msg = { VPLIC_IPI_ID, UPDATE_HART_LINE, vcntxt }; + cpu_send_msg(pcntxt.hart_id, &msg); } } -static void vplic_ipi_handler(uint32_t event, uint64_t data) +static void vplic_ipi_handler(uint32_t event, uint64_t data) { - switch(event) { + switch (event) { case UPDATE_HART_LINE: vplic_update_hart_line(cpu()->vcpu, data); break; } } -static void vplic_set_threshold(struct vcpu* vcpu, int vcntxt, uint32_t threshold) +static void vplic_set_threshold(struct vcpu* vcpu, int vcntxt, uint32_t threshold) { - struct vplic * vplic = &vcpu->vm->arch.vplic; + struct vplic* vplic = &vcpu->vm->arch.vplic; spin_lock(&vplic->lock); vplic->threshold[vcntxt] = threshold; int pcntxt = vplic_vcntxt_to_pcntxt(vcpu, vcntxt); @@ -153,16 +161,16 @@ static void vplic_set_threshold(struct vcpu* vcpu, int vcntxt, uint32_t threshol static void vplic_set_enbl(struct vcpu* vcpu, int vcntxt, irqid_t id, bool set) { - struct vplic * vplic = &vcpu->vm->arch.vplic; + struct vplic* vplic = &vcpu->vm->arch.vplic; spin_lock(&vplic->lock); if 
(id < PLIC_MAX_INTERRUPTS && vplic_get_enbl(vcpu, vcntxt, id) != set) { - if(set){ - bitmap_set(vplic->enbl[vcntxt],id); + if (set) { + bitmap_set(vplic->enbl[vcntxt], id); } else { - bitmap_clear(vplic->enbl[vcntxt],id); + bitmap_clear(vplic->enbl[vcntxt], id); } - if(vplic_get_hw(vcpu, id)){ + if (vplic_get_hw(vcpu, id)) { int pcntxt_id = vplic_vcntxt_to_pcntxt(vcpu, vcntxt); plic_set_enbl(pcntxt_id, id, set); } else { @@ -172,18 +180,20 @@ static void vplic_set_enbl(struct vcpu* vcpu, int vcntxt, irqid_t id, bool set) spin_unlock(&vplic->lock); } -static void vplic_set_prio(struct vcpu *vcpu, irqid_t id, uint32_t prio) +static void vplic_set_prio(struct vcpu* vcpu, irqid_t id, uint32_t prio) { - struct vplic *vplic = &vcpu->vm->arch.vplic; + struct vplic* vplic = &vcpu->vm->arch.vplic; spin_lock(&vplic->lock); if (id < PLIC_MAX_INTERRUPTS && vplic_get_prio(vcpu, id) != prio) { vplic->prio[id] = prio; - if(vplic_get_hw(vcpu,id)){ + if (vplic_get_hw(vcpu, id)) { plic_set_prio(id, prio); } else { - for(size_t i = 0; i < vplic->cntxt_num; i++) { - if(plic_plat_id_to_cntxt(i).mode != PRIV_S) continue; - if(vplic_get_enbl(vcpu, i, id)) { + for (size_t i = 0; i < vplic->cntxt_num; i++) { + if (plic_plat_id_to_cntxt(i).mode != PRIV_S) { + continue; + } + if (vplic_get_enbl(vcpu, i, id)) { vplic_update_hart_line(vcpu, i); } } @@ -192,7 +202,7 @@ static void vplic_set_prio(struct vcpu *vcpu, irqid_t id, uint32_t prio) spin_unlock(&vplic->lock); } -static irqid_t vplic_claim(struct vcpu *vcpu, int vcntxt) +static irqid_t vplic_claim(struct vcpu* vcpu, int vcntxt) { spin_lock(&vcpu->vm->arch.vplic.lock); irqid_t int_id = vplic_next_pending(vcpu, vcntxt); @@ -204,9 +214,9 @@ static irqid_t vplic_claim(struct vcpu *vcpu, int vcntxt) return int_id; } -static void vplic_complete(struct vcpu *vcpu, int vcntxt, irqid_t int_id) +static void vplic_complete(struct vcpu* vcpu, int vcntxt, irqid_t int_id) { - if(vplic_get_hw(vcpu ,int_id)){ + if (vplic_get_hw(vcpu, int_id)) { plic_hart[cpu()->arch.plic_cntxt].complete = int_id; } @@ -217,23 +227,24 @@ static void vplic_complete(struct vcpu *vcpu, int vcntxt, irqid_t int_id) vplic_update_hart_line(vcpu, vcntxt); } -void vplic_inject(struct vcpu *vcpu, irqid_t id) +void vplic_inject(struct vcpu* vcpu, irqid_t id) { - struct vplic * vplic = &vcpu->vm->arch.vplic; + struct vplic* vplic = &vcpu->vm->arch.vplic; spin_lock(&vplic->lock); if (id > 0 && id < PLIC_MAX_INTERRUPTS && !vplic_get_pend(vcpu, id)) { - bitmap_set(vplic->pend, id); - if(vplic_get_hw(vcpu, id)) { - struct plic_cntxt vcntxt = {vcpu->id, PRIV_S}; + if (vplic_get_hw(vcpu, id)) { + struct plic_cntxt vcntxt = { vcpu->id, PRIV_S }; int vcntxt_id = plic_plat_cntxt_to_id(vcntxt); vplic_update_hart_line(vcpu, vcntxt_id); } else { - for(size_t i = 0; i < vplic->cntxt_num; i++) { - if(plic_plat_id_to_cntxt(i).mode != PRIV_S) continue; - if(vplic_get_enbl(vcpu, i, id) && - vplic_get_prio(vcpu, id) > vplic_get_threshold(vcpu, i)) { + for (size_t i = 0; i < vplic->cntxt_num; i++) { + if (plic_plat_id_to_cntxt(i).mode != PRIV_S) { + continue; + } + if (vplic_get_enbl(vcpu, i, id) && + vplic_get_prio(vcpu, id) > vplic_get_threshold(vcpu, i)) { vplic_update_hart_line(vcpu, i); } } @@ -242,20 +253,22 @@ void vplic_inject(struct vcpu *vcpu, irqid_t id) spin_unlock(&vplic->lock); } -static void vplic_emul_prio_access(struct emul_access *acc) +static void vplic_emul_prio_access(struct emul_access* acc) { irqid_t int_id = (acc->addr & 0xfff) / 4; if (acc->write) { - vplic_set_prio(cpu()->vcpu,int_id, 
vcpu_readreg(cpu()->vcpu, acc->reg)); + vplic_set_prio(cpu()->vcpu, int_id, vcpu_readreg(cpu()->vcpu, acc->reg)); } else { - vcpu_writereg(cpu()->vcpu, acc->reg, vplic_get_prio(cpu()->vcpu,int_id)); + vcpu_writereg(cpu()->vcpu, acc->reg, vplic_get_prio(cpu()->vcpu, int_id)); } } -static void vplic_emul_pend_access(struct emul_access *acc) +static void vplic_emul_pend_access(struct emul_access* acc) { // pend registers are read only - if (acc->write) return; + if (acc->write) { + return; + } irqid_t first_int = ((acc->addr & 0xfff) / 4) * 32; @@ -269,20 +282,18 @@ static void vplic_emul_pend_access(struct emul_access *acc) vcpu_writereg(cpu()->vcpu, acc->reg, val); } -static void vplic_emul_enbl_access(struct emul_access *acc) +static void vplic_emul_enbl_access(struct emul_access* acc) { - int vcntxt_id = - (((acc->addr - 0x2000) & 0x1fffff) / 4) / PLIC_NUM_ENBL_REGS; + int vcntxt_id = (((acc->addr - 0x2000) & 0x1fffff) / 4) / PLIC_NUM_ENBL_REGS; irqid_t first_int = ((acc->addr & 0x7f) / 4) * 32; unsigned long val = acc->write ? vcpu_readreg(cpu()->vcpu, acc->reg) : 0; - if(vplic_vcntxt_valid(cpu()->vcpu, vcntxt_id)) { + if (vplic_vcntxt_valid(cpu()->vcpu, vcntxt_id)) { for (size_t i = 0; i < 32; i++) { if (acc->write) { vplic_set_enbl(cpu()->vcpu, vcntxt_id, first_int + i, val & (1U << i)); } else { - val |= (vplic_get_enbl(cpu()->vcpu, vcntxt_id, first_int + i) ? (1U << i) - : 0); + val |= (vplic_get_enbl(cpu()->vcpu, vcntxt_id, first_int + i) ? (1U << i) : 0); } } } @@ -292,10 +303,12 @@ static void vplic_emul_enbl_access(struct emul_access *acc) } } -static bool vplic_global_emul_handler(struct emul_access *acc) +static bool vplic_global_emul_handler(struct emul_access* acc) { // only allow aligned word accesses - if (acc->width != 4 || acc->addr & 0x3) return false; + if (acc->width != 4 || acc->addr & 0x3) { + return false; + } switch ((acc->addr >> 12) & 0x3) { case 0: @@ -312,14 +325,16 @@ static bool vplic_global_emul_handler(struct emul_access *acc) return true; } -static bool vplic_hart_emul_handler(struct emul_access *acc) +static bool vplic_hart_emul_handler(struct emul_access* acc) { // only allow aligned word accesses - if (acc->width > 4 || acc->addr & 0x3) return false; + if (acc->width > 4 || acc->addr & 0x3) { + return false; + } int vcntxt = ((acc->addr - PLIC_THRESHOLD_OFF) >> 12) & 0x3ff; - if(!vplic_vcntxt_valid(cpu()->vcpu, vcntxt)) { - if(!acc->write) { + if (!vplic_vcntxt_valid(cpu()->vcpu, vcntxt)) { + if (!acc->write) { vcpu_writereg(cpu()->vcpu, acc->reg, 0); } return true; @@ -345,27 +360,23 @@ static bool vplic_hart_emul_handler(struct emul_access *acc) return true; } -void vplic_init(struct vm *vm, const union vm_irqc_dscrp *vm_irqc_dscrp) +void vplic_init(struct vm* vm, const union vm_irqc_dscrp* vm_irqc_dscrp) { if (cpu()->id == vm->master) { - - vm->arch.vplic.plic_global_emul = (struct emul_mem) { - .va_base = vm_irqc_dscrp->plic.base, + vm->arch.vplic.plic_global_emul = (struct emul_mem){ .va_base = vm_irqc_dscrp->plic.base, .size = sizeof(struct plic_global_hw), - .handler = vplic_global_emul_handler - }; + .handler = vplic_global_emul_handler }; vm_emul_add_mem(vm, &vm->arch.vplic.plic_global_emul); - vm->arch.vplic.plic_threshold_emul = (struct emul_mem) { - .va_base = vm_irqc_dscrp->plic.base + PLIC_THRESHOLD_OFF, - .size = sizeof(struct plic_hart_hw) * vm->cpu_num * PLAT_PLIC_CNTXT_PER_HART, - .handler = vplic_hart_emul_handler - }; + vm->arch.vplic.plic_threshold_emul = + (struct emul_mem){ .va_base = vm_irqc_dscrp->plic.base + PLIC_THRESHOLD_OFF, 
+ .size = sizeof(struct plic_hart_hw) * vm->cpu_num * PLAT_PLIC_CNTXT_PER_HART, + .handler = vplic_hart_emul_handler }; vm_emul_add_mem(vm, &vm->arch.vplic.plic_threshold_emul); /* assumes 2 contexts per hart */ - vm->arch.vplic.cntxt_num = vm->cpu_num * 2; + vm->arch.vplic.cntxt_num = vm->cpu_num * 2; } } diff --git a/src/arch/riscv/mem.c b/src/arch/riscv/mem.c index 8c1927616..bb5eb8a2e 100644 --- a/src/arch/riscv/mem.c +++ b/src/arch/riscv/mem.c @@ -1,5 +1,5 @@ /** - * SPDX-License-Identifier: Apache-2.0 + * SPDX-License-Identifier: Apache-2.0 * Copyright (c) Bao Project and Contributors. All rights reserved. */ @@ -8,20 +8,20 @@ #include #include -static inline void as_map_physical_identity(struct addr_space *as) { +static inline void as_map_physical_identity(struct addr_space* as) +{ const size_t lvl = 0; size_t lvl_size = pt_lvlsize(&as->pt, lvl); paddr_t lvl_mask = ~((paddr_t)lvl_size - 1); - pte_t *pt = as->pt.root; + pte_t* pt = as->pt.root; /** - * Create identity mapping of existing physical memory regions using - * the largest pages possible pte (in riscv this is always at level 0 - * pt). + * Create identity mapping of existing physical memory regions using the largest pages + * possible pte (in riscv this is always at level 0 pt). */ for (size_t i = 0; i < platform.region_num; i++) { - struct mem_region *reg = &platform.regions[i]; + struct mem_region* reg = &platform.regions[i]; paddr_t base = reg->base & lvl_mask; paddr_t top = ALIGN((reg->base + reg->size), lvl_size) & lvl_mask; size_t num_entries = ((top - base - 1) / lvl_size) + 1; @@ -35,15 +35,14 @@ static inline void as_map_physical_identity(struct addr_space *as) { } } -void as_arch_init(struct addr_space *as) { - - if(as->type == AS_HYP) { +void as_arch_init(struct addr_space* as) +{ + if (as->type == AS_HYP) { as_map_physical_identity(as); } - } -bool mem_translate(struct addr_space *as, vaddr_t va, paddr_t *pa) +bool mem_translate(struct addr_space* as, vaddr_t va, paddr_t* pa) { size_t pte_index = pt_getpteindex_by_va(&as->pt, va, 0); pte_t* pte = &(as->pt.root[pte_index]); @@ -51,10 +50,10 @@ bool mem_translate(struct addr_space *as, vaddr_t va, paddr_t *pa) for (size_t i = 0; i < as->pt.dscr->lvls; i++) { if (!pte_valid(pte) || !pte_table(&as->pt, pte, i)) { lvl = i; - break; + break; } pte = (pte_t*)pte_addr(pte); - int index = pt_getpteindex_by_va(&as->pt, va, i+1); + int index = pt_getpteindex_by_va(&as->pt, va, i + 1); pte = &pte[index]; } if (pte && pte_valid(pte)) { diff --git a/src/arch/riscv/page_table.c b/src/arch/riscv/page_table.c index 454dc57e2..802dcba54 100644 --- a/src/arch/riscv/page_table.c +++ b/src/arch/riscv/page_table.c @@ -1,5 +1,5 @@ /** - * SPDX-License-Identifier: Apache-2.0 + * SPDX-License-Identifier: Apache-2.0 * Copyright (c) Bao Project and Contributors. All rights reserved. 
*/ @@ -7,25 +7,32 @@ #include #if (SV32) -struct page_table_dscr sv32_pt_dscr = {.lvls = 2, - .lvl_wdt = (size_t[]){32, 22}, - .lvl_off = (size_t[]){22, 12}, - .lvl_term = (bool[]){true, true}}; -struct page_table_dscr sv32x4_pt_dscr = {.lvls = 2, - .lvl_wdt = (size_t[]){34, 22}, - .lvl_off = (size_t[]){22, 12}, - .lvl_term = (bool[]){true, true}}; +struct page_table_dscr sv32_pt_dscr = { .lvls = 2, + .lvl_wdt = (size_t[]){ 32, 22 }, + .lvl_off = (size_t[]){ 22, 12 }, + .lvl_term = (bool[]){ true, true } }, + ; +struct page_table_dscr sv32x4_pt_dscr = { + .lvls = 2, + .lvl_wdt = (size_t[]){ 34, 22 }, + .lvl_off = (size_t[]){ 22, 12 }, + .lvl_term = (bool[]){ true, true }, +}; struct page_table_dscr* hyp_pt_dscr = &sv32_pt_dscr; struct page_table_dscr* vm_pt_dscr = &sv32x2_pt_dscr; #elif (RV64) -struct page_table_dscr sv39_pt_dscr = {.lvls = 3, - .lvl_wdt = (size_t[]){39, 30, 21}, - .lvl_off = (size_t[]){30, 21, 12}, - .lvl_term = (bool[]){true, true, true}}; -struct page_table_dscr sv39x4_pt_dscr = {.lvls = 3, - .lvl_wdt = (size_t[]){41, 30, 21}, - .lvl_off = (size_t[]){30, 21, 12}, - .lvl_term = (bool[]){true, true, true}}; +struct page_table_dscr sv39_pt_dscr = { + .lvls = 3, + .lvl_wdt = (size_t[]){ 39, 30, 21 }, + .lvl_off = (size_t[]){ 30, 21, 12 }, + .lvl_term = (bool[]){ true, true, true }, +}; +struct page_table_dscr sv39x4_pt_dscr = { + .lvls = 3, + .lvl_wdt = (size_t[]){ 41, 30, 21 }, + .lvl_off = (size_t[]){ 30, 21, 12 }, + .lvl_term = (bool[]){ true, true, true }, +}; struct page_table_dscr* hyp_pt_dscr = &sv39_pt_dscr; struct page_table_dscr* vm_pt_dscr = &sv39x4_pt_dscr; #endif @@ -36,7 +43,9 @@ pte_t* pt_get_pte(struct page_table* pt, size_t lvl, vaddr_t va) pte_t* pte = &(pt->root[pte_index]); for (size_t i = 0; i < lvl; i++) { - if (!pte_valid(pte)) return NULL; + if (!pte_valid(pte)) { + return NULL; + } pte = (pte_t*)pte_addr(pte); size_t index = pt_getpteindex_by_va(pt, va, i + 1); pte = &pte[index]; diff --git a/src/arch/riscv/relocate.S b/src/arch/riscv/relocate.S index 4d1a5d943..fbe0c94a2 100644 --- a/src/arch/riscv/relocate.S +++ b/src/arch/riscv/relocate.S @@ -60,9 +60,8 @@ switch_space: csrw satp, a1 /** - * Invalidate TLB: we can do this directly here without sbi support - * because we don't really need any shootdown as all harts must go - * through here. + * Invalidate TLB: we can do this directly here without sbi support because we don't really + * need any shootdown as all harts must go through here. */ sfence.vma diff --git a/src/arch/riscv/sbi.c b/src/arch/riscv/sbi.c index 39b120c6f..e880d4118 100644 --- a/src/arch/riscv/sbi.c +++ b/src/arch/riscv/sbi.c @@ -1,5 +1,5 @@ /** - * SPDX-License-Identifier: Apache-2.0 + * SPDX-License-Identifier: Apache-2.0 * Copyright (c) Bao Project and Contributors. All rights reserved. 
*/ @@ -11,43 +11,43 @@ #include #include -#define SBI_EXTID_BASE (0x10) -#define SBI_GET_SBI_SPEC_VERSION_FID (0) -#define SBI_GET_SBI_IMPL_ID_FID (1) -#define SBI_GET_SBI_IMPL_VERSION_FID (2) -#define SBI_PROBE_EXTENSION_FID (3) -#define SBI_GET_MVENDORID_FID (4) -#define SBI_GET_MARCHID_FID (5) -#define SBI_GET_MIMPID_FID (6) - -#define SBI_EXTID_TIME (0x54494D45) -#define SBI_SET_TIMER_FID (0x0) - -#define SBI_EXTID_IPI (0x735049) -#define SBI_SEND_IPI_FID (0x0) - -#define SBI_EXTID_HSM (0x48534D) -#define SBI_HART_START_FID (0) -#define SBI_HART_STOP_FID (1) -#define SBI_HART_STATUS_FID (2) - -#define SBI_EXTID_RFNC (0x52464E43) -#define SBI_REMOTE_FENCE_I_FID (0) -#define SBI_REMOTE_SFENCE_VMA_FID (1) -#define SBI_REMOTE_SFENCE_VMA_ASID_FID (2) -#define SBI_REMOTE_HFENCE_GVMA_FID (3) +#define SBI_EXTID_BASE (0x10) +#define SBI_GET_SBI_SPEC_VERSION_FID (0) +#define SBI_GET_SBI_IMPL_ID_FID (1) +#define SBI_GET_SBI_IMPL_VERSION_FID (2) +#define SBI_PROBE_EXTENSION_FID (3) +#define SBI_GET_MVENDORID_FID (4) +#define SBI_GET_MARCHID_FID (5) +#define SBI_GET_MIMPID_FID (6) + +#define SBI_EXTID_TIME (0x54494D45) +#define SBI_SET_TIMER_FID (0x0) + +#define SBI_EXTID_IPI (0x735049) +#define SBI_SEND_IPI_FID (0x0) + +#define SBI_EXTID_HSM (0x48534D) +#define SBI_HART_START_FID (0) +#define SBI_HART_STOP_FID (1) +#define SBI_HART_STATUS_FID (2) + +#define SBI_EXTID_RFNC (0x52464E43) +#define SBI_REMOTE_FENCE_I_FID (0) +#define SBI_REMOTE_SFENCE_VMA_FID (1) +#define SBI_REMOTE_SFENCE_VMA_ASID_FID (2) +#define SBI_REMOTE_HFENCE_GVMA_FID (3) #define SBI_REMOTE_HFENCE_GVMA_VMID_FID (4) -#define SBI_REMOTE_HFENCE_VVMA_FID (5) +#define SBI_REMOTE_HFENCE_VVMA_FID (5) #define SBI_REMOTE_HFENCE_VVMA_ASID_FID (6) /** - * For now we're defining bao specific ecalls, ie, hypercall, under the - * experimental extension id space. + * For now we're defining bao specific ecalls, ie, hypercall, under the experimental extension id + * space. 
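+ * A guest issues such a hypercall as a regular SBI call with this extension id in a7, sketched
+ * here with the sbi_ecall() helper defined below; fid and the arguments are Bao-defined and
+ * shown only for illustration:
+ *
+ *     struct sbiret ret = sbi_ecall(SBI_EXTID_BAO, fid, arg0, arg1, 0, 0, 0, 0);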
*/ -#define SBI_EXTID_BAO (0x08000ba0) +#define SBI_EXTID_BAO (0x08000ba0) -static inline struct sbiret sbi_ecall(long eid, long fid, long a0, long a1, - long a2, long a3, long a4, long a5) +static inline struct sbiret sbi_ecall(long eid, long fid, long a0, long a1, long a2, long a3, + long a4, long a5) { long register _a0 asm("a0") = a0; long register _a1 asm("a1") = a1; @@ -58,12 +58,10 @@ static inline struct sbiret sbi_ecall(long eid, long fid, long a0, long a1, long register _a6 asm("a6") = fid; long register _a7 asm("a7") = eid; - asm volatile("ecall" - : "+r"(_a0), "+r"(_a1) - : "r"(_a2), "r"(_a3), "r"(_a4), "r"(_a5), "r"(_a6), "r"(_a7) - : "memory"); + asm volatile("ecall" : "+r"(_a0), "+r"(_a1) : "r"(_a2), "r"(_a3), "r"(_a4), "r"(_a5), "r"(_a6), + "r"(_a7) : "memory"); - struct sbiret ret = {.error = _a0, .value = _a1}; + struct sbiret ret = { .error = _a0, .value = _a1 }; return ret; } @@ -75,8 +73,7 @@ void sbi_console_putchar(int ch) struct sbiret sbi_get_spec_version(void) { - return sbi_ecall(SBI_EXTID_BASE, SBI_GET_SBI_SPEC_VERSION_FID, 0, 0, 0, 0, - 0, 0); + return sbi_ecall(SBI_EXTID_BASE, SBI_GET_SBI_SPEC_VERSION_FID, 0, 0, 0, 0, 0, 0); } struct sbiret sbi_get_impl_id(void) { @@ -84,13 +81,11 @@ struct sbiret sbi_get_impl_id(void) } struct sbiret sbi_get_impl_version(void) { - return sbi_ecall(SBI_EXTID_BASE, SBI_GET_SBI_IMPL_VERSION_FID, 0, 0, 0, 0, - 0, 0); + return sbi_ecall(SBI_EXTID_BASE, SBI_GET_SBI_IMPL_VERSION_FID, 0, 0, 0, 0, 0, 0); } struct sbiret sbi_probe_extension(long extension_id) { - return sbi_ecall(SBI_EXTID_BASE, SBI_PROBE_EXTENSION_FID, extension_id, 0, - 0, 0, 0, 0); + return sbi_ecall(SBI_EXTID_BASE, SBI_PROBE_EXTENSION_FID, extension_id, 0, 0, 0, 0, 0); } struct sbiret sbi_get_mvendorid(void) { @@ -105,97 +100,73 @@ struct sbiret sbi_get_mimpid(void) return sbi_ecall(SBI_EXTID_BASE, SBI_GET_MIMPID_FID, 0, 0, 0, 0, 0, 0); } -struct sbiret sbi_send_ipi(const unsigned long hart_mask, - unsigned long hart_mask_base) +struct sbiret sbi_send_ipi(const unsigned long hart_mask, unsigned long hart_mask_base) { - return sbi_ecall(SBI_EXTID_IPI, SBI_SEND_IPI_FID, hart_mask, hart_mask_base, - 0, 0, 0, 0); + return sbi_ecall(SBI_EXTID_IPI, SBI_SEND_IPI_FID, hart_mask, hart_mask_base, 0, 0, 0, 0); } struct sbiret sbi_set_timer(uint64_t stime_value) { - return sbi_ecall(SBI_EXTID_TIME, SBI_SET_TIMER_FID, stime_value, 0, 0, 0, 0, - 0); + return sbi_ecall(SBI_EXTID_TIME, SBI_SET_TIMER_FID, stime_value, 0, 0, 0, 0, 0); } -struct sbiret sbi_remote_fence_i(const unsigned long hart_mask, - unsigned long hart_mask_base) +struct sbiret sbi_remote_fence_i(const unsigned long hart_mask, unsigned long hart_mask_base) { - return sbi_ecall(SBI_EXTID_RFNC, SBI_REMOTE_FENCE_I_FID, hart_mask, - hart_mask_base, 0, 0, 0, 0); + return sbi_ecall(SBI_EXTID_RFNC, SBI_REMOTE_FENCE_I_FID, hart_mask, hart_mask_base, 0, 0, 0, 0); } -struct sbiret sbi_remote_sfence_vma(const unsigned long hart_mask, - unsigned long hart_mask_base, - unsigned long start_addr, - unsigned long size) +struct sbiret sbi_remote_sfence_vma(const unsigned long hart_mask, unsigned long hart_mask_base, + unsigned long start_addr, unsigned long size) { - return sbi_ecall(SBI_EXTID_RFNC, SBI_REMOTE_SFENCE_VMA_FID, hart_mask, - hart_mask_base, start_addr, size, 0, 0); + return sbi_ecall(SBI_EXTID_RFNC, SBI_REMOTE_SFENCE_VMA_FID, hart_mask, hart_mask_base, + start_addr, size, 0, 0); } -struct sbiret sbi_remote_hfence_gvma(const unsigned long hart_mask, - unsigned long hart_mask_base, - unsigned long start_addr, - 
unsigned long size) +struct sbiret sbi_remote_hfence_gvma(const unsigned long hart_mask, unsigned long hart_mask_base, + unsigned long start_addr, unsigned long size) { - return sbi_ecall(SBI_EXTID_RFNC, SBI_REMOTE_HFENCE_GVMA_FID, hart_mask, - hart_mask_base, start_addr, size, 0, 0); + return sbi_ecall(SBI_EXTID_RFNC, SBI_REMOTE_HFENCE_GVMA_FID, hart_mask, hart_mask_base, + start_addr, size, 0, 0); } struct sbiret sbi_remote_hfence_gvma_vmid(const unsigned long hart_mask, - unsigned long hart_mask_base, - unsigned long start_addr, - unsigned long size, - unsigned long vmid) + unsigned long hart_mask_base, unsigned long start_addr, unsigned long size, unsigned long vmid) { - return sbi_ecall(SBI_EXTID_RFNC, SBI_REMOTE_HFENCE_GVMA_VMID_FID, hart_mask, - hart_mask_base, start_addr, size, vmid, 0); + return sbi_ecall(SBI_EXTID_RFNC, SBI_REMOTE_HFENCE_GVMA_VMID_FID, hart_mask, hart_mask_base, + start_addr, size, vmid, 0); } struct sbiret sbi_remote_hfence_vvma_asid(const unsigned long hart_mask, - unsigned long hart_mask_base, - unsigned long start_addr, - unsigned long size, - unsigned long asid) + unsigned long hart_mask_base, unsigned long start_addr, unsigned long size, unsigned long asid) { - return sbi_ecall(SBI_EXTID_RFNC, SBI_REMOTE_HFENCE_VVMA_ASID_FID, hart_mask, - hart_mask_base, start_addr, size, asid, 0); + return sbi_ecall(SBI_EXTID_RFNC, SBI_REMOTE_HFENCE_VVMA_ASID_FID, hart_mask, hart_mask_base, + start_addr, size, asid, 0); } -struct sbiret sbi_remote_hfence_vvma(const unsigned long hart_mask, - unsigned long hart_mask_base, - unsigned long start_addr, - unsigned long size) +struct sbiret sbi_remote_hfence_vvma(const unsigned long hart_mask, unsigned long hart_mask_base, + unsigned long start_addr, unsigned long size) { - return sbi_ecall(SBI_EXTID_RFNC, SBI_REMOTE_HFENCE_VVMA_FID, hart_mask, - hart_mask_base, start_addr, size, 0, 0); + return sbi_ecall(SBI_EXTID_RFNC, SBI_REMOTE_HFENCE_VVMA_FID, hart_mask, hart_mask_base, + start_addr, size, 0, 0); } -struct sbiret sbi_hart_start(unsigned long hartid, unsigned long start_addr, - unsigned long priv) +struct sbiret sbi_hart_start(unsigned long hartid, unsigned long start_addr, unsigned long priv) { - return sbi_ecall(SBI_EXTID_HSM, SBI_HART_START_FID, hartid, - start_addr, priv, 0, 0, 0); + return sbi_ecall(SBI_EXTID_HSM, SBI_HART_START_FID, hartid, start_addr, priv, 0, 0, 0); } struct sbiret sbi_hart_stop() { - return sbi_ecall(SBI_EXTID_HSM, SBI_HART_STOP_FID, 0, - 0, 0, 0, 0, 0); + return sbi_ecall(SBI_EXTID_HSM, SBI_HART_STOP_FID, 0, 0, 0, 0, 0, 0); } struct sbiret sbi_hart_status(unsigned long hartid) { - return sbi_ecall(SBI_EXTID_HSM, SBI_HART_STATUS_FID, hartid, - 0, 0, 0, 0, 0); + return sbi_ecall(SBI_EXTID_HSM, SBI_HART_STATUS_FID, hartid, 0, 0, 0, 0, 0); } -static unsigned long ext_table[] = {SBI_EXTID_BASE, - SBI_EXTID_TIME, - SBI_EXTID_IPI, - SBI_EXTID_RFNC, - SBI_EXTID_HSM}; +static unsigned long ext_table[] = { SBI_EXTID_BASE, SBI_EXTID_TIME, SBI_EXTID_IPI, SBI_EXTID_RFNC, + SBI_EXTID_HSM }; static const size_t NUM_EXT = sizeof(ext_table) / sizeof(unsigned long); @@ -212,11 +183,11 @@ void sbi_msg_handler(uint32_t event, uint64_t data) break; case HART_START: { spin_lock(&cpu()->vcpu->arch.sbi_ctx.lock); - if(cpu()->vcpu->arch.sbi_ctx.state == START_PENDING) { + if (cpu()->vcpu->arch.sbi_ctx.state == START_PENDING) { vcpu_arch_reset(cpu()->vcpu, cpu()->vcpu->arch.sbi_ctx.start_addr); - vcpu_writereg(cpu()->vcpu, REG_A1, cpu()->vcpu->arch.sbi_ctx.priv); + vcpu_writereg(cpu()->vcpu, REG_A1, 
cpu()->vcpu->arch.sbi_ctx.priv); cpu()->vcpu->arch.sbi_ctx.state = STARTED; - } + } spin_unlock(&cpu()->vcpu->arch.sbi_ctx.lock); } break; default: @@ -227,18 +198,20 @@ void sbi_msg_handler(uint32_t event, uint64_t data) struct sbiret sbi_time_handler(unsigned long fid) { - if (fid != SBI_SET_TIMER_FID) return (struct sbiret){SBI_ERR_NOT_SUPPORTED}; + if (fid != SBI_SET_TIMER_FID) { + return (struct sbiret){ SBI_ERR_NOT_SUPPORTED }; + } uint64_t stime_value = vcpu_readreg(cpu()->vcpu, REG_A0); - if(CPU_HAS_EXTENSION(CPU_EXT_SSTC)) { + if (CPU_HAS_EXTENSION(CPU_EXT_SSTC)) { CSRW(CSR_VSTIMECMP, stime_value); } else { - sbi_set_timer(stime_value); // assumes always success + sbi_set_timer(stime_value); // assumes always success CSRC(CSR_HVIP, HIP_VSTIP); CSRS(sie, SIE_STIE); } - return (struct sbiret){SBI_SUCCESS}; + return (struct sbiret){ SBI_SUCCESS }; } void sbi_timer_irq_handler() @@ -249,7 +222,9 @@ void sbi_timer_irq_handler() struct sbiret sbi_ipi_handler(unsigned long fid) { - if (fid != SBI_SEND_IPI_FID) return (struct sbiret){SBI_ERR_NOT_SUPPORTED}; + if (fid != SBI_SEND_IPI_FID) { + return (struct sbiret){ SBI_ERR_NOT_SUPPORTED }; + } unsigned long hart_mask = vcpu_readreg(cpu()->vcpu, REG_A0); unsigned long hart_mask_base = vcpu_readreg(cpu()->vcpu, REG_A1); @@ -263,16 +238,18 @@ struct sbiret sbi_ipi_handler(unsigned long fid) if (bit_get(hart_mask, i)) { vcpuid_t vhart_id = hart_mask_base + i; cpuid_t phart_id = vm_translate_to_pcpuid(cpu()->vcpu->vm, vhart_id); - if(phart_id != INVALID_CPUID) cpu_send_msg(phart_id, &msg); + if (phart_id != INVALID_CPUID) { + cpu_send_msg(phart_id, &msg); + } } } - return (struct sbiret){SBI_SUCCESS}; + return (struct sbiret){ SBI_SUCCESS }; } struct sbiret sbi_base_handler(unsigned long fid) { - struct sbiret ret = {.error = SBI_SUCCESS}; + struct sbiret ret = { .error = SBI_SUCCESS }; unsigned long extid = vcpu_readreg(cpu()->vcpu, REG_A0); switch (fid) { @@ -308,17 +285,16 @@ struct sbiret sbi_rfence_handler(unsigned long fid) unsigned long asid = vcpu_readreg(cpu()->vcpu, REG_A4); const size_t hart_mask_width = sizeof(hart_mask) * 8; - if ((hart_mask_base != 0) && ((hart_mask_base >= hart_mask_width) || - ((hart_mask << hart_mask_base) == 0))) - { + if ((hart_mask_base != 0) && + ((hart_mask_base >= hart_mask_width) || ((hart_mask << hart_mask_base) == 0))) { WARNING("sbi invalid hart_mask"); - return (struct sbiret){SBI_ERR_INVALID_PARAM}; + return (struct sbiret){ SBI_ERR_INVALID_PARAM }; } hart_mask = hart_mask << hart_mask_base; - unsigned long phart_mask = vm_translate_to_pcpu_mask( - cpu()->vcpu->vm, hart_mask, sizeof(hart_mask) * 8); + unsigned long phart_mask = + vm_translate_to_pcpu_mask(cpu()->vcpu->vm, hart_mask, sizeof(hart_mask) * 8); switch (fid) { case SBI_REMOTE_FENCE_I_FID: @@ -337,18 +313,18 @@ struct sbiret sbi_rfence_handler(unsigned long fid) return ret; } -struct sbiret sbi_hsm_start_handler() { - +struct sbiret sbi_hsm_start_handler() +{ struct sbiret ret; vcpuid_t vhart_id = vcpu_readreg(cpu()->vcpu, REG_A0); - - if(vhart_id == cpu()->vcpu->id){ + + if (vhart_id == cpu()->vcpu->id) { ret.error = SBI_ERR_ALREADY_AVAILABLE; } else { - struct vcpu *vcpu = vm_get_vcpu(cpu()->vcpu->vm, vhart_id); - if(vcpu == NULL) { + struct vcpu* vcpu = vm_get_vcpu(cpu()->vcpu->vm, vhart_id); + if (vcpu == NULL) { ret.error = SBI_ERR_INVALID_PARAM; - } else { + } else { spin_lock(&vcpu->arch.sbi_ctx.lock); if (vcpu->arch.sbi_ctx.state == STARTED) { ret.error = SBI_ERR_ALREADY_AVAILABLE; @@ -366,26 +342,26 @@ struct sbiret 
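/*
 * The rfence handler above rejects a hart mask whose base is out of range or
 * whose set bits would all shift out, then rebases the mask before
 * translating it to a physical cpu mask. A self-contained sketch of exactly
 * that check and rebase; the function name is hypothetical, the logic
 * mirrors the handler.
 */
#include <stdbool.h>
#include <stdio.h>

static bool rebase_hart_mask(unsigned long mask, unsigned long base, unsigned long* out)
{
    const unsigned long width = sizeof(mask) * 8;
    if ((base != 0) && ((base >= width) || ((mask << base) == 0))) {
        return false; /* the handler returns SBI_ERR_INVALID_PARAM here */
    }
    *out = mask << base;
    return true;
}

int main(void)
{
    unsigned long rebased;
    if (rebase_hart_mask(0x3, 2, &rebased)) {
        printf("vharts targeted: 0x%lx\n", rebased); /* 0xc: vharts 2 and 3 */
    }
    return 0;
}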
sbi_hsm_start_handler() { struct cpu_msg msg = { .handler = SBI_MSG_ID, .event = HART_START, - .data = 0xdeadbeef + .data = 0xdeadbeef, }; cpu_send_msg(vcpu->phys_id, &msg); - - ret.error = SBI_SUCCESS; + + ret.error = SBI_SUCCESS; } spin_unlock(&vcpu->arch.sbi_ctx.lock); - } - } + } + } return ret; } -struct sbiret sbi_hsm_status_handler() { - +struct sbiret sbi_hsm_status_handler() +{ struct sbiret ret; vcpuid_t vhart_id = vcpu_readreg(cpu()->vcpu, REG_A0); - struct vcpu *vhart = vm_get_vcpu(cpu()->vcpu->vm, vhart_id); + struct vcpu* vhart = vm_get_vcpu(cpu()->vcpu->vm, vhart_id); - if(vhart != NULL) { + if (vhart != NULL) { ret.error = SBI_SUCCESS; ret.value = vhart->arch.sbi_ctx.state; } else { @@ -395,31 +371,31 @@ struct sbiret sbi_hsm_status_handler() { return ret; } -struct sbiret sbi_hsm_handler(unsigned long fid){ - +struct sbiret sbi_hsm_handler(unsigned long fid) +{ struct sbiret ret; - switch(fid) { + switch (fid) { case SBI_HART_START_FID: ret = sbi_hsm_start_handler(); - break; + break; case SBI_HART_STATUS_FID: - ret = sbi_hsm_status_handler(); - break; + ret = sbi_hsm_status_handler(); + break; default: ret.error = SBI_ERR_NOT_SUPPORTED; - } + } - return ret; + return ret; } -struct sbiret sbi_bao_handler(unsigned long fid){ - - struct sbiret ret; +struct sbiret sbi_bao_handler(unsigned long fid) +{ + struct sbiret ret; - ret.error = hypercall(fid); + ret.error = hypercall(fid); - return ret; + return ret; } size_t sbi_vs_handler() @@ -448,8 +424,7 @@ size_t sbi_vs_handler() ret = sbi_bao_handler(fid); break; default: - WARNING("guest issued unsupport sbi extension call (%d)", - extid); + WARNING("guest issued unsupport sbi extension call (%d)", extid); ret.value = SBI_ERR_NOT_SUPPORTED; } @@ -476,7 +451,7 @@ void sbi_init() } } - if(!interrupts_reserve(TIMR_INT_ID, sbi_timer_irq_handler)) { + if (!interrupts_reserve(TIMR_INT_ID, sbi_timer_irq_handler)) { ERROR("Failed to reserve SBI TIMR_INT_ID interrupt"); } } diff --git a/src/arch/riscv/sync_exceptions.c b/src/arch/riscv/sync_exceptions.c index 6f6056b86..28debf687 100644 --- a/src/arch/riscv/sync_exceptions.c +++ b/src/arch/riscv/sync_exceptions.c @@ -1,5 +1,5 @@ /** - * SPDX-License-Identifier: Apache-2.0 + * SPDX-License-Identifier: Apache-2.0 * Copyright (c) Bao Project and Contributors. All rights reserved. */ @@ -10,9 +10,9 @@ #include #include -void internal_exception_handler(unsigned long gprs[]) { - - for(int i = 0; i < 31; i++) { +void internal_exception_handler(unsigned long gprs[]) +{ + for (int i = 0; i < 31; i++) { console_printk("x%d:\t\t0x%0lx\n", i, gprs[i]); } console_printk("sstatus:\t0x%0lx\n", CSRR(sstatus)); @@ -25,13 +25,13 @@ static uint32_t read_ins(uintptr_t ins_addr) { uint32_t ins = 0; - if(ins_addr & 0b1) { + if (ins_addr & 0b1) { ERROR("trying to read guest unaligned instruction"); } /** - * Read 16 bits at a time to make sure the access is aligned. If - * the instruction is not compressed, read the following 16-bits. + * Read 16 bits at a time to make sure the access is aligned. If the instruction is not + * compressed, read the following 16-bits. 
*/ ins = hlvxhu(ins_addr); if ((ins & 0b11) == 3) { @@ -45,11 +45,10 @@ typedef size_t (*sync_handler_t)(); extern size_t sbi_vs_handler(); -static inline bool ins_ldst_decode(vaddr_t ins, struct emul_access *emul) +static inline bool ins_ldst_decode(vaddr_t ins, struct emul_access* emul) { if (INS_COMPRESSED(ins)) { - if (INS_C_OPCODE(ins) != MATCH_C_LOAD && - INS_C_OPCODE(ins) != MATCH_C_STORE) { + if (INS_C_OPCODE(ins) != MATCH_C_LOAD && INS_C_OPCODE(ins) != MATCH_C_STORE) { return false; } @@ -64,9 +63,7 @@ static inline bool ins_ldst_decode(vaddr_t ins, struct emul_access *emul) } unsigned funct3 = INS_FUNCT3(ins); - emul->width = (funct3 & 3) == 0 - ? 1 - : (funct3 & 3) == 1 ? 2 : (funct3 & 3) == 2 ? 4 : 8; + emul->width = (funct3 & 3) == 0 ? 1 : (funct3 & 3) == 1 ? 2 : (funct3 & 3) == 2 ? 4 : 8; emul->reg_width = REGLEN; emul->write = (INS_OPCODE(ins) == MATCH_STORE); emul->reg = emul->write ? INS_RS2(ins) : INS_RD(ins); @@ -76,7 +73,8 @@ static inline bool ins_ldst_decode(vaddr_t ins, struct emul_access *emul) return true; } -static inline bool is_pseudo_ins(uint32_t ins) { +static inline bool is_pseudo_ins(uint32_t ins) +{ return ins == TINST_PSEUDO_STORE || ins == TINST_PSEUDO_LOAD; } @@ -86,26 +84,23 @@ size_t guest_page_fault_handler() emul_handler_t handler = vm_emul_get_mem(cpu()->vcpu->vm, addr); if (handler != NULL) { - unsigned long ins = CSRR(CSR_HTINST); size_t ins_size; - if(ins == 0) { + if (ins == 0) { /** - * If htinst does not provide information about the trap, - * we must read the instruction from the guest's memory - * manually. + * If htinst does not provide information about the trap, we must read the instruction + * from the guest's memory manually. */ vaddr_t ins_addr = CSRR(sepc); ins = read_ins(ins_addr); ins_size = INS_SIZE(ins); } else if (is_pseudo_ins(ins)) { - //TODO: we should reinject this in the guest as a fault access + // TODO: we should reinject this in the guest as a fault access ERROR("fault on 1st stage page table walk"); } else { /** - * If htinst is valid and is not a pseudo isntruction make sure - * the opcode is valid even if it was a compressed instruction, - * but before save the real instruction size. + * If htinst is valid and is not a pseudo isntruction make sure the opcode is valid + * even if it was a compressed instruction, but before save the real instruction size. */ ins_size = TINST_INS_SIZE(ins); ins = ins | 0b10; @@ -118,8 +113,7 @@ size_t guest_page_fault_handler() emul.addr = addr; /** - * TODO: check if the access is aligned. - * If not, inject an exception in the vm. + * TODO: check if the access is aligned. If not, inject an exception in the vm. */ if (handler(&emul)) { @@ -138,20 +132,18 @@ sync_handler_t sync_handler_table[] = { [SCAUSE_CODE_SGPF] = guest_page_fault_handler, }; -static const size_t sync_handler_table_size = - sizeof(sync_handler_table) / sizeof(sync_handler_t); +static const size_t sync_handler_table_size = sizeof(sync_handler_table) / sizeof(sync_handler_t); void sync_exception_handler() { size_t pc_step = 0; unsigned long _scause = CSRR(scause); - if(!(CSRR(CSR_HSTATUS) & HSTATUS_SPV)) { + if (!(CSRR(CSR_HSTATUS) & HSTATUS_SPV)) { internal_exception_handler(&cpu()->vcpu->regs.x[0]); } - // TODO: Do we need to check call comes from VS-mode and not VU-mode - // or U-mode ? + // TODO: Do we need to check call comes from VS-mode and not VU-mode or U-mode ? 
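/*
 * The dispatch that follows guards both the table bound and the NULL holes
 * left by the sparse designated initializers above. A self-contained sketch
 * of the same pattern, with a made-up cause code and handler; the real
 * SCAUSE_CODE_* values live in the arch headers.
 */
#include <stddef.h>
#include <stdio.h>

typedef size_t (*handler_fn)(void);

static size_t ecall_handler(void)
{
    return 4; /* pc step: skip the 4-byte trapping instruction */
}

enum { CAUSE_VS_ECALL = 10 }; /* hypothetical value, for illustration only */

static handler_fn table[] = {
    [CAUSE_VS_ECALL] = ecall_handler,
};

int main(void)
{
    size_t cause = CAUSE_VS_ECALL;
    size_t pc_step = 0;
    if (cause < sizeof(table) / sizeof(table[0]) && table[cause] != NULL) {
        pc_step = table[cause](); /* unknown or unset causes fall through */
    }
    printf("pc step: %zu\n", pc_step);
    return 0;
}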
if (_scause < sync_handler_table_size && sync_handler_table[_scause]) { pc_step = sync_handler_table[_scause](); diff --git a/src/arch/riscv/vm.c b/src/arch/riscv/vm.c index d9b96e078..e29b46316 100644 --- a/src/arch/riscv/vm.c +++ b/src/arch/riscv/vm.c @@ -1,5 +1,5 @@ /** - * SPDX-License-Identifier: Apache-2.0 + * SPDX-License-Identifier: Apache-2.0 * Copyright (c) Bao Project and Contributors. All rights reserved. */ @@ -11,37 +11,38 @@ #include #include -void vm_arch_init(struct vm *vm, const struct vm_config *config) +void vm_arch_init(struct vm* vm, const struct vm_config* config) { paddr_t root_pt_pa; mem_translate(&cpu()->as, (vaddr_t)vm->as.pt.root, &root_pt_pa); unsigned long hgatp = (root_pt_pa >> PAGE_SHIFT) | (HGATP_MODE_DFLT) | - ((vm->id << HGATP_VMID_OFF) & HGATP_VMID_MSK); + ((vm->id << HGATP_VMID_OFF) & HGATP_VMID_MSK); CSRW(CSR_HGATP, hgatp); virqc_init(vm, &config->platform.arch.irqc); } -void vcpu_arch_init(struct vcpu *vcpu, struct vm *vm) { +void vcpu_arch_init(struct vcpu* vcpu, struct vm* vm) +{ vcpu->arch.sbi_ctx.lock = SPINLOCK_INITVAL; - vcpu->arch.sbi_ctx.state = vcpu->id == 0 ? STARTED : STOPPED; + vcpu->arch.sbi_ctx.state = vcpu->id == 0 ? STARTED : STOPPED; } -void vcpu_arch_reset(struct vcpu *vcpu, vaddr_t entry) +void vcpu_arch_reset(struct vcpu* vcpu, vaddr_t entry) { memset(&vcpu->regs, 0, sizeof(struct arch_regs)); - + CSRW(sscratch, &vcpu->regs); vcpu->regs.hstatus = HSTATUS_SPV | HSTATUS_VSXL_64; vcpu->regs.sstatus = SSTATUS_SPP_BIT | SSTATUS_FS_DIRTY | SSTATUS_XS_DIRTY; vcpu->regs.sepc = entry; vcpu->regs.a0 = vcpu->arch.hart_id = vcpu->id; - vcpu->regs.a1 = 0; // according to sbi it should be the dtb load address + vcpu->regs.a1 = 0; // according to sbi it should be the dtb load address - if(CPU_HAS_EXTENSION(CPU_EXT_SSTC)) { + if (CPU_HAS_EXTENSION(CPU_EXT_SSTC)) { CSRW(CSR_STIMECMP, -1); CSRS(CSR_HENVCFG, HENVCFG_STCE); } else { @@ -60,34 +61,37 @@ void vcpu_arch_reset(struct vcpu *vcpu, vaddr_t entry) CSRW(CSR_VSATP, 0); } -unsigned long vcpu_readreg(struct vcpu *vcpu, unsigned long reg) +unsigned long vcpu_readreg(struct vcpu* vcpu, unsigned long reg) { - if ((reg <= 0) || (reg > 31)) return 0; + if ((reg <= 0) || (reg > 31)) { + return 0; + } return vcpu->regs.x[reg - 1]; } -void vcpu_writereg(struct vcpu *vcpu, unsigned long reg, unsigned long val) +void vcpu_writereg(struct vcpu* vcpu, unsigned long reg, unsigned long val) { - if ((reg <= 0) || (reg > 31)) return; + if ((reg <= 0) || (reg > 31)) { + return; + } vcpu->regs.x[reg - 1] = val; } -unsigned long vcpu_readpc(struct vcpu *vcpu) +unsigned long vcpu_readpc(struct vcpu* vcpu) { return vcpu->regs.sepc; } -void vcpu_writepc(struct vcpu *vcpu, unsigned long pc) +void vcpu_writepc(struct vcpu* vcpu, unsigned long pc) { vcpu->regs.sepc = pc; } -void vcpu_arch_run(struct vcpu *vcpu){ - - if(vcpu->arch.sbi_ctx.state == STARTED){ +void vcpu_arch_run(struct vcpu* vcpu) +{ + if (vcpu->arch.sbi_ctx.state == STARTED) { vcpu_arch_entry(); } else { cpu_idle(); - } - + } } diff --git a/src/arch/riscv/vmm.c b/src/arch/riscv/vmm.c index 500300130..45826ee04 100644 --- a/src/arch/riscv/vmm.c +++ b/src/arch/riscv/vmm.c @@ -1,5 +1,5 @@ /** - * SPDX-License-Identifier: Apache-2.0 + * SPDX-License-Identifier: Apache-2.0 * Copyright (c) Bao Project and Contributors. All rights reserved. */ @@ -9,22 +9,19 @@ void vmm_arch_init() { /** - * At this point, we should make sure misa's H bit is set (at least by - * reading it). However, current SBI does not allow us to even read it. 
- * So we assume it is set - if not, the first acess to an hypervisor - * register will set an illegal inst fault. + * At this point, we should make sure misa's H bit is set (at least by reading it). However, + * current SBI does not allow us to even read it. So we assume it is set - if not, the first + * acess to an hypervisor register will set an illegal inst fault. */ /** - * Delegate all interrupts and exceptions not meant to be dealt by - * the hypervisor + * Delegate all interrupts and exceptions not meant to be dealt by the hypervisor */ CSRW(CSR_HIDELEG, HIDELEG_VSSI | HIDELEG_VSTI | HIDELEG_VSEI); CSRW(CSR_HEDELEG, HEDELEG_ECU | HEDELEG_IPF | HEDELEG_LPF | HEDELEG_SPF); /** - * TODO: consider delegating other exceptions e.g. breakpoint or ins - * misaligned + * TODO: consider delegating other exceptions e.g. breakpoint or ins misaligned */ } diff --git a/src/core/cache.c b/src/core/cache.c index 600859d00..48764b111 100644 --- a/src/core/cache.c +++ b/src/core/cache.c @@ -19,18 +19,17 @@ static void cache_calc_colors(struct cache* dscrp, size_t page_size) size_t llc = dscrp->min_shared_lvl; - if ((dscrp->type[llc] != UNIFIED) || (dscrp->indexed[llc][0] != PIPT)) + if ((dscrp->type[llc] != UNIFIED) || (dscrp->indexed[llc][0] != PIPT)) { return; + } - size_t llc_way_size = - dscrp->numset[llc][UNIFIED] * dscrp->line_size[llc][UNIFIED]; + size_t llc_way_size = dscrp->numset[llc][UNIFIED] * dscrp->line_size[llc][UNIFIED]; size_t flc_way_size = 0; - if ((dscrp->type[0] != UNIFIED)) { + if (dscrp->type[0] != UNIFIED) { flc_way_size = dscrp->numset[0][0] * dscrp->line_size[0][0]; size_t flc_i_way_size = dscrp->numset[0][1] * dscrp->line_size[0][1]; - if (((dscrp->indexed[0][0] == PIPT) || - (flc_i_way_size < flc_way_size)) && + if (((dscrp->indexed[0][0] == PIPT) || (flc_i_way_size < flc_way_size)) && (dscrp->indexed[0][1] == PIPT)) { flc_way_size = flc_i_way_size; } diff --git a/src/core/config.c b/src/core/config.c index 25fcffda3..ef1753400 100644 --- a/src/core/config.c +++ b/src/core/config.c @@ -8,17 +8,17 @@ void config_adjust_vm_image_addr(paddr_t load_addr) { for (size_t i = 0; i < config.vmlist_size; i++) { - struct vm_config *vm_config = &config.vmlist[i]; + struct vm_config* vm_config = &config.vmlist[i]; if (!vm_config->image.separately_loaded) { - vm_config->image.load_addr = - (vm_config->image.load_addr - BAO_VAS_BASE) + load_addr; + vm_config->image.load_addr = (vm_config->image.load_addr - BAO_VAS_BASE) + load_addr; } } } -__attribute__((weak)) void config_mem_prot_init(paddr_t load_addr) {} +__attribute__((weak)) void config_mem_prot_init(paddr_t load_addr) { } -void config_init(paddr_t load_addr) { +void config_init(paddr_t load_addr) +{ config_adjust_vm_image_addr(load_addr); config_mem_prot_init(load_addr); } diff --git a/src/core/console.c b/src/core/console.c index e9c30d7af..3d758630d 100644 --- a/src/core/console.c +++ b/src/core/console.c @@ -14,18 +14,18 @@ #include #include -static volatile bao_uart_t *uart; +static volatile bao_uart_t* uart; static bool console_ready = false; static spinlock_t console_lock = SPINLOCK_INITVAL; void console_init() { if (cpu()->id == CPU_MASTER) { - if((platform.console.base & PAGE_OFFSET_MASK) != 0) { + if ((platform.console.base & PAGE_OFFSET_MASK) != 0) { WARNING("console base must be page aligned"); } - uart = (void*) mem_alloc_map_dev(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, + uart = (void*)mem_alloc_map_dev(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, platform.console.base, NUM_PAGES(sizeof(*uart))); fence_sync_write(); @@ 
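/*
 * cache_calc_colors() above reduces each cache to its way size, that is,
 * numset * line_size. The color count itself is derived outside this hunk,
 * but the standard relation for page coloring is the way size divided by
 * the page size; a standalone sketch with made-up geometry, under that
 * assumption.
 */
#include <stddef.h>
#include <stdio.h>

int main(void)
{
    const size_t page_size = 4096;
    const size_t numset = 2048;           /* hypothetical LLC sets */
    const size_t line_size = 64;          /* hypothetical line size */
    size_t way_size = numset * line_size; /* 128 KiB per way */
    size_t colors = way_size / page_size; /* 32 page colors */
    printf("%zu page colors\n", colors);
    return 0;
}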
-39,10 +39,10 @@ void console_init() cpu_sync_and_clear_msgs(&cpu_glb_sync); } - void console_write(const char* buf, size_t n) { - while (!console_ready); + while (!console_ready) + ; for (size_t i = 0; i < n; i++) { if (buf[i] == '\n') { uart_putc(uart, '\r'); @@ -51,7 +51,7 @@ void console_write(const char* buf, size_t n) } } -#define PRINTF_BUFFER_LEN (256) +#define PRINTF_BUFFER_LEN (256) static char console_bufffer[PRINTF_BUFFER_LEN]; __attribute__((format(printf, 1, 2))) void console_printk(const char* fmt, ...) @@ -63,8 +63,7 @@ __attribute__((format(printf, 1, 2))) void console_printk(const char* fmt, ...) va_start(args, fmt); spin_lock(&console_lock); while (*fmt_it != '\0') { - chars_writen = - vsnprintk(console_bufffer, PRINTF_BUFFER_LEN, &fmt_it, &args); + chars_writen = vsnprintk(console_bufffer, PRINTF_BUFFER_LEN, &fmt_it, &args); console_write(console_bufffer, min(PRINTF_BUFFER_LEN, chars_writen)); } spin_unlock(&console_lock); diff --git a/src/core/cpu.c b/src/core/cpu.c index c1a5ae63b..aa6bfe8e8 100644 --- a/src/core/cpu.c +++ b/src/core/cpu.c @@ -22,7 +22,7 @@ struct cpu_msg_node { OBJPOOL_ALLOC(msg_pool, struct cpu_msg_node, CPU_MSG_POOL_SIZE); -struct cpu_synctoken cpu_glb_sync = {.ready = false}; +struct cpu_synctoken cpu_glb_sync = { .ready = false }; extern cpu_msg_handler_t ipi_cpumsg_handlers[]; extern uint8_t _ipi_cpumsg_handlers_size; @@ -44,8 +44,7 @@ void cpu_init(cpuid_t cpu_id, paddr_t load_addr) if (cpu()->id == CPU_MASTER) { cpu_sync_init(&cpu_glb_sync, platform.cpu_num); - ipi_cpumsg_handler_num = - ((size_t)&_ipi_cpumsg_handlers_size) / sizeof(cpu_msg_handler_t); + ipi_cpumsg_handler_num = ((size_t)&_ipi_cpumsg_handlers_size) / sizeof(cpu_msg_handler_t); for (size_t i = 0; i < ipi_cpumsg_handler_num; i++) { ((size_t*)_ipi_cpumsg_handlers_id_start)[i] = i; } @@ -54,21 +53,22 @@ void cpu_init(cpuid_t cpu_id, paddr_t load_addr) cpu_sync_barrier(&cpu_glb_sync); } -void cpu_send_msg(cpuid_t trgtcpu, struct cpu_msg *msg) +void cpu_send_msg(cpuid_t trgtcpu, struct cpu_msg* msg) { - struct cpu_msg_node *node = objpool_alloc(&msg_pool); - if (node == NULL) ERROR("cant allocate msg node"); + struct cpu_msg_node* node = objpool_alloc(&msg_pool); + if (node == NULL) { + ERROR("cant allocate msg node"); + } node->msg = *msg; - list_push(&cpu_if(trgtcpu)->event_list, (node_t *)node); + list_push(&cpu_if(trgtcpu)->event_list, (node_t*)node); fence_sync_write(); interrupts_cpu_sendipi(trgtcpu, IPI_CPU_MSG); } -bool cpu_get_msg(struct cpu_msg *msg) +bool cpu_get_msg(struct cpu_msg* msg) { - struct cpu_msg_node *node = NULL; - if ((node = (struct cpu_msg_node *)list_pop(&cpu()->interface->event_list)) != - NULL) { + struct cpu_msg_node* node = NULL; + if ((node = (struct cpu_msg_node*)list_pop(&cpu()->interface->event_list)) != NULL) { *msg = node->msg; objpool_free(&msg_pool, node); return true; @@ -81,8 +81,7 @@ void cpu_msg_handler() cpu()->handling_msgs = true; struct cpu_msg msg; while (cpu_get_msg(&msg)) { - if (msg.handler < ipi_cpumsg_handler_num && - ipi_cpumsg_handlers[msg.handler]) { + if (msg.handler < ipi_cpumsg_handler_num && ipi_cpumsg_handlers[msg.handler]) { ipi_cpumsg_handlers[msg.handler](msg.event, msg.data); } } @@ -94,9 +93,8 @@ void cpu_idle() cpu_arch_idle(); /** - * Should not return here. - * cpu should "wake up" from idle in cpu_idle_wakeup - * with a rewinded stack. + * Should not return here. cpu should "wake up" from idle in cpu_idle_wakeup with a rewinded + * stack. 
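/*
 * Usage shape of the messaging machinery above: a subsystem registers a
 * handler slot with CPU_MSG_HANDLER (declared in cpu.h later in this patch)
 * and targets a core with cpu_send_msg(); the receiving core drains its
 * event list in cpu_msg_handler(). A sketch under those assumptions; the
 * event, handler, and id names are hypothetical, mirroring how ipc.c uses
 * the same API.
 */
enum { EXAMPLE_PING };

static void example_handler(uint32_t event, uint64_t data)
{
    if (event == EXAMPLE_PING) {
        console_printk("ping with data 0x%lx\n", (unsigned long)data);
    }
}
CPU_MSG_HANDLER(example_handler, EXAMPLE_CPUMSG_ID);

static void example_ping(cpuid_t target)
{
    struct cpu_msg msg = { EXAMPLE_CPUMSG_ID, EXAMPLE_PING, 0 };
    cpu_send_msg(target, &msg);
}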
*/ ERROR("Spurious idle wake up"); } diff --git a/src/core/hypercall.c b/src/core/hypercall.c index 006746771..a4ca03726 100644 --- a/src/core/hypercall.c +++ b/src/core/hypercall.c @@ -8,17 +8,18 @@ #include #include -long int hypercall(unsigned long id) { +long int hypercall(unsigned long id) +{ long int ret = -HC_E_INVAL_ID; unsigned long ipc_id = vcpu_readreg(cpu()->vcpu, HYPCALL_ARG_REG(0)); - unsigned long arg1 = vcpu_readreg(cpu()->vcpu, HYPCALL_ARG_REG(1)); - unsigned long arg2 = vcpu_readreg(cpu()->vcpu, HYPCALL_ARG_REG(2)); + unsigned long arg1 = vcpu_readreg(cpu()->vcpu, HYPCALL_ARG_REG(1)); + unsigned long arg2 = vcpu_readreg(cpu()->vcpu, HYPCALL_ARG_REG(2)); - switch(id){ + switch (id) { case HC_IPC: ret = ipc_hypercall(ipc_id, arg1, arg2); - break; + break; default: WARNING("Unknown hypercall id %d", id); } diff --git a/src/core/inc/bao.h b/src/core/inc/bao.h index 388e8b2a7..22aa82bdf 100644 --- a/src/core/inc/bao.h +++ b/src/core/inc/bao.h @@ -14,17 +14,14 @@ #include #include -#define INFO(args, ...) \ - console_printk("BAO INFO: " args "\n" __VA_OPT__(, ) __VA_ARGS__); +#define INFO(args, ...) console_printk("BAO INFO: " args "\n" __VA_OPT__(, ) __VA_ARGS__); -#define WARNING(args, ...) \ - console_printk("BAO WARNING: " args "\n" __VA_OPT__(, ) __VA_ARGS__); +#define WARNING(args, ...) console_printk("BAO WARNING: " args "\n" __VA_OPT__(, ) __VA_ARGS__); #define ERROR(args, ...) \ { \ console_printk("BAO ERROR: " args "\n" __VA_OPT__(, ) __VA_ARGS__); \ - while (1) \ - ; \ + while (1) { } \ } #endif /* __ASSEMBLER__ */ diff --git a/src/core/inc/config.h b/src/core/inc/config.h index 2e328ef0c..bd8c0d8ad 100644 --- a/src/core/inc/config.h +++ b/src/core/inc/config.h @@ -11,8 +11,10 @@ #include #include - #ifndef GENERATING_DEFS +// clang-format wont correctly recognize the syntax of assembly strings interleaved with +// stringified tokens via XSTR and will format it in an unreadable way +// clang-format off #define VM_IMAGE(img_name, img_path) \ extern uint8_t _##img_name##_vm_size; \ extern uint8_t _##img_name##_vm_beg; \ @@ -25,29 +27,26 @@ ".set _" XSTR(img_name) "_vm_size, (_" XSTR(img_name) "_vm_end - _" \ #img_name "_vm_beg)\n\t" \ ".popsection"); +// clang-format on -#define VM_IMAGE_OFFSET(img_name) ((paddr_t)&_##img_name##_vm_beg) -#define VM_IMAGE_SIZE(img_name) ((size_t)&_##img_name##_vm_size) +#define VM_IMAGE_OFFSET(img_name) ((paddr_t) & _##img_name##_vm_beg) +#define VM_IMAGE_SIZE(img_name) ((size_t) & _##img_name##_vm_size) #else #define VM_IMAGE(img_name, img_path) #define VM_IMAGE_OFFSET(img_name) ((paddr_t)0) -#define VM_IMAGE_SIZE(img_name) ((size_t)0) +#define VM_IMAGE_SIZE(img_name) ((size_t)0) #endif -#define VM_IMAGE_BUILTIN(img_name, image_base_addr) \ - {\ - .base_addr = image_base_addr,\ - .load_addr = VM_IMAGE_OFFSET(img_name),\ - .size = VM_IMAGE_SIZE(img_name),\ - .separately_loaded = false,\ +#define VM_IMAGE_BUILTIN(img_name, image_base_addr) \ + { \ + .base_addr = image_base_addr, .load_addr = VM_IMAGE_OFFSET(img_name), \ + .size = VM_IMAGE_SIZE(img_name), .separately_loaded = false, \ } -#define VM_IMAGE_LOADED(image_base_addr, image_load_addr, image_size) \ - {\ - .base_addr = image_base_addr,\ - .load_addr = image_load_addr,\ - .size = image_size,\ - .separately_loaded = true,\ +#define VM_IMAGE_LOADED(image_base_addr, image_load_addr, image_size) \ + { \ + .base_addr = image_base_addr, .load_addr = image_load_addr, .size = image_size, \ + .separately_loaded = true, \ } /* CONFIG_HEADER is just defined for compatibility with older 
configs */ @@ -55,8 +54,8 @@ struct vm_config { /** - * To setup the image field either the VM_IMAGE_BUILTIN or VM_IMAGE_LOADED - * macros should be used. + * To setup the image field either the VM_IMAGE_BUILTIN or VM_IMAGE_LOADED macros should be + * used. */ struct { /* Image load address in VM's address space */ @@ -66,8 +65,7 @@ struct vm_config { /* Image size */ size_t size; /** - * Informs the hypervisor if the VM image is to be loaded - * separately by a bootloader. + * Informs the hypervisor if the VM image is to be loaded separately by a bootloader. */ bool separately_loaded; /* Dont copy the image */ @@ -77,36 +75,33 @@ struct vm_config { /* Entry point address in VM's address space */ vaddr_t entry; /** - * A bitmap signaling the preferred physical cpus assigned to the VM. - * If this value is each mutual exclusive for all the VMs, this field - * allows to direcly assign specific physical cpus to the VM. + * A bitmap signaling the preferred physical cpus assigned to the VM. If this value is each + * mutual exclusive for all the VMs, this field allows to direcly assign specific physical cpus + * to the VM. */ cpumap_t cpu_affinity; /** - * A bitmap for the assigned colors of the VM. This value is truncated - * depending on the number of available colors calculated at runtime + * A bitmap for the assigned colors of the VM. This value is truncated depending on the number + * of available colors calculated at runtime */ colormap_t colors; /** - * A description of the virtual platform available to the guest, i.e., - * the virtual machine itself. + * A description of the virtual platform available to the guest, i.e., the virtual machine + * itself. */ struct vm_platform platform; - }; extern struct config { - struct { /** - * Only meaningful for MPU-based platforms. The hypervisor base address - * will default to the platform's base address, i.e., the base address - * of the first region defined in the target platform's description. - * If the user wishes to relocate it to another address, they must set - * relocate to true and provide the new base address. + * Only meaningful for MPU-based platforms. The hypervisor base address will default to the + * platform's base address, i.e., the base address of the first region defined in the + * target platform's description. If the user wishes to relocate it to another address, + * they must set relocate to true and provide the new base address. 
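/*
 * Putting the image macros and vm_config fields above together, this is the
 * shape of a minimal guest entry in a configuration, in the style of bao's
 * example configs. The image name, path, and addresses are made up, the
 * platform sub-struct is elided, and the vmlist initialization form is an
 * assumption since that field's declaration falls outside this hunk.
 */
VM_IMAGE(guest, "path/to/guest.bin");

struct config config = {
    .vmlist_size = 1,
    .vmlist = {
        {
            .image = VM_IMAGE_BUILTIN(guest, 0x80200000),
            .entry = 0x80200000,
            .cpu_affinity = 0x1, /* prefer physical cpu 0 */
        },
    },
};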
*/ bool relocate; paddr_t base_addr; @@ -117,7 +112,7 @@ extern struct config { /* Definition of shared memory regions to be used by VMs */ size_t shmemlist_size; - struct shmem *shmemlist; + struct shmem* shmemlist; /* The number of VMs specified by this configuration */ size_t vmlist_size; diff --git a/src/core/inc/config_defs.h b/src/core/inc/config_defs.h old mode 100755 new mode 100644 index f5c18c990..207a65d00 --- a/src/core/inc/config_defs.h +++ b/src/core/inc/config_defs.h @@ -8,8 +8,8 @@ #ifdef GENERATING_DEFS -#define CONFIG_VCPU_NUM 1 -#define CONFIG_VM_NUM 1 +#define CONFIG_VCPU_NUM 1 +#define CONFIG_VM_NUM 1 #define CONFIG_HYP_BASE_ADDR 0 #else /* GENERATING_DEFS */ diff --git a/src/core/inc/cpu.h b/src/core/inc/cpu.h index b5f77cd7f..0dbddd696 100644 --- a/src/core/inc/cpu.h +++ b/src/core/inc/cpu.h @@ -18,7 +18,7 @@ struct cpuif { struct list event_list; -} __attribute__((aligned(PAGE_SIZE))) ; +} __attribute__((aligned(PAGE_SIZE))); struct vcpu; @@ -26,7 +26,7 @@ struct cpu { cpuid_t id; bool handling_msgs; - + struct addr_space as; struct vcpu* vcpu; @@ -36,7 +36,7 @@ struct cpu { struct cpuif* interface; uint8_t stack[STACK_SIZE] __attribute__((aligned(PAGE_SIZE))); - + } __attribute__((aligned(PAGE_SIZE))); struct cpu_msg { uint32_t handler; @@ -48,11 +48,10 @@ void cpu_send_msg(cpuid_t cpu, struct cpu_msg* msg); typedef void (*cpu_msg_handler_t)(uint32_t event, uint64_t data); -#define CPU_MSG_HANDLER(handler, handler_id) \ - __attribute__((section(".ipi_cpumsg_handlers"), used)) \ - cpu_msg_handler_t __cpumsg_handler_##handler = handler; \ - __attribute__((section(".ipi_cpumsg_handlers_id"), \ - used)) volatile const size_t handler_id; +#define CPU_MSG_HANDLER(handler, handler_id) \ + __attribute__((section(".ipi_cpumsg_handlers"), used)) \ + cpu_msg_handler_t __cpumsg_handler_##handler = handler; \ + __attribute__((section(".ipi_cpumsg_handlers_id"), used)) volatile const size_t handler_id; struct cpu_synctoken { spinlock_t lock; @@ -94,21 +93,21 @@ static inline void cpu_sync_barrier(struct cpu_synctoken* token) size_t next_count = 0; - while (!token->ready); + while (!token->ready) { } spin_lock(&token->lock); token->count++; next_count = ALIGN(token->count, token->n); spin_unlock(&token->lock); - while (token->count < next_count); + while (token->count < next_count) { } } static inline void cpu_sync_and_clear_msgs(struct cpu_synctoken* token) { size_t next_count = 0; - while (!token->ready); + while (!token->ready) { } spin_lock(&token->lock); token->count++; @@ -116,15 +115,18 @@ static inline void cpu_sync_and_clear_msgs(struct cpu_synctoken* token) spin_unlock(&token->lock); while (token->count < next_count) { - if (!cpu()->handling_msgs) cpu_msg_handler(); + if (!cpu()->handling_msgs) { + cpu_msg_handler(); + } } - if (!cpu()->handling_msgs) cpu_msg_handler(); + if (!cpu()->handling_msgs) { + cpu_msg_handler(); + } cpu_sync_barrier(token); } - #endif /* __ASSEMBLER__ */ #endif /* __CPU_H__ */ diff --git a/src/core/inc/emul.h b/src/core/inc/emul.h index e2e54a139..85b68b3a0 100644 --- a/src/core/inc/emul.h +++ b/src/core/inc/emul.h @@ -63,16 +63,13 @@ static inline unsigned long emul_read(struct emul_access* emul) switch (emul->width) { case 1: - val = emul->sign_ext ? *((int8_t*)emul->addr) - : *((uint8_t*)emul->addr); + val = emul->sign_ext ? *((int8_t*)emul->addr) : *((uint8_t*)emul->addr); break; case 2: - val = emul->sign_ext ? *((int16_t*)emul->addr) - : *((uint16_t*)emul->addr); + val = emul->sign_ext ? 
*((int16_t*)emul->addr) : *((uint16_t*)emul->addr); break; case 4: - val = emul->sign_ext ? *((int32_t*)emul->addr) - : *((uint32_t*)emul->addr); + val = emul->sign_ext ? *((int32_t*)emul->addr) : *((uint32_t*)emul->addr); break; case 8: val = *((uint64_t*)emul->addr); diff --git a/src/core/inc/hypercall.h b/src/core/inc/hypercall.h index a1fbf22e0..cb510c47f 100644 --- a/src/core/inc/hypercall.h +++ b/src/core/inc/hypercall.h @@ -9,21 +9,12 @@ #include #include -enum { - HC_INVAL = 0, - HC_IPC = 1 -}; +enum { HC_INVAL = 0, HC_IPC = 1 }; -enum { - HC_E_SUCCESS = 0, - HC_E_FAILURE = 1, - HC_E_INVAL_ID = 2, - HC_E_INVAL_ARGS = 3 -}; +enum { HC_E_SUCCESS = 0, HC_E_FAILURE = 1, HC_E_INVAL_ID = 2, HC_E_INVAL_ARGS = 3 }; -typedef unsigned long (*hypercall_handler)( unsigned long arg0, - unsigned long arg1, - unsigned long arg2); +typedef unsigned long (*hypercall_handler)(unsigned long arg0, unsigned long arg1, + unsigned long arg2); long int hypercall(unsigned long id); diff --git a/src/core/inc/interrupts.h b/src/core/inc/interrupts.h index c6fddfa8e..08b8df53f 100644 --- a/src/core/inc/interrupts.h +++ b/src/core/inc/interrupts.h @@ -27,7 +27,7 @@ void interrupts_clear(irqid_t int_id); enum irq_res { HANDLED_BY_HYP, FORWARD_TO_VM }; enum irq_res interrupts_handle(irqid_t int_id); -bool interrupts_vm_assign(struct vm *vm, irqid_t id); +bool interrupts_vm_assign(struct vm* vm, irqid_t id); /* Must be implemented by architecture */ @@ -36,7 +36,7 @@ void interrupts_arch_enable(irqid_t int_id, bool en); bool interrupts_arch_check(irqid_t int_id); void interrupts_arch_clear(irqid_t int_id); void interrupts_arch_ipi_send(cpuid_t cpu_target, irqid_t ipi_id); -void interrupts_arch_vm_assign(struct vm *vm, irqid_t id); +void interrupts_arch_vm_assign(struct vm* vm, irqid_t id); bool interrupts_arch_conflict(bitmap_t* interrupt_bitmap, irqid_t id); #endif /* __INTERRUPTS_H__ */ diff --git a/src/core/inc/io.h b/src/core/inc/io.h index 9f95af5a6..2215c52d9 100644 --- a/src/core/inc/io.h +++ b/src/core/inc/io.h @@ -1,5 +1,5 @@ /** - * SPDX-License-Identifier: Apache-2.0 + * SPDX-License-Identifier: Apache-2.0 * Copyright (c) Bao Project and Contributors. All rights reserved */ @@ -21,7 +21,7 @@ struct vm_io { void io_init(); /* iommu api for vms. 
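/*
 * An emulation handler, as implied by the vm_emul_get_mem() lookup earlier
 * in this patch, receives a struct emul_access describing the faulting
 * guest load/store and moves the data with vcpu_readreg()/vcpu_writereg().
 * A hypothetical device-register sketch under those assumptions; the bool
 * return type is itself assumed from the `if (handler(&emul))` call site.
 */
static bool example_dev_emul(struct emul_access* acc)
{
    static uint32_t status_reg = 0xaa55; /* hypothetical device state */

    if (acc->write) {
        status_reg = (uint32_t)vcpu_readreg(cpu()->vcpu, acc->reg);
    } else {
        vcpu_writereg(cpu()->vcpu, acc->reg, status_reg);
    }
    return true; /* handled: the trap path then advances the guest pc */
}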
*/ -bool io_vm_init(struct vm *vm, const struct vm_config *config); -bool io_vm_add_device(struct vm *vm, deviceid_t dev_id); +bool io_vm_init(struct vm* vm, const struct vm_config* config); +bool io_vm_add_device(struct vm* vm, deviceid_t dev_id); #endif /* IO_H_ */ diff --git a/src/core/inc/ipc.h b/src/core/inc/ipc.h index a0841e49d..b53b93e8b 100644 --- a/src/core/inc/ipc.h +++ b/src/core/inc/ipc.h @@ -14,7 +14,7 @@ struct ipc { size_t size; size_t shmem_id; size_t interrupt_num; - irqid_t *interrupts; + irqid_t* interrupts; }; struct vm_config; diff --git a/src/core/inc/mem.h b/src/core/inc/mem.h index 0f1093763..a92c5927c 100644 --- a/src/core/inc/mem.h +++ b/src/core/inc/mem.h @@ -51,7 +51,7 @@ struct shmem { static inline struct ppages mem_ppages_get(paddr_t base, size_t num_pages) { - return (struct ppages){.colors = 0, .base = base, .num_pages = num_pages}; + return (struct ppages){ .colors = 0, .base = base, .num_pages = num_pages }; } static inline bool all_clrs(colormap_t clrs) @@ -64,18 +64,16 @@ static inline bool all_clrs(colormap_t clrs) void mem_init(paddr_t load_addr); void* mem_alloc_page(size_t num_pages, enum AS_SEC sec, bool phys_aligned); struct ppages mem_alloc_ppages(colormap_t colors, size_t num_pages, bool aligned); -vaddr_t mem_alloc_map(struct addr_space* as, enum AS_SEC section, struct ppages *page, - vaddr_t at, size_t num_pages, mem_flags_t flags); -vaddr_t mem_alloc_map_dev(struct addr_space* as, enum AS_SEC section, - vaddr_t at, paddr_t pa, size_t size); -void mem_unmap(struct addr_space* as, vaddr_t at, size_t num_pages, - bool free_ppages); -bool mem_map_reclr(struct addr_space* as, vaddr_t va, struct ppages* ppages, - size_t num_pages, mem_flags_t flags); -vaddr_t mem_map_cpy(struct addr_space *ass, struct addr_space *asd, vaddr_t vas, - vaddr_t vad, size_t num_pages); -bool pp_alloc(struct page_pool *pool, size_t num_pages, bool aligned, - struct ppages *ppages); +vaddr_t mem_alloc_map(struct addr_space* as, enum AS_SEC section, struct ppages* page, vaddr_t at, + size_t num_pages, mem_flags_t flags); +vaddr_t mem_alloc_map_dev(struct addr_space* as, enum AS_SEC section, vaddr_t at, paddr_t pa, + size_t size); +void mem_unmap(struct addr_space* as, vaddr_t at, size_t num_pages, bool free_ppages); +bool mem_map_reclr(struct addr_space* as, vaddr_t va, struct ppages* ppages, size_t num_pages, + mem_flags_t flags); +vaddr_t mem_map_cpy(struct addr_space* ass, struct addr_space* asd, vaddr_t vas, vaddr_t vad, + size_t num_pages); +bool pp_alloc(struct page_pool* pool, size_t num_pages, bool aligned, struct ppages* ppages); void mem_prot_init(); size_t mem_cpu_boot_alloc_size(); diff --git a/src/core/inc/objpool.h b/src/core/inc/objpool.h index cb86cd490..33957d769 100644 --- a/src/core/inc/objpool.h +++ b/src/core/inc/objpool.h @@ -19,19 +19,19 @@ struct objpool { spinlock_t lock; }; -#define OBJPOOL_ALLOC(NAME, TYPE, N) \ - TYPE _##NAME##_array[N];\ - BITMAP_ALLOC(_##NAME##_array_bitmap, N);\ - struct objpool NAME = {\ - .pool = _##NAME##_array,\ - .bitmap = _##NAME##_array_bitmap,\ - .objsize = sizeof(TYPE),\ - .num = N,\ - .lock = SPINLOCK_INITVAL,\ +#define OBJPOOL_ALLOC(NAME, TYPE, N) \ + TYPE _##NAME##_array[N]; \ + BITMAP_ALLOC(_##NAME##_array_bitmap, N); \ + struct objpool NAME = { \ + .pool = _##NAME##_array, \ + .bitmap = _##NAME##_array_bitmap, \ + .objsize = sizeof(TYPE), \ + .num = N, \ + .lock = SPINLOCK_INITVAL, \ } -void objpool_init(struct objpool *objpool); -void* objpool_alloc(struct objpool *objpool); -void objpool_free(struct objpool 
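/*
 * Usage shape of the object pool api above: OBJPOOL_ALLOC statically sizes
 * the backing array and its bitmap, and objpool_alloc()/objpool_free()
 * recycle fixed-size objects without a heap (cpu.c uses exactly this for
 * its message nodes). A sketch with a hypothetical node type.
 */
struct example_node {
    unsigned long payload;
};
OBJPOOL_ALLOC(example_pool, struct example_node, 16);

static void example_use(void)
{
    struct example_node* n = objpool_alloc(&example_pool);
    if (n != NULL) {
        n->payload = 42;
        objpool_free(&example_pool, n);
    }
}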
*objpool, void* obj); +void objpool_init(struct objpool* objpool); +void* objpool_alloc(struct objpool* objpool); +void objpool_free(struct objpool* objpool, void* obj); #endif /* OBJPOOL_H */ diff --git a/src/core/inc/page_table.h b/src/core/inc/page_table.h index 86188612b..6df0f9671 100644 --- a/src/core/inc/page_table.h +++ b/src/core/inc/page_table.h @@ -47,8 +47,7 @@ static inline size_t pt_getpteindex(struct page_table* pt, pte_t* pte, size_t lv return (size_t)(((size_t)pte) & (pt_size(pt, lvl) - 1)) / sizeof(pte_t); } -static inline size_t pt_getpteindex_by_va(struct page_table* pt, vaddr_t va, - size_t lvl) +static inline size_t pt_getpteindex_by_va(struct page_table* pt, vaddr_t va, size_t lvl) { return (va >> pt->dscr->lvl_off[lvl]) & (pt_nentries(pt, lvl) - 1); } diff --git a/src/core/inc/platform.h b/src/core/inc/platform.h index 0900d2e74..e711c40c6 100644 --- a/src/core/inc/platform.h +++ b/src/core/inc/platform.h @@ -15,11 +15,11 @@ struct platform { size_t cpu_num; - bool cpu_master_fixed; + bool cpu_master_fixed; cpuid_t cpu_master; size_t region_num; - struct mem_region *regions; + struct mem_region* regions; struct { paddr_t base; diff --git a/src/core/inc/platform_defs.h b/src/core/inc/platform_defs.h old mode 100755 new mode 100644 index f15d5b4d1..910b44fd1 --- a/src/core/inc/platform_defs.h +++ b/src/core/inc/platform_defs.h @@ -8,8 +8,8 @@ #ifdef GENERATING_DEFS -#define PLAT_CPU_NUM 1 -#define PLAT_BASE_ADDR 0 +#define PLAT_CPU_NUM 1 +#define PLAT_BASE_ADDR 0 #else /* GENERATING_DEFS */ diff --git a/src/core/inc/tlb.h b/src/core/inc/tlb.h index 260561c78..884f712c3 100644 --- a/src/core/inc/tlb.h +++ b/src/core/inc/tlb.h @@ -11,7 +11,7 @@ #include -static inline void tlb_inv_va(struct addr_space *as, vaddr_t va) +static inline void tlb_inv_va(struct addr_space* as, vaddr_t va) { if (as->type == AS_HYP) { tlb_hyp_inv_va(va); @@ -21,7 +21,7 @@ static inline void tlb_inv_va(struct addr_space *as, vaddr_t va) } } -static inline void tlb_inv_all(struct addr_space *as) +static inline void tlb_inv_all(struct addr_space* as) { if (as->type == AS_HYP) { tlb_hyp_inv_all(); diff --git a/src/core/inc/types.h b/src/core/inc/types.h index 06de1dd37..99ff33de5 100644 --- a/src/core/inc/types.h +++ b/src/core/inc/types.h @@ -12,10 +12,9 @@ #include /** - * We assume LP64 and ILP32 for 64- and 32-bit architectures, respectively, as - * throughout the code `unsigned long` is the type used for values of the - * architecture's word width. This is just a sanity check to verify this is the - * ABI the compiler is effectively using. + * We assume LP64 and ILP32 for 64- and 32-bit architectures, respectively, as throughout the code + * `unsigned long` is the type used for values of the architecture's word width. This is just a + * sanity check to verify this is the ABI the compiler is effectively using. 
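/*
 * The #if that follows relies on the C23-style *_WIDTH macros. Where those
 * are unavailable, the same LP64/ILP32 sanity check can be written as a C11
 * static assertion; a standalone equivalent of the guard below.
 */
#include <assert.h>
#include <stdint.h>

static_assert(sizeof(unsigned long) == sizeof(uintptr_t),
    "unsigned long is not the architecture's word width");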
*/ #if UINTPTR_WIDTH != ULONG_WIDTH #error "Unsigned long type width is not the same as the architecture´s word with" @@ -30,18 +29,18 @@ typedef unsigned long vmid_t; typedef uintptr_t paddr_t; typedef uintptr_t regaddr_t; typedef uintptr_t vaddr_t; -#define MAX_VA ((vaddr_t)-1) +#define MAX_VA ((vaddr_t)-1) #define INVALID_VA MAX_VA typedef ssize_t mpid_t; -#define INVALID_MPID ((mpid_t)-1) +#define INVALID_MPID ((mpid_t)-1) typedef unsigned long colormap_t; typedef unsigned long cpuid_t; typedef unsigned long vcpuid_t; typedef unsigned long cpumap_t; -#define INVALID_CPUID ((cpuid_t)-1) +#define INVALID_CPUID ((cpuid_t)-1) typedef unsigned irqid_t; @@ -64,11 +63,11 @@ typedef enum AS_TYPE { AS_HYP = 0, AS_VM, AS_HYP_CPY } as_type_t; typedef enum { PRIV_NONE = 0, PRIV_HYP = 1, PRIV_VM = 2 } priv_t; -typedef enum { PERM_X = 1, PERM_R = 2, PERM_W = 4} perms_t; -#define PERM_NONE ((perms_t)0) -#define PERM_RW (PERM_R | PERM_W) -#define PERM_RX (PERM_R | PERM_X) -#define PERM_RWX (PERM_R | PERM_W | PERM_X) +typedef enum { PERM_X = 1, PERM_R = 2, PERM_W = 4 } perms_t; +#define PERM_NONE ((perms_t)0) +#define PERM_RW (PERM_R | PERM_W) +#define PERM_RX (PERM_R | PERM_X) +#define PERM_RWX (PERM_R | PERM_W | PERM_X) typedef unsigned long mem_attrs_t; diff --git a/src/core/inc/vm.h b/src/core/inc/vm.h index 62a11b2f2..a9dc9af80 100644 --- a/src/core/inc/vm.h +++ b/src/core/inc/vm.h @@ -31,24 +31,24 @@ struct vm_dev_region { vaddr_t va; size_t size; size_t interrupt_num; - irqid_t *interrupts; + irqid_t* interrupts; deviceid_t id; /* bus master id for iommu effects */ }; - + struct vm_platform { size_t cpu_num; size_t region_num; - struct vm_mem_region *regions; + struct vm_mem_region* regions; size_t ipc_num; - struct ipc *ipcs; + struct ipc* ipcs; size_t dev_num; - struct vm_dev_region *devs; + struct vm_dev_region* devs; // /** - // * In MPU-based platforms which might also support virtual memory + // * In MPU-based platforms which might also support virtual memory // * (i.e. aarch64 cortex-r) the hypervisor sets up the VM using an MPU by // * default. 
If the user wants this VM to use the MMU they must set the // * config mmu parameter to true; @@ -67,7 +67,7 @@ struct vm { struct cpu_synctoken sync; cpuid_t master; - struct vcpu *vcpus; + struct vcpu* vcpus; size_t cpu_num; cpumap_t cpus; @@ -83,7 +83,7 @@ struct vm { BITMAP_ALLOC(interrupt_bitmap, MAX_INTERRUPTS); size_t ipc_num; - struct ipc *ipcs; + struct ipc* ipcs; }; struct vcpu { @@ -102,14 +102,14 @@ struct vcpu { struct vm_allocation { vaddr_t base; size_t size; - struct vm *vm; - struct vcpu *vcpus; + struct vm* vm; + struct vcpu* vcpus; }; extern struct vm vm; -struct vm* vm_init(struct vm_allocation* vm_alloc, const struct vm_config* config, - bool master, vmid_t vm_id); +struct vm* vm_init(struct vm_allocation* vm_alloc, const struct vm_config* config, bool master, + vmid_t vm_id); void vm_start(struct vm* vm, vaddr_t entry); void vm_emul_add_mem(struct vm* vm, struct emul_mem* emu); void vm_emul_add_reg(struct vm* vm, struct emul_reg* emu); @@ -120,7 +120,8 @@ void vm_msg_broadcast(struct vm* vm, struct cpu_msg* msg); cpumap_t vm_translate_to_pcpu_mask(struct vm* vm, cpumap_t mask, size_t len); cpumap_t vm_translate_to_vcpu_mask(struct vm* vm, cpumap_t mask, size_t len); -static inline struct vcpu* vm_get_vcpu(struct vm* vm, vcpuid_t vcpuid) { +static inline struct vcpu* vm_get_vcpu(struct vm* vm, vcpuid_t vcpuid) +{ if (vcpuid < vm->cpu_num) { return &vm->vcpus[vcpuid]; } @@ -129,9 +130,9 @@ static inline struct vcpu* vm_get_vcpu(struct vm* vm, vcpuid_t vcpuid) { static inline cpuid_t vm_translate_to_pcpuid(struct vm* vm, vcpuid_t vcpuid) { - struct vcpu *vcpu = vm_get_vcpu(vm, vcpuid); + struct vcpu* vcpu = vm_get_vcpu(vm, vcpuid); - if(vcpu == NULL) { + if (vcpu == NULL) { return INVALID_CPUID; } else { return vcpu->phys_id; @@ -152,12 +153,12 @@ static inline bool vm_has_interrupt(struct vm* vm, irqid_t int_id) return !!bitmap_get(vm->interrupt_bitmap, int_id); } -static inline void vcpu_inject_hw_irq(struct vcpu *vcpu, irqid_t id) +static inline void vcpu_inject_hw_irq(struct vcpu* vcpu, irqid_t id) { vcpu_arch_inject_hw_irq(vcpu, id); } -static inline void vcpu_inject_irq(struct vcpu *vcpu, irqid_t id) +static inline void vcpu_inject_irq(struct vcpu* vcpu, irqid_t id) { vcpu_arch_inject_irq(vcpu, id); } diff --git a/src/core/inc/vmm.h b/src/core/inc/vmm.h index b6dcfa125..59b2b1372 100644 --- a/src/core/inc/vmm.h +++ b/src/core/inc/vmm.h @@ -16,7 +16,7 @@ void vmm_arch_init(); void vmm_io_init(); -struct vm_install_info vmm_get_vm_install_info(struct vm_allocation *vm_alloc); -void vmm_vm_install(struct vm_install_info *install_info); +struct vm_install_info vmm_get_vm_install_info(struct vm_allocation* vm_alloc); +void vmm_vm_install(struct vm_install_info* install_info); #endif /* __VMM_H__ */ diff --git a/src/core/init.c b/src/core/init.c index 56a960144..5a6f5309f 100644 --- a/src/core/init.c +++ b/src/core/init.c @@ -25,7 +25,7 @@ void init(cpuid_t cpu_id, paddr_t load_addr) /* -------------------------------------------------------------- */ console_init(); - + if (cpu()->id == CPU_MASTER) { console_printk("Bao Hypervisor\n\r"); } @@ -35,5 +35,5 @@ void init(cpuid_t cpu_id, paddr_t load_addr) vmm_init(); /* Should never reach here */ - while (1); + while (1) { } } diff --git a/src/core/interrupts.c b/src/core/interrupts.c index 96f381126..c9120430d 100644 --- a/src/core/interrupts.c +++ b/src/core/interrupts.c @@ -41,7 +41,7 @@ inline void interrupts_init() interrupts_arch_init(); if (cpu()->id == CPU_MASTER) { - if(!interrupts_reserve(IPI_CPU_MSG, 
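/*
 * The helpers above chain their failure values: a vcpuid at or beyond
 * cpu_num makes vm_get_vcpu() return NULL, which vm_translate_to_pcpuid()
 * turns into INVALID_CPUID. A sketch of the defensive pattern callers
 * follow; the function name is hypothetical, and the sbi ipi path earlier
 * in this patch does the same per mask bit.
 */
static void example_kick_vhart(struct vm* vm, vcpuid_t vhart, struct cpu_msg* msg)
{
    cpuid_t pcpu = vm_translate_to_pcpuid(vm, vhart);
    if (pcpu != INVALID_CPUID) {
        cpu_send_msg(pcpu, msg);
    }
}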
cpu_msg_handler)) { + if (!interrupts_reserve(IPI_CPU_MSG, cpu_msg_handler)) { ERROR("Failed to reserve IPI_CPU_MSG interrupt"); } } @@ -55,9 +55,9 @@ static inline bool interrupt_assigned_to_hyp(irqid_t int_id) } /** - * @brief For a given interrupt intp_id, return if this interrupt - * is already reserved by VMM or any VM - * + * @brief For a given interrupt intp_id, return if this interrupt is already reserved by VMM or any + * VM + * * @param int_id interrupt ID * @return true if interrupt is reserved * @return false if interrupt is NOT reserved @@ -84,7 +84,7 @@ enum irq_res interrupts_handle(irqid_t int_id) } } -bool interrupts_vm_assign(struct vm *vm, irqid_t id) +bool interrupts_vm_assign(struct vm* vm, irqid_t id) { bool ret = false; diff --git a/src/core/ipc.c b/src/core/ipc.c index 90f3d3778..4d8f58fd0 100644 --- a/src/core/ipc.c +++ b/src/core/ipc.c @@ -10,7 +10,7 @@ #include #include -enum {IPC_NOTIFY}; +enum { IPC_NOTIFY }; union ipc_msg_data { struct { @@ -21,22 +21,23 @@ union ipc_msg_data { }; static size_t shmem_table_size; -static struct shmem *shmem_table; +static struct shmem* shmem_table; -struct shmem* ipc_get_shmem(size_t shmem_id) { - if(shmem_id < shmem_table_size) { +struct shmem* ipc_get_shmem(size_t shmem_id) +{ + if (shmem_id < shmem_table_size) { return &shmem_table[shmem_id]; } else { return NULL; } } -static struct ipc* ipc_find_by_shmemid(struct vm* vm, size_t shmem_id) { - +static struct ipc* ipc_find_by_shmemid(struct vm* vm, size_t shmem_id) +{ struct ipc* ipc_obj = NULL; - for(size_t i = 0; i < vm->ipc_num; i++) { - if(vm->ipcs[i].shmem_id == shmem_id) { + for (size_t i = 0; i < vm->ipc_num; i++) { + if (vm->ipcs[i].shmem_id == shmem_id) { ipc_obj = &vm->ipcs[i]; break; } @@ -45,45 +46,45 @@ static struct ipc* ipc_find_by_shmemid(struct vm* vm, size_t shmem_id) { return ipc_obj; } -static void ipc_notify(size_t shmem_id, size_t event_id) { +static void ipc_notify(size_t shmem_id, size_t event_id) +{ struct ipc* ipc_obj = ipc_find_by_shmemid(cpu()->vcpu->vm, shmem_id); - if(ipc_obj != NULL && event_id < ipc_obj->interrupt_num) { + if (ipc_obj != NULL && event_id < ipc_obj->interrupt_num) { irqid_t irq_id = ipc_obj->interrupts[event_id]; vcpu_inject_hw_irq(cpu()->vcpu, irq_id); } } -static void ipc_handler(uint32_t event, uint64_t data){ +static void ipc_handler(uint32_t event, uint64_t data) +{ union ipc_msg_data ipc_data = { .raw = data }; - switch(event){ + switch (event) { case IPC_NOTIFY: ipc_notify(ipc_data.shmem_id, ipc_data.event_id); - break; + break; } } CPU_MSG_HANDLER(ipc_handler, IPC_CPUMSG_ID); -unsigned long ipc_hypercall(unsigned long ipc_id, unsigned long ipc_event, - unsigned long arg2) +unsigned long ipc_hypercall(unsigned long ipc_id, unsigned long ipc_event, unsigned long arg2) { unsigned long ret = -HC_E_SUCCESS; - struct shmem *shmem = NULL; + struct shmem* shmem = NULL; bool valid_ipc_obj = ipc_id < cpu()->vcpu->vm->ipc_num; - if(valid_ipc_obj) { + if (valid_ipc_obj) { shmem = ipc_get_shmem(cpu()->vcpu->vm->ipcs[ipc_id].shmem_id); } bool valid_shmem = shmem != NULL; - if(valid_ipc_obj && valid_shmem) { - + if (valid_ipc_obj && valid_shmem) { cpumap_t ipc_cpu_masters = shmem->cpu_masters & ~cpu()->vcpu->vm->cpus; union ipc_msg_data data = { .shmem_id = cpu()->vcpu->vm->ipcs[ipc_id].shmem_id, .event_id = ipc_event, }; - struct cpu_msg msg = {IPC_CPUMSG_ID, IPC_NOTIFY, data.raw}; + struct cpu_msg msg = { IPC_CPUMSG_ID, IPC_NOTIFY, data.raw }; for (size_t i = 0; i < platform.cpu_num; i++) { if (ipc_cpu_masters & (1ULL << i)) { @@ 
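/*
 * ipc_hypercall() above packs the shared-memory id and event id into the
 * single 64-bit data word of a cpu_msg via union ipc_msg_data, and
 * ipc_handler() unpacks them on the receiving core. A standalone sketch of
 * that round trip; the 32-bit field widths are an assumption, since the
 * union's member declarations fall outside this hunk.
 */
#include <stdint.h>
#include <stdio.h>

union msg_data {
    struct {
        uint32_t shmem_id;
        uint32_t event_id;
    };
    uint64_t raw;
};

int main(void)
{
    union msg_data d = { .shmem_id = 3, .event_id = 1 };
    union msg_data back = { .raw = d.raw }; /* travels as one uint64_t */
    printf("shmem %u event %u\n", back.shmem_id, back.event_id);
    return 0;
}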
-98,13 +99,14 @@ unsigned long ipc_hypercall(unsigned long ipc_id, unsigned long ipc_event, return ret; } -static void ipc_alloc_shmem() { +static void ipc_alloc_shmem() +{ for (size_t i = 0; i < shmem_table_size; i++) { - struct shmem *shmem = &shmem_table[i]; - if(!shmem->place_phys) { + struct shmem* shmem = &shmem_table[i]; + if (!shmem->place_phys) { size_t n_pg = NUM_PAGES(shmem->size); struct ppages ppages = mem_alloc_ppages(shmem->colors, n_pg, false); - if(ppages.num_pages < n_pg) { + if (ppages.num_pages < n_pg) { ERROR("failed to allocate shared memory"); } shmem->phys = ppages.base; @@ -112,14 +114,14 @@ static void ipc_alloc_shmem() { } } -void ipc_init() { - - if(cpu()->id == CPU_MASTER) { +void ipc_init() +{ + if (cpu()->id == CPU_MASTER) { shmem_table_size = config.shmemlist_size; shmem_table = config.shmemlist; ipc_alloc_shmem(); - for(size_t i = 0; i < config.shmemlist_size; i++) { + for (size_t i = 0; i < config.shmemlist_size; i++) { config.shmemlist[i].cpu_masters = 0; } } diff --git a/src/core/mem.c b/src/core/mem.c index bca9b8596..34f1d4a01 100644 --- a/src/core/mem.c +++ b/src/core/mem.c @@ -14,26 +14,25 @@ #include #include -extern uint8_t _image_start, _image_load_end, _image_end, _vm_image_start, - _vm_image_end; +extern uint8_t _image_start, _image_load_end, _image_end, _vm_image_start, _vm_image_end; struct list page_pool_list; -bool pp_alloc(struct page_pool *pool, size_t num_pages, bool aligned, - struct ppages *ppages) +bool pp_alloc(struct page_pool* pool, size_t num_pages, bool aligned, struct ppages* ppages) { ppages->colors = 0; ppages->num_pages = 0; bool ok = false; - if (num_pages == 0) return true; + if (num_pages == 0) { + return true; + } spin_lock(&pool->lock); /** - * If we need a contigous segment aligned to its size, lets start - * at an already aligned index. + * If we need a contigous segment aligned to its size, lets start at an already aligned index. */ size_t start = aligned ? pool->base / PAGE_SIZE % num_pages : 0; size_t curr = pool->last + ((pool->last + start) % num_pages); @@ -45,29 +44,28 @@ bool pp_alloc(struct page_pool *pool, size_t num_pages, bool aligned, */ for (size_t i = 0; i < 2 && !ok; i++) { while (pool->free != 0) { - ssize_t bit = - bitmap_find_consec(pool->bitmap, pool->size, curr, num_pages, false); + ssize_t bit = bitmap_find_consec(pool->bitmap, pool->size, curr, num_pages, false); if (bit < 0) { /** - * No num_page page sement was found. If this is the first - * iteration set position to 0 to start next search from index 0. + * No num_page page sement was found. If this is the first iteration set position + * to 0 to start next search from index + * 0. */ - size_t next_aligned = + size_t next_aligned = (num_pages - ((pool->base / PAGE_SIZE) % num_pages)) % num_pages; curr = aligned ? next_aligned : 0; break; } else if (aligned && (((bit + start) % num_pages) != 0)) { /** - * If we're looking for an aligned segment and the found - * contigous segment is not aligned, start the search again - * from the last aligned index + * If we're looking for an aligned segment and the found contigous segment is not + * aligned, start the search again from the last aligned index */ curr = bit + ((bit + start) % num_pages); } else { /** - * We've found our pages. Fill output argument info, mark - * them as allocated, and update page pool bookkeeping. + * We've found our pages. Fill output argument info, mark them as allocated, and + * update page pool bookkeeping. 
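/*
 * The aligned-allocation arithmetic in pp_alloc() above rests on one
 * invariant: a num_pages segment is size-aligned in physical memory when
 * the absolute page number (pool base page + bitmap index) is a multiple of
 * num_pages. A standalone check of that condition, which is what the start
 * offset and the re-alignment step in the search loop are driving toward.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

static bool index_is_aligned(unsigned long pool_base, size_t index, size_t num_pages)
{
    return ((pool_base / PAGE_SIZE + index) % num_pages) == 0;
}

int main(void)
{
    unsigned long pool_base = 0x80200000; /* hypothetical pool base */
    for (size_t i = 0; i < 8; i++) {      /* indices 0 and 4 qualify for num_pages = 4 */
        printf("index %zu: %s\n", i, index_is_aligned(pool_base, i, 4) ? "aligned" : "-");
    }
    return 0;
}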
*/ ppages->base = pool->base + (bit * PAGE_SIZE); ppages->num_pages = num_pages; @@ -84,18 +82,18 @@ bool pp_alloc(struct page_pool *pool, size_t num_pages, bool aligned, return ok; } -bool mem_are_ppages_reserved_in_pool(struct page_pool *ppool, struct ppages *ppages) +bool mem_are_ppages_reserved_in_pool(struct page_pool* ppool, struct ppages* ppages) { bool reserved = false; - bool rgn_found = range_in_range(ppages->base, ppages->num_pages * PAGE_SIZE, - ppool->base, ppool->size * PAGE_SIZE); + bool rgn_found = range_in_range(ppages->base, ppages->num_pages * PAGE_SIZE, ppool->base, + ppool->size * PAGE_SIZE); if (rgn_found) { size_t pageoff = NUM_PAGES(ppages->base - ppool->base); // verify these pages arent allocated yet bool is_alloced = bitmap_get(ppool->bitmap, pageoff); - size_t avlbl_contig_pp = bitmap_count_consecutive( - ppool->bitmap, ppool->size, pageoff, ppages->num_pages); + size_t avlbl_contig_pp = + bitmap_count_consecutive(ppool->bitmap, ppool->size, pageoff, ppages->num_pages); if (is_alloced || avlbl_contig_pp < ppages->num_pages) { reserved = true; @@ -105,13 +103,12 @@ bool mem_are_ppages_reserved_in_pool(struct page_pool *ppool, struct ppages *ppa return reserved; } -bool mem_are_ppages_reserved(struct ppages *ppages) +bool mem_are_ppages_reserved(struct ppages* ppages) { bool reserved = false; - list_foreach(page_pool_list, struct page_pool, pool) - { - bool is_in_rgn = range_in_range(ppages->base, ppages->num_pages * PAGE_SIZE, - pool->base, pool->size * PAGE_SIZE); + list_foreach (page_pool_list, struct page_pool, pool) { + bool is_in_rgn = range_in_range(ppages->base, ppages->num_pages * PAGE_SIZE, pool->base, + pool->size * PAGE_SIZE); if (is_in_rgn) { reserved = mem_are_ppages_reserved_in_pool(pool, ppages); @@ -122,11 +119,13 @@ bool mem_are_ppages_reserved(struct ppages *ppages) return reserved; } -bool mem_reserve_ppool_ppages(struct page_pool *pool, struct ppages *ppages) +bool mem_reserve_ppool_ppages(struct page_pool* pool, struct ppages* ppages) { - bool is_in_rgn = range_in_range(ppages->base, ppages->num_pages * PAGE_SIZE, - pool->base, pool->size * PAGE_SIZE); - if (!is_in_rgn) return true; + bool is_in_rgn = range_in_range(ppages->base, ppages->num_pages * PAGE_SIZE, pool->base, + pool->size * PAGE_SIZE); + if (!is_in_rgn) { + return true; + } size_t pageoff = NUM_PAGES(ppages->base - pool->base); @@ -141,7 +140,7 @@ bool mem_reserve_ppool_ppages(struct page_pool *pool, struct ppages *ppages) return is_in_rgn && was_free; } -void *mem_alloc_page(size_t num_pages, enum AS_SEC sec, bool phys_aligned) +void* mem_alloc_page(size_t num_pages, enum AS_SEC sec, bool phys_aligned) { vaddr_t vpage = INVALID_VA; struct ppages ppages = mem_alloc_ppages(cpu()->as.colors, num_pages, phys_aligned); @@ -153,27 +152,29 @@ void *mem_alloc_page(size_t num_pages, enum AS_SEC sec, bool phys_aligned) return (void*)vpage; } -bool root_pool_set_up_bitmap(paddr_t load_addr, struct page_pool *root_pool) +bool root_pool_set_up_bitmap(paddr_t load_addr, struct page_pool* root_pool) { size_t image_size = (size_t)(&_image_end - &_image_start); size_t vm_image_size = (size_t)(&_vm_image_end - &_vm_image_start); size_t cpu_size = platform.cpu_num * mem_cpu_boot_alloc_size(); - size_t bitmap_num_pages = root_pool->size / (8 * PAGE_SIZE) + - ((root_pool->size % (8 * PAGE_SIZE) != 0) ? 1 : 0); - if (root_pool->size <= bitmap_num_pages) return false; + size_t bitmap_num_pages = + root_pool->size / (8 * PAGE_SIZE) + ((root_pool->size % (8 * PAGE_SIZE) != 0) ? 
1 : 0); + if (root_pool->size <= bitmap_num_pages) { + return false; + } size_t bitmap_base = load_addr + image_size + vm_image_size + cpu_size; struct ppages bitmap_pp = mem_ppages_get(bitmap_base, bitmap_num_pages); - bitmap_t* root_bitmap = (bitmap_t*) - mem_alloc_map(&cpu()->as, SEC_HYP_GLOBAL, &bitmap_pp, INVALID_VA, bitmap_num_pages, PTE_HYP_FLAGS); + bitmap_t* root_bitmap = (bitmap_t*)mem_alloc_map(&cpu()->as, SEC_HYP_GLOBAL, &bitmap_pp, + INVALID_VA, bitmap_num_pages, PTE_HYP_FLAGS); root_pool->bitmap = root_bitmap; memset((void*)root_pool->bitmap, 0, bitmap_num_pages * PAGE_SIZE); return mem_reserve_ppool_ppages(root_pool, &bitmap_pp); } -bool pp_root_reserve_hyp_mem(paddr_t load_addr, struct page_pool *root_pool) +bool pp_root_reserve_hyp_mem(paddr_t load_addr, struct page_pool* root_pool) { size_t image_load_size = (size_t)(&_image_load_end - &_image_start); size_t image_noload_size = (size_t)(&_image_end - &_image_load_end); @@ -182,28 +183,24 @@ bool pp_root_reserve_hyp_mem(paddr_t load_addr, struct page_pool *root_pool) paddr_t image_noload_addr = load_addr + image_load_size + vm_image_size; paddr_t cpu_base_addr = image_noload_addr + image_noload_size; - struct ppages images_load_ppages = - mem_ppages_get(load_addr, NUM_PAGES(image_load_size)); + struct ppages images_load_ppages = mem_ppages_get(load_addr, NUM_PAGES(image_load_size)); struct ppages images_noload_ppages = mem_ppages_get(image_noload_addr, NUM_PAGES(image_noload_size)); - struct ppages cpu_ppages = - mem_ppages_get(cpu_base_addr, NUM_PAGES(cpu_size)); + struct ppages cpu_ppages = mem_ppages_get(cpu_base_addr, NUM_PAGES(cpu_size)); - bool image_load_reserved = - mem_reserve_ppool_ppages(root_pool, &images_load_ppages); - bool image_noload_reserved = - mem_reserve_ppool_ppages(root_pool, &images_noload_ppages); + bool image_load_reserved = mem_reserve_ppool_ppages(root_pool, &images_load_ppages); + bool image_noload_reserved = mem_reserve_ppool_ppages(root_pool, &images_noload_ppages); bool cpu_reserved = mem_reserve_ppool_ppages(root_pool, &cpu_ppages); return image_load_reserved && image_noload_reserved && cpu_reserved; } -static bool pp_root_init(paddr_t load_addr, struct mem_region *root_region) +static bool pp_root_init(paddr_t load_addr, struct mem_region* root_region) { - struct page_pool *root_pool = &root_region->page_pool; + struct page_pool* root_pool = &root_region->page_pool; root_pool->base = ALIGN(root_region->base, PAGE_SIZE); - root_pool->size = - root_region->size / PAGE_SIZE; /* TODO: what if not aligned? */ + root_pool->size = root_region->size / PAGE_SIZE; /* TODO: what if not + aligned? 
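/*
 * Editor's aside — two pieces of page-pool arithmetic from pp_alloc and
 * root_pool_set_up_bitmap above, made runnable. First: with
 * start = (pool_base / PAGE_SIZE) % num_pages, a bitmap index `bit` yields a
 * physically aligned block exactly when (bit + start) % num_pages == 0.
 * Second: the allocation bitmap needs one bit per page, so its own page count
 * is a ceiling division by 8 * PAGE_SIZE. All concrete values are invented.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL

int main(void)
{
    /* aligned-allocation condition */
    uint64_t pool_base = 0x80002000ULL; /* deliberately not 4-page aligned */
    size_t num_pages = 4;
    size_t start = (size_t)((pool_base / PAGE_SIZE) % num_pages);
    for (size_t bit = 0; bit < 8; bit++) {
        if (((bit + start) % num_pages) == 0) {
            uint64_t base = pool_base + bit * PAGE_SIZE;
            /* base is a multiple of num_pages * PAGE_SIZE here */
            printf("bit %zu -> aligned base %#llx\n", bit, (unsigned long long)base);
        }
    }

    /* bitmap sizing: one bit per page, rounded up to whole bitmap pages */
    size_t pool_pages = (1UL << 20) + 5;
    size_t bitmap_num_pages =
        pool_pages / (8 * PAGE_SIZE) + ((pool_pages % (8 * PAGE_SIZE) != 0) ? 1 : 0);
    printf("%zu bitmap page(s) track %zu pool pages\n", bitmap_num_pages, pool_pages);
    return 0;
}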
*/ root_pool->free = root_pool->size; if (!root_pool_set_up_bitmap(load_addr, root_pool)) { @@ -217,26 +214,32 @@ static bool pp_root_init(paddr_t load_addr, struct mem_region *root_region) return true; } -static void pp_init(struct page_pool *pool, paddr_t base, size_t size) +static void pp_init(struct page_pool* pool, paddr_t base, size_t size) { struct ppages pages; - if (pool == NULL) return; + if (pool == NULL) { + return; + } memset((void*)pool, 0, sizeof(struct page_pool)); pool->base = ALIGN(base, PAGE_SIZE); pool->size = NUM_PAGES(size); - size_t bitmap_size = - pool->size / (8 * PAGE_SIZE) + !!(pool->size % (8 * PAGE_SIZE) != 0); + size_t bitmap_size = pool->size / (8 * PAGE_SIZE) + !!(pool->size % (8 * PAGE_SIZE) != 0); - if (size <= bitmap_size) return; + if (size <= bitmap_size) { + return; + } pages = mem_alloc_ppages(cpu()->as.colors, bitmap_size, false); - if (pages.num_pages != bitmap_size) return; + if (pages.num_pages != bitmap_size) { + return; + } - if ((pool->bitmap = (bitmap_t*)mem_alloc_map(&cpu()->as, SEC_HYP_GLOBAL, &pages, - INVALID_VA, bitmap_size, PTE_HYP_FLAGS)) == NULL) + if ((pool->bitmap = (bitmap_t*)mem_alloc_map(&cpu()->as, SEC_HYP_GLOBAL, &pages, INVALID_VA, + bitmap_size, PTE_HYP_FLAGS)) == NULL) { return; + } memset((void*)pool->bitmap, 0, bitmap_size * PAGE_SIZE); @@ -244,8 +247,8 @@ static void pp_init(struct page_pool *pool, paddr_t base, size_t size) pool->free = pool->size; } -bool mem_vm_img_in_phys_rgn(struct vm_config* vm_config) { - +bool mem_vm_img_in_phys_rgn(struct vm_config* vm_config) +{ bool img_in_rgn = false; for (size_t i = 0; i < vm_config->platform.region_num; i++) { @@ -264,21 +267,22 @@ bool mem_vm_img_in_phys_rgn(struct vm_config* vm_config) { return img_in_rgn; } -bool mem_reserve_physical_memory(struct page_pool *pool) +bool mem_reserve_physical_memory(struct page_pool* pool) { - if (pool == NULL) return false; + if (pool == NULL) { + return false; + } for (size_t i = 0; i < config.vmlist_size; i++) { - struct vm_config *vm_cfg = &config.vmlist[i]; + struct vm_config* vm_cfg = &config.vmlist[i]; size_t n_pg = NUM_PAGES(vm_cfg->image.size); struct ppages ppages = mem_ppages_get(vm_cfg->image.load_addr, n_pg); - // If the vm image is part of a statically allocated region of the same - // vm, we defer the reservation of this memory to when we reserve the - // physical region below. Note that this not allow partial overlaps. If the - // image must be entirely inside a statically allocated region, or - // completely outside of it. This avoid overcamplicating the reservation - // logic while still covering all the useful use cases. + // If the vm image is part of a statically allocated region of the same vm, we defer the + // reservation of this memory to when we reserve the physical region below. Note that this + // not allow partial overlaps. If the image must be entirely inside a statically allocated + // region, or completely outside of it. This avoid overcamplicating the reservation logic + // while still covering all the useful use cases. 
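/*
 * Editor's aside — the all-or-nothing placement rule described in the
 * reservation comment above, sketched: a VM image is deferred to the region
 * reservation only when it lies entirely inside a statically allocated
 * region; partial overlap is not supported. range_in_range() below is my
 * reading of the Bao helper ("range 1 entirely within range 2"), an
 * assumption for illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool range_in_range(uint64_t b1, uint64_t s1, uint64_t b2, uint64_t s2)
{
    return (b1 >= b2) && ((b1 + s1) <= (b2 + s2));
}

int main(void)
{
    uint64_t rgn_base = 0x90000000ULL, rgn_size = 0x01000000ULL; /* 16 MiB region */
    uint64_t img_base = 0x90100000ULL, img_size = 0x00200000ULL; /*  2 MiB image  */

    if (range_in_range(img_base, img_size, rgn_base, rgn_size)) {
        printf("image inside region: defer to the region reservation\n");
    } else {
        printf("image outside region: reserve it on its own\n");
    }
    return 0;
}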
if (mem_vm_img_in_phys_rgn(vm_cfg)) { continue; } @@ -290,10 +294,10 @@ bool mem_reserve_physical_memory(struct page_pool *pool) /* for every vm config */ for (size_t i = 0; i < config.vmlist_size; i++) { - struct vm_config *vm_cfg = &config.vmlist[i]; + struct vm_config* vm_cfg = &config.vmlist[i]; /* for every mem region */ for (size_t j = 0; j < vm_cfg->platform.region_num; j++) { - struct vm_mem_region *reg = &vm_cfg->platform.regions[j]; + struct vm_mem_region* reg = &vm_cfg->platform.regions[j]; if (reg->place_phys) { size_t n_pg = NUM_PAGES(reg->size); struct ppages ppages = mem_ppages_get(reg->phys, n_pg); @@ -305,13 +309,13 @@ bool mem_reserve_physical_memory(struct page_pool *pool) } for (size_t i = 0; i < config.shmemlist_size; i++) { - struct shmem *shmem = &config.shmemlist[i]; - if(shmem->place_phys) { + struct shmem* shmem = &config.shmemlist[i]; + if (shmem->place_phys) { size_t n_pg = NUM_PAGES(shmem->size); struct ppages ppages = mem_ppages_get(shmem->phys, n_pg); if (!mem_reserve_ppool_ppages(pool, &ppages)) { return false; - } + } shmem->phys = ppages.base; } } @@ -319,12 +323,12 @@ bool mem_reserve_physical_memory(struct page_pool *pool) return true; } -bool mem_create_ppools(struct mem_region *root_mem_region) +bool mem_create_ppools(struct mem_region* root_mem_region) { for (size_t i = 0; i < platform.region_num; i++) { if (&platform.regions[i] != root_mem_region) { - struct mem_region *reg = &platform.regions[i]; - struct page_pool *pool = ®->page_pool; + struct mem_region* reg = &platform.regions[i]; + struct page_pool* pool = ®->page_pool; if (pool != NULL) { pp_init(pool, reg->base, reg->size); if (!mem_reserve_physical_memory(pool)) { @@ -338,16 +342,15 @@ bool mem_create_ppools(struct mem_region *root_mem_region) return true; } -struct mem_region *mem_find_root_region(paddr_t load_addr) +struct mem_region* mem_find_root_region(paddr_t load_addr) { size_t image_size = (size_t)(&_image_end - &_image_start); /* Find the root memory region in which the hypervisor was loaded. 
*/ - struct mem_region *root_mem_region = NULL; + struct mem_region* root_mem_region = NULL; for (size_t i = 0; i < platform.region_num; i++) { - struct mem_region *region = &(platform.regions[i]); - bool is_in_rgn = - range_in_range(load_addr, image_size, region->base, region->size); + struct mem_region* region = &(platform.regions[i]); + bool is_in_rgn = range_in_range(load_addr, image_size, region->base, region->size); if (is_in_rgn) { root_mem_region = region; break; @@ -357,8 +360,7 @@ struct mem_region *mem_find_root_region(paddr_t load_addr) return root_mem_region; } -bool mem_setup_root_pool(paddr_t load_addr, - struct mem_region **root_mem_region) +bool mem_setup_root_pool(paddr_t load_addr, struct mem_region** root_mem_region) { *root_mem_region = mem_find_root_region(load_addr); if (*root_mem_region == NULL) { @@ -368,45 +370,46 @@ bool mem_setup_root_pool(paddr_t load_addr, return pp_root_init(load_addr, *root_mem_region); } -__attribute__((weak)) -void mem_color_hypervisor(const paddr_t load_addr, struct mem_region *root_region) +__attribute__((weak)) void mem_color_hypervisor(const paddr_t load_addr, + struct mem_region* root_region) { - WARNING("Trying to color hypervisor, but implementation does not suuport it"); + WARNING("Trying to color hypervisor, but implementation does not suuport " + "it"); } -__attribute__((weak)) -bool mem_map_reclr(struct addr_space *as, vaddr_t va, struct ppages *ppages, - size_t num_pages, mem_flags_t flags) { +__attribute__((weak)) bool mem_map_reclr(struct addr_space* as, vaddr_t va, struct ppages* ppages, + size_t num_pages, mem_flags_t flags) +{ ERROR("Trying to recolor section but there is no coloring implementation"); } -__attribute__((weak)) -bool pp_alloc_clr(struct page_pool *pool, size_t num_pages, colormap_t colors, - struct ppages *ppages) +__attribute__((weak)) bool pp_alloc_clr(struct page_pool* pool, size_t num_pages, colormap_t colors, + struct ppages* ppages) { - ERROR("Trying to allocate colored pages but there is no coloring implementation"); + ERROR("Trying to allocate colored pages but there is no coloring " + "implementation"); } struct ppages mem_alloc_ppages(colormap_t colors, size_t num_pages, bool aligned) { - struct ppages pages = {.num_pages = 0}; - - list_foreach(page_pool_list, struct page_pool, pool) - { - bool ok = (!all_clrs(colors) && !aligned) - ? pp_alloc_clr(pool, num_pages, colors, &pages) - : pp_alloc(pool, num_pages, aligned, &pages); - if (ok) break; + struct ppages pages = { .num_pages = 0 }; + + list_foreach (page_pool_list, struct page_pool, pool) { + bool ok = (!all_clrs(colors) && !aligned) ? pp_alloc_clr(pool, num_pages, colors, &pages) : + pp_alloc(pool, num_pages, aligned, &pages); + if (ok) { + break; + } } return pages; } void mem_init(paddr_t load_addr) -{ +{ mem_prot_init(); - static struct mem_region *root_mem_region = NULL; + static struct mem_region* root_mem_region = NULL; if (cpu()->id == CPU_MASTER) { cache_enumerate(); diff --git a/src/core/mmu/inc/mem_prot/io.h b/src/core/mmu/inc/mem_prot/io.h index 7a2bc6c4c..fd88095ee 100644 --- a/src/core/mmu/inc/mem_prot/io.h +++ b/src/core/mmu/inc/mem_prot/io.h @@ -18,7 +18,7 @@ struct io_prot { /* Must be implemented by architecture. 
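/*
 * Editor's aside — the weak-symbol fallback pattern used for the coloring
 * hooks above, in miniature: a build without a coloring implementation links
 * the weak stub, while a platform that provides a strong definition of the
 * same symbol overrides it at link time (GCC/Clang attribute on ELF targets).
 * The void signature here is a simplification.
 */
#include <stdio.h>

__attribute__((weak)) void mem_color_hypervisor(void)
{
    printf("warning: coloring requested but not supported in this build\n");
}

int main(void)
{
    mem_color_hypervisor(); /* runs the weak stub unless a strong definition exists */
    return 0;
}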
*/ bool iommu_arch_init(); -bool iommu_arch_vm_init(struct vm *vm, const struct vm_config *config); -bool iommu_arch_vm_add_device(struct vm *vm, deviceid_t id); +bool iommu_arch_vm_init(struct vm* vm, const struct vm_config* config); +bool iommu_arch_vm_add_device(struct vm* vm, deviceid_t id); #endif /* MEM_PROT_IO_H */ diff --git a/src/core/mmu/inc/mem_prot/mem.h b/src/core/mmu/inc/mem_prot/mem.h index 579581294..ddf2c8a17 100644 --- a/src/core/mmu/inc/mem_prot/mem.h +++ b/src/core/mmu/inc/mem_prot/mem.h @@ -11,7 +11,7 @@ #include #include -#define HYP_ASID 0 +#define HYP_ASID 0 struct addr_space { struct page_table pt; enum AS_TYPE type; @@ -23,9 +23,7 @@ enum AS_SEC; typedef pte_t mem_flags_t; -void as_init(struct addr_space* as, enum AS_TYPE type, asid_t id, - pte_t* root_pt, colormap_t colors); -vaddr_t mem_alloc_vpage(struct addr_space* as, enum AS_SEC section, - vaddr_t at, size_t n); +void as_init(struct addr_space* as, enum AS_TYPE type, asid_t id, pte_t* root_pt, colormap_t colors); +vaddr_t mem_alloc_vpage(struct addr_space* as, enum AS_SEC section, vaddr_t at, size_t n); #endif /* __MEM_PROT_H__ */ diff --git a/src/core/mmu/inc/mem_prot/vmm.h b/src/core/mmu/inc/mem_prot/vmm.h index 06cf4433c..e912f04ed 100644 --- a/src/core/mmu/inc/mem_prot/vmm.h +++ b/src/core/mmu/inc/mem_prot/vmm.h @@ -1,5 +1,5 @@ /** - * SPDX-License-Identifier: Apache-2.0 + * SPDX-License-Identifier: Apache-2.0 * Copyright (c) Bao Project and Contributors. All rights reserved */ diff --git a/src/core/mmu/io.c b/src/core/mmu/io.c index 449450e83..15b8f147f 100644 --- a/src/core/mmu/io.c +++ b/src/core/mmu/io.c @@ -17,23 +17,21 @@ void io_init() } /* Configure architecture dependent stuff. */ -bool io_vm_init(struct vm *vm, const struct vm_config *config) +bool io_vm_init(struct vm* vm, const struct vm_config* config) { return iommu_arch_vm_init(vm, config); } /* Allows vms to add devices to their address space. */ -bool io_vm_add_device(struct vm *vm, deviceid_t dev_id) +bool io_vm_add_device(struct vm* vm, deviceid_t dev_id) { bool res = false; /* - * If dev_id == 0 assume global mask includes - * the relevant devices for this VM. + * If dev_id == 0 assume global mask includes the relevant devices for this VM. * * Assume there's no device id = 0 */ if (dev_id != 0) { - /* Stream id is valid. Match this device with this VM specifically. */ res = iommu_arch_vm_add_device(vm, dev_id); } diff --git a/src/core/mmu/mem.c b/src/core/mmu/mem.c index 84dc9a4c2..1473d2ad7 100644 --- a/src/core/mmu/mem.c +++ b/src/core/mmu/mem.c @@ -16,15 +16,14 @@ #include #include -extern uint8_t _image_start, _image_load_end, _image_end, _dmem_phys_beg, - _dmem_beg, _cpu_private_beg, _cpu_private_end, _vm_beg, _vm_end, - _vm_image_start, _vm_image_end; +extern uint8_t _image_start, _image_load_end, _image_end, _dmem_phys_beg, _dmem_beg, + _cpu_private_beg, _cpu_private_end, _vm_beg, _vm_end, _vm_image_start, _vm_image_end; -void switch_space(struct cpu *, paddr_t); +void switch_space(struct cpu*, paddr_t); /** - * An important note about sections its that they must have diferent entries - * at the root page table. + * An important note about sections its that they must have diferent entries at the root page + * table. 
*/ struct section { @@ -35,27 +34,27 @@ struct section { }; struct section hyp_secs[] = { - [SEC_HYP_GLOBAL] = {(vaddr_t)&_dmem_beg, (vaddr_t)&_cpu_private_beg - 1, true, - SPINLOCK_INITVAL}, - [SEC_HYP_IMAGE] = {(vaddr_t)&_image_start, (vaddr_t)&_image_end - 1, true, SPINLOCK_INITVAL}, - [SEC_HYP_PRIVATE] = {(vaddr_t)&_cpu_private_beg, (vaddr_t)&_cpu_private_end - 1, false, - SPINLOCK_INITVAL}, - [SEC_HYP_VM] = {(vaddr_t)&_vm_beg, (vaddr_t)&_vm_end - 1, true, SPINLOCK_INITVAL}, + [SEC_HYP_GLOBAL] = { (vaddr_t)&_dmem_beg, (vaddr_t)&_cpu_private_beg - 1, true, + SPINLOCK_INITVAL }, + [SEC_HYP_IMAGE] = { (vaddr_t)&_image_start, (vaddr_t)&_image_end - 1, true, SPINLOCK_INITVAL }, + [SEC_HYP_PRIVATE] = { (vaddr_t)&_cpu_private_beg, (vaddr_t)&_cpu_private_end - 1, false, + SPINLOCK_INITVAL }, + [SEC_HYP_VM] = { (vaddr_t)&_vm_beg, (vaddr_t)&_vm_end - 1, true, SPINLOCK_INITVAL }, }; -struct section vm_secs[] = { - [SEC_VM_ANY] = {0x0, MAX_VA, false, SPINLOCK_INITVAL}}; +struct section vm_secs[] = { [SEC_VM_ANY] = { 0x0, MAX_VA, false, SPINLOCK_INITVAL } }; struct { - struct section *sec; + struct section* sec; size_t sec_size; } sections[] = { - [AS_HYP] = {hyp_secs, sizeof(hyp_secs) / sizeof(struct section)}, - [AS_HYP_CPY] = {hyp_secs, sizeof(hyp_secs) / sizeof(struct section)}, - [AS_VM] = {vm_secs, sizeof(vm_secs) / sizeof(struct section)}, + [AS_HYP] = { hyp_secs, sizeof(hyp_secs) / sizeof(struct section) }, + [AS_HYP_CPY] = { hyp_secs, sizeof(hyp_secs) / sizeof(struct section) }, + [AS_VM] = { vm_secs, sizeof(vm_secs) / sizeof(struct section) }, }; -size_t mem_cpu_boot_alloc_size() { +size_t mem_cpu_boot_alloc_size() +{ size_t size = ALIGN(sizeof(struct cpu), PAGE_SIZE); for (size_t i = 0; i < cpu()->as.pt.dscr->lvls; i++) { size += ALIGN(pt_size(&cpu()->as.pt, i), PAGE_SIZE); @@ -68,16 +67,16 @@ static inline size_t pp_next_clr(paddr_t base, size_t from, colormap_t colors) size_t clr_offset = (base / PAGE_SIZE) % (COLOR_NUM * COLOR_SIZE); size_t index = from; - while (!((colors >> ((index + clr_offset) / COLOR_SIZE % COLOR_NUM)) & 1)) + while (!((colors >> ((index + clr_offset) / COLOR_SIZE % COLOR_NUM)) & 1)) { index++; + } return index; } -static void mem_free_ppages(struct ppages *ppages) +static void mem_free_ppages(struct ppages* ppages) { - list_foreach(page_pool_list, struct page_pool, pool) - { + list_foreach (page_pool_list, struct page_pool, pool) { spin_lock(&pool->lock); if (in_range(ppages->base, pool->base, pool->size * PAGE_SIZE)) { size_t index = (ppages->base - pool->base) / PAGE_SIZE; @@ -94,8 +93,7 @@ static void mem_free_ppages(struct ppages *ppages) } } -bool pp_alloc_clr(struct page_pool *pool, size_t n, colormap_t colors, - struct ppages *ppages) +bool pp_alloc_clr(struct page_pool* pool, size_t n, colormap_t colors, struct ppages* ppages) { size_t allocated = 0; @@ -108,16 +106,15 @@ bool pp_alloc_clr(struct page_pool *pool, size_t n, colormap_t colors, spin_lock(&pool->lock); /** - * Lets start the search at the first available color after the last - * known free position to the top of the pool. + * Lets start the search at the first available color after the last known free position to the + * top of the pool. */ size_t index = pp_next_clr(pool->base, pool->last, colors); size_t top = pool->size; /** - * Two iterations. One starting from the last known free page, - * other starting from the beggining of page pool to the start of the - * previous iteration. + * Two iterations. 
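/*
 * Editor's aside — the color arithmetic behind pp_next_clr, runnable: the
 * color of page index i (relative to the pool base) is
 * ((i + clr_offset) / COLOR_SIZE) % COLOR_NUM, and the search advances until
 * that color's bit is set in the allowed map. The COLOR_NUM/COLOR_SIZE values
 * are demo assumptions.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define COLOR_NUM  4
#define COLOR_SIZE 1

typedef uint64_t colormap_t;

static size_t pp_next_clr(size_t clr_offset, size_t from, colormap_t colors)
{
    size_t index = from;
    while (!((colors >> (((index + clr_offset) / COLOR_SIZE) % COLOR_NUM)) & 1)) {
        index++;
    }
    return index;
}

int main(void)
{
    colormap_t colors = 0xA; /* only colors 1 and 3 are allowed */
    for (size_t i = pp_next_clr(0, 0, colors); i < 8; i = pp_next_clr(0, i + 1, colors)) {
        printf("index %zu is on an allowed color\n", i); /* prints 1, 3, 5, 7 */
    }
    return 0;
}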
One starting from the last known free page, other starting from the + * beggining of page pool to the start of the previous iteration. */ for (size_t i = 0; i < 2 && !ok; i++) { while ((allocated < n) && (index < top)) { @@ -130,12 +127,10 @@ bool pp_alloc_clr(struct page_pool *pool, size_t n, colormap_t colors, first_index = index; /** - * Count the number of free pages contigous on the target - * color segement until n pages are found or we reach top page - * of the search. + * Count the number of free pages contigous on the target color segement until n pages + * are found or we reach top page of the search. */ - while ((index < top) && (bitmap_get(pool->bitmap, index) == 0) && - (allocated < n)) { + while ((index < top) && (bitmap_get(pool->bitmap, index) == 0) && (allocated < n)) { allocated++; index = pp_next_clr(pool->base, ++index, colors); } @@ -145,9 +140,8 @@ bool pp_alloc_clr(struct page_pool *pool, size_t n, colormap_t colors, if (allocated == n) { /** - * We've found n contigous free pages that fit the color pattern, - * Fill the output ppage arg, mark the pages as allocated and - * update page pool internal state. + * We've found n contigous free pages that fit the color pattern, Fill the output ppage + * arg, mark the pages as allocated and update page pool internal state. */ ppages->num_pages = n; ppages->base = pool->base + (first_index * PAGE_SIZE); @@ -161,9 +155,8 @@ bool pp_alloc_clr(struct page_pool *pool, size_t n, colormap_t colors, break; } else { /** - * If this is the first iteration, setup index and top to search - * from base of the page pool until the previous iteration start - * point + * If this is the first iteration, setup index and top to search from base of the page + * pool until the previous iteration start point */ index = 0; } @@ -174,12 +167,10 @@ bool pp_alloc_clr(struct page_pool *pool, size_t n, colormap_t colors, return ok; } - -static struct section *mem_find_sec(struct addr_space *as, vaddr_t va) +static struct section* mem_find_sec(struct addr_space* as, vaddr_t va) { for (size_t i = 0; i < sections[as->type].sec_size; i++) { - if ((va >= sections[as->type].sec[i].beg) && - (va <= sections[as->type].sec[i].end)) { + if ((va >= sections[as->type].sec[i].beg) && (va <= sections[as->type].sec[i].end)) { return §ions[as->type].sec[i]; } } @@ -187,40 +178,39 @@ static struct section *mem_find_sec(struct addr_space *as, vaddr_t va) return NULL; } -static inline bool pte_allocable(struct addr_space *as, pte_t *pte, size_t lvl, - size_t left, vaddr_t addr) +static inline bool pte_allocable(struct addr_space* as, pte_t* pte, size_t lvl, size_t left, + vaddr_t addr) { return (lvl == (as->pt.dscr->lvls - 1)) || - (pt_lvl_terminal(&as->pt, lvl) && !pte_valid(pte) && + (pt_lvl_terminal(&as->pt, lvl) && !pte_valid(pte) && (pt_lvlsize(&as->pt, lvl) <= (left * PAGE_SIZE)) && ((addr % pt_lvlsize(&as->pt, lvl)) == 0)); } -static inline pte_t *mem_alloc_pt(struct addr_space *as, pte_t *parent, size_t lvl, - vaddr_t addr) +static inline pte_t* mem_alloc_pt(struct addr_space* as, pte_t* parent, size_t lvl, vaddr_t addr) { /* Must have lock on as and va section to call */ size_t ptsize = NUM_PAGES(pt_size(&as->pt, lvl + 1)); struct ppages ppage = mem_alloc_ppages(as->colors, ptsize, ptsize > 1 ? 
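/*
 * Editor's aside — the "two iterations" search strategy shared by pp_alloc
 * and pp_alloc_clr, stripped to its core as a sketch: scan from the last
 * allocation point to the top of the pool, then wrap around and scan from the
 * bottom up to where the first pass began.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

int main(void)
{
    bool used[8] = { true, true, false, true, true, true, true, true };
    size_t last = 4; /* where the previous allocation left off */
    size_t found = (size_t)-1;

    size_t start = last, top = 8;
    for (int pass = 0; pass < 2 && found == (size_t)-1; pass++) {
        for (size_t i = start; i < top; i++) {
            if (!used[i]) {
                found = i;
                break;
            }
        }
        top = start; /* second pass covers [0, start) */
        start = 0;
    }
    printf("free slot: %zu\n", found); /* finds slot 2 on the second pass */
    return 0;
}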
true : false); - if (ppage.num_pages == 0) return NULL; + if (ppage.num_pages == 0) { + return NULL; + } pte_t pte_dflt_val = PTE_INVALID | (*parent & PTE_RSW_MSK); pte_set(parent, ppage.base, PTE_TABLE, PTE_HYP_FLAGS); fence_sync_write(); - pte_t *temp_pt = pt_get(&as->pt, lvl + 1, addr); + pte_t* temp_pt = pt_get(&as->pt, lvl + 1, addr); for (size_t i = 0; i < pt_nentries(&as->pt, lvl + 1); i++) { temp_pt[i] = pte_dflt_val; } return temp_pt; } -static inline bool pt_pte_mappable(struct addr_space *as, pte_t *pte, size_t lvl, - size_t left, vaddr_t vaddr, - paddr_t paddr) +static inline bool pt_pte_mappable(struct addr_space* as, pte_t* pte, size_t lvl, size_t left, + vaddr_t vaddr, paddr_t paddr) { - return !pte_valid(pte) && - (pt_lvlsize(&as->pt, lvl) <= (left * PAGE_SIZE)) && - (((size_t)vaddr % pt_lvlsize(&as->pt, lvl)) == 0) && - ((paddr % pt_lvlsize(&as->pt, lvl)) == 0); + return !pte_valid(pte) && (pt_lvlsize(&as->pt, lvl) <= (left * PAGE_SIZE)) && + (((size_t)vaddr % pt_lvlsize(&as->pt, lvl)) == 0) && + ((paddr % pt_lvlsize(&as->pt, lvl)) == 0); } static inline pte_type_t pt_page_type(struct page_table* pt, size_t lvl) @@ -228,7 +218,7 @@ static inline pte_type_t pt_page_type(struct page_table* pt, size_t lvl) return (lvl == pt->dscr->lvls - 1) ? PTE_PAGE : PTE_SUPERPAGE; } -static void mem_expand_pte(struct addr_space *as, vaddr_t va, size_t lvl) +static void mem_expand_pte(struct addr_space* as, vaddr_t va, size_t lvl) { /* Must have lock on as and va section to call */ @@ -237,37 +227,33 @@ static void mem_expand_pte(struct addr_space *as, vaddr_t va, size_t lvl) return; } - pte_t *pte = pt_get_pte(&as->pt, lvl, va); + pte_t* pte = pt_get_pte(&as->pt, lvl, va); /** - * only can expand if the pte exists and it isnt pointing to - * a next level table already. + * only can expand if the pte exists and it isnt pointing to a next level table already. */ if (pte != NULL && !pte_table(&as->pt, pte, lvl)) { - pte_t pte_val = *pte; // save the original pte + pte_t pte_val = *pte; // save the original pte bool rsv = pte_check_rsw(pte, PTE_RSW_RSRV); bool vld = pte_valid(pte); pte = mem_alloc_pt(as, pte, lvl, va); if (vld || rsv) { /** - * If this was valid before and it wasn't a table, it must - * have been a superpage, so fill the new expanded table to - * have the same mappings; + * If this was valid before and it wasn't a table, it must have been a superpage, so + * fill the new expanded table to have the same mappings; */ /** - * Invalidate the old TLB entries with superpage entries. - * This means that from now on to the end of the function, - * the original spaced mapped by the entry will be unmaped. - * Therefore this function cannot be call on the entry mapping - * hypervisor code or data used in it (including stack). + * Invalidate the old TLB entries with superpage entries. This means that from now on + * to the end of the function, the original spaced mapped by the entry will be unmaped. + * Therefore this function cannot be call on the entry mapping hypervisor code or data + * used in it (including stack). */ tlb_inv_va(&cpu()->as, va); /** - * Now traverse the new next level page table to replicate the - * original mapping. + * Now traverse the new next level page table to replicate the original mapping. 
*/ lvl++; @@ -276,14 +262,14 @@ static void mem_expand_pte(struct addr_space *as, vaddr_t va, size_t lvl) size_t nentries = pt_nentries(&as->pt, lvl); size_t lvlsz = pt_lvlsize(&as->pt, lvl); pte_type_t type = pt_page_type(&as->pt, lvl); - pte_flags_t flags = - (as->type == AS_HYP ? PTE_HYP_FLAGS : PTE_VM_FLAGS); + pte_flags_t flags = (as->type == AS_HYP ? PTE_HYP_FLAGS : PTE_VM_FLAGS); while (entry < nentries) { - if (vld) + if (vld) { pte_set(pte, paddr, type, flags); - else if (rsv) + } else if (rsv) { pte_set_rsw(pte, PTE_RSW_RSRV); + } pte++; entry++; paddr += lvlsz; @@ -294,13 +280,13 @@ static void mem_expand_pte(struct addr_space *as, vaddr_t va, size_t lvl) } } -static void mem_inflate_pt(struct addr_space *as, vaddr_t va, size_t length) +static void mem_inflate_pt(struct addr_space* as, vaddr_t va, size_t length) { /* Must have lock on as and va section to call */ /** - * For each level in the pt, expand each entry in the specified range - * as a next level page table. + * For each level in the pt, expand each entry in the specified range as a next level page + * table. */ for (size_t lvl = 0; lvl < as->pt.dscr->lvls - 1; lvl++) { vaddr_t vaddr = va; @@ -312,8 +298,7 @@ static void mem_inflate_pt(struct addr_space *as, vaddr_t va, size_t length) } } -vaddr_t mem_alloc_vpage(struct addr_space *as, enum AS_SEC section, - vaddr_t at, size_t n) +vaddr_t mem_alloc_vpage(struct addr_space* as, enum AS_SEC section, vaddr_t at, size_t n) { size_t lvl = 0; size_t entry = 0; @@ -323,13 +308,15 @@ vaddr_t mem_alloc_vpage(struct addr_space *as, enum AS_SEC section, vaddr_t addr = INVALID_VA; vaddr_t vpage = INVALID_VA; vaddr_t top = MAX_VA; - pte_t *pte = NULL; + pte_t* pte = NULL; bool failed = false; // TODO: maybe some bound checking here would be nice - struct section *sec = §ions[as->type].sec[section]; + struct section* sec = §ions[as->type].sec[section]; if (at != INVALID_VA) { - if (sec != mem_find_sec(as, at)) return INVALID_VA; + if (sec != mem_find_sec(as, at)) { + return INVALID_VA; + } addr = at; } else { addr = sec->beg; @@ -339,16 +326,18 @@ vaddr_t mem_alloc_vpage(struct addr_space *as, enum AS_SEC section, if (addr > top || !IS_ALIGNED(addr, PAGE_SIZE)) { return INVALID_VA; } - + spin_lock(&as->lock); - if (sec->shared) spin_lock(&sec->lock); + if (sec->shared) { + spin_lock(&sec->lock); + } while (count < n && !failed) { - // Check if there is still enough space in the address space. - // The corner case of top being the highest address in the address - // space and the target address being 0 is handled separate - size_t full_as = (addr == 0) && (top == MAX_VA); - if (!full_as && (((top+1-addr)/PAGE_SIZE) < n)) { + // Check if there is still enough space in the address space. 
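/*
 * Editor's aside — the replication step of mem_expand_pte, sketched: when a
 * valid superpage is broken into a next-level table, every child entry maps a
 * slice of the original range, stepping the physical address by the child
 * level size. The 8-entry table and 2 MiB granule are assumptions, not Bao's
 * real page-table geometry.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NENTRIES 8

int main(void)
{
    uint64_t super_paddr = 0x40000000ULL; /* base mapped by the old superpage */
    uint64_t lvlsz = 0x200000ULL;         /* child granule (2 MiB) */
    uint64_t child_pte[NENTRIES];

    uint64_t paddr = super_paddr;
    for (size_t entry = 0; entry < NENTRIES; entry++) {
        child_pte[entry] = paddr; /* pte_set(pte, paddr, type, flags) in Bao */
        paddr += lvlsz;
    }
    printf("entry 3 maps %#llx\n", (unsigned long long)child_pte[3]);
    return 0;
}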
The corner case of top being + // the highest address in the address space and the target address being 0 is handled + // separate + size_t full_as = (addr == 0) && (top == MAX_VA); + if (!full_as && (((top + 1 - addr) / PAGE_SIZE) < n)) { vpage = INVALID_VA; failed = true; break; @@ -360,17 +349,19 @@ vaddr_t mem_alloc_vpage(struct addr_space *as, enum AS_SEC section, lvlsze = pt_lvlsize(&as->pt, lvl); while ((entry < nentries) && (count < n) && !failed) { - if(pte_check_rsw(pte, PTE_RSW_RSRV) || - (pte_valid(pte) && !pte_table(&as->pt, pte, lvl))) { + if (pte_check_rsw(pte, PTE_RSW_RSRV) || + (pte_valid(pte) && !pte_table(&as->pt, pte, lvl))) { count = 0; vpage = INVALID_VA; if (at != INVALID_VA) { failed = true; break; } - } else if(!pte_valid(pte)) { - if(pte_allocable(as, pte, lvl, n - count, addr)) { - if (count == 0) vpage = addr; + } else if (!pte_valid(pte)) { + if (pte_allocable(as, pte, lvl, n - count, addr)) { + if (count == 0) { + vpage = addr; + } count += (lvlsze / PAGE_SIZE); } else { if (mem_alloc_pt(as, pte, lvl, addr) == NULL) { @@ -379,7 +370,7 @@ vaddr_t mem_alloc_vpage(struct addr_space *as, enum AS_SEC section, } } - if(pte_table(&as->pt, pte, lvl)) { + if (pte_table(&as->pt, pte, lvl)) { lvl++; break; } else { @@ -389,7 +380,7 @@ vaddr_t mem_alloc_vpage(struct addr_space *as, enum AS_SEC section, lvl = 0; break; } - } + } } } @@ -401,7 +392,9 @@ vaddr_t mem_alloc_vpage(struct addr_space *as, enum AS_SEC section, while (count < n) { for (lvl = 0; lvl < as->pt.dscr->lvls; lvl++) { pte = pt_get_pte(&as->pt, lvl, addr); - if (!pte_valid(pte)) break; + if (!pte_valid(pte)) { + break; + } } pte_set_rsw(pte, PTE_RSW_RSRV); addr += pt_lvlsize(&as->pt, lvl); @@ -409,15 +402,16 @@ vaddr_t mem_alloc_vpage(struct addr_space *as, enum AS_SEC section, } } - if (sec->shared) spin_unlock(&sec->lock); + if (sec->shared) { + spin_unlock(&sec->lock); + } spin_unlock(&as->lock); return vpage; } -void mem_unmap(struct addr_space *as, vaddr_t at, size_t num_pages, - bool free_ppages) +void mem_unmap(struct addr_space* as, vaddr_t at, size_t num_pages, bool free_ppages) { vaddr_t vaddr = at; vaddr_t top = at + (num_pages * PAGE_SIZE); @@ -425,11 +419,13 @@ void mem_unmap(struct addr_space *as, vaddr_t at, size_t num_pages, spin_lock(&as->lock); - struct section *sec = mem_find_sec(as, at); - if (sec->shared) spin_lock(&sec->lock); + struct section* sec = mem_find_sec(as, at); + if (sec->shared) { + spin_lock(&sec->lock); + } while (vaddr < top) { - pte_t *pte = pt_get_pte(&as->pt, lvl, vaddr); + pte_t* pte = pt_get_pte(&as->pt, lvl, vaddr); if (pte == NULL) { ERROR("invalid pte while freeing vpages"); } else if (!pte_valid(pte)) { @@ -454,8 +450,7 @@ void mem_unmap(struct addr_space *as, vaddr_t at, size_t num_pages, if (free_ppages) { paddr_t paddr = pte_addr(pte); - struct ppages ppages = - mem_ppages_get(paddr, lvlsz / PAGE_SIZE); + struct ppages ppages = mem_ppages_get(paddr, lvlsz / PAGE_SIZE); mem_free_ppages(&ppages); } @@ -475,35 +470,38 @@ void mem_unmap(struct addr_space *as, vaddr_t at, size_t num_pages, } /** - * TODO: check if the current pt is now empty and if so, - * free it too up to the root. + * TODO: check if the current pt is now empty and if so, free it too up to the root. 
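/*
 * Editor's aside — the software-reserved bit idiom from mem_alloc_vpage, as a
 * sketch: an invalid PTE tagged with an RSW bit marks its virtual range as
 * spoken for, so later allocation searches skip it. The bit positions below
 * are assumptions, not Bao's real PTE encoding.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_VALID    (1ULL << 0)
#define PTE_RSW_RSRV (1ULL << 58) /* hypothetical reserved-for-software bit */

typedef uint64_t pte_t;

static bool pte_free_for_alloc(pte_t pte)
{
    return !(pte & PTE_VALID) && !(pte & PTE_RSW_RSRV);
}

int main(void)
{
    pte_t pt[4] = { 0 };
    pt[1] |= PTE_RSW_RSRV; /* reserve the second page without mapping it */

    for (size_t i = 0; i < 4; i++) {
        printf("pte %zu allocatable: %d\n", i, pte_free_for_alloc(pt[i]));
    }
    return 0;
}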
*/ } } - if (sec->shared) spin_unlock(&sec->lock); + if (sec->shared) { + spin_unlock(&sec->lock); + } spin_unlock(&as->lock); } -bool mem_map(struct addr_space *as, vaddr_t va, struct ppages *ppages, - size_t num_pages, mem_flags_t flags) +bool mem_map(struct addr_space* as, vaddr_t va, struct ppages* ppages, size_t num_pages, + mem_flags_t flags) { size_t count = 0; - pte_t *pte = NULL; + pte_t* pte = NULL; vaddr_t vaddr = va & ~(PAGE_SIZE - 1); - struct section *sec = mem_find_sec(as, vaddr); + struct section* sec = mem_find_sec(as, vaddr); - if ((sec == NULL) || (sec != mem_find_sec(as, vaddr + num_pages * PAGE_SIZE - 1))) + if ((sec == NULL) || (sec != mem_find_sec(as, vaddr + num_pages * PAGE_SIZE - 1))) { return false; + } spin_lock(&as->lock); - if (sec->shared) spin_lock(&sec->lock); + if (sec->shared) { + spin_lock(&sec->lock); + } /** - * TODO check if entry is reserved. Unrolling mapping if something - * goes wrong. + * TODO check if entry is reserved. Unrolling mapping if something goes wrong. */ struct ppages temp_ppages; @@ -533,8 +531,8 @@ bool mem_map(struct addr_space *as, vaddr_t va, struct ppages *ppages, for (lvl = 0; lvl < as->pt.dscr->lvls; lvl++) { pte = pt_get_pte(&as->pt, lvl, vaddr); if (pt_lvl_terminal(&as->pt, lvl)) { - if (pt_pte_mappable(as, pte, lvl, num_pages - count, - vaddr, ppages ? paddr : 0)) { + if (pt_pte_mappable(as, pte, lvl, num_pages - count, vaddr, + ppages ? paddr : 0)) { break; } else if (!pte_valid(pte)) { mem_alloc_pt(as, pte, lvl, vaddr); @@ -549,10 +547,9 @@ bool mem_map(struct addr_space *as, vaddr_t va, struct ppages *ppages, size_t lvlsz = pt_lvlsize(&as->pt, lvl); while ((entry < nentries) && (count < num_pages) && - (num_pages - count >= lvlsz / PAGE_SIZE)) { + (num_pages - count >= lvlsz / PAGE_SIZE)) { if (ppages == NULL) { - struct ppages temp = - mem_alloc_ppages(as->colors, lvlsz / PAGE_SIZE, true); + struct ppages temp = mem_alloc_ppages(as->colors, lvlsz / PAGE_SIZE, true); if (temp.num_pages < lvlsz / PAGE_SIZE) { if (lvl == (as->pt.dscr->lvls - 1)) { // TODO: free previously allocated pages @@ -588,37 +585,35 @@ bool mem_map(struct addr_space *as, vaddr_t va, struct ppages *ppages, return true; } -bool mem_map_reclr(struct addr_space *as, vaddr_t va, struct ppages *ppages, - size_t num_pages, mem_flags_t flags) +bool mem_map_reclr(struct addr_space* as, vaddr_t va, struct ppages* ppages, size_t num_pages, + mem_flags_t flags) { if (ppages == NULL) { ERROR("no indication on what to recolor"); } /** - * Count how many pages are not colored in original images. - * Allocate the necessary colored pages. - * Mapped onto hypervisor address space. + * Count how many pages are not colored in original images. Allocate the necessary colored + * pages. Mapped onto hypervisor address space. */ - size_t reclrd_num = - num_pages / (COLOR_NUM * COLOR_SIZE) * COLOR_SIZE * + size_t reclrd_num = num_pages / (COLOR_NUM * COLOR_SIZE) * COLOR_SIZE * bit_count(~(as->colors & BIT_MASK(0, COLOR_NUM))); size_t clr_offset = (ppages->base / PAGE_SIZE) % (COLOR_NUM * COLOR_SIZE); for (size_t i = 0; i < (num_pages % (COLOR_NUM * COLOR_SIZE)); i++) { - if (!bit_get(as->colors, (i + clr_offset) / COLOR_SIZE % COLOR_NUM)) + if (!bit_get(as->colors, (i + clr_offset) / COLOR_SIZE % COLOR_NUM)) { reclrd_num++; + } } - /** - * If the address space was not assigned any specific color, - * or there are no pages to recolor defer to vanilla mapping. 
+ /** + * If the address space was not assigned any specific color, or there are no pages to recolor + * defer to vanilla mapping. */ if (all_clrs(as->colors) || (reclrd_num == 0)) { return mem_map(as, va, ppages, num_pages, flags); } - vaddr_t reclrd_va_base = - mem_alloc_vpage(&cpu()->as, SEC_HYP_VM, INVALID_VA, reclrd_num); + vaddr_t reclrd_va_base = mem_alloc_vpage(&cpu()->as, SEC_HYP_VM, INVALID_VA, reclrd_num); struct ppages reclrd_ppages = mem_alloc_ppages(as->colors, reclrd_num, false); mem_map(&cpu()->as, reclrd_va_base, &reclrd_ppages, reclrd_num, PTE_HYP_FLAGS); @@ -628,7 +623,7 @@ bool mem_map_reclr(struct addr_space *as, vaddr_t va, struct ppages *ppages, vaddr_t phys_va_base = mem_alloc_vpage(&cpu()->as, SEC_HYP_VM, INVALID_VA, num_pages); mem_map(&cpu()->as, phys_va_base, ppages, num_pages, PTE_HYP_FLAGS); - pte_t *pte = NULL; + pte_t* pte = NULL; vaddr_t vaddr = va & ~(PAGE_SIZE - 1); paddr_t paddr = ppages->base; vaddr_t clrd_vaddr = reclrd_va_base; @@ -636,8 +631,8 @@ bool mem_map_reclr(struct addr_space *as, vaddr_t va, struct ppages *ppages, size_t index = 0; /** - * Inflate reserved page tables to the last level. This assumes - * coloring always needs the finest grained mapping possible. + * Inflate reserved page tables to the last level. This assumes coloring always needs the + * finest grained mapping possible. */ mem_inflate_pt(as, vaddr, num_pages * PAGE_SIZE); @@ -645,8 +640,8 @@ bool mem_map_reclr(struct addr_space *as, vaddr_t va, struct ppages *ppages, pte = pt_get_pte(&as->pt, as->pt.dscr->lvls - 1, vaddr); /** - * If image page is already color, just map it. - * Otherwise first copy it to the previously allocated pages. + * If image page is already color, just map it. Otherwise first copy it to the previously + * allocated pages. */ if (bit_get(as->colors, ((i + clr_offset) / COLOR_SIZE % COLOR_NUM))) { pte_set(pte, paddr, PTE_PAGE, flags); @@ -665,19 +660,17 @@ bool mem_map_reclr(struct addr_space *as, vaddr_t va, struct ppages *ppages, } /** - * Flush the newly allocated colored pages to which parts of the - * image was copied, and might stayed in the cache system. + * Flush the newly allocated colored pages to which parts of the image was copied, and might + * stayed in the cache system. */ cache_flush_range(reclrd_va_base, reclrd_num * PAGE_SIZE); /** * Free the uncolored pages of the original image. 
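/*
 * Editor's aside — the recolor count at the top of mem_map_reclr, made
 * runnable: full color periods contribute COLOR_SIZE pages per color the
 * address space does not own, then the tail is walked page by page.
 * bit_count() is reimplemented here and the constants are demo assumptions.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define COLOR_NUM  4
#define COLOR_SIZE 1

static unsigned bit_count(uint64_t v)
{
    unsigned n = 0;
    for (; v != 0; v >>= 1) {
        n += (unsigned)(v & 1);
    }
    return n;
}

int main(void)
{
    uint64_t colors = 0x3; /* the VM owns colors 0 and 1 out of 4 */
    size_t num_pages = 10, clr_offset = 0;

    size_t reclrd_num =
        num_pages / (COLOR_NUM * COLOR_SIZE) * COLOR_SIZE * bit_count(~colors & 0xF);
    for (size_t i = 0; i < num_pages % (COLOR_NUM * COLOR_SIZE); i++) {
        if (!((colors >> (((i + clr_offset) / COLOR_SIZE) % COLOR_NUM)) & 1)) {
            reclrd_num++;
        }
    }
    printf("%zu of %zu pages must be recolored\n", reclrd_num, num_pages); /* 4 of 10 */
    return 0;
}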
*/ - struct ppages unused_pages = { - .base = ppages->base, + struct ppages unused_pages = { .base = ppages->base, .num_pages = reclrd_num, - .colors = ~as->colors - }; + .colors = ~as->colors }; mem_free_ppages(&unused_pages); mem_unmap(&cpu()->as, reclrd_va_base, reclrd_num, false); @@ -686,9 +679,9 @@ bool mem_map_reclr(struct addr_space *as, vaddr_t va, struct ppages *ppages, return true; } -vaddr_t mem_map_cpy(struct addr_space *ass, struct addr_space *asd, vaddr_t vas, - vaddr_t vad, size_t num_pages) { - +vaddr_t mem_map_cpy(struct addr_space* ass, struct addr_space* asd, vaddr_t vas, vaddr_t vad, + size_t num_pages) +{ vaddr_t _vad = mem_alloc_vpage(asd, SEC_HYP_GLOBAL, vad, num_pages); size_t base_vad = _vad; size_t count = 0; @@ -696,14 +689,14 @@ vaddr_t mem_map_cpy(struct addr_space *ass, struct addr_space *asd, vaddr_t vas, while (count < num_pages) { size_t lvl = 0; - pte_t *pte = pt_get_pte(&ass->pt, lvl, vas); - while(!pte_page(&ass->pt, pte, lvl)) { + pte_t* pte = pt_get_pte(&ass->pt, lvl, vas); + while (!pte_page(&ass->pt, pte, lvl)) { lvl += 1; pte = pt_get_pte(&ass->pt, lvl, vas); } size_t lvl_size = pt_lvlsize(&ass->pt, lvl); size_t size = lvl_size; - if(to_map < lvl_size) { + if (to_map < lvl_size) { size = to_map; } size_t npages = NUM_PAGES(size); @@ -719,7 +712,7 @@ vaddr_t mem_map_cpy(struct addr_space *ass, struct addr_space *asd, vaddr_t vas, return base_vad; } -void *copy_space(void *base, const size_t size, struct ppages *pages) +void* copy_space(void* base, const size_t size, struct ppages* pages) { *pages = mem_alloc_ppages(cpu()->as.colors, NUM_PAGES(size), false); vaddr_t va = mem_alloc_vpage(&cpu()->as, SEC_HYP_PRIVATE, INVALID_VA, NUM_PAGES(size)); @@ -730,22 +723,22 @@ void *copy_space(void *base, const size_t size, struct ppages *pages) } /** - * To have the true benefits of coloring it's necessary that not only the guest - * images, but also the hypervisor itself, are colored. + * To have the true benefits of coloring it's necessary that not only the guest images, but also + * the hypervisor itself, are colored. * - * Bao is coloring itself by copying everything that has been allocated until - * this point in a new colored space, jumping into this new region and then - * then deleting all that was allocated before. + * Bao is coloring itself by copying everything that has been allocated until this point in a new + * colored space, jumping into this new region and then then deleting all that was allocated + * before. * - * Some regions need to be aligned due to some ARM restraint with the pagetable - * structure, so true coloring is actually never achieved. The drawbacks of - * this limitation are yet to be seen, and are in need of more testing. + * Some regions need to be aligned due to some ARM restraint with the pagetable structure, so true + * coloring is actually never achieved. The drawbacks of this limitation are yet to be seen, and + * are in need of more testing. 
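/*
 * Editor's aside — what copy_space amounts to, in user-space miniature:
 * allocate a fresh (colored) buffer the size of the source region, copy the
 * region into it, and hand back the new mapping. malloc/memcpy stand in for
 * mem_alloc_ppages/mem_alloc_vpage/mem_map here; this is only an analogy, not
 * the hypervisor code path.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void* copy_space(const void* base, size_t size)
{
    void* copy = malloc(size); /* Bao: colored ppages + vpage + map */
    if (copy != NULL) {
        memcpy(copy, base, size);
    }
    return copy;
}

int main(void)
{
    char image[] = "hypervisor image bytes";
    char* colored = copy_space(image, sizeof(image));
    if (colored == NULL) {
        return 1;
    }
    printf("copied: %s\n", colored);
    free(colored);
    return 0;
}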
*/ -void mem_color_hypervisor(const paddr_t load_addr, struct mem_region *root_region) +void mem_color_hypervisor(const paddr_t load_addr, struct mem_region* root_region) { volatile static pte_t shared_pte; vaddr_t va = INVALID_VA; - struct cpu *cpu_new; + struct cpu* cpu_new; struct ppages p_cpu; struct ppages p_image; struct ppages p_bitmap; @@ -753,12 +746,12 @@ void mem_color_hypervisor(const paddr_t load_addr, struct mem_region *root_regio size_t image_load_size = (size_t)(&_image_load_end - &_image_start); size_t image_noload_size = (size_t)(&_image_end - &_image_load_end); size_t image_size = image_load_size + image_noload_size; - size_t vm_image_size = (size_t)(&_vm_image_end - &_vm_image_start); + size_t vm_image_size = (size_t)(&_vm_image_end - &_vm_image_start); size_t cpu_boot_size = mem_cpu_boot_alloc_size(); - struct page_pool *root_pool = &root_region->page_pool; - size_t bitmap_size = (root_pool->size / (8 * PAGE_SIZE) + - !!(root_pool->size % (8 * PAGE_SIZE) != 0)) * - PAGE_SIZE; + struct page_pool* root_pool = &root_region->page_pool; + size_t bitmap_size = + (root_pool->size / (8 * PAGE_SIZE) + !!(root_pool->size % (8 * PAGE_SIZE) != 0)) * + PAGE_SIZE; colormap_t colors = config.hyp.colors; /* Set hypervisor colors in current address space */ @@ -767,119 +760,106 @@ void mem_color_hypervisor(const paddr_t load_addr, struct mem_region *root_regio /* * Copy the CPU space into a colored region. * - * It's not possible to simply copy the CPU space as-is, since there are a - * few pointers and structures that would point to the old, non-colored - * data. + * It's not possible to simply copy the CPU space as-is, since there are a few pointers and + * structures that would point to the old, non-colored data. * * the new CPU region is created, cleaned, prepared and finally mapped. */ - cpu_new = copy_space((void *)BAO_CPU_BASE, sizeof(struct cpu), &p_cpu); + cpu_new = copy_space((void*)BAO_CPU_BASE, sizeof(struct cpu), &p_cpu); as_init(&cpu_new->as, AS_HYP_CPY, HYP_ASID, NULL, colors); - va = mem_alloc_vpage(&cpu_new->as, SEC_HYP_PRIVATE, - (vaddr_t)BAO_CPU_BASE, - NUM_PAGES(sizeof(struct cpu))); - if (va != (vaddr_t)BAO_CPU_BASE) + va = mem_alloc_vpage(&cpu_new->as, SEC_HYP_PRIVATE, (vaddr_t)BAO_CPU_BASE, + NUM_PAGES(sizeof(struct cpu))); + if (va != (vaddr_t)BAO_CPU_BASE) { ERROR("Can't allocate virtual address for cpuspace"); + } mem_map(&cpu_new->as, va, &p_cpu, NUM_PAGES(sizeof(struct cpu)), PTE_HYP_FLAGS); /* - * Also, map the root page table in the new address space and keep both the - * virtual address and physical address in local variables as they will be - * needed later to perform the address space switch and new address space - * initialization. + * Also, map the root page table in the new address space and keep both the virtual address and + * physical address in local variables as they will be needed later to perform the address + * space switch and new address space initialization. 
*/ paddr_t p_root_pt_addr; vaddr_t v_root_pt_addr; size_t root_pt_num_pages = NUM_PAGES(pt_size(&cpu_new->as.pt, 0)); mem_translate(&cpu()->as, (vaddr_t)cpu_new->as.pt.root, &p_root_pt_addr); - v_root_pt_addr = mem_alloc_vpage(&cpu_new->as, SEC_HYP_PRIVATE, INVALID_VA, - root_pt_num_pages); + v_root_pt_addr = mem_alloc_vpage(&cpu_new->as, SEC_HYP_PRIVATE, INVALID_VA, root_pt_num_pages); if (va == INVALID_VA) { ERROR("Can't allocate virtuall address space for root page table"); } - struct ppages p_root_pt_pages = - mem_ppages_get(p_root_pt_addr, root_pt_num_pages); - mem_map(&cpu_new->as, v_root_pt_addr, &p_root_pt_pages, root_pt_num_pages, - PTE_HYP_FLAGS); + struct ppages p_root_pt_pages = mem_ppages_get(p_root_pt_addr, root_pt_num_pages); + mem_map(&cpu_new->as, v_root_pt_addr, &p_root_pt_pages, root_pt_num_pages, PTE_HYP_FLAGS); /* - * Copy the Hypervisor image and root page pool bitmap into a colored - * region. + * Copy the Hypervisor image and root page pool bitmap into a colored region. * - * CPU_MASTER allocates, copies and maps the image and the root page pool - * bitmap on a shared space, whilst other CPUs only have to copy the image - * from the CPU_MASTER in order to be able to access it. + * CPU_MASTER allocates, copies and maps the image and the root page pool bitmap on a shared + * space, whilst other CPUs only have to copy the image from the CPU_MASTER in order to be able + * to access it. */ if (cpu()->id == CPU_MASTER) { copy_space(&_image_start, image_size, &p_image); - va = mem_alloc_vpage(&cpu_new->as, SEC_HYP_IMAGE, - (vaddr_t) &_image_start, NUM_PAGES(image_size)); + va = mem_alloc_vpage(&cpu_new->as, SEC_HYP_IMAGE, (vaddr_t)&_image_start, + NUM_PAGES(image_size)); - if (va != (vaddr_t)&_image_start) + if (va != (vaddr_t)&_image_start) { ERROR("Can't allocate virtual address for Bao Image"); + } - mem_map(&cpu_new->as, va, &p_image, - NUM_PAGES(image_size), PTE_HYP_FLAGS); - shared_pte = pte_addr(pt_get_pte(&cpu_new->as.pt, 0, - (vaddr_t)&_image_start)); + mem_map(&cpu_new->as, va, &p_image, NUM_PAGES(image_size), PTE_HYP_FLAGS); + shared_pte = pte_addr(pt_get_pte(&cpu_new->as.pt, 0, (vaddr_t)&_image_start)); } else { - pte_t *image_pte = pt_get_pte(&cpu_new->as.pt, 0, - (vaddr_t)&_image_start); + pte_t* image_pte = pt_get_pte(&cpu_new->as.pt, 0, (vaddr_t)&_image_start); /* Wait for CPU_MASTER to get image page table entry */ - while (shared_pte == 0); + while (shared_pte == 0) { } pte_set(image_pte, (paddr_t)shared_pte, PTE_TABLE, PTE_HYP_FLAGS); } cpu_sync_barrier(&cpu_glb_sync); /* - * CPU_MASTER will also take care of mapping the configuration onto the new - * space. + * CPU_MASTER will also take care of mapping the configuration onto the new space. * - * The root page pool bitmap tracks all the physical allocation, so it - * needs to be the last thing to be copied, as after that, no physical - * allocation will be tracked. + * The root page pool bitmap tracks all the physical allocation, so it needs to be the last + * thing to be copied, as after that, no physical allocation will be tracked. 
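/*
 * Editor's aside — the shared_pte handshake between CPU_MASTER and the
 * secondaries, modeled with C11 atomics and threads: the master publishes a
 * non-zero value and the others spin until they observe it. Bao relies on a
 * volatile plus cache maintenance instead; this is only the portable
 * user-space analogue (build with -pthread).
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint_least64_t shared_pte = 0;

static void* secondary(void* arg)
{
    (void)arg;
    while (atomic_load(&shared_pte) == 0) {
        /* wait for the master to publish the image PTE */
    }
    printf("secondary saw %#llx\n", (unsigned long long)atomic_load(&shared_pte));
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, secondary, NULL);
    atomic_store(&shared_pte, 0x80000003ULL); /* the master's publish step */
    pthread_join(t, NULL);
    return 0;
}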
*/ if (cpu()->id == CPU_MASTER) { /* Copy root pool bitmap */ copy_space((void*)root_pool->bitmap, bitmap_size, &p_bitmap); - va = mem_alloc_vpage(&cpu_new->as, SEC_HYP_GLOBAL, - (vaddr_t)root_pool->bitmap, - NUM_PAGES(bitmap_size)); + va = mem_alloc_vpage(&cpu_new->as, SEC_HYP_GLOBAL, (vaddr_t)root_pool->bitmap, + NUM_PAGES(bitmap_size)); - if (va != (vaddr_t)root_pool->bitmap) + if (va != (vaddr_t)root_pool->bitmap) { ERROR("Can't allocate address for cpu interface"); + } - mem_map(&cpu_new->as, va, &p_bitmap, - NUM_PAGES(bitmap_size), PTE_HYP_FLAGS); + mem_map(&cpu_new->as, va, &p_bitmap, NUM_PAGES(bitmap_size), PTE_HYP_FLAGS); } cpu_sync_barrier(&cpu_glb_sync); switch_space(cpu_new, p_root_pt_addr); /** - * Make sure the new physical pages containing image and cpu are flushed - * to main memmory + * Make sure the new physical pages containing image and cpu are flushed to main memmory */ cache_flush_range((vaddr_t)&_image_start, image_size); cache_flush_range((vaddr_t)&_cpu_private_beg, sizeof(struct cpu)); /** - * Bao's code from here's on still uses the static global variables, so - * they need to be updated. + * Bao's code from here's on still uses the static global variables, so they need to be + * updated. * - * The synchronization objects are in an inconsistent state, and they need - * to be re-initialized before they get used again, so CPUs need a way to - * communicate between themselves without an explicit barrier. To - * accomplish this a static global variable is used. + * The synchronization objects are in an inconsistent state, and they need to be re-initialized + * before they get used again, so CPUs need a way to communicate between themselves without an + * explicit barrier. To accomplish this a static global variable is used. */ if (cpu()->id == CPU_MASTER) { cpu_sync_init(&cpu_glb_sync, platform.cpu_num); shared_pte = 0; } else { - while (shared_pte != 0); + while (shared_pte != 0) { } } as_init(&cpu()->as, AS_HYP, HYP_ASID, (void*)v_root_pt_addr, colors); @@ -887,8 +867,8 @@ void mem_color_hypervisor(const paddr_t load_addr, struct mem_region *root_regio /* * Clear the old region that have been copied. * - * CPU space regions and Hypervisor image region are contingent, starting - * from `load_addr`. The bitmap region is on top of the root pool region. + * CPU space regions and Hypervisor image region are contingent, starting from `load_addr`. The + * bitmap region is on top of the root pool region. 
*/ if (cpu()->id == CPU_MASTER) { p_image = mem_ppages_get(load_addr, NUM_PAGES(image_load_size)); @@ -905,8 +885,8 @@ void mem_color_hypervisor(const paddr_t load_addr, struct mem_region *root_regio mem_unmap(&cpu()->as, va, p_image.num_pages, true); p_bitmap = mem_ppages_get(load_addr + image_size + vm_image_size + - (cpu_boot_size * platform.cpu_num), - NUM_PAGES(bitmap_size)); + (cpu_boot_size * platform.cpu_num), + NUM_PAGES(bitmap_size)); va = mem_alloc_vpage(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, p_bitmap.num_pages); mem_map(&cpu()->as, va, &p_bitmap, p_bitmap.num_pages, PTE_HYP_FLAGS); @@ -914,30 +894,26 @@ void mem_color_hypervisor(const paddr_t load_addr, struct mem_region *root_regio mem_unmap(&cpu()->as, va, p_bitmap.num_pages, true); } - p_cpu = mem_ppages_get( - load_addr + image_size + vm_image_size +(cpu_boot_size * cpu()->id), + p_cpu = mem_ppages_get(load_addr + image_size + vm_image_size + (cpu_boot_size * cpu()->id), cpu_boot_size / PAGE_SIZE); va = mem_alloc_vpage(&cpu()->as, SEC_HYP_PRIVATE, INVALID_VA, p_cpu.num_pages); - mem_map(&cpu()->as, va, &p_cpu,p_cpu.num_pages, PTE_HYP_FLAGS); - memset((void*)va, 0,p_cpu.num_pages * PAGE_SIZE); - mem_unmap(&cpu()->as, va,p_cpu.num_pages, false); + mem_map(&cpu()->as, va, &p_cpu, p_cpu.num_pages, PTE_HYP_FLAGS); + memset((void*)va, 0, p_cpu.num_pages * PAGE_SIZE); + mem_unmap(&cpu()->as, va, p_cpu.num_pages, false); } -void as_init(struct addr_space *as, enum AS_TYPE type, asid_t id, - pte_t *root_pt, colormap_t colors) +void as_init(struct addr_space* as, enum AS_TYPE type, asid_t id, pte_t* root_pt, colormap_t colors) { as->type = type; - as->pt.dscr = - type == AS_HYP || type == AS_HYP_CPY ? hyp_pt_dscr : vm_pt_dscr; + as->pt.dscr = type == AS_HYP || type == AS_HYP_CPY ? hyp_pt_dscr : vm_pt_dscr; as->colors = colors; as->lock = SPINLOCK_INITVAL; as->id = id; if (root_pt == NULL) { size_t n = NUM_PAGES(pt_size(&as->pt, 0)); - root_pt = (pte_t*) mem_alloc_page(n, - type == AS_HYP || type == AS_HYP_CPY ? SEC_HYP_PRIVATE : SEC_HYP_VM, - true); + root_pt = (pte_t*)mem_alloc_page(n, + type == AS_HYP || type == AS_HYP_CPY ? SEC_HYP_PRIVATE : SEC_HYP_VM, true); memset((void*)root_pt, 0, n * PAGE_SIZE); } as->pt.root = root_pt; @@ -945,14 +921,14 @@ void as_init(struct addr_space *as, enum AS_TYPE type, asid_t id, as_arch_init(as); } -void mem_prot_init() { - pte_t* root_pt = (pte_t*) - ALIGN(((vaddr_t)cpu()) + sizeof(struct cpu), PAGE_SIZE); +void mem_prot_init() +{ + pte_t* root_pt = (pte_t*)ALIGN(((vaddr_t)cpu()) + sizeof(struct cpu), PAGE_SIZE); as_init(&cpu()->as, AS_HYP, HYP_ASID, root_pt, config.hyp.colors); } -vaddr_t mem_alloc_map(struct addr_space* as, enum AS_SEC section, struct ppages *page, - vaddr_t at, size_t num_pages, mem_flags_t flags) +vaddr_t mem_alloc_map(struct addr_space* as, enum AS_SEC section, struct ppages* page, vaddr_t at, + size_t num_pages, mem_flags_t flags) { vaddr_t address = mem_alloc_vpage(as, section, at, num_pages); if (address != INVALID_VA) { @@ -961,14 +937,13 @@ vaddr_t mem_alloc_map(struct addr_space* as, enum AS_SEC section, struct ppages return address; } -vaddr_t mem_alloc_map_dev(struct addr_space* as, enum AS_SEC section, - vaddr_t at, paddr_t pa, size_t num_pages) +vaddr_t mem_alloc_map_dev(struct addr_space* as, enum AS_SEC section, vaddr_t at, paddr_t pa, + size_t num_pages) { vaddr_t address = mem_alloc_vpage(as, section, at, num_pages); if (address != INVALID_VA) { struct ppages pages = mem_ppages_get(pa, num_pages); - mem_flags_t flags = - as->type == AS_HYP ? 
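/*
 * Editor's aside — the ALIGN placement in mem_prot_init, runnable: the root
 * page table starts at the first page-aligned address past the cpu structure.
 * ALIGN is assumed to be the usual round-up-to-multiple macro; the addresses
 * are invented.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define ALIGN(x, a) (((x) + ((a)-1)) & ~((a)-1))

int main(void)
{
    uintptr_t cpu_base = 0x100000;   /* assumed base of the CPU space */
    size_t cpu_struct_size = 0x1234; /* deliberately not page-aligned */

    uintptr_t root_pt = ALIGN(cpu_base + cpu_struct_size, PAGE_SIZE);
    printf("root page table at %#lx\n", (unsigned long)root_pt); /* 0x102000 */
    return 0;
}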
PTE_HYP_DEV_FLAGS : PTE_VM_DEV_FLAGS; + mem_flags_t flags = as->type == AS_HYP ? PTE_HYP_DEV_FLAGS : PTE_VM_DEV_FLAGS; mem_map(as, address, &pages, num_pages, flags); } diff --git a/src/core/mmu/vm.c b/src/core/mmu/vm.c index 03230db76..9ad204e7a 100644 --- a/src/core/mmu/vm.c +++ b/src/core/mmu/vm.c @@ -1,5 +1,5 @@ /** - * SPDX-License-Identifier: Apache-2.0 + * SPDX-License-Identifier: Apache-2.0 * Copyright (c) Bao Project and Contributors. All rights reserved. */ @@ -8,6 +8,7 @@ #include #include -void vm_mem_prot_init(struct vm* vm, const struct vm_config* config) { +void vm_mem_prot_init(struct vm* vm, const struct vm_config* config) +{ as_init(&vm->as, AS_VM, vm->id, NULL, config->colors); } diff --git a/src/core/mmu/vmm.c b/src/core/mmu/vmm.c index 017f7f515..ec25d663b 100644 --- a/src/core/mmu/vmm.c +++ b/src/core/mmu/vmm.c @@ -6,11 +6,13 @@ #include #include -void vmm_io_init() { +void vmm_io_init() +{ io_init(); } -struct vm_install_info vmm_get_vm_install_info(struct vm_allocation* vm_alloc) { +struct vm_install_info vmm_get_vm_install_info(struct vm_allocation* vm_alloc) +{ struct vm_install_info info = { .base = vm_alloc->base, .vm_section_pte = *pt_get_pte(&cpu()->as.pt, 0, vm_alloc->base), @@ -18,7 +20,8 @@ struct vm_install_info vmm_get_vm_install_info(struct vm_allocation* vm_alloc) { return info; } -void vmm_vm_install(struct vm_install_info *install_info) { +void vmm_vm_install(struct vm_install_info* install_info) +{ pte_t* pte = pt_get_pte(&cpu()->as.pt, 0, (vaddr_t)install_info->base); *pte = install_info->vm_section_pte; } diff --git a/src/core/mpu/config.c b/src/core/mpu/config.c index c9c29dad0..231830288 100644 --- a/src/core/mpu/config.c +++ b/src/core/mpu/config.c @@ -6,14 +6,12 @@ #include #include -void config_mem_prot_init(paddr_t load_addr) { - - for (size_t i = 0; i < config.vmlist_size; i++){ - +void config_mem_prot_init(paddr_t load_addr) +{ + for (size_t i = 0; i < config.vmlist_size; i++) { for (size_t j = 0; j < config.vmlist[i].platform.region_num; j++) { /** - * On MPU systems all VM regions must be physical regions with - * 1-to-1 mapping. + * On MPU systems all VM regions must be physical regions with 1-to-1 mapping. */ config.vmlist[i].platform.regions[j].place_phys = true; vaddr_t region_base = config.vmlist[i].platform.regions[j].base; @@ -22,8 +20,8 @@ void config_mem_prot_init(paddr_t load_addr) { for (size_t j = 0; j < config.vmlist[i].platform.ipc_num; j++) { /** - * In MPU-based systems, the address of the VM's IPC object and - * the used must follow a 1-1 mapping. + * In MPU-based systems, the address of the VM's IPC object and the used must follow a + * 1-1 mapping. */ size_t shmem_id = config.vmlist[i].platform.ipcs[j].shmem_id; vaddr_t ipc_base_addr = config.vmlist[i].platform.ipcs[j].base; @@ -32,15 +30,12 @@ void config_mem_prot_init(paddr_t load_addr) { ERROR("IPC base addr must be the same as its shmem base addr."); } } - } - for (size_t i = 0; i < config.shmemlist_size; i++){ + for (size_t i = 0; i < config.shmemlist_size; i++) { /** - * On MPU systems all shared memory regions must be physical - * regions with 1-to-1 mapping. + * On MPU systems all shared memory regions must be physical regions with 1-to-1 mapping. 
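/*
 * Editor's aside — vmm_vm_install in miniature: installing a VM's section on
 * another CPU is just copying one root-level PTE, because the whole section
 * hangs off a single top-level entry. The 4-entry root table, the span per
 * entry, and the index derivation are simplifications.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pte_t;

#define ROOT_ENTRIES 4
#define SECTION_SIZE 0x40000000ULL /* assumed span of one root entry (1 GiB) */

struct vm_install_info {
    uint64_t base;        /* VM section base address */
    pte_t vm_section_pte; /* snapshot of the donor CPU's root entry */
};

int main(void)
{
    pte_t donor_root[ROOT_ENTRIES] = { 0, 0, 0x8000000000000747ULL, 0 };
    pte_t local_root[ROOT_ENTRIES] = { 0 };

    struct vm_install_info info = {
        .base = 2 * SECTION_SIZE,
        .vm_section_pte = donor_root[2],
    };

    /* vmm_vm_install: write the snapshot into this CPU's root table */
    local_root[info.base / SECTION_SIZE] = info.vm_section_pte;

    printf("installed %#llx at root entry %llu\n", (unsigned long long)local_root[2],
        (unsigned long long)(info.base / SECTION_SIZE));
    return 0;
}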
*/ config.shmemlist[i].place_phys = true; } - } diff --git a/src/core/mpu/inc/mem_prot/io.h b/src/core/mpu/inc/mem_prot/io.h index 36e08c92e..bb997ad06 100644 --- a/src/core/mpu/inc/mem_prot/io.h +++ b/src/core/mpu/inc/mem_prot/io.h @@ -1,13 +1,11 @@ /** - * SPDX-License-Identifier: Apache-2.0 + * SPDX-License-Identifier: Apache-2.0 * Copyright (c) Bao Project and Contributors. All rights reserved */ #ifndef MEM_PROT_IO_H #define MEM_PROT_IO_H -struct io_prot { - -}; +struct io_prot { }; #endif /* MEM_PROT_IO_H */ diff --git a/src/core/mpu/inc/mem_prot/mem.h b/src/core/mpu/inc/mem_prot/mem.h index aae472010..e8b7b83db 100644 --- a/src/core/mpu/inc/mem_prot/mem.h +++ b/src/core/mpu/inc/mem_prot/mem.h @@ -11,8 +11,8 @@ #include #include -#define HYP_ASID 0 -#define VMPU_NUM_ENTRIES 64 +#define HYP_ASID 0 +#define VMPU_NUM_ENTRIES 64 struct mp_region { vaddr_t base; @@ -32,24 +32,22 @@ struct addr_space { spinlock_t lock; }; -void as_init(struct addr_space *as, enum AS_TYPE type, asid_t id, colormap_t colors); +void as_init(struct addr_space* as, enum AS_TYPE type, asid_t id, colormap_t colors); -static inline bool mem_regions_overlap(struct mp_region *reg1, - struct mp_region *reg2) +static inline bool mem_regions_overlap(struct mp_region* reg1, struct mp_region* reg2) { return range_in_range(reg1->base, reg1->size, reg2->base, reg2->size); } /** - * This functions must be defined for the physical MPU. The abstraction provided - * by the physical MPU layer is minimal. Besides initialization: - * i) It must provide the view of a separate physical MPU for each privilege; - * ii) It must allow the mapping and unmapping of regions on these MPUs, - * returning a binary return success value. + * These functions must be defined for the physical MPU. The abstraction provided by the physical + * MPU layer is minimal. Besides initialization: + * i) It must provide the view of a separate physical MPU for each privilege; + * ii) It must allow the mapping and unmapping of regions on these MPUs, returning a binary + * success value.
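+ *
+ * For illustration, a minimal usage sketch of this contract (PRIV_HYP is a hypothetical
+ * stand-in for whatever hypervisor privilege value a port's priv_t actually defines, and the
+ * base address is made up):
+ *
+ *     struct mp_region reg = {
+ *         .base = 0x80000000,
+ *         .size = 2 * mpu_granularity(),
+ *         .mem_flags = PTE_HYP_FLAGS,
+ *     };
+ *     if (!mpu_map(PRIV_HYP, &reg)) {
+ *         // binary failure: the caller decides how to recover
+ *     }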
*/ void mpu_init(); bool mpu_map(priv_t priv, struct mp_region* mem); bool mpu_unmap(priv_t priv, struct mp_region* mem); - #endif /* __MEM_PROT_H__ */ diff --git a/src/core/mpu/io.c b/src/core/mpu/io.c index 8a9dbdbc3..8526c5544 100644 --- a/src/core/mpu/io.c +++ b/src/core/mpu/io.c @@ -11,12 +11,12 @@ void io_init() return; } -bool io_vm_init(struct vm *vm, const struct vm_config *config) +bool io_vm_init(struct vm* vm, const struct vm_config* config) { return true; } -bool io_vm_add_device(struct vm *vm, deviceid_t dev_id) +bool io_vm_add_device(struct vm* vm, deviceid_t dev_id) { return true; } diff --git a/src/core/mpu/mem.c b/src/core/mpu/mem.c index ae8527d14..96981a612 100644 --- a/src/core/mpu/mem.c +++ b/src/core/mpu/mem.c @@ -20,8 +20,8 @@ struct shared_region { }; void mem_handle_broadcast_region(uint32_t event, uint64_t data); -bool mem_map(struct addr_space *as, struct mp_region *mpr, bool broadcast); -bool mem_unmap_range(struct addr_space *as, vaddr_t vaddr, size_t size, bool broadcast); +bool mem_map(struct addr_space* as, struct mp_region* mpr, bool broadcast); +bool mem_unmap_range(struct addr_space* as, vaddr_t vaddr, size_t size, bool broadcast); enum { MEM_INSERT_REGION, MEM_REMOVE_REGION }; @@ -39,9 +39,9 @@ static inline struct mpe* mem_vmpu_get_entry(struct addr_space* as, mpid_t mpid) return NULL; } -void mem_vmpu_set_entry(struct addr_space *as, mpid_t mpid, struct mp_region *mpr) +void mem_vmpu_set_entry(struct addr_space* as, mpid_t mpid, struct mp_region* mpr) { - struct mpe *mpe = mem_vmpu_get_entry(as, mpid); + struct mpe* mpe = mem_vmpu_get_entry(as, mpid); mpe->region.base = mpr->base; mpe->region.size = mpr->size; @@ -50,7 +50,7 @@ void mem_vmpu_set_entry(struct addr_space *as, mpid_t mpid, struct mp_region *mp mpe->state = MPE_S_VALID; } -void mem_vmpu_clear_entry(struct addr_space *as, mpid_t mpid) +void mem_vmpu_clear_entry(struct addr_space* as, mpid_t mpid) { struct mpe* mpe = mem_vmpu_get_entry(as, mpid); @@ -61,14 +61,14 @@ void mem_vmpu_clear_entry(struct addr_space *as, mpid_t mpid) mpe->state = MPE_S_INVALID; } -void mem_vmpu_free_entry(struct addr_space *as, mpid_t mpid) +void mem_vmpu_free_entry(struct addr_space* as, mpid_t mpid) { mem_vmpu_clear_entry(as, mpid); struct mpe* mpe = mem_vmpu_get_entry(as, mpid); mpe->state = MPE_S_FREE; } -mpid_t mem_vmpu_allocate_entry(struct addr_space *as) +mpid_t mem_vmpu_allocate_entry(struct addr_space* as) { mpid_t mpid = INVALID_MPID; @@ -89,24 +89,23 @@ mpid_t mem_vmpu_get_entry_by_addr(struct addr_space* as, vaddr_t addr) mpid_t mpid = INVALID_MPID; for (mpid_t i = 0; i < VMPU_NUM_ENTRIES; i++) { - - struct mpe *mpe = mem_vmpu_get_entry(as, i); + struct mpe* mpe = mem_vmpu_get_entry(as, i); if (mpe->state != MPE_S_VALID) { continue; } vaddr_t limit_addr = mpe->region.base + mpe->region.size; - if((addr >= mpe->region.base) && (addr < limit_addr)) { - mpid = i; - break; - } + if ((addr >= mpe->region.base) && (addr < limit_addr)) { + mpid = i; + break; + } } return mpid; } -static inline priv_t as_priv(struct addr_space *as) +static inline priv_t as_priv(struct addr_space* as) { priv_t priv; @@ -128,12 +127,11 @@ static inline priv_t as_priv(struct addr_space *as) static void as_init_boot_regions() { - /** * Add hypervisor mpu entries set up during boot to the vmpu: * - hypervisor image (loadable and non-loadable) * - private cpu region - */ + */ extern uint8_t _image_start, _image_load_end, _image_noload_start, _image_end; vaddr_t image_start = (vaddr_t)&_image_start; @@ -147,9 +145,9 @@ static void 
as_init_boot_regions() bool separate_noload_region = image_load_end != image_noload_start; vaddr_t first_region_end = separate_noload_region ? image_load_end : image_end; - mpr = (struct mp_region) { + mpr = (struct mp_region){ .base = image_start, - .size = (size_t) (first_region_end - image_start), + .size = (size_t)(first_region_end - image_start), .mem_flags = PTE_HYP_FLAGS, .as_sec = SEC_HYP_IMAGE, }; @@ -157,9 +155,9 @@ static void as_init_boot_regions() mpid++; if (separate_noload_region) { - mpr = (struct mp_region) { + mpr = (struct mp_region){ .base = image_noload_start, - .size = (size_t) image_end - image_noload_start, + .size = (size_t)image_end - image_noload_start, .mem_flags = PTE_HYP_FLAGS, .as_sec = SEC_HYP_IMAGE, }; @@ -167,9 +165,9 @@ static void as_init_boot_regions() mpid++; } - mpr = (struct mp_region) { + mpr = (struct mp_region){ .base = (vaddr_t)cpu(), - .size = ALIGN(sizeof(struct cpu), PAGE_SIZE), + .size = ALIGN(sizeof(struct cpu), PAGE_SIZE), .mem_flags = PTE_HYP_FLAGS, .as_sec = SEC_HYP_PRIVATE, }; @@ -190,22 +188,21 @@ size_t mem_cpu_boot_alloc_size() return size; } -void as_init(struct addr_space *as, enum AS_TYPE type, asid_t id, colormap_t colors) +void as_init(struct addr_space* as, enum AS_TYPE type, asid_t id, colormap_t colors) { as->type = type; as->colors = 0; as->id = id; as_arch_init(as); - for(size_t i=0; ilock); if (in_range(ppages->base, pool->base, pool->size * PAGE_SIZE)) { size_t index = (ppages->base - pool->base) / PAGE_SIZE; @@ -221,18 +218,17 @@ void mem_msg_handler(uint32_t event, uint64_t data) } CPU_MSG_HANDLER(mem_msg_handler, MEM_PROT_SYNC); -static cpumap_t mem_section_shared_cpus(struct addr_space *as, as_sec_t section) +static cpumap_t mem_section_shared_cpus(struct addr_space* as, as_sec_t section) { cpumap_t cpus = 0; if (as->type == AS_HYP) { - if ((section == SEC_HYP_GLOBAL) || (section == SEC_HYP_IMAGE)) { + if ((section == SEC_HYP_GLOBAL) || (section == SEC_HYP_IMAGE)) { cpus = BIT_MASK(0, PLAT_CPU_NUM); } else if (section == SEC_HYP_VM) { /** - * If we don't have a valid vcpu at this point, it means we are - * creating this region before even having a vm. Therefore, the - * sharing of the region must be guaranteed by other means (e.g. - * vmm_vm_install) + * If we don't have a valid vcpu at this point, it means we are creating this region + * before even having a vm. Therefore, the sharing of the region must be guaranteed by + * other means (e.g. 
vmm_vm_install) */ if (cpu()->vcpu != NULL) { cpus = cpu()->vcpu->vm->cpus; @@ -245,11 +241,11 @@ static cpumap_t mem_section_shared_cpus(struct addr_space *as, as_sec_t section) return cpus; } -void mem_region_broadcast(struct addr_space *as, struct mp_region *mpr, uint32_t op) +void mem_region_broadcast(struct addr_space* as, struct mp_region* mpr, uint32_t op) { cpumap_t shared_cpus = mem_section_shared_cpus(as, mpr->as_sec); - if(shared_cpus == 0) { + if (shared_cpus == 0) { return; } @@ -261,19 +257,19 @@ void mem_region_broadcast(struct addr_space *as, struct mp_region *mpr, uint32_t for (cpuid_t cpuid = 0; cpuid < PLAT_CPU_NUM; cpuid++) { if ((cpu()->id != cpuid) && bit_get(shared_cpus, cpuid)) { - struct shared_region *node = objpool_alloc(&shared_region_pool); + struct shared_region* node = objpool_alloc(&shared_region_pool); if (node == NULL) { ERROR("Failed allocating shared region node"); } *node = shared_region; - struct cpu_msg msg = {MEM_PROT_SYNC, op, (uintptr_t) node}; + struct cpu_msg msg = { MEM_PROT_SYNC, op, (uintptr_t)node }; cpu_send_msg(cpuid, &msg); } } } -bool mem_vmpu_insert_region(struct addr_space *as, mpid_t mpid, - struct mp_region *mpr, bool broadcast) +bool mem_vmpu_insert_region(struct addr_space* as, mpid_t mpid, struct mp_region* mpr, + bool broadcast) { if (mpid == INVALID_MPID) { return false; @@ -294,7 +290,7 @@ bool mem_vmpu_remove_region(struct addr_space* as, mpid_t mpid, bool broadcast) { bool removed = false; - struct mpe *mpe = mem_vmpu_get_entry(as, mpid); + struct mpe* mpe = mem_vmpu_get_entry(as, mpid); if ((mpe != NULL) && (mpe->state == MPE_S_VALID)) { if (broadcast) { @@ -308,7 +304,7 @@ bool mem_vmpu_remove_region(struct addr_space* as, mpid_t mpid, bool broadcast) return removed; } -void mem_handle_broadcast_insert(struct addr_space *as, struct mp_region *mpr) +void mem_handle_broadcast_insert(struct addr_space* as, struct mp_region* mpr) { if (as->type == AS_HYP) { mem_map(&cpu()->as, mpr, false); @@ -317,7 +313,7 @@ void mem_handle_broadcast_insert(struct addr_space *as, struct mp_region *mpr) } } -void mem_handle_broadcast_remove(struct addr_space *as, struct mp_region *mpr) +void mem_handle_broadcast_remove(struct addr_space* as, struct mp_region* mpr) { if (as->type == AS_HYP) { mem_unmap_range(&cpu()->as, mpr->base, mpr->size, false); @@ -328,21 +324,21 @@ void mem_handle_broadcast_remove(struct addr_space *as, struct mp_region *mpr) void mem_handle_broadcast_region(uint32_t event, uint64_t data) { - struct shared_region* sh_reg = (struct shared_region*) (uintptr_t)data; + struct shared_region* sh_reg = (struct shared_region*)(uintptr_t)data; - if(sh_reg != NULL) { - struct addr_space *as; + if (sh_reg != NULL) { + struct addr_space* as; if (sh_reg->as_type == AS_HYP) { as = &cpu()->as; } else { - struct addr_space *vm_as = &cpu()->vcpu->vm->as; + struct addr_space* vm_as = &cpu()->vcpu->vm->as; if (vm_as->id != sh_reg->asid) { ERROR("Received shared region for unkown vm address space."); } as = vm_as; } - switch(event){ + switch (event) { case MEM_INSERT_REGION: mem_handle_broadcast_insert(as, &sh_reg->region); break; @@ -357,13 +353,11 @@ void mem_handle_broadcast_region(uint32_t event, uint64_t data) } } -mpid_t mem_vmpu_find_overlapping_region(struct addr_space *as, - struct mp_region *region) +mpid_t mem_vmpu_find_overlapping_region(struct addr_space* as, struct mp_region* region) { mpid_t mpid = INVALID_MPID; for (mpid_t i = 0; i < VMPU_NUM_ENTRIES; i++) { - struct mpe* mpe = mem_vmpu_get_entry(as, i); if (mpe->state != 
MPE_S_VALID) { @@ -374,13 +368,12 @@ mpid_t mem_vmpu_find_overlapping_region(struct addr_space *as, mpid = i; break; } - } return mpid; } -bool mem_map(struct addr_space *as, struct mp_region *mpr, bool broadcast) +bool mem_map(struct addr_space* as, struct mp_region* mpr, bool broadcast) { bool mapped = false; @@ -389,18 +382,18 @@ bool mem_map(struct addr_space *as, struct mp_region *mpr, bool broadcast) } if ((mpr->size % mpu_granularity()) != 0) { - ERROR("trying to set mpu region which is not a multiple of granularity"); + ERROR("trying to set mpu region which is not a multiple of " + "granularity"); } spin_lock(&as->lock); - + if (mem_vmpu_find_overlapping_region(as, mpr) == INVALID_MPID) { // TODO: check if it possible to merge with another region mpid_t mpid = mem_vmpu_allocate_entry(as); if (mpid != INVALID_MPID) { mapped = mem_vmpu_insert_region(as, mpid, mpr, broadcast); } - } spin_unlock(&as->lock); @@ -408,29 +401,27 @@ bool mem_map(struct addr_space *as, struct mp_region *mpr, bool broadcast) return mapped; } -bool mem_unmap_range(struct addr_space *as, vaddr_t vaddr, size_t size, bool broadcast) +bool mem_unmap_range(struct addr_space* as, vaddr_t vaddr, size_t size, bool broadcast) { spin_lock(&as->lock); size_t size_left = size; while (size_left > 0) { - - struct mp_region reg = (struct mp_region) { + struct mp_region reg = (struct mp_region){ reg.base = vaddr, reg.size = size, }; mpid_t mpid = mem_vmpu_find_overlapping_region(as, &reg); if (mpid == INVALID_MPID) { /** - * FIXME: right now we are ignoring the fact that the range or - * parts of it might not be mapped. This is in line to what the MMU - * mem_unmap function does. We should change this to only go ahead - * with the unpamming if the full range is indeed mapped. - */ + * FIXME: right now we are ignoring the fact that the range or parts of it might not + * be mapped. This is in line with what the MMU mem_unmap function does. We should change + * this to only go ahead with the unmapping if the full range is indeed mapped. + */ break; } - struct mpe *mpe = mem_vmpu_get_entry(as, mpid); + struct mpe* mpe = mem_vmpu_get_entry(as, mpid); reg = mpe->region; vaddr_t limit = vaddr + size; @@ -466,9 +457,7 @@ bool mem_unmap_range(struct addr_space *as, vaddr_t vaddr, size_t size, bool bro return size_left == 0; } - -void mem_unmap(struct addr_space *as, vaddr_t at, size_t num_pages, - bool free_ppages) +void mem_unmap(struct addr_space* as, vaddr_t at, size_t num_pages, bool free_ppages) { if (mem_unmap_range(as, at, num_pages * PAGE_SIZE, true) && free_ppages) { struct ppages ppages = mem_ppages_get(at, num_pages); @@ -476,18 +465,17 @@ void mem_unmap(struct addr_space *as, vaddr_t at, size_t num_pages, } } -vaddr_t mem_map_cpy(struct addr_space *ass, struct addr_space *asd, vaddr_t vas, - vaddr_t vad, size_t num_pages) +vaddr_t mem_map_cpy(struct addr_space* ass, struct addr_space* asd, vaddr_t vas, vaddr_t vad, + size_t num_pages) { - struct mpe *mpe; + struct mpe* mpe; struct mp_region mpr; vaddr_t va_res = INVALID_VA; if ((ass != asd) && (vad == INVALID_VA || vad == vas)) { - // In mpu-based systems, we can only copy mappings between address - // spaces, as copying a mapping in a single address space would overlap - // the orignal mapping. Also because only identify mappings are - // supported, the source va must equal the destination va, or be an + // In mpu-based systems, we can only copy mappings between address spaces, as copying a + // mapping in a single address space would overlap the original mapping.
Also because only + // identity mappings are supported, the source va must equal the destination va, or be an // invalid va. This still covers the most useful uses cases. spin_lock(&ass->lock); @@ -518,22 +506,22 @@ bool mem_translate(struct addr_space* as, vaddr_t va, paddr_t* pa) } } -vaddr_t mem_alloc_map(struct addr_space* as, as_sec_t section, - struct ppages *ppages, vaddr_t at, size_t num_pages, mem_flags_t flags) +vaddr_t mem_alloc_map(struct addr_space* as, as_sec_t section, struct ppages* ppages, vaddr_t at, + size_t num_pages, mem_flags_t flags) { // TODO: Check if page->base, page->size and vaddr_t at are page_size align? struct ppages temp_ppages; if (at == INVALID_VA && ppages == NULL) { - ERROR ("Can't map an MPU region because neither the virtual" - "or phsyical address was specified."); - } - + ERROR("Can't map an MPU region because neither the virtual " + "nor physical address was specified."); + } + if (at != INVALID_VA && ppages != NULL && at != ppages->base) { - ERROR ("Trying to map non identity"); + ERROR("Trying to map non-identity"); } - + if (at == INVALID_VA) { at = ppages->base; } else if (ppages == NULL) { @@ -541,7 +529,7 @@ vaddr_t mem_alloc_map(struct addr_space* as, as_sec_t section, ppages = &temp_ppages; } - struct mp_region mpr = (struct mp_region) { + struct mp_region mpr = (struct mp_region){ .base = ppages->base, .size = (num_pages * PAGE_SIZE), .as_sec = section, @@ -553,12 +541,10 @@ vaddr_t mem_alloc_map(struct addr_space* as, as_sec_t section, return at; } -vaddr_t mem_alloc_map_dev(struct addr_space* as, as_sec_t section, - vaddr_t at, paddr_t pa, size_t num_pages) +vaddr_t mem_alloc_map_dev(struct addr_space* as, as_sec_t section, vaddr_t at, paddr_t pa, + size_t num_pages) { struct ppages temp_page = mem_ppages_get(pa, num_pages); return mem_alloc_map(as, section, &temp_page, at, num_pages, - as->type == AS_HYP ? PTE_HYP_DEV_FLAGS : PTE_VM_DEV_FLAGS); + as->type == AS_HYP ? PTE_HYP_DEV_FLAGS : PTE_VM_DEV_FLAGS); } - - diff --git a/src/core/mpu/vm.c b/src/core/mpu/vm.c index a363fd500..b5004c741 100644 --- a/src/core/mpu/vm.c +++ b/src/core/mpu/vm.c @@ -5,7 +5,7 @@ #include -void vm_mem_prot_init(struct vm* vm, const struct vm_config* config) { +void vm_mem_prot_init(struct vm* vm, const struct vm_config* config) +{ as_init(&vm->as, AS_VM, vm->id, 0); } - diff --git a/src/core/mpu/vmm.c b/src/core/mpu/vmm.c index f95a20dd1..4c11f23df 100644 --- a/src/core/mpu/vmm.c +++ b/src/core/mpu/vmm.c @@ -1,5 +1,5 @@ /** - * SPDX-License-Identifier: Apache-2.0 + * SPDX-License-Identifier: Apache-2.0 * Copyright (c) Bao Project and Contributors. All rights reserved.
*/ @@ -7,18 +7,17 @@ #include #include -void vmm_io_init() { +void vmm_io_init() { } +struct vm_install_info vmm_get_vm_install_info(struct vm_allocation* vm_alloc) +{ + return (struct vm_install_info){ vm_alloc->base, vm_alloc->size }; } -struct vm_install_info vmm_get_vm_install_info(struct vm_allocation* vm_alloc) { - return (struct vm_install_info) { vm_alloc->base, vm_alloc->size }; -} - -void vmm_vm_install(struct vm_install_info *install_info) { +void vmm_vm_install(struct vm_install_info* install_info) +{ size_t num_pages = NUM_PAGES(install_info->size); - struct ppages ppages = - mem_ppages_get(install_info->base_addr, num_pages); - mem_alloc_map(&cpu()->as, SEC_HYP_VM, &ppages, install_info->base_addr, - num_pages, PTE_HYP_FLAGS); + struct ppages ppages = mem_ppages_get(install_info->base_addr, num_pages); + mem_alloc_map(&cpu()->as, SEC_HYP_VM, &ppages, install_info->base_addr, num_pages, + PTE_HYP_FLAGS); } diff --git a/src/core/objpool.c b/src/core/objpool.c index e92b6c67a..db38f6965 100644 --- a/src/core/objpool.c +++ b/src/core/objpool.c @@ -6,13 +6,15 @@ #include #include -void objpool_init(struct objpool *objpool) { - memset(objpool->pool, 0, objpool->objsize*objpool->num); +void objpool_init(struct objpool* objpool) +{ + memset(objpool->pool, 0, objpool->objsize * objpool->num); memset(objpool->bitmap, 0, BITMAP_SIZE(objpool->num)); } -void* objpool_alloc(struct objpool *objpool) { - void *obj = NULL; +void* objpool_alloc(struct objpool* objpool) +{ + void* obj = NULL; spin_lock(&objpool->lock); ssize_t n = bitmap_find_nth(objpool->bitmap, objpool->num, 1, 0, false); if (n >= 0) { @@ -23,18 +25,18 @@ void* objpool_alloc(struct objpool *objpool) { return obj; } -void objpool_free(struct objpool *objpool, void* obj) { +void objpool_free(struct objpool* objpool, void* obj) +{ vaddr_t obj_addr = (vaddr_t)obj; vaddr_t pool_addr = (vaddr_t)objpool->pool; - bool in_pool = - in_range(obj_addr, pool_addr, objpool->objsize * objpool->num); - bool aligned = IS_ALIGNED(obj_addr-pool_addr, objpool->objsize); + bool in_pool = in_range(obj_addr, pool_addr, objpool->objsize * objpool->num); + bool aligned = IS_ALIGNED(obj_addr - pool_addr, objpool->objsize); if (in_pool && aligned) { - size_t n = (obj_addr-pool_addr)/objpool->objsize; + size_t n = (obj_addr - pool_addr) / objpool->objsize; spin_lock(&objpool->lock); bitmap_clear(objpool->bitmap, n); spin_unlock(&objpool->lock); } else { - WARNING("leaked while trying to free stray object"); + WARNING("leaked while trying to free stray object"); } } diff --git a/src/core/vm.c b/src/core/vm.c index a4c853bab..4069229fc 100644 --- a/src/core/vm.c +++ b/src/core/vm.c @@ -28,10 +28,13 @@ void vm_cpu_init(struct vm* vm) spin_unlock(&vm->lock); } -static vcpuid_t vm_calc_vcpu_id(struct vm* vm) { +static vcpuid_t vm_calc_vcpu_id(struct vm* vm) +{ vcpuid_t vcpu_id = 0; - for(size_t i = 0; i < cpu()->id; i++) { - if (!!bit_get(vm->cpus, i)) vcpu_id++; + for (size_t i = 0; i < cpu()->id; i++) { + if (!!bit_get(vm->cpus, i)) { + vcpu_id++; + } } return vcpu_id; } @@ -55,24 +58,23 @@ void vm_map_mem_region(struct vm* vm, struct vm_mem_region* reg) size_t n = NUM_PAGES(reg->size); struct ppages pa_reg; - struct ppages *pa_ptr = NULL; + struct ppages* pa_ptr = NULL; if (reg->place_phys) { pa_reg = mem_ppages_get(reg->phys, n); - pa_reg.colors = reg->colors; + pa_reg.colors = reg->colors; pa_ptr = &pa_reg; } else { pa_ptr = NULL; } - vaddr_t va = mem_alloc_map(&vm->as, SEC_VM_ANY, pa_ptr, - (vaddr_t)reg->base, n, PTE_VM_FLAGS); + vaddr_t va = 
mem_alloc_map(&vm->as, SEC_VM_ANY, pa_ptr, (vaddr_t)reg->base, n, PTE_VM_FLAGS); if (va != (vaddr_t)reg->base) { ERROR("failed to allocate vm's region at 0x%lx", reg->base); } } static void vm_map_img_rgn_inplace(struct vm* vm, const struct vm_config* config, - struct vm_mem_region* reg) + struct vm_mem_region* reg) { vaddr_t img_base = config->image.base_addr; size_t img_size = config->image.size; @@ -86,61 +88,56 @@ static void vm_map_img_rgn_inplace(struct vm* vm, const struct vm_config* config /* map img in place */ struct ppages pa_img = mem_ppages_get(config->image.load_addr, n_img); - mem_alloc_map(&vm->as, SEC_VM_ANY, NULL, (vaddr_t)reg->base, n_before, - PTE_VM_FLAGS); + mem_alloc_map(&vm->as, SEC_VM_ANY, NULL, (vaddr_t)reg->base, n_before, PTE_VM_FLAGS); if (all_clrs(vm->as.colors)) { /* map img in place */ - mem_alloc_map(&vm->as, SEC_VM_ANY, &pa_img, img_base, n_img, - PTE_VM_FLAGS); + mem_alloc_map(&vm->as, SEC_VM_ANY, &pa_img, img_base, n_img, PTE_VM_FLAGS); /* we are mapping in place, config is already reserved */ } else { /* recolour img */ mem_map_reclr(&vm->as, img_base, &pa_img, n_img, PTE_VM_FLAGS); } /* map pages after img */ - mem_alloc_map(&vm->as, SEC_VM_ANY, NULL, img_base + NUM_PAGES(img_size)*PAGE_SIZE, n_aft, + mem_alloc_map(&vm->as, SEC_VM_ANY, NULL, img_base + NUM_PAGES(img_size) * PAGE_SIZE, n_aft, PTE_VM_FLAGS); } -static void vm_install_image(struct vm* vm, struct vm_mem_region* reg) { - +static void vm_install_image(struct vm* vm, struct vm_mem_region* reg) +{ if (reg->place_phys) { paddr_t img_base = (paddr_t)vm->config->image.base_addr; paddr_t img_load_pa = vm->config->image.load_addr; size_t img_sz = vm->config->image.size; if (img_base == img_load_pa) { - // The image is already correctly installed. Our work is done. + // The image is already correctly installed. Our work is done. return; } if (range_overlap_range(img_base, img_sz, img_load_pa, img_sz)) { - // We impose an image load region cannot overlap its runtime region. - // This both simplifies the copying procedure as well as avoids - // limitations of mpu-based memory management which does not allow - // overlapping mappings on the same address space. + // We impose that an image load region cannot overlap its runtime region. This both + // simplifies the copying procedure and avoids limitations of mpu-based memory + // management, which does not allow overlapping mappings on the same address space. ERROR("failed installing vm image.
Image load region overlaps with" - " image runtime region"); + " image runtime region"); } - } - + } + size_t img_num_pages = NUM_PAGES(vm->config->image.size); - struct ppages img_ppages = - mem_ppages_get(vm->config->image.load_addr, img_num_pages); - vaddr_t src_va = mem_alloc_map(&cpu()->as, SEC_HYP_GLOBAL, &img_ppages, - INVALID_VA, img_num_pages, PTE_HYP_FLAGS); - vaddr_t dst_va = mem_map_cpy(&vm->as, &cpu()->as, vm->config->image.base_addr, - INVALID_VA, img_num_pages); + struct ppages img_ppages = mem_ppages_get(vm->config->image.load_addr, img_num_pages); + vaddr_t src_va = mem_alloc_map(&cpu()->as, SEC_HYP_GLOBAL, &img_ppages, INVALID_VA, + img_num_pages, PTE_HYP_FLAGS); + vaddr_t dst_va = + mem_map_cpy(&vm->as, &cpu()->as, vm->config->image.base_addr, INVALID_VA, img_num_pages); memcpy((void*)dst_va, (void*)src_va, vm->config->image.size); cache_flush_range((vaddr_t)dst_va, vm->config->image.size); mem_unmap(&cpu()->as, src_va, img_num_pages, false); mem_unmap(&cpu()->as, dst_va, img_num_pages, false); } -static void vm_map_img_rgn(struct vm* vm, const struct vm_config* config, - struct vm_mem_region* reg) +static void vm_map_img_rgn(struct vm* vm, const struct vm_config* config, struct vm_mem_region* reg) { - if(!reg->place_phys && config->image.inplace) { + if (!reg->place_phys && config->image.inplace) { vm_map_img_rgn_inplace(vm, config, reg); } else { vm_map_mem_region(vm, reg); @@ -152,8 +149,8 @@ static void vm_init_mem_regions(struct vm* vm, const struct vm_config* config) { for (size_t i = 0; i < config->platform.region_num; i++) { struct vm_mem_region* reg = &config->platform.regions[i]; - bool img_is_in_rgn = range_in_range( - config->image.base_addr, config->image.size, reg->base, reg->size); + bool img_is_in_rgn = + range_in_range(config->image.base_addr, config->image.size, reg->base, reg->size); if (img_is_in_rgn) { vm_map_img_rgn(vm, config, reg); } else { @@ -167,18 +164,18 @@ static void vm_init_ipc(struct vm* vm, const struct vm_config* config) vm->ipc_num = config->platform.ipc_num; vm->ipcs = config->platform.ipcs; for (size_t i = 0; i < config->platform.ipc_num; i++) { - struct ipc *ipc = &config->platform.ipcs[i]; - struct shmem *shmem = ipc_get_shmem(ipc->shmem_id); - if(shmem == NULL) { + struct ipc* ipc = &config->platform.ipcs[i]; + struct shmem* shmem = ipc_get_shmem(ipc->shmem_id); + if (shmem == NULL) { WARNING("Invalid shmem id in configuration. Ignored."); continue; } size_t size = ipc->size; - if(ipc->size > shmem->size) { + if (ipc->size > shmem->size) { size = shmem->size; WARNING("Trying to map region to smaller shared memory. 
Truncated"); } - + spin_lock(&shmem->lock); shmem->cpu_masters |= (1ULL << cpu()->id); spin_unlock(&shmem->lock); @@ -188,7 +185,7 @@ static void vm_init_ipc(struct vm* vm, const struct vm_config* config) .size = size, .place_phys = true, .phys = shmem->phys, - .colors = shmem->colors + .colors = shmem->colors, }; vm_map_mem_region(vm, ®); @@ -207,7 +204,7 @@ static void vm_init_dev(struct vm* vm, const struct vm_config* config) } for (size_t j = 0; j < dev->interrupt_num; j++) { - if(!interrupts_vm_assign(vm, dev->interrupts[j])) { + if (!interrupts_vm_assign(vm, dev->interrupts[j])) { ERROR("Failed to assign interrupt id %d", dev->interrupts[j]); } } @@ -217,26 +214,25 @@ static void vm_init_dev(struct vm* vm, const struct vm_config* config) for (size_t i = 0; i < config->platform.dev_num; i++) { struct vm_dev_region* dev = &config->platform.devs[i]; if (dev->id) { - if(!io_vm_add_device(vm, dev->id)){ + if (!io_vm_add_device(vm, dev->id)) { ERROR("Failed to add device to iommu"); } } } } - } -static struct vm* vm_allocation_init(struct vm_allocation* vm_alloc) { - struct vm *vm = vm_alloc->vm; +static struct vm* vm_allocation_init(struct vm_allocation* vm_alloc) +{ + struct vm* vm = vm_alloc->vm; vm->vcpus = vm_alloc->vcpus; return vm; } -struct vm* vm_init(struct vm_allocation* vm_alloc, const struct vm_config* config, - bool master, vmid_t vm_id) +struct vm* vm_init(struct vm_allocation* vm_alloc, const struct vm_config* config, bool master, + vmid_t vm_id) { - - struct vm *vm = vm_allocation_init(vm_alloc); + struct vm* vm = vm_allocation_init(vm_alloc); /** * Before anything else, initialize vm structure. @@ -260,15 +256,13 @@ struct vm* vm_init(struct vm_allocation* vm_alloc, const struct vm_config* confi cpu_sync_barrier(&vm->sync); /** - * Perform architecture dependent initializations. This includes, - * for example, setting the page table pointer and other virtualization - * extensions specifics. + * Perform architecture dependent initializations. This includes, for example, setting the page + * table pointer and other virtualization extensions specifics. */ vm_arch_init(vm, config); /** - * Create the VM's address space according to configuration and where - * its image was loaded. + * Create the VM's address space according to configuration and where its image was loaded. 
*/ if (master) { vm_init_mem_regions(vm, config); @@ -289,12 +283,12 @@ void vm_emul_add_mem(struct vm* vm, struct emul_mem* emu) void vm_emul_add_reg(struct vm* vm, struct emul_reg* emu) { list_push(&vm->emul_reg_list, &emu->node); -} +} emul_handler_t vm_emul_get_mem(struct vm* vm, vaddr_t addr) { emul_handler_t handler = NULL; - list_foreach(vm->emul_mem_list, struct emul_mem, emu) { + list_foreach (vm->emul_mem_list, struct emul_mem, emu) { if (addr >= emu->va_base && (addr < (emu->va_base + emu->size))) { handler = emu->handler; break; @@ -307,10 +301,10 @@ emul_handler_t vm_emul_get_mem(struct vm* vm, vaddr_t addr) emul_handler_t vm_emul_get_reg(struct vm* vm, vaddr_t addr) { emul_handler_t handler = NULL; - list_foreach(vm->emul_reg_list, struct emul_reg, emu) { - if(emu->addr == addr) { + list_foreach (vm->emul_reg_list, struct emul_reg, emu) { + if (emu->addr == addr) { handler = emu->handler; - break; + break; } } @@ -327,30 +321,24 @@ void vm_msg_broadcast(struct vm* vm, struct cpu_msg* msg) } } -__attribute__((weak)) cpumap_t vm_translate_to_pcpu_mask(struct vm* vm, - cpumap_t mask, - size_t len) +__attribute__((weak)) cpumap_t vm_translate_to_pcpu_mask(struct vm* vm, cpumap_t mask, size_t len) { cpumap_t pmask = 0; cpuid_t shift; for (size_t i = 0; i < len; i++) { - if ((mask & (1ULL << i)) && - ((shift = vm_translate_to_pcpuid(vm, i)) != INVALID_CPUID)) { + if ((mask & (1ULL << i)) && ((shift = vm_translate_to_pcpuid(vm, i)) != INVALID_CPUID)) { pmask |= (1ULL << shift); } } return pmask; } -__attribute__((weak)) cpumap_t vm_translate_to_vcpu_mask(struct vm* vm, - cpumap_t mask, - size_t len) +__attribute__((weak)) cpumap_t vm_translate_to_vcpu_mask(struct vm* vm, cpumap_t mask, size_t len) { cpumap_t pmask = 0; vcpuid_t shift; for (size_t i = 0; i < len; i++) { - if ((mask & (1ULL << i)) && - ((shift = vm_translate_to_vcpuid(vm, i)) != INVALID_CPUID)) { + if ((mask & (1ULL << i)) && ((shift = vm_translate_to_vcpuid(vm, i)) != INVALID_CPUID)) { pmask |= (1ULL << shift); } } diff --git a/src/core/vmm.c b/src/core/vmm.c index a620c3284..0866b32d9 100644 --- a/src/core/vmm.c +++ b/src/core/vmm.c @@ -22,7 +22,8 @@ static struct vm_assignment { volatile bool install_info_ready; } vm_assign[CONFIG_VM_NUM]; -static bool vmm_assign_vcpu(bool *master, vmid_t *vm_id) { +static bool vmm_assign_vcpu(bool* master, vmid_t* vm_id) +{ bool assigned = false; *master = false; /* Assign cpus according to vm affinity. 
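 *
 * For illustration (the affinity field name follows the usual Bao config convention and the
 * mask value is made up): with cpu_affinity == 0b0011, only physical cpus 0 and 1 can claim
 * one of this VM's vcpus in this first pass, e.g.:
 *
 *     bool wants_this_cpu = (config.vmlist[i].cpu_affinity & (1UL << cpu()->id)) != 0;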
*/ @@ -36,8 +37,7 @@ static bool vmm_assign_vcpu(bool *master, vmid_t *vm_id) { *master = true; assigned = true; *vm_id = i; - } else if (vm_assign[i].ncpus < - config.vmlist[i].platform.cpu_num) { + } else if (vm_assign[i].ncpus < config.vmlist[i].platform.cpu_num) { assigned = true; vm_assign[i].ncpus++; vm_assign[i].cpus |= (1UL << cpu()->id); @@ -53,8 +53,7 @@ static bool vmm_assign_vcpu(bool *master, vmid_t *vm_id) { if (assigned == false) { for (size_t i = 0; i < config.vmlist_size && !assigned; i++) { spin_lock(&vm_assign[i].lock); - if (vm_assign[i].ncpus < - config.vmlist[i].platform.cpu_num) { + if (vm_assign[i].ncpus < config.vmlist[i].platform.cpu_num) { if (!vm_assign[i].master) { vm_assign[i].master = true; vm_assign[i].ncpus++; @@ -76,15 +75,13 @@ static bool vmm_assign_vcpu(bool *master, vmid_t *vm_id) { return assigned; } -static bool vmm_alloc_vm(struct vm_allocation* vm_alloc, struct vm_config *config) { - +static bool vmm_alloc_vm(struct vm_allocation* vm_alloc, struct vm_config* config) +{ /** - * We know that we will allocate a block aligned to the PAGE_SIZE, which - * is guaranteed to fulfill the alignment of all types. - * However, to guarantee the alignment of all fields, when we calculate - * the size of a field in the vm_allocation struct, we must align the - * previous total size calculated until that point, to the alignment of - * the type of the next field. + * We know that we will allocate a block aligned to the PAGE_SIZE, which is guaranteed to + * fulfill the alignment of all types. However, to guarantee the alignment of all fields, when + * we calculate the size of a field in the vm_allocation struct, we must align the previous + * total size calculated until that point, to the alignment of the type of the next field. 
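+ *
+ * For illustration only (the vcpu count variable and the use of __alignof__ are assumptions
+ * of this sketch), the computation described above amounts to:
+ *
+ *     size_t total_size = sizeof(struct vm);
+ *     size_t vcpus_offset = ALIGN(total_size, __alignof__(struct vcpu));
+ *     total_size = vcpus_offset + nr_vcpus * sizeof(struct vcpu);
+ *     total_size = ALIGN(total_size, PAGE_SIZE); // the block itself is page-aligned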
*/ size_t total_size = sizeof(struct vm); @@ -98,17 +95,18 @@ static bool vmm_alloc_vm(struct vm_allocation* vm_alloc, struct vm_config *confi } memset((void*)allocation, 0, total_size); - vm_alloc->base = (vaddr_t) allocation; + vm_alloc->base = (vaddr_t)allocation; vm_alloc->size = total_size; - vm_alloc->vm = (struct vm*) vm_alloc->base; - vm_alloc->vcpus = (struct vcpu*) (vm_alloc->base + vcpus_offset); + vm_alloc->vm = (struct vm*)vm_alloc->base; + vm_alloc->vcpus = (struct vcpu*)(vm_alloc->base + vcpus_offset); return true; } -static struct vm_allocation* vmm_alloc_install_vm(vmid_t vm_id, bool master) { - struct vm_allocation *vm_alloc = &vm_assign[vm_id].vm_alloc; - struct vm_config *vm_config = &config.vmlist[vm_id]; +static struct vm_allocation* vmm_alloc_install_vm(vmid_t vm_id, bool master) +{ + struct vm_allocation* vm_alloc = &vm_assign[vm_id].vm_alloc; + struct vm_config* vm_config = &config.vmlist[vm_id]; if (master) { if (!vmm_alloc_vm(vm_alloc, vm_config)) { ERROR("Failed to allocate vm internal structures"); @@ -117,7 +115,7 @@ static struct vm_allocation* vmm_alloc_install_vm(vmid_t vm_id, bool master) { fence_ord_write(); vm_assign[vm_id].install_info_ready = true; } else { - while (!vm_assign[vm_id].install_info_ready); + while (!vm_assign[vm_id].install_info_ready) { } vmm_vm_install(&vm_assign[vm_id].vm_install_info); fence_sync_write(); } @@ -136,9 +134,9 @@ void vmm_init() bool master = false; vmid_t vm_id = -1; if (vmm_assign_vcpu(&master, &vm_id)) { - struct vm_allocation *vm_alloc = vmm_alloc_install_vm(vm_id, master); - struct vm_config *vm_config = &config.vmlist[vm_id]; - struct vm *vm = vm_init(vm_alloc, vm_config, master, vm_id); + struct vm_allocation* vm_alloc = vmm_alloc_install_vm(vm_id, master); + struct vm_config* vm_config = &config.vmlist[vm_id]; + struct vm* vm = vm_init(vm_alloc, vm_config, master, vm_id); cpu_sync_barrier(&vm->sync); vcpu_run(cpu()->vcpu); } else { diff --git a/src/lib/bitmap.c b/src/lib/bitmap.c index fbaf0e50e..1dd269a66 100644 --- a/src/lib/bitmap.c +++ b/src/lib/bitmap.c @@ -5,25 +5,27 @@ #include -ssize_t bitmap_find_nth(bitmap_t* map, size_t size, size_t nth, size_t start, - bool set) +ssize_t bitmap_find_nth(bitmap_t* map, size_t size, size_t nth, size_t start, bool set) { - if (size <= 0 || nth <= 0 || start < 0) return -1; + if (size <= 0 || nth <= 0 || start < 0) { + return -1; + } size_t count = 0; unsigned bit = set ? 1 : 0; for (ssize_t i = start; i < size; i++) { if (bitmap_get(map, i) == bit) { - if (++count == nth) return i; + if (++count == nth) { + return i; + } } } return -1; } -size_t bitmap_count_consecutive(bitmap_t* map, size_t size, size_t start, - size_t n) +size_t bitmap_count_consecutive(bitmap_t* map, size_t size, size_t start, size_t n) { size_t pos = start; size_t count = 0; @@ -33,16 +35,18 @@ size_t bitmap_count_consecutive(bitmap_t* map, size_t size, size_t start, bitmap_granule_t init_mask = BITMAP_GRANULE_MASK(start_offset, first_word_bits); bitmap_granule_t mask; - if (n <= 1) return n; + if (n <= 1) { + return n; + } mask = set ? init_mask : ~init_mask; - if (!((map[pos/BITMAP_GRANULE_LEN] ^ mask) & init_mask)) { + if (!((map[pos / BITMAP_GRANULE_LEN] ^ mask) & init_mask)) { count += first_word_bits; pos += first_word_bits; } mask = set ? 
~0 : 0; - while ((pos < size) && !(map[pos/BITMAP_GRANULE_LEN] ^ mask) && (count < n)) { + while ((pos < size) && !(map[pos / BITMAP_GRANULE_LEN] ^ mask) && (count < n)) { count += BITMAP_GRANULE_LEN; pos += BITMAP_GRANULE_LEN; } @@ -55,19 +59,20 @@ size_t bitmap_count_consecutive(bitmap_t* map, size_t size, size_t start, return count; } -ssize_t bitmap_find_consec(bitmap_t* map, size_t size, size_t start, size_t n, - bool set) +ssize_t bitmap_find_consec(bitmap_t* map, size_t size, size_t start, size_t n, bool set) { ssize_t count = 0; ssize_t i = 0; // find first set - if ((i = bitmap_find_nth(map, size, 1, start, set)) < 0) return -1; + if ((i = bitmap_find_nth(map, size, 1, start, set)) < 0) { + return -1; + } while (i < size) { // find the last (with n as maximum) contiguous set page count = bitmap_count_consecutive(map, size, i, n); - if (count < n) { // if didn't found enough n contiguous set pages + if (count < n) { // if we didn't find n contiguous set pages i += count; // find the last contiguous ~set page i += bitmap_count_consecutive(map, size, i, -1); @@ -76,7 +81,9 @@ ssize_t bitmap_find_consec(bitmap_t* map, size_t size, size_t start, size_t n, } } - if (i >= size) i = -1; + if (i >= size) { + i = -1; + } return i; } @@ -86,19 +93,19 @@ void bitmap_set_consecutive(bitmap_t* map, size_t start, size_t n) { size_t pos = start; size_t count = n; size_t start_offset = start % BITMAP_GRANULE_LEN; - size_t first_word_bits = min(BITMAP_GRANULE_LEN - start_offset, count); + size_t first_word_bits = min(BITMAP_GRANULE_LEN - start_offset, count); - map[pos/BITMAP_GRANULE_LEN] |= BITMAP_GRANULE_MASK(start_offset, first_word_bits); + map[pos / BITMAP_GRANULE_LEN] |= BITMAP_GRANULE_MASK(start_offset, first_word_bits); pos += first_word_bits; count -= first_word_bits; while (count >= BITMAP_GRANULE_LEN) { - map[pos/BITMAP_GRANULE_LEN] |= ~0; + map[pos / BITMAP_GRANULE_LEN] |= ~0; pos += BITMAP_GRANULE_LEN; count -= BITMAP_GRANULE_LEN; } if (count > 0) { - map[pos/BITMAP_GRANULE_LEN] |= BITMAP_GRANULE_MASK(0, count); + map[pos / BITMAP_GRANULE_LEN] |= BITMAP_GRANULE_MASK(0, count); } } diff --git a/src/lib/inc/bit.h b/src/lib/inc/bit.h index 93284f101..27ab1c4b3 100644 --- a/src/lib/inc/bit.h +++ b/src/lib/inc/bit.h @@ -9,63 +9,62 @@ #include /** - * The extra shift is because both arm and riscv logical shift instructions - * support a maximum of machine word length minus one bit shits. This covers - * the corner case of runtime full machine word length masks with the cost of - * an extra shift instruction. For static masks, there should be no extra costs. + * The extra shift is because both arm and riscv logical shift instructions support a maximum of + * machine word length minus one bit shifts. This covers the corner case of runtime full machine + * word length masks with the cost of an extra shift instruction. For static masks, there should be + * no extra costs.
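+ *
+ * Worked example: with OFF = 0 and LEN = 32 on a 32-bit type, the naive
+ * ((UINT32_C(1) << 32) - 1) shifts by the full word length, which C leaves undefined and
+ * which arm/riscv shifters would truncate; the split form (((UINT32_C(1) << 31) << 1) - 1)
+ * keeps every shift below 32 and, through unsigned wrap-around, still evaluates to
+ * 0xffffffff.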
*/ -#define BIT32_MASK(OFF, LEN) ((((UINT32_C(1)<<((LEN)-1))<<1)-1)<<(OFF)) -#define BIT64_MASK(OFF, LEN) ((((UINT64_C(1)<<((LEN)-1))<<1)-1)<<(OFF)) -#define BIT_MASK(OFF, LEN) (((((1UL)<<((LEN)-1))<<1)-1)<<(OFF)) +#define BIT32_MASK(OFF, LEN) ((((UINT32_C(1) << ((LEN)-1)) << 1) - 1) << (OFF)) +#define BIT64_MASK(OFF, LEN) ((((UINT64_C(1) << ((LEN)-1)) << 1) - 1) << (OFF)) +#define BIT_MASK(OFF, LEN) (((((1UL) << ((LEN)-1)) << 1) - 1) << (OFF)) #ifndef __ASSEMBLER__ -#define BIT_OPS_GEN(PRE, TYPE, LIT, MASK) \ - static inline TYPE PRE ## _get(TYPE word, size_t off)\ - {\ - return word & ((LIT) << off);\ - }\ - static inline TYPE PRE ## _set(TYPE word, size_t off)\ - {\ - return word |= ((LIT) << off);\ - }\ - static inline TYPE PRE ## _clear(TYPE word, size_t off)\ - {\ - return word &= ~((LIT) << off);\ - }\ - static inline TYPE PRE ## _extract(TYPE word, size_t off, size_t len)\ - {\ - return (word >> off) & MASK(0, len);\ - }\ - static inline TYPE PRE ## _insert(TYPE word, TYPE val, size_t off,\ - size_t len)\ - {\ - return (~MASK(off, len) & word) | ((MASK(0, len) & val) << off);\ - }\ - static inline ssize_t PRE ## _ffs(TYPE word)\ - {\ - ssize_t pos = (ssize_t)0;\ - TYPE mask = (LIT);\ - while (mask != 0U) {\ - if ((mask & word) != 0U) {\ - break;\ - }\ - mask <<= 1U;\ - pos++;\ - }\ - return (mask != 0U) ? pos : (ssize_t)-1;\ - }\ - static inline ssize_t PRE ## _count(TYPE word)\ - {\ - size_t count = 0;\ - TYPE mask = (LIT);\ - while (mask != 0U) {\ - if ((mask & word) != 0U) {\ - count += 1;\ - }\ - mask <<= 1U;\ - }\ - return count;\ +#define BIT_OPS_GEN(PRE, TYPE, LIT, MASK) \ + static inline TYPE PRE##_get(TYPE word, size_t off) \ + { \ + return word & ((LIT) << off); \ + } \ + static inline TYPE PRE##_set(TYPE word, size_t off) \ + { \ + return word |= ((LIT) << off); \ + } \ + static inline TYPE PRE##_clear(TYPE word, size_t off) \ + { \ + return word &= ~((LIT) << off); \ + } \ + static inline TYPE PRE##_extract(TYPE word, size_t off, size_t len) \ + { \ + return (word >> off) & MASK(0, len); \ + } \ + static inline TYPE PRE##_insert(TYPE word, TYPE val, size_t off, size_t len) \ + { \ + return (~MASK(off, len) & word) | ((MASK(0, len) & val) << off); \ + } \ + static inline ssize_t PRE##_ffs(TYPE word) \ + { \ + ssize_t pos = (ssize_t)0; \ + TYPE mask = (LIT); \ + while (mask != 0U) { \ + if ((mask & word) != 0U) { \ + break; \ + } \ + mask <<= 1U; \ + pos++; \ + } \ + return (mask != 0U) ? pos : (ssize_t)-1; \ + } \ + static inline ssize_t PRE##_count(TYPE word) \ + { \ + size_t count = 0; \ + TYPE mask = (LIT); \ + while (mask != 0U) { \ + if ((mask & word) != 0U) { \ + count += 1; \ + } \ + mask <<= 1U; \ + } \ + return count; \ } BIT_OPS_GEN(bit32, uint32_t, UINT32_C(1), BIT32_MASK); diff --git a/src/lib/inc/bitmap.h b/src/lib/inc/bitmap.h index dea42b2b9..a5eb6cb56 100644 --- a/src/lib/inc/bitmap.h +++ b/src/lib/inc/bitmap.h @@ -16,17 +16,14 @@ typedef bitmap_granule_t bitmap_t; static const bitmap_granule_t ONE = 1; -#define BITMAP_GRANULE_LEN (sizeof(bitmap_granule_t) * 8) -#define BITMAP_GRANULE_MASK(O, L) BIT32_MASK((O), (L)) +#define BITMAP_GRANULE_LEN (sizeof(bitmap_granule_t) * 8) +#define BITMAP_GRANULE_MASK(O, L) BIT32_MASK((O), (L)) -#define BITMAP_SIZE(SIZE) (((SIZE) / BITMAP_GRANULE_LEN) + \ - ((SIZE) % BITMAP_GRANULE_LEN ? 1 : 0)) +#define BITMAP_SIZE(SIZE) (((SIZE) / BITMAP_GRANULE_LEN) + ((SIZE) % BITMAP_GRANULE_LEN ? 
1 : 0)) -#define BITMAP_ALLOC(NAME, SIZE) bitmap_granule_t NAME[BITMAP_SIZE((SIZE))] - -#define BITMAP_ALLOC_ARRAY(NAME, SIZE ,NUM) \ - bitmap_granule_t NAME[NUM][BITMAP_SIZE((SIZE))] +#define BITMAP_ALLOC(NAME, SIZE) bitmap_granule_t NAME[BITMAP_SIZE(SIZE)] +#define BITMAP_ALLOC_ARRAY(NAME, SIZE, NUM) bitmap_granule_t NAME[NUM][BITMAP_SIZE(SIZE)] static inline void bitmap_set(bitmap_t* map, size_t bit) { @@ -40,37 +37,34 @@ static inline void bitmap_clear(bitmap_t* map, size_t bit) static inline unsigned bitmap_get(bitmap_t* map, size_t bit) { - return (map[bit / BITMAP_GRANULE_LEN] & (ONE << (bit % BITMAP_GRANULE_LEN))) - ? 1U - : 0U; + return (map[bit / BITMAP_GRANULE_LEN] & (ONE << (bit % BITMAP_GRANULE_LEN))) ? 1U : 0U; } void bitmap_set_consecutive(bitmap_t* map, size_t start, size_t n); -static inline void bitmap_clear_consecutive(bitmap_t* map, size_t start, - size_t n) +static inline void bitmap_clear_consecutive(bitmap_t* map, size_t start, size_t n) { - for (size_t i = 0; i < n; i++) bitmap_clear(map, start + i); + for (size_t i = 0; i < n; i++) { + bitmap_clear(map, start + i); + } } -static inline size_t bitmap_count(bitmap_t* map, size_t start, size_t n, - bool set) +static inline size_t bitmap_count(bitmap_t* map, size_t start, size_t n, bool set) { size_t count = 0; for (size_t i = start; i < n; i++) { - if (bitmap_get(map, i) == set) count++; + if (bitmap_get(map, i) == set) { + count++; + } } return count; } -ssize_t bitmap_find_nth(bitmap_t* map, size_t size, size_t nth, size_t start, - bool set); +ssize_t bitmap_find_nth(bitmap_t* map, size_t size, size_t nth, size_t start, bool set); -size_t bitmap_count_consecutive(bitmap_t* map, size_t size, size_t start, - size_t n); +size_t bitmap_count_consecutive(bitmap_t* map, size_t size, size_t start, size_t n); -ssize_t bitmap_find_consec(bitmap_t* map, size_t size, size_t start, size_t n, - bool set); +ssize_t bitmap_find_consec(bitmap_t* map, size_t size, size_t start, size_t n, bool set); #endif /* __BITMAP_H__ */ diff --git a/src/lib/inc/list.h b/src/lib/inc/list.h index 9f49206fc..5551947a0 100644 --- a/src/lib/inc/list.h +++ b/src/lib/inc/list.h @@ -16,13 +16,12 @@ struct list { spinlock_t lock; }; -#define list_foreach(list, type, nodeptr) \ - for (type* nodeptr = ((type*)list.head); nodeptr != NULL; \ - nodeptr = *((type**)nodeptr)) +#define list_foreach(list, type, nodeptr) \ + for (type* nodeptr = ((type*)list.head); nodeptr != NULL; nodeptr = *((type**)nodeptr)) -#define list_foreach_tail(list, type, nodeptr, tail) \ - for (type* nodeptr = ((type*)list.head), *tail = NULL; \ - nodeptr != NULL; tail = nodeptr, nodeptr = *((type**)nodeptr)) +#define list_foreach_tail(list, type, nodeptr, tail) \ + for (type* nodeptr = ((type*)list.head), *tail = NULL; nodeptr != NULL; \ + tail = nodeptr, nodeptr = *((type**)nodeptr)) static inline void list_init(struct list* list) { @@ -39,11 +38,15 @@ static inline void list_push(struct list* list, node_t* node) *node = NULL; spin_lock(&list->lock); - if (list->tail != NULL) *list->tail = node; + if (list->tail != NULL) { + *list->tail = node; + } list->tail = node; - if (list->head == NULL) list->head = node; + if (list->head == NULL) { + list->head = node; + } spin_unlock(&list->lock); } @@ -59,7 +62,9 @@ static inline node_t* list_pop(struct list* list) temp = list->head; list->head = *list->head; - if (list->head == NULL) list->tail = NULL; + if (list->head == NULL) { + list->tail = NULL; + } *temp = NULL; } @@ -96,13 +101,13 @@ static inline bool list_rm(struct list* list, 
node_t* node) } if (temp != NULL && temp == node) { /* found the node, remove it */ - if(temp_prev != NULL) { + if (temp_prev != NULL) { *temp_prev = *temp; } else { list->head = *temp; } - if(list->head == NULL) { + if (list->head == NULL) { list->tail = NULL; } } @@ -121,8 +126,8 @@ static inline void list_insert_ordered(struct list* list, node_t* node, node_cmp *node = NULL; spin_lock(&list->lock); - node_t *cur = list->head; - node_t *tail = NULL; + node_t* cur = list->head; + node_t* tail = NULL; while (cur != NULL) { if (cmp(cur, node) > 0) { diff --git a/src/lib/inc/printk.h b/src/lib/inc/printk.h index 0d6252ee7..5a083ef5f 100644 --- a/src/lib/inc/printk.h +++ b/src/lib/inc/printk.h @@ -1,15 +1,14 @@ -/** - * SPDX-License-Identifier: Apache-2.0 - * Copyright (c) Bao Project and Contributors. All rights reserved. - */ - -#ifndef __PRINTK_H -#define __PRINTK_H - -#include -#include - -size_t vsnprintk(char* buf, size_t buf_size, const char** fmt, - va_list* args); - -#endif /* __PRINTK_H */ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. + */ + +#ifndef __PRINTK_H +#define __PRINTK_H + +#include +#include + +size_t vsnprintk(char* buf, size_t buf_size, const char** fmt, va_list* args); + +#endif /* __PRINTK_H */ diff --git a/src/lib/inc/string.h b/src/lib/inc/string.h index 960ae9edf..c8fe70d45 100644 --- a/src/lib/inc/string.h +++ b/src/lib/inc/string.h @@ -1,19 +1,19 @@ -/** - * SPDX-License-Identifier: Apache-2.0 - * Copyright (c) Bao Project and Contributors. All rights reserved. - */ - -#ifndef __STRING_H_ -#define __STRING_H_ - -#include - -void *memcpy(void *dst, const void *src, size_t count); -void *memset(void *dest, int c, size_t count); - -char *strcat(char *dest, char *src); -size_t strlen(const char *s); -size_t strnlen(const char *s, size_t n); -char *strcpy(char *dest, char *src); - -#endif /* __STRING_H_ */ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. 
+ */ + +#ifndef __STRING_H_ +#define __STRING_H_ + +#include + +void* memcpy(void* dst, const void* src, size_t count); +void* memset(void* dest, int c, size_t count); + +char* strcat(char* dest, char* src); +size_t strlen(const char* s); +size_t strnlen(const char* s, size_t n); +char* strcpy(char* dest, char* src); + +#endif /* __STRING_H_ */ diff --git a/src/lib/inc/util.h b/src/lib/inc/util.h index 82a3d9ad1..61d203797 100644 --- a/src/lib/inc/util.h +++ b/src/lib/inc/util.h @@ -9,81 +9,78 @@ /* UTILITY MACROS */ /* align VAL to TO which must be power a two */ -#define ALIGN(VAL, TO) ((((VAL) + (TO)-1) / (TO)) * TO) -#define IS_ALIGNED(VAL, TO) (!((VAL)%(TO))) +#define ALIGN(VAL, TO) ((((VAL) + (TO)-1) / (TO)) * TO) +#define IS_ALIGNED(VAL, TO) (!((VAL) % (TO))) #define ALIGN_FLOOR(VAL, TO) ((VAL) & ~((TO)-1)) -#define NUM_PAGES(SZ) (ALIGN(SZ, PAGE_SIZE)/PAGE_SIZE) -#define PAGE_OFFSET_MASK ((PAGE_SIZE)-1) -#define PAGE_FRAME_MASK (~(PAGE_OFFSET_MASK)) +#define NUM_PAGES(SZ) (ALIGN(SZ, PAGE_SIZE) / PAGE_SIZE) +#define PAGE_OFFSET_MASK ((PAGE_SIZE)-1) +#define PAGE_FRAME_MASK (~(PAGE_OFFSET_MASK)) -#define SR_OR(VAL, SHIFT) (((VAL) >> (SHIFT)) | VAL) +#define SR_OR(VAL, SHIFT) (((VAL) >> (SHIFT)) | VAL) /* Next Power Of Two */ -#define NPOT(VAL) \ - ((SR_OR(((VAL)-1), 1) | SR_OR(SR_OR(((VAL)-1), 1), 2) | \ - SR_OR(SR_OR(SR_OR(((VAL)-1), 1), 2), 4) | \ - SR_OR(SR_OR(SR_OR(SR_OR(((VAL)-1), 1), 2), 4), 8) | \ - SR_OR(SR_OR(SR_OR(SR_OR(SR_OR(((VAL)-1), 1), 2), 4), 8), 16)) + \ - 1) +#define NPOT(VAL) \ + ((SR_OR(((VAL)-1), 1) | SR_OR(SR_OR(((VAL)-1), 1), 2) | \ + SR_OR(SR_OR(SR_OR(((VAL)-1), 1), 2), 4) | \ + SR_OR(SR_OR(SR_OR(SR_OR(((VAL)-1), 1), 2), 4), 8) | \ + SR_OR(SR_OR(SR_OR(SR_OR(SR_OR(((VAL)-1), 1), 2), 4), 8), 16)) + \ + 1) /* Previous Power Of Two */ -#define PPOT(VAL) (NPOT((VAL)) - (NPOT((VAL)) >> 1)) +#define PPOT(VAL) (NPOT(VAL) - (NPOT(VAL) >> 1)) -#define STR(s) #s -#define XSTR(s) STR(s) +#define STR(s) #s +#define XSTR(s) STR(s) #ifndef __ASSEMBLER__ #define DEFINE_OFFSET(SYMBOL, STRUCT, FIELD) \ asm volatile("\n-> " XSTR(SYMBOL) " %0 \n" : : "i"(offsetof(STRUCT, FIELD))) -#define DEFINE_SIZE(SYMBOL, TYPE) \ - asm volatile("\n-> " XSTR(SYMBOL) " %0 \n" : : "i"(sizeof(TYPE))) +#define DEFINE_SIZE(SYMBOL, TYPE) asm volatile("\n-> " XSTR(SYMBOL) " %0 \n" : : "i"(sizeof(TYPE))) -#define max(n1, n2) (((n1) > (n2)) ? (n1) : (n2)) -#define min(n1, n2) (((n1) < (n2)) ? (n1) : (n2)) +#define max(n1, n2) (((n1) > (n2)) ? (n1) : (n2)) +#define min(n1, n2) (((n1) < (n2)) ? 
(n1) : (n2)) static inline bool range_overlap_range(unsigned long base1, unsigned long size1, - unsigned long base2, unsigned long size2) { - + unsigned long base2, unsigned long size2) +{ vaddr_t reg1_lim = base1 + size1 - 1; vaddr_t reg2_lim = base2 + size2 - 1; - return (base1 >= base2 && base1 <= reg2_lim) || - (reg1_lim >= base2 && reg1_lim <= reg2_lim) || + return (base1 >= base2 && base1 <= reg2_lim) || (reg1_lim >= base2 && reg1_lim <= reg2_lim) || (base1 <= base2 && reg1_lim >= reg2_lim); } -static inline bool range_in_range(unsigned long base1, unsigned long size1, - unsigned long base2, unsigned long size2) { - +static inline bool range_in_range(unsigned long base1, unsigned long size1, unsigned long base2, + unsigned long size2) +{ unsigned long limit1 = base1 + size1; unsigned long limit2 = base2 + size2; /* Saturate possible overflows */ - if (limit1 < base1) { + if (limit1 < base1) { limit1 = ULONG_MAX; } if (limit2 < base2) { - limit2= ULONG_MAX; + limit2 = ULONG_MAX; } return (base1 >= base2) && (limit1 <= limit2); } /* WARNING! does not check for overflow! */ -#define in_range(_addr, _base, _size) range_in_range(_addr, 0, _base, _size) - +#define in_range(_addr, _base, _size) range_in_range(_addr, 0, _base, _size) /** - * Check if a given macro was defined. Note it only works wither if the macro - * is undefined or defined to the value 1. If the macro is defined with any - * other value it will fail recognizing its defined. + * Check if a given macro was defined. Note it only works if the macro is either undefined or + * defined to the value 1. If the macro is defined to any other value it will fail to recognize + * that it is defined. */ -#define DEFINED(MACRO) _DEFINED(MACRO) -#define _DEFINED_1 0, -#define _DEFINED(VALUE) __DEFINED(_DEFINED_ ## VALUE) -#define __DEFINED(VALUE) ___DEFINED(VALUE true, false) -#define ___DEFINED(IGNORE, RESULT, ...) (RESULT) +#define DEFINED(MACRO) _DEFINED(MACRO) +#define _DEFINED_1 0, +#define _DEFINED(VALUE) __DEFINED(_DEFINED_##VALUE) +#define __DEFINED(VALUE) ___DEFINED(VALUE true, false) +#define ___DEFINED(IGNORE, RESULT, ...) (RESULT) #endif diff --git a/src/lib/printk.c b/src/lib/printk.c index 67138e06f..9e88a1bbe 100644 --- a/src/lib/printk.c +++ b/src/lib/printk.c @@ -1,182 +1,177 @@ -/** - * SPDX-License-Identifier: Apache-2.0 - * Copyright (c) Bao Project and Contributors. All rights reserved. - */ - -#include - -#define F_LONG (1U << 0U) -#define F_UNSIGNED (1U << 1U) -#define F_BASE16 (1U << 2U) - -static inline char digit_to_char(unsigned long i, unsigned int base) -{ - unsigned long c; - unsigned long digit = i % base; - if (i < 10U) { - c = ((unsigned long)'0') + digit; - } else { - c = ((unsigned long)'a') + - (digit - 10U); - } - return (char)c; -} - -static inline void printc(char** buf, char c) -{ - if (buf != NULL) { - **buf = c; - (*buf)++; - } -} - -static size_t prints(char** buf, const char* str) -{ - const char* str_it = str; - size_t char_count = 0; - while (*str_it != '\0') { - printc(buf, *str_it); - char_count++; - str_it++; - } - return char_count; -} - -static size_t vprintd(char** buf, unsigned int flags, va_list* args) -{ - unsigned long u; - size_t base = ((flags & F_BASE16) != 0U) ? (unsigned int)16U : 10U; - bool is_long = ((flags & F_LONG) != 0U); - bool is_unsigned = ((flags & F_UNSIGNED) != 0U) || (base != 10U); - size_t divisor; - unsigned long tmp; - size_t char_count = 0; - - if (is_unsigned) { - u = is_long ?
va_arg(*args, unsigned long) : - va_arg(*args, unsigned int); - } else { - signed long s = - is_long ? va_arg(*args, signed long) : va_arg(*args, signed int); - if (s < 0) { - printc(buf, '-'); - char_count++; - s = -s; - } - u = (unsigned long)s; - } - - divisor = 1; - tmp = u; - while (tmp >= base) { - divisor *= base; - tmp /= base; - } - - while (divisor > 0U) { - unsigned long digit = u / divisor; - u -= digit * divisor; - divisor /= base; - printc(buf, digit_to_char(digit, base)); - char_count++; - } - - return char_count; -} - -/** - * This is a limited printf implementation. The format string only supports - * integer, string and char arguments. That is, 'd', 'u' or 'x', 's' and 'c' - * specifiers, respectively. For integers, it only supports the none and 'l' - * lengths. It does not support any flags, width or precision fields. If - * present, this fields are ignored. - * - * Note this does not follow the C lib vsnprintf specification. It returns the - * numbers of characters written to the buffer, and changes fmt to point to the - * first character that was not printed. - */ -size_t vsnprintk(char* buf, size_t buf_size, const char** fmt, - va_list* args) -{ - char* buf_it = buf; - size_t buf_left = buf_size; - const char* fmt_it = *fmt; - va_list args_tmp; - - while ((*fmt_it != '\0') && (buf_left > 0U)) { - if ((*fmt_it) != '%') { - printc(&buf_it, *fmt_it); - buf_left--; - } else { - unsigned int flags; - bool ignore_char; - size_t arg_char_count = 0; - - fmt_it++; - flags = 0; - if (*fmt_it == 'l') { - fmt_it++; - flags = flags | F_LONG; - if (*fmt_it == 'l') { - fmt_it++; - } // ignore long long - } - - do { - ignore_char = false; - switch (*fmt_it) { - case 'x': - case 'X': - flags = flags | F_BASE16; - /* fallthrough */ - case 'u': - flags = flags | F_UNSIGNED; - /* fallthrough */ - case 'd': - case 'i': - va_copy(args_tmp, *args); - arg_char_count = vprintd(NULL, flags, &args_tmp); - if (arg_char_count <= buf_left) { - (void)vprintd(&buf_it, flags, args); - } - break; - case 's': - va_copy(args_tmp, *args); - arg_char_count = prints(NULL, va_arg(args_tmp, char*)); - if (arg_char_count <= buf_left) { - (void)prints(&buf_it, va_arg(*args, char*)); - } - break; - case 'c': - arg_char_count = 1; - if (arg_char_count <= buf_left) { - printc(&buf_it, (char)va_arg(args_tmp, int)); - } - break; - case '%': - arg_char_count = 1; - if (arg_char_count <= buf_left) { - printc(&buf_it, *fmt_it); - } - break; - default: - ignore_char = true; - break; - } - } while (ignore_char); - - if (arg_char_count <= buf_left) { - buf_left -= arg_char_count; - } else { - while (*fmt_it != '%') { - fmt_it--; - } - break; - } - } - fmt_it++; - } - - *fmt = fmt_it; - return buf_size - buf_left; -} +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. 
+ */
+
+#include
+
+#define F_LONG     (1U << 0U)
+#define F_UNSIGNED (1U << 1U)
+#define F_BASE16   (1U << 2U)
+
+static inline char digit_to_char(unsigned long i, unsigned int base)
+{
+    unsigned long c;
+    unsigned long digit = i % base;
+    if (i < 10U) {
+        c = ((unsigned long)'0') + digit;
+    } else {
+        c = ((unsigned long)'a') + (digit - 10U);
+    }
+    return (char)c;
+}
+
+static inline void printc(char** buf, char c)
+{
+    if (buf != NULL) {
+        **buf = c;
+        (*buf)++;
+    }
+}
+
+static size_t prints(char** buf, const char* str)
+{
+    const char* str_it = str;
+    size_t char_count = 0;
+    while (*str_it != '\0') {
+        printc(buf, *str_it);
+        char_count++;
+        str_it++;
+    }
+    return char_count;
+}
+
+static size_t vprintd(char** buf, unsigned int flags, va_list* args)
+{
+    unsigned long u;
+    size_t base = ((flags & F_BASE16) != 0U) ? (unsigned int)16U : 10U;
+    bool is_long = ((flags & F_LONG) != 0U);
+    bool is_unsigned = ((flags & F_UNSIGNED) != 0U) || (base != 10U);
+    size_t divisor;
+    unsigned long tmp;
+    size_t char_count = 0;
+
+    if (is_unsigned) {
+        u = is_long ? va_arg(*args, unsigned long) : va_arg(*args, unsigned int);
+    } else {
+        signed long s = is_long ? va_arg(*args, signed long) : va_arg(*args, signed int);
+        if (s < 0) {
+            printc(buf, '-');
+            char_count++;
+            s = -s;
+        }
+        u = (unsigned long)s;
+    }
+
+    divisor = 1;
+    tmp = u;
+    while (tmp >= base) {
+        divisor *= base;
+        tmp /= base;
+    }
+
+    while (divisor > 0U) {
+        unsigned long digit = u / divisor;
+        u -= digit * divisor;
+        divisor /= base;
+        printc(buf, digit_to_char(digit, base));
+        char_count++;
+    }
+
+    return char_count;
+}
+
+/**
+ * This is a limited printf implementation. The format string only supports integer, string and
+ * char arguments. That is, 'd', 'u' or 'x', 's' and 'c' specifiers, respectively. For integers, it
+ * only supports the none and 'l' lengths. It does not support any flags, width or precision
+ * fields. If present, these fields are ignored.
+ *
+ * Note this does not follow the C lib vsnprintf specification. It returns the number of
+ * characters written to the buffer, and changes fmt to point to the first character that was not
+ * printed.
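Two behaviors of this contract are worth spelling out. First, a conversion whose rendered length exceeds the remaining buffer makes vsnprintk rewind fmt to that conversion's '%' and return early, so the caller can flush and retry. Second, in the body below, the 'c' branch reads its argument via va_arg(args_tmp, int) even though args_tmp is only va_copy'd in the integer and string branches, and *args is never advanced for a 'c' conversion; that looks like a pre-existing bug the reformat faithfully preserves (printc(&buf_it, (char)va_arg(*args, int)) would be the expected form), but it deserves verification upstream rather than taking my reading on faith.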
+ */ +size_t vsnprintk(char* buf, size_t buf_size, const char** fmt, va_list* args) +{ + char* buf_it = buf; + size_t buf_left = buf_size; + const char* fmt_it = *fmt; + va_list args_tmp; + + while ((*fmt_it != '\0') && (buf_left > 0U)) { + if ((*fmt_it) != '%') { + printc(&buf_it, *fmt_it); + buf_left--; + } else { + unsigned int flags; + bool ignore_char; + size_t arg_char_count = 0; + + fmt_it++; + flags = 0; + if (*fmt_it == 'l') { + fmt_it++; + flags = flags | F_LONG; + if (*fmt_it == 'l') { + fmt_it++; + } // ignore long long + } + + do { + ignore_char = false; + switch (*fmt_it) { + case 'x': + case 'X': + flags = flags | F_BASE16; + /* fallthrough */ + case 'u': + flags = flags | F_UNSIGNED; + /* fallthrough */ + case 'd': + case 'i': + va_copy(args_tmp, *args); + arg_char_count = vprintd(NULL, flags, &args_tmp); + if (arg_char_count <= buf_left) { + (void)vprintd(&buf_it, flags, args); + } + break; + case 's': + va_copy(args_tmp, *args); + arg_char_count = prints(NULL, va_arg(args_tmp, char*)); + if (arg_char_count <= buf_left) { + (void)prints(&buf_it, va_arg(*args, char*)); + } + break; + case 'c': + arg_char_count = 1; + if (arg_char_count <= buf_left) { + printc(&buf_it, (char)va_arg(args_tmp, int)); + } + break; + case '%': + arg_char_count = 1; + if (arg_char_count <= buf_left) { + printc(&buf_it, *fmt_it); + } + break; + default: + ignore_char = true; + break; + } + } while (ignore_char); + + if (arg_char_count <= buf_left) { + buf_left -= arg_char_count; + } else { + while (*fmt_it != '%') { + fmt_it--; + } + break; + } + } + fmt_it++; + } + + *fmt = fmt_it; + return buf_size - buf_left; +} diff --git a/src/lib/string.c b/src/lib/string.c index c59c003cd..6ee158125 100644 --- a/src/lib/string.c +++ b/src/lib/string.c @@ -1,98 +1,102 @@ -/** - * SPDX-License-Identifier: Apache-2.0 - * Copyright (c) Bao Project and Contributors. All rights reserved. 
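Given that contract, a caller is expected to loop until the format string is exhausted. A hypothetical wrapper showing the intended flush-and-resume shape (console_write_n and the buffer size are assumptions for illustration, not project code):

#include <stdarg.h>
#include <stddef.h>

size_t vsnprintk(char* buf, size_t buf_size, const char** fmt, va_list* args);
void console_write_n(const char* buf, size_t n); /* assumed output primitive */

void example_printk(const char* fmt, ...)
{
    /* buf must be able to hold any single conversion, or the loop never advances */
    char buf[128];
    va_list args;
    va_start(args, fmt);
    while (*fmt != '\0') {
        /* vsnprintk advances fmt past what it printed; a conversion that does
         * not fit rewinds fmt to its '%' so the next round retries it */
        size_t n = vsnprintk(buf, sizeof(buf), &fmt, &args);
        console_write_n(buf, n);
    }
    va_end(args);
}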
- */ - -#include - -void *memcpy(void *dst, const void *src, size_t count) -{ - size_t i; - uint8_t *dst_tmp = dst; - const uint8_t *src_tmp = src; - static const size_t WORD_SIZE = sizeof(unsigned long); - - if (!((uintptr_t)src & (WORD_SIZE - 1)) && - !((uintptr_t)dst & (WORD_SIZE - 1))) { - for (i = 0; i < count; i += WORD_SIZE) { - if (i + (WORD_SIZE - 1) > count - 1) break; - *(unsigned long *)dst_tmp = *(unsigned long *)src_tmp; - dst_tmp += WORD_SIZE; - src_tmp += WORD_SIZE; - } - if (i <= count - 1) { - for (; i < count; i++) { - *dst_tmp = *src_tmp; - dst_tmp++; - src_tmp++; - } - } - } else { - for (i = 0; i < count; i++) dst_tmp[i] = src_tmp[i]; - } - return dst; -} - -void *memset(void *dest, int c, size_t count) -{ - uint8_t *d; - d = (uint8_t *)dest; - - while (count--) { - *d = c; - d++; - } - - return dest; -} - -char *strcat(char *dest, char *src) -{ - char *save = dest; - - for (; *dest; ++dest); - while ((*dest++ = *src++) != 0); - - return (save); -} - -size_t strlen(const char *s) -{ - const char *sc; - for (sc = s; *sc != '\0'; ++sc) { - /* Just iterate */ - } - return sc - s; -} - -size_t strnlen(const char *s, size_t n) -{ - const char *str; - - for (str = s; *str != '\0' && n--; ++str) { - /* Just iterate */ - } - return str - s; -} - -char *strcpy(char *dest, char *src) -{ - char *tmp = dest; - - while ((*dest++ = *src++) != '\0') { - /* Just iterate */ - } - return tmp; -} - -int strcmp(char *str0, char *str1) -{ - char *tmp0 = str0, *tmp1 = str1; - - while (*tmp0 == *tmp1 && ((*tmp0 != '\0') && (*tmp1 != '\0'))) { - tmp0++; - tmp1++; - } - - return (int)(tmp0 - tmp1); -} +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. + */ + +#include + +void* memcpy(void* dst, const void* src, size_t count) +{ + size_t i; + uint8_t* dst_tmp = dst; + const uint8_t* src_tmp = src; + static const size_t WORD_SIZE = sizeof(unsigned long); + + if (!((uintptr_t)src & (WORD_SIZE - 1)) && !((uintptr_t)dst & (WORD_SIZE - 1))) { + for (i = 0; i < count; i += WORD_SIZE) { + if (i + (WORD_SIZE - 1) > count - 1) { + break; + } + *(unsigned long*)dst_tmp = *(unsigned long*)src_tmp; + dst_tmp += WORD_SIZE; + src_tmp += WORD_SIZE; + } + if (i <= count - 1) { + for (; i < count; i++) { + *dst_tmp = *src_tmp; + dst_tmp++; + src_tmp++; + } + } + } else { + for (i = 0; i < count; i++) { + dst_tmp[i] = src_tmp[i]; + } + } + return dst; +} + +void* memset(void* dest, int c, size_t count) +{ + uint8_t* d; + d = (uint8_t*)dest; + + while (count--) { + *d = c; + d++; + } + + return dest; +} + +char* strcat(char* dest, char* src) +{ + char* save = dest; + + for (; *dest; ++dest) + ; + while ((*dest++ = *src++) != 0) { } + + return (save); +} + +size_t strlen(const char* s) +{ + const char* sc; + for (sc = s; *sc != '\0'; ++sc) { + /* Just iterate */ + } + return sc - s; +} + +size_t strnlen(const char* s, size_t n) +{ + const char* str; + + for (str = s; *str != '\0' && n--; ++str) { + /* Just iterate */ + } + return str - s; +} + +char* strcpy(char* dest, char* src) +{ + char* tmp = dest; + + while ((*dest++ = *src++) != '\0') { + /* Just iterate */ + } + return tmp; +} + +int strcmp(char* str0, char* str1) +{ + char *tmp0 = str0, *tmp1 = str1; + + while (*tmp0 == *tmp1 && ((*tmp0 != '\0') && (*tmp1 != '\0'))) { + tmp0++; + tmp1++; + } + + return (int)(tmp0 - tmp1); +} diff --git a/src/platform/drivers/8250_uart/8250_uart.c b/src/platform/drivers/8250_uart/8250_uart.c index 0a63e775f..538eba499 100644 --- 
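One thing the string.c reformat above deliberately does not touch: strcmp returns (int)(tmp0 - tmp1). Both cursors advance in lockstep, so that difference equals str0 - str1 before the loop even runs; the return value depends only on where the two buffers sit in memory, never on their contents. A conventional tail compares the mismatching characters instead; a sketch (hypothetical name, not a proposed patch for this diff):

int strcmp_conventional(const char* str0, const char* str1)
{
    /* walk while equal and not at the terminator of either string */
    while ((*str0 == *str1) && (*str0 != '\0')) {
        str0++;
        str1++;
    }
    /* difference of the first mismatching characters (0 if both hit '\0') */
    return (int)((unsigned char)*str0 - (unsigned char)*str1);
}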
a/src/platform/drivers/8250_uart/8250_uart.c
+++ b/src/platform/drivers/8250_uart/8250_uart.c
@@ -1,41 +1,42 @@
-/**
- * SPDX-License-Identifier: Apache-2.0
- * Copyright (c) Bao Project and Contributors. All rights reserved.
- */
-
-#include
-
-void uart_init(volatile struct uart8250_hw *uart) {
-
-    /* set baudrate */
-    uart->lcr |= UART8250_LCR_DLAB;
-    /**
-     * should set dll and dlh,
-     * to simplify instead lets assume the firmware did this for us.
-     * TODO: we should add uart clk and baudrate info to platform descrption
-     * and use this to calculate this values in runtime.
-     */
-    uart->lcr &= ~UART8250_LCR_DLAB;
-
-/* configure 8n1 */
-uart->lcr = UART8250_LCR_8BIT;
-
-    /* disable interrupts */
-    uart->ier = 0;
-
-    /* no modem */
-    uart->mcr = 0;
-
-    /* clear status */
-    (void) uart->lsr;
-    uart->msr = 0;
-}
-
-void uart_enable(volatile struct uart8250_hw *uart){
-    uart->fcr = UART8250_FCR_EN;
-}
-
-void uart_putc(volatile struct uart8250_hw *uart, int8_t c){
-    while(!(uart->lsr & UART8250_LSR_THRE));
-    uart->thr = c;
-}
+/**
+ * SPDX-License-Identifier: Apache-2.0
+ * Copyright (c) Bao Project and Contributors. All rights reserved.
+ */
+
+#include
+
+void uart_init(volatile struct uart8250_hw* uart)
+{
+    /* set baudrate */
+    uart->lcr |= UART8250_LCR_DLAB;
+    /**
+     * should set dll and dlh; to simplify, instead let's assume the firmware did this for us.
+     * TODO: we should add uart clk and baudrate info to the platform description and use it to
+     * calculate these values at runtime.
+     */
+    uart->lcr &= ~UART8250_LCR_DLAB;
+
+    /* configure 8n1 */
+    uart->lcr = UART8250_LCR_8BIT;
+
+    /* disable interrupts */
+    uart->ier = 0;
+
+    /* no modem */
+    uart->mcr = 0;
+
+    /* clear status */
+    (void)uart->lsr;
+    uart->msr = 0;
+}
+
+void uart_enable(volatile struct uart8250_hw* uart)
+{
+    uart->fcr = UART8250_FCR_EN;
+}
+
+void uart_putc(volatile struct uart8250_hw* uart, int8_t c)
+{
+    while (!(uart->lsr & UART8250_LSR_THRE)) { }
+    uart->thr = c;
+}
diff --git a/src/platform/drivers/8250_uart/inc/drivers/8250_uart.h b/src/platform/drivers/8250_uart/inc/drivers/8250_uart.h
index f93f2da31..821355dc7 100644
--- a/src/platform/drivers/8250_uart/inc/drivers/8250_uart.h
+++ b/src/platform/drivers/8250_uart/inc/drivers/8250_uart.h
@@ -9,10 +9,10 @@
 #include
 #include

-#define UART8250_LSR_THRE (1U << 5)
+#define UART8250_LSR_THRE (1U << 5)

 #ifndef UART8250_REG_WIDTH
-    #error "uart8259 reg width " UART8250_REG_WIDTH "not defined"
+#error "uart8250 reg width " UART8250_REG_WIDTH " not defined"
 #endif

 #ifndef UART8250_REG_WIDTH
@@ -20,14 +20,13 @@
 #endif

 #if (UART8250_REG_WIDTH == 1)
-    typedef uint8_t uart8250_reg_t;
+typedef uint8_t uart8250_reg_t;
 #elif (UART8250_REG_WIDTH == 4)
-    typedef uint32_t uart8250_reg_t;
-#else
-    #error "uart8250 reg width " UART8250_REG_WIDTH " not supported"
+typedef uint32_t uart8250_reg_t;
+#else
+#error "uart8250 reg width " UART8250_REG_WIDTH " not supported"
 #endif
-
 #ifndef UART8250_PAGE_OFFSET
 #define UART8250_PAGE_OFFSET 0
 #endif
@@ -57,13 +56,13 @@ struct uart8250_hw {

 #define UART8250_LCR_DLAB (0x1 << 7)
 #define UART8250_LCR_8BIT (0x3 << 0)

-#define UART8250_FCR_TX_CLR (0x1 << 2)
-#define UART8250_FCR_RX_CLR (0x1 << 1)
-#define UART8250_FCR_EN (0x1 << 0)
+#define UART8250_FCR_TX_CLR (0x1 << 2)
+#define UART8250_FCR_RX_CLR (0x1 << 1)
+#define UART8250_FCR_EN     (0x1 << 0)

 typedef struct uart8250_hw bao_uart_t;

-void uart_enable(volatile struct uart8250_hw *uart);
-void uart_init(volatile struct uart8250_hw *uart);
+void uart_enable(volatile struct uart8250_hw* uart);
+void
uart_init(volatile struct uart8250_hw* uart); #endif /* UART8250_H */ diff --git a/src/platform/drivers/nxp_uart/inc/drivers/nxp_uart.h b/src/platform/drivers/nxp_uart/inc/drivers/nxp_uart.h index f2a9d81ad..843aa6b55 100644 --- a/src/platform/drivers/nxp_uart/inc/drivers/nxp_uart.h +++ b/src/platform/drivers/nxp_uart/inc/drivers/nxp_uart.h @@ -1,36 +1,36 @@ -/** - * SPDX-License-Identifier: Apache-2.0 - * Copyright (c) Bao Project and Contributors. All rights reserved. - */ - -#ifndef UART_NXP_H -#define UART_NXP_H - -#include -#include - -struct lpuart { - uint32_t verid; - uint32_t param; - uint32_t global; - uint32_t pincfg; - uint32_t baud; - uint32_t stat; - uint32_t ctrl; - uint32_t data; - uint32_t match; - uint32_t modir; - uint32_t fifo; - uint32_t water; -}; - -#define LPUART_GLOBAL_RST_BIT (1U << 1) -#define LPUART_BAUD_80MHZ_115200 ((4 << 24) | (1 << 17) | 138) -#define LPUART_CTRL_TE_BIT (1U << 19) -#define LPUART_STAT_TDRE_BIT (1U << 23) - -typedef struct lpuart bao_uart_t; - -void uart_enable(volatile struct lpuart *uart); -void uart_init(volatile struct lpuart *uart); -#endif /* __UART_NXP_H */ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. + */ + +#ifndef UART_NXP_H +#define UART_NXP_H + +#include +#include + +struct lpuart { + uint32_t verid; + uint32_t param; + uint32_t global; + uint32_t pincfg; + uint32_t baud; + uint32_t stat; + uint32_t ctrl; + uint32_t data; + uint32_t match; + uint32_t modir; + uint32_t fifo; + uint32_t water; +}; + +#define LPUART_GLOBAL_RST_BIT (1U << 1) +#define LPUART_BAUD_80MHZ_115200 ((4 << 24) | (1 << 17) | 138) +#define LPUART_CTRL_TE_BIT (1U << 19) +#define LPUART_STAT_TDRE_BIT (1U << 23) + +typedef struct lpuart bao_uart_t; + +void uart_enable(volatile struct lpuart* uart); +void uart_init(volatile struct lpuart* uart); +#endif /* __UART_NXP_H */ diff --git a/src/platform/drivers/nxp_uart/nxp_uart.c b/src/platform/drivers/nxp_uart/nxp_uart.c index 3345ae082..161ced84f 100644 --- a/src/platform/drivers/nxp_uart/nxp_uart.c +++ b/src/platform/drivers/nxp_uart/nxp_uart.c @@ -1,22 +1,25 @@ -/** - * SPDX-License-Identifier: Apache-2.0 - * Copyright (c) Bao Project and Contributors. All rights reserved. - */ - -#include - -void uart_init(volatile struct lpuart *uart){ - uart->global |= LPUART_GLOBAL_RST_BIT; - uart->global &= ~LPUART_GLOBAL_RST_BIT; - - uart->baud = LPUART_BAUD_80MHZ_115200; -} - -void uart_enable(volatile struct lpuart *uart){ - uart->ctrl = LPUART_CTRL_TE_BIT; -} - -void uart_putc(volatile struct lpuart *uart, char c){ - while(!(uart->stat & LPUART_STAT_TDRE_BIT)); - uart->data = c; -} +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. 
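A quick decode of LPUART_BAUD_80MHZ_115200 from the nxp_uart.h hunk above, assuming the standard LPUART BAUD field layout (OSR in bits 28:24, what I take to be BOTHEDGE at bit 17, SBR in bits 12:0); the arithmetic is mine, so treat it as a sanity check rather than gospel:

#include <stdio.h>

#define LPUART_BAUD_80MHZ_115200 ((4 << 24) | (1 << 17) | 138) /* value from the header above */

int main(void)
{
    unsigned osr = (LPUART_BAUD_80MHZ_115200 >> 24) & 0x1f; /* 4 -> (OSR + 1) = 5x oversampling */
    unsigned sbr = LPUART_BAUD_80MHZ_115200 & 0x1fff;       /* 138 */
    /* baud = clk / ((OSR + 1) * SBR) = 80000000 / (5 * 138) ~= 115942,
     * about 0.6% above the nominal 115200, inside usual UART tolerance */
    printf("%u\n", 80000000u / ((osr + 1) * sbr));
    return 0;
}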
+ */ + +#include + +void uart_init(volatile struct lpuart* uart) +{ + uart->global |= LPUART_GLOBAL_RST_BIT; + uart->global &= ~LPUART_GLOBAL_RST_BIT; + + uart->baud = LPUART_BAUD_80MHZ_115200; +} + +void uart_enable(volatile struct lpuart* uart) +{ + uart->ctrl = LPUART_CTRL_TE_BIT; +} + +void uart_putc(volatile struct lpuart* uart, char c) +{ + while (!(uart->stat & LPUART_STAT_TDRE_BIT)) { } + uart->data = c; +} diff --git a/src/platform/drivers/pl011_uart/inc/drivers/pl011_uart.h b/src/platform/drivers/pl011_uart/inc/drivers/pl011_uart.h index d04fffaf1..878feaacc 100644 --- a/src/platform/drivers/pl011_uart/inc/drivers/pl011_uart.h +++ b/src/platform/drivers/pl011_uart/inc/drivers/pl011_uart.h @@ -1,210 +1,212 @@ -/** - * SPDX-License-Identifier: Apache-2.0 - * Copyright (c) Bao Project and Contributors. All rights reserved. - */ - -#ifndef __PL011_UART_H_ -#define __PL011_UART_H_ - -#include - -/* UART Base Address (PL011) */ - -#define UART_BASE_0 0xFDF02000 -#define UART_BASE_1 0xFDF00000 -#define UART_BASE_2 0xFDF03000 -#define UART_BASE_4 0xFDF01000 -#define UART_BASE_5 0xFDF05000 -#define UART_BASE_6 0xFFF32000 - -/* UART Interrupts */ - -#define UART_0_INTERRUPT 106 -#define UART_1_INTERRUPT 107 -#define UART_2_INTERRUPT 108 -#define UART_4_INTERRUPT 109 -#define UART_5_INTERRUPT 110 -#define UART_6_INTERRUPT 111 - -#define NUM_UART 6 - -#define UART_CLK 19200000 -#define UART_BAUD_RATE 115200 - -/* UART Data Register */ - -#define UART_DATA_DATA 0xFFFFFF00 -#define UART_DATA_FE (1 << 8 -#define UART_DATA_PE (1 << 9) -#define UART_DATA_BE (1 << 10) -#define UART_DATA_OE (1 << 11) - -/* UART Receive Status Register/Error Clear Register */ - -#define UART_RSR_ECR_FE (1 << 0) -#define UART_RSR_ECR_PE (1 << 1) -#define UART_RSR_ECR_BE (1 << 2) -#define UART_RSR_ECR_OE (1 << 3) -#define UART_RSR_ECR_CLEAR 0xFFFFFF00 - -/* UART Flag Register */ - -#define UART_FR_CTS (1 << 0) -#define UART_FR_DSR (1 << 1) -#define UART_FR_DCD (1 << 2) -#define UART_FR_BUSY (1 << 3) -#define UART_FR_RXFE (1 << 4) -#define UART_FR_TXFF (1 << 5) -#define UART_FR_RXFF (1 << 6) -#define UART_FR_TXFE (1 << 7) -#define UART_FR_RI (1 << 8) - -/* UART Integer Baud Rate Register */ - -#define UART_IBRD_DIVINT 0x0000FFFF - -/* UART Fractional Baud Rate Register */ - -#define UART_FBRD_DIVFRAC 0x0000003F - -/* UART Line Control Register */ - -#define UART_LCR_BRK (1 << 0) -#define UART_LCR_PEN (1 << 1) -#define UART_LCR_EPS (1 << 2) -#define UART_LCR_STP2 (1 << 3) -#define UART_LCR_FEN (1 << 4) -#define UART_LCR_WLEN_8 (0b11 << 5) -#define UART_LCR_WLEN_7 (0b10 << 5) -#define UART_LCR_WLEN_6 (0b01 << 5) -#define UART_LCR_WLEN_5 (0b00 << 5) -#define UART_LCR_SPS (1 << 7) - -/* UART Control Register */ - -#define UART_CR_UARTEN (1 << 0) -#define UART_CR_SIREN (1 << 1) -#define UART_CR_SIRLP (1 << 2) -#define UART_CR_LBE (1 << 7) -#define UART_CR_TXE (1 << 8) -#define UART_CR_RXE (1 << 9) -#define UART_CR_DTR (1 << 10) -#define UART_CR_RTS (1 << 11) -#define UART_CR_OUT1 (1 << 12) -#define UART_CR_OUT2 (1 << 13) -#define UART_CR_RTSE (1 << 14) -#define UART_CR_CTSE (1 << 15) - -/* UART Interrupt FIFO Level Select Register */ - -#define UART_IFLS_TXIFLSEL_1_8 (0b000 << 0) -#define UART_IFLS_TXIFLSEL_1_4 (0b001 << 0) -#define UART_IFLS_TXIFLSEL_1_2 (0b010 << 0) -#define UART_IFLS_TXIFLSEL_3_4 (0b011 << 0) -#define UART_IFLS_TXIFLSEL_7_8 (0b100 << 0) -#define UART_IFLS_RXIFLSEL_1_8 (0b000 << 3) -#define UART_IFLS_RXIFLSEL_1_4 (0b001 << 3) -#define UART_IFLS_RXIFLSEL_1_2 (0b010 << 3) -#define UART_IFLS_RXIFLSEL_3_4 
(0b011 << 3) -#define UART_IFLS_RXIFLSEL_7_8 (0b100 << 3) - -/* UART Interrupt Mask Set/Clear Register */ - -#define UART_IMSC_RIMIM (1 << 0) -#define UART_IMSC_CTSMIM (1 << 1) -#define UART_IMSC_DCDMIM (1 << 2) -#define UART_IMSC_DSRMI (1 << 3) -#define UART_IMSC_RXIM (1 << 4) -#define UART_IMSC_TXIM (1 << 5) -#define UART_IMSC_RTIM (1 << 6) -#define UART_IMSC_FEIM (1 << 7) -#define UART_IMSC_PEIM (1 << 8) -#define UART_IMSC_BEIM (1 << 9) -#define UART_IMSC_OEIM (1 << 10) - -/* UART Raw Interrupt Status Register */ - -#define UART_RIS_RIRMIS (1 << 0) -#define UART_RIS_CTSRMIS (1 << 1) -#define UART_RIS_DCDRMIS (1 << 2) -#define UART_RIS_DSRRMIS (1 << 3) -#define UART_RIS_RXRIS (1 << 4) -#define UART_RIS_TXRIS (1 << 5) -#define UART_RIS_RTRIS (1 << 6) -#define UART_RIS_FERIS (1 << 7) -#define UART_RIS_PERIS (1 << 8) -#define UART_RIS_BERIS (1 << 9) -#define UART_RIS_OERIS (1 << 10) - -/* UART Masked Interrupt Status Register */ - -#define UART_MIS_RIMMIS (1 << 0) -#define UART_MIS_CTSMMIS (1 << 1) -#define UART_MIS_DCDMMIS (1 << 2) -#define UART_MIS_DSRMMIS (1 << 3) -#define UART_MIS_RXMIS (1 << 4) -#define UART_MIS_TXMIS (1 << 5) -#define UART_MIS_RTMIS (1 << 6) -#define UART_MIS_FEMIS (1 << 7) -#define UART_MIS_PEMIS (1 << 8) -#define UART_MIS_BEMIS (1 << 9) -#define UART_MIS_OEMIS (1 << 10) - -/* UART Interrupt Clear Register */ - -#define UART_ICR_RIMIC (1 << 0) -#define UART_ICR_CTSMIC (1 << 1) -#define UART_ICR_DCDMIC (1 << 2) -#define UART_ICR_DSRMIC (1 << 3) -#define UART_ICR_RXIC (1 << 4) -#define UART_ICR_TXIC (1 << 5) -#define UART_ICR_RTIC (1 << 6) -#define UART_ICR_FEIC (1 << 7) -#define UART_ICR_PEIC (1 << 8) -#define UART_ICR_BEIC (1 << 9) -#define UART_ICR_OEIC (1 << 10) - -/* UART DMA Control Register */ - -#define UART_DMACR_RXDMAE (1 << 0) -#define UART_DMACR_TXDMAE (1 << 1) -#define UART_DMACR_DMAONERR (1 << 2) - -/* For printk */ - -#define serial_puts(str_buffer) uart_puts(1,str_buffer) - -/* UART (PL011) register structure */ - -struct Pl011_Uart_hw -{ - volatile uint32_t data; // UART Data Register - volatile uint32_t status_error; // UART Receive Status Register/Error Clear Register - const uint32_t reserved1[4]; // Reserved: 4(0x4) bytes - volatile uint32_t flag; // UART Flag Register - const uint32_t reserved2[1]; // Reserved: 1(0x1) bytes - volatile uint32_t lp_counter; // UART Low-power Counter Register - volatile uint32_t integer_br; // UART Integer Baud Rate Register - volatile uint32_t fractional_br; // UART Fractional Baud Rate Register - volatile uint32_t line_control; // UART Line Control Register - volatile uint32_t control; // UART Control Register - volatile uint32_t isr_fifo_level_sel; // UART Interrupt FIFO level Select Register - volatile uint32_t isr_mask; // UART Interrupt Mask Set/Clear Register - volatile uint32_t raw_isr_status; // UART Raw Interrupt Status Register - volatile uint32_t masked_isr_status; // UART Masked Interrupt Status Register - volatile uint32_t isr_clear; // UART Interrupt Clear Register - volatile uint32_t DMA_control; // UART DMA control Register -}; - -typedef struct Pl011_Uart_hw bao_uart_t; - -/** Public PL011 UART interfaces */ - -void uart_disable(volatile struct Pl011_Uart_hw * ptr_uart); -void uart_enable(volatile struct Pl011_Uart_hw * ptr_uart); -void uart_set_baud_rate(volatile struct Pl011_Uart_hw * ptr_uart, uint32_t baud_rate); -void uart_init(volatile struct Pl011_Uart_hw * ptr_uart); -uint32_t uart_getc(volatile struct Pl011_Uart_hw * ptr_uart); -void uart_putc(volatile struct Pl011_Uart_hw * ptr_uart,int8_t 
c); - -#endif /* __PL011_UART_H_ */ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. + */ + +#ifndef __PL011_UART_H_ +#define __PL011_UART_H_ + +#include + +/* UART Base Address (PL011) */ + +#define UART_BASE_0 0xFDF02000 +#define UART_BASE_1 0xFDF00000 +#define UART_BASE_2 0xFDF03000 +#define UART_BASE_4 0xFDF01000 +#define UART_BASE_5 0xFDF05000 +#define UART_BASE_6 0xFFF32000 + +/* UART Interrupts */ + +#define UART_0_INTERRUPT 106 +#define UART_1_INTERRUPT 107 +#define UART_2_INTERRUPT 108 +#define UART_4_INTERRUPT 109 +#define UART_5_INTERRUPT 110 +#define UART_6_INTERRUPT 111 + +#define NUM_UART 6 + +#define UART_CLK 19200000 +#define UART_BAUD_RATE 115200 + +/* UART Data Register */ + +#define UART_DATA_DATA 0xFFFFFF00 +#define UART_DATA_FE (1 << 8) +#define UART_DATA_PE (1 << 9) +#define UART_DATA_BE (1 << 10) +#define UART_DATA_OE (1 << 11) + +/* UART Receive Status Register/Error Clear Register */ + +#define UART_RSR_ECR_FE (1 << 0) +#define UART_RSR_ECR_PE (1 << 1) +#define UART_RSR_ECR_BE (1 << 2) +#define UART_RSR_ECR_OE (1 << 3) +#define UART_RSR_ECR_CLEAR 0xFFFFFF00 + +/* UART Flag Register */ + +#define UART_FR_CTS (1 << 0) +#define UART_FR_DSR (1 << 1) +#define UART_FR_DCD (1 << 2) +#define UART_FR_BUSY (1 << 3) +#define UART_FR_RXFE (1 << 4) +#define UART_FR_TXFF (1 << 5) +#define UART_FR_RXFF (1 << 6) +#define UART_FR_TXFE (1 << 7) +#define UART_FR_RI (1 << 8) + +/* UART Integer Baud Rate Register */ + +#define UART_IBRD_DIVINT 0x0000FFFF + +/* UART Fractional Baud Rate Register */ + +#define UART_FBRD_DIVFRAC 0x0000003F + +/* UART Line Control Register */ + +#define UART_LCR_BRK (1 << 0) +#define UART_LCR_PEN (1 << 1) +#define UART_LCR_EPS (1 << 2) +#define UART_LCR_STP2 (1 << 3) +#define UART_LCR_FEN (1 << 4) +#define UART_LCR_WLEN_8 (0b11 << 5) +#define UART_LCR_WLEN_7 (0b10 << 5) +#define UART_LCR_WLEN_6 (0b01 << 5) +#define UART_LCR_WLEN_5 (0b00 << 5) +#define UART_LCR_SPS (1 << 7) + +/* UART Control Register */ + +#define UART_CR_UARTEN (1 << 0) +#define UART_CR_SIREN (1 << 1) +#define UART_CR_SIRLP (1 << 2) +#define UART_CR_LBE (1 << 7) +#define UART_CR_TXE (1 << 8) +#define UART_CR_RXE (1 << 9) +#define UART_CR_DTR (1 << 10) +#define UART_CR_RTS (1 << 11) +#define UART_CR_OUT1 (1 << 12) +#define UART_CR_OUT2 (1 << 13) +#define UART_CR_RTSE (1 << 14) +#define UART_CR_CTSE (1 << 15) + +/* UART Interrupt FIFO Level Select Register */ + +#define UART_IFLS_TXIFLSEL_1_8 (0b000 << 0) +#define UART_IFLS_TXIFLSEL_1_4 (0b001 << 0) +#define UART_IFLS_TXIFLSEL_1_2 (0b010 << 0) +#define UART_IFLS_TXIFLSEL_3_4 (0b011 << 0) +#define UART_IFLS_TXIFLSEL_7_8 (0b100 << 0) +#define UART_IFLS_RXIFLSEL_1_8 (0b000 << 3) +#define UART_IFLS_RXIFLSEL_1_4 (0b001 << 3) +#define UART_IFLS_RXIFLSEL_1_2 (0b010 << 3) +#define UART_IFLS_RXIFLSEL_3_4 (0b011 << 3) +#define UART_IFLS_RXIFLSEL_7_8 (0b100 << 3) + +/* UART Interrupt Mask Set/Clear Register */ + +#define UART_IMSC_RIMIM (1 << 0) +#define UART_IMSC_CTSMIM (1 << 1) +#define UART_IMSC_DCDMIM (1 << 2) +#define UART_IMSC_DSRMI (1 << 3) +#define UART_IMSC_RXIM (1 << 4) +#define UART_IMSC_TXIM (1 << 5) +#define UART_IMSC_RTIM (1 << 6) +#define UART_IMSC_FEIM (1 << 7) +#define UART_IMSC_PEIM (1 << 8) +#define UART_IMSC_BEIM (1 << 9) +#define UART_IMSC_OEIM (1 << 10) + +/* UART Raw Interrupt Status Register */ + +#define UART_RIS_RIRMIS (1 << 0) +#define UART_RIS_CTSRMIS (1 << 1) +#define UART_RIS_DCDRMIS (1 << 2) +#define UART_RIS_DSRRMIS (1 << 3) +#define UART_RIS_RXRIS (1 << 
4) +#define UART_RIS_TXRIS (1 << 5) +#define UART_RIS_RTRIS (1 << 6) +#define UART_RIS_FERIS (1 << 7) +#define UART_RIS_PERIS (1 << 8) +#define UART_RIS_BERIS (1 << 9) +#define UART_RIS_OERIS (1 << 10) + +/* UART Masked Interrupt Status Register */ + +#define UART_MIS_RIMMIS (1 << 0) +#define UART_MIS_CTSMMIS (1 << 1) +#define UART_MIS_DCDMMIS (1 << 2) +#define UART_MIS_DSRMMIS (1 << 3) +#define UART_MIS_RXMIS (1 << 4) +#define UART_MIS_TXMIS (1 << 5) +#define UART_MIS_RTMIS (1 << 6) +#define UART_MIS_FEMIS (1 << 7) +#define UART_MIS_PEMIS (1 << 8) +#define UART_MIS_BEMIS (1 << 9) +#define UART_MIS_OEMIS (1 << 10) + +/* UART Interrupt Clear Register */ + +#define UART_ICR_RIMIC (1 << 0) +#define UART_ICR_CTSMIC (1 << 1) +#define UART_ICR_DCDMIC (1 << 2) +#define UART_ICR_DSRMIC (1 << 3) +#define UART_ICR_RXIC (1 << 4) +#define UART_ICR_TXIC (1 << 5) +#define UART_ICR_RTIC (1 << 6) +#define UART_ICR_FEIC (1 << 7) +#define UART_ICR_PEIC (1 << 8) +#define UART_ICR_BEIC (1 << 9) +#define UART_ICR_OEIC (1 << 10) + +/* UART DMA Control Register */ + +#define UART_DMACR_RXDMAE (1 << 0) +#define UART_DMACR_TXDMAE (1 << 1) +#define UART_DMACR_DMAONERR (1 << 2) + +/* For printk */ + +#define serial_puts(str_buffer) uart_puts(1, str_buffer) + +/* UART (PL011) register structure */ + +struct Pl011_Uart_hw { + volatile uint32_t data; // UART Data Register + volatile uint32_t status_error; // UART Receive Status Register/Error Clear + // Register + const uint32_t reserved1[4]; // Reserved: 4(0x4) bytes + volatile uint32_t flag; // UART Flag Register + const uint32_t reserved2[1]; // Reserved: 1(0x1) bytes + volatile uint32_t lp_counter; // UART Low-power Counter Register + volatile uint32_t integer_br; // UART Integer Baud Rate Register + volatile uint32_t fractional_br; // UART Fractional Baud Rate Register + volatile uint32_t line_control; // UART Line Control Register + volatile uint32_t control; // UART Control Register + volatile uint32_t isr_fifo_level_sel; // UART Interrupt FIFO level Select + // Register + volatile uint32_t isr_mask; // UART Interrupt Mask Set/Clear Register + volatile uint32_t raw_isr_status; // UART Raw Interrupt Status Register + volatile uint32_t masked_isr_status; // UART Masked Interrupt Status + // Register + volatile uint32_t isr_clear; // UART Interrupt Clear Register + volatile uint32_t DMA_control; // UART DMA control Register +}; + +typedef struct Pl011_Uart_hw bao_uart_t; + +/** Public PL011 UART interfaces */ + +void uart_disable(volatile struct Pl011_Uart_hw* ptr_uart); +void uart_enable(volatile struct Pl011_Uart_hw* ptr_uart); +void uart_set_baud_rate(volatile struct Pl011_Uart_hw* ptr_uart, uint32_t baud_rate); +void uart_init(volatile struct Pl011_Uart_hw* ptr_uart); +uint32_t uart_getc(volatile struct Pl011_Uart_hw* ptr_uart); +void uart_putc(volatile struct Pl011_Uart_hw* ptr_uart, int8_t c); + +#endif /* __PL011_UART_H_ */ diff --git a/src/platform/drivers/pl011_uart/pl011_uart.c b/src/platform/drivers/pl011_uart/pl011_uart.c index a9b35a397..cd10ff96b 100644 --- a/src/platform/drivers/pl011_uart/pl011_uart.c +++ b/src/platform/drivers/pl011_uart/pl011_uart.c @@ -1,114 +1,100 @@ -/** - * SPDX-License-Identifier: Apache-2.0 - * Copyright (c) Bao Project and Contributors. All rights reserved. 
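The Pl011_Uart_hw struct above encodes the PL011 register map purely through field order, so a padding mistake would silently shift every register. A sketch of compile-time checks a port could add (expected offsets taken from the PL011 TRM register map; assumes the header above is in scope):

#include <stddef.h>

_Static_assert(offsetof(struct Pl011_Uart_hw, flag) == 0x18, "UARTFR expected at 0x18");
_Static_assert(offsetof(struct Pl011_Uart_hw, integer_br) == 0x24, "UARTIBRD expected at 0x24");
_Static_assert(offsetof(struct Pl011_Uart_hw, control) == 0x30, "UARTCR expected at 0x30");
_Static_assert(offsetof(struct Pl011_Uart_hw, DMA_control) == 0x48, "UARTDMACR expected at 0x48");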
- */ - -#include - - -void uart_disable(volatile struct Pl011_Uart_hw * ptr_uart){ - - uint32_t ctrl_reg = ptr_uart->control; - ctrl_reg &= ((~UART_CR_UARTEN) | (~UART_CR_TXE) | (~UART_CR_RXE)); - ptr_uart->control = ctrl_reg; - -} - - -void uart_enable(volatile struct Pl011_Uart_hw * ptr_uart){ - - uint32_t ctrl_reg = ptr_uart->control; - ctrl_reg |= (UART_CR_UARTEN | UART_CR_TXE | UART_CR_RXE); - ptr_uart->control = ctrl_reg; - -} - - -void uart_set_baud_rate(volatile struct Pl011_Uart_hw * ptr_uart, uint32_t baud_rate){ - - uint32_t temp; - uint32_t ibrd; - uint32_t mod; - uint32_t fbrd; - - if(baud_rate == 0) - { - baud_rate = UART_BAUD_RATE; - } - - /* Set baud rate, IBRD = UART_CLK / (16 * BAUD_RATE) - FBRD = ROUND((64 * MOD(UART_CLK,(16 * BAUD_RATE))) / (16 * BAUD_RATE)) */ - temp = 16 * baud_rate; - ibrd = UART_CLK / temp; - mod = UART_CLK % temp; - fbrd = (4 * mod) / baud_rate; - - /* Set the values of the baudrate divisors */ - ptr_uart->integer_br = ibrd; - ptr_uart->fractional_br = fbrd; - -} - - -void uart_init(volatile struct Pl011_Uart_hw * ptr_uart/*, uint32_t baud_rate*/) { - - uint32_t lcrh_reg; - - /* First, disable everything */ - ptr_uart->control = 0x0; - - /* Disable FIFOs */ - lcrh_reg = ptr_uart->line_control; - lcrh_reg &= ~UART_LCR_FEN; - ptr_uart->line_control = lcrh_reg; - - /* Default baudrate = 115200 */ - uint32_t baud_rate = UART_BAUD_RATE; - uart_set_baud_rate(ptr_uart, baud_rate); - - /* Set the UART to be 8 bits, 1 stop bit and no parity, FIFOs enable*/ - ptr_uart->line_control = (UART_LCR_WLEN_8 | UART_LCR_FEN); - - /* Enable the UART, enable TX and enable loop back*/ - ptr_uart->control = (UART_CR_UARTEN | UART_CR_TXE | UART_CR_LBE); - - /* Set the receive interrupt FIFO level to 1/2 full */ - ptr_uart->isr_fifo_level_sel = UART_IFLS_RXIFLSEL_1_2; - - ptr_uart->data = 0x0; - while(ptr_uart->flag & UART_FR_BUSY); - - /* Enable RX */ - ptr_uart->control = (UART_CR_UARTEN | UART_CR_RXE | UART_CR_TXE); - - /* Clear interrupts */ - ptr_uart->isr_clear = (UART_ICR_OEIC | UART_ICR_BEIC | UART_ICR_PEIC | UART_ICR_FEIC); - - /* Enable receive and receive timeout interrupts */ - ptr_uart->isr_mask = (UART_MIS_RXMIS | UART_MIS_RTMIS); - -} - - -uint32_t uart_getc(volatile struct Pl011_Uart_hw * ptr_uart){ - - uint32_t data = 0; - - //wait until there is data in FIFO - while(!(ptr_uart->flag & UART_FR_RXFE)); - - data = ptr_uart->data; - return data; - -} - - -void uart_putc(volatile struct Pl011_Uart_hw * ptr_uart,int8_t c){ - - //wait until txFIFO is not full - while(ptr_uart->flag & UART_FR_TXFF); - - ptr_uart->data = c; - -} - +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. 
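A review note on uart_disable, whose expression the reformat below carries over unchanged from the original above: it masks the control register with ((~UART_CR_UARTEN) | (~UART_CR_TXE) | (~UART_CR_RXE)). By De Morgan that OR of complements equals ~(UART_CR_UARTEN & UART_CR_TXE & UART_CR_RXE), and for three distinct single-bit flags the AND is zero, so the mask is all-ones and the &= clears nothing. A tiny self-contained check of that claim:

#include <stdio.h>

#define UART_CR_UARTEN (1 << 0)
#define UART_CR_TXE    (1 << 8)
#define UART_CR_RXE    (1 << 9)

int main(void)
{
    unsigned cr = UART_CR_UARTEN | UART_CR_TXE | UART_CR_RXE; /* 0x301 */
    unsigned as_written = cr & ((~UART_CR_UARTEN) | (~UART_CR_TXE) | (~UART_CR_RXE));
    unsigned clearing = cr & ~(UART_CR_UARTEN | UART_CR_TXE | UART_CR_RXE);
    printf("0x%x 0x%x\n", as_written, clearing); /* 0x301 0x0: the OR-of-complements clears nothing */
    return 0;
}

If disabling is really the intent, ctrl_reg &= ~(UART_CR_UARTEN | UART_CR_TXE | UART_CR_RXE) would be the usual form; flagged here as an observation, not a change made in this diff.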
+ */ + +#include + +void uart_disable(volatile struct Pl011_Uart_hw* ptr_uart) +{ + uint32_t ctrl_reg = ptr_uart->control; + ctrl_reg &= ((~UART_CR_UARTEN) | (~UART_CR_TXE) | (~UART_CR_RXE)); + ptr_uart->control = ctrl_reg; +} + +void uart_enable(volatile struct Pl011_Uart_hw* ptr_uart) +{ + uint32_t ctrl_reg = ptr_uart->control; + ctrl_reg |= (UART_CR_UARTEN | UART_CR_TXE | UART_CR_RXE); + ptr_uart->control = ctrl_reg; +} + +void uart_set_baud_rate(volatile struct Pl011_Uart_hw* ptr_uart, uint32_t baud_rate) +{ + uint32_t temp; + uint32_t ibrd; + uint32_t mod; + uint32_t fbrd; + + if (baud_rate == 0) { + baud_rate = UART_BAUD_RATE; + } + + /* Set baud rate, IBRD = UART_CLK / (16 * BAUD_RATE) + FBRD = ROUND((64 * MOD(UART_CLK,(16 * BAUD_RATE))) / (16 * BAUD_RATE)) */ + temp = 16 * baud_rate; + ibrd = UART_CLK / temp; + mod = UART_CLK % temp; + fbrd = (4 * mod) / baud_rate; + + /* Set the values of the baudrate divisors */ + ptr_uart->integer_br = ibrd; + ptr_uart->fractional_br = fbrd; +} + +void uart_init(volatile struct Pl011_Uart_hw* ptr_uart /*, uint32_t baud_rate*/) +{ + uint32_t lcrh_reg; + + /* First, disable everything */ + ptr_uart->control = 0x0; + + /* Disable FIFOs */ + lcrh_reg = ptr_uart->line_control; + lcrh_reg &= ~UART_LCR_FEN; + ptr_uart->line_control = lcrh_reg; + + /* Default baudrate = 115200 */ + uint32_t baud_rate = UART_BAUD_RATE; + uart_set_baud_rate(ptr_uart, baud_rate); + + /* Set the UART to be 8 bits, 1 stop bit and no parity, FIFOs enable*/ + ptr_uart->line_control = (UART_LCR_WLEN_8 | UART_LCR_FEN); + + /* Enable the UART, enable TX and enable loop back*/ + ptr_uart->control = (UART_CR_UARTEN | UART_CR_TXE | UART_CR_LBE); + + /* Set the receive interrupt FIFO level to 1/2 full */ + ptr_uart->isr_fifo_level_sel = UART_IFLS_RXIFLSEL_1_2; + + ptr_uart->data = 0x0; + while (ptr_uart->flag & UART_FR_BUSY) { } + + /* Enable RX */ + ptr_uart->control = (UART_CR_UARTEN | UART_CR_RXE | UART_CR_TXE); + + /* Clear interrupts */ + ptr_uart->isr_clear = (UART_ICR_OEIC | UART_ICR_BEIC | UART_ICR_PEIC | UART_ICR_FEIC); + + /* Enable receive and receive timeout interrupts */ + ptr_uart->isr_mask = (UART_MIS_RXMIS | UART_MIS_RTMIS); +} + +uint32_t uart_getc(volatile struct Pl011_Uart_hw* ptr_uart) +{ + uint32_t data = 0; + + // wait until there is data in FIFO + while (!(ptr_uart->flag & UART_FR_RXFE)) { } + + data = ptr_uart->data; + return data; +} + +void uart_putc(volatile struct Pl011_Uart_hw* ptr_uart, int8_t c) +{ + // wait until txFIFO is not full + while (ptr_uart->flag & UART_FR_TXFF) { } + + ptr_uart->data = c; +} diff --git a/src/platform/drivers/sbi_uart/inc/drivers/sbi_uart.h b/src/platform/drivers/sbi_uart/inc/drivers/sbi_uart.h index c93fb4304..9679a1534 100644 --- a/src/platform/drivers/sbi_uart/inc/drivers/sbi_uart.h +++ b/src/platform/drivers/sbi_uart/inc/drivers/sbi_uart.h @@ -1,17 +1,17 @@ -/** - * SPDX-License-Identifier: Apache-2.0 - * Copyright (c) Bao Project and Contributors. All rights reserved. - */ - -#ifndef __SBI_UART_H__ -#define __SBI_UART_H__ - -#include - -typedef volatile uint8_t bao_uart_t; - -bool uart_init(bao_uart_t* uart); -void uart_enable(bao_uart_t* uart); -void uart_putc(bao_uart_t* uart, const char c); - -#endif /* __SBI_UART_H__ */ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. 
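To make uart_set_baud_rate above concrete, here is the arithmetic for the header's UART_CLK of 19.2 MHz at 115200 baud (my numbers, following the code's truncating division):

#include <stdio.h>

int main(void)
{
    unsigned clk = 19200000, baud = 115200;
    unsigned temp = 16 * baud;        /* 1843200 */
    unsigned ibrd = clk / temp;       /* 10 */
    unsigned mod = clk % temp;        /* 768000 */
    unsigned fbrd = (4 * mod) / baud; /* 26; the TRM's ROUND(64 * mod / temp) would give 27 */
    printf("IBRD=%u FBRD=%u\n", ibrd, fbrd);
    return 0;
}

Note the code floors the fractional divisor where the comment's ROUND() suggests rounding; at this clock the difference is one FBRD step.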
+ */ + +#ifndef __SBI_UART_H__ +#define __SBI_UART_H__ + +#include + +typedef volatile uint8_t bao_uart_t; + +bool uart_init(bao_uart_t* uart); +void uart_enable(bao_uart_t* uart); +void uart_putc(bao_uart_t* uart, const char c); + +#endif /* __SBI_UART_H__ */ diff --git a/src/platform/drivers/sbi_uart/sbi_uart.c b/src/platform/drivers/sbi_uart/sbi_uart.c index 9b9ae4444..bc0295032 100644 --- a/src/platform/drivers/sbi_uart/sbi_uart.c +++ b/src/platform/drivers/sbi_uart/sbi_uart.c @@ -1,5 +1,5 @@ /** - * SPDX-License-Identifier: Apache-2.0 + * SPDX-License-Identifier: Apache-2.0 * Copyright (c) Bao Project and Contributors. All rights reserved. */ @@ -10,8 +10,9 @@ bool uart_init(bao_uart_t* uart) { return true; } -void uart_enable(bao_uart_t* uart) {} +void uart_enable(bao_uart_t* uart) { } -void uart_putc(bao_uart_t* uart, const char c) { +void uart_putc(bao_uart_t* uart, const char c) +{ sbi_console_putchar(c); } diff --git a/src/platform/drivers/zynq_uart/inc/drivers/zynq_uart.h b/src/platform/drivers/zynq_uart/inc/drivers/zynq_uart.h index 8dbbfad23..06d9f2508 100644 --- a/src/platform/drivers/zynq_uart/inc/drivers/zynq_uart.h +++ b/src/platform/drivers/zynq_uart/inc/drivers/zynq_uart.h @@ -1,284 +1,318 @@ -/** - * SPDX-License-Identifier: Apache-2.0 - * Copyright (c) Bao Project and Contributors. All rights reserved. - */ - -#ifndef __UART_ZYNQ_H -#define __UART_ZYNQ_H - -#include -#include - -/** UART Interrupts ID*/ - -#define UART_0_INTERRUPT (53) -#define UART_1_INTERRUPT (54) - -/** Number of available UARTs */ - -#define NUM_UART (2) - -/** UART Control register configurations */ - -#define UART_CONTROL_STPBRK (0b1 << 8) // Stop transmitter break -#define UART_CONTROL_STTBRK (0b1 << 7) // Start transmitter break -#define UART_CONTROL_RSTTO (0b1 << 6) // Restart receiver timeout counter -#define UART_CONTROL_TXDIS (0b1 << 5) // Transmit disable -#define UART_CONTROL_TXEN (0b1 << 4) // Transmit enable -#define UART_CONTROL_RXDIS (0b1 << 3) // Receive disable -#define UART_CONTROL_RXEN (0b1 << 2) // Receive enable -#define UART_CONTROL_TXRES (0b1 << 1) // Software reset for Tx data path -#define UART_CONTROL_RXRES (0b1 << 0) // Software reset for Rx data path - -/** UART Mode Register configuration */ - -#define UART_MODE_CHMODE_N (0b00 << 8) // Channel mode (Normal) -#define UART_MODE_CHMODE_AE (0b01 << 8) // Channel mode (Automatic Echo) -#define UART_MODE_CHMODE_LL (0b10 << 8) // Channel mode (Local Loopback) -#define UART_MODE_CHMODE_RL (0b11 << 8) // Channel mode (Remote Loopback) -#define UART_MODE_NBSTOP_1 (0b00 << 6) // Number of stop bits (1) -#define UART_MODE_NBSTOP_1_5 (0b01 << 6) // Number of stop bits (1.5) -#define UART_MODE_NBSTOP_2 (0b10 << 6) // Number of stop bits (2) -#define UART_MODE_PAR_EP (0b000 << 3) // Parity type select (Even Parity) -#define UART_MODE_PAR_OP (0b001 << 3) // Parity type select (Odd Parity) -#define UART_MODE_PAR_F0P (0b010 << 3) // Parity type select (forced to 0 Parity, Space Parity Mode) -#define UART_MODE_PAR_F1P (0b011 << 3) // Parity type select (forced to 1 Parity, Mark Parity Mode) -#define UART_MODE_PAR_NP (0b100 << 3) // Parity type select (No Parity) -#define UART_MODE_CHRL_6 (0b11 << 1) // Character length select (6 bits) -#define UART_MODE_CHRL_7 (0b10 << 1) // Character length select (7 bits) -#define UART_MODE_CHRL_8 (0b00 << 1) // Character length select (8 bits) -#define UART_MODE_CHRL_REF (0b0 << 0) // Clock source select (clock source is uart_ref_clk) -#define UART_MODE_CLKS_REF_8 (0b1 << 0) // Clock source select 
(clock source is uart_ref_clk/8) - -/** UART Interrupt Enable Register configurations */ - -#define UART_ISR_EN_RBRK (0b1 << 13) // Receiver FIFO Break Detect interrupt (enable, clears mask=0) -#define UART_ISR_EN_TOVR (0b1 << 12) // Transmitter FIFO Overflow interrupt (enable, clears mask=0) -#define UART_ISR_EN_TNFUL (0b1 << 11) // Transmitter FIFO Nearly Full interrupt (enable, clears mask=0) -#define UART_ISR_EN_TTRIG (0b1 << 10) // Transmitter FIFO Trigger interrupt (enable, clears mask=0) -#define UART_ISR_EN_DMSI (0b1 << 9) // Delta Modem Status Indicator interrupt (enable, clears mask=0) -#define UART_ISR_EN_TIMEOUT (0b1 << 8) // Receiver Timeout Error interrupt (enable, clears mask=0) -#define UART_ISR_EN_PARE (0b1 << 7) // Receiver Parity Error interrupt (enable, clears mask=0) -#define UART_ISR_EN_FRAME (0b1 << 6) // Receiver Framing Error interrupt (enable, clears mask=0) -#define UART_ISR_EN_ROVR (0b1 << 5) // Receiver Overflow Error interrupt (enable, clears mask=0) -#define UART_ISR_EN_TFUL (0b1 << 4) // Transmitter FIFO Full interrupt (enable, clears mask=0) -#define UART_ISR_EN_TEMPTY (0b1 << 3) // Transmitter FIFO Empty interrupt (enable, clears mask=0) -#define UART_ISR_EN_RFUL (0b1 << 2) // Receiver FIFO Full interrupt (enable, clears mask=0) -#define UART_ISR_EN_REMPTY (0b1 << 1) // Receiver FIFO Empty interrupt (enable, clears mask=0) -#define UART_ISR_EN_RTRIG (0b1 << 0) // Receiver FIFO Trigger interrupt (enable, clears mask=0) - -/** UART Interrupt Disable Register configurations */ - -#define UART_ISR_DIS_RBRK (0b1 << 13) // Receiver FIFO Break Detect interrupt (disable, sets mask=1) -#define UART_ISR_DIS_TOVR (0b1 << 12) // Transmitter FIFO Overflow interrupt (disable, sets mask=1) -#define UART_ISR_DIS_TNFUL (0b1 << 11) // Transmitter FIFO Nearly Full interrupt (disable, sets mask=1) -#define UART_ISR_DIS_TTRIG (0b1 << 10) // Transmitter FIFO Trigger interrupt (disable, sets mask=1) -#define UART_ISR_DIS_DMSI (0b1 << 9) // Delta Modem Status Indicator interrupt (disable, sets mask=1) -#define UART_ISR_DIS_TIMEOUT (0b1 << 8) // Receiver Timeout Error interrupt (disable, sets mask=1) -#define UART_ISR_DIS_PARE (0b1 << 7) // Receiver Parity Error interrupt (disable, sets mask=1) -#define UART_ISR_DIS_FRAME (0b1 << 6) // Receiver Framing Error interrupt (disable, sets mask=1) -#define UART_ISR_DIS_ROVR (0b1 << 5) // Receiver Overflow Error interrupt (disable, sets mask=1) -#define UART_ISR_DIS_TFUL (0b1 << 4) // Transmitter FIFO Full interrupt (disable, sets mask=1) -#define UART_ISR_DIS_TEMPTY (0b1 << 3) // Transmitter FIFO Empty interrupt (disable, sets mask=1) -#define UART_ISR_DIS_RFUL (0b1 << 2) // Receiver FIFO Full interrupt (disable, sets mask=1) -#define UART_ISR_DIS_REMPTY (0b1 << 1) // Receiver FIFO Empty interrupt (disable, sets mask=1) -#define UART_ISR_DIS_RTRIG (0b1 << 0) // Receiver FIFO Trigger interrupt (disable, sets mask=1) - -/** UART Interrupt Mask Register configurations */ - -#define UART_ISR_MASK_RBRK (0b1 << 13) // Receiver FIFO Break Detect interrupt (enabled) -#define UART_ISR_MASK_TOVR (0b1 << 12) // Transmitter FIFO Overflow interrupt (enabled) -#define UART_ISR_MASK_TNFUL (0b1 << 11) // Transmitter FIFO Nearly Full interrupt (enabled) -#define UART_ISR_MASK_TTRIG (0b1 << 10) // Transmitter FIFO Trigger interrupt (enabled) -#define UART_ISR_MASK_DMSI (0b1 << 9) // Delta Modem Status Indicator interrupt (enabled) -#define UART_ISR_MASK_TIMEOUT (0b1 << 8) // Receiver Timeout Error interrupt (enabled) -#define UART_ISR_MASK_PARE (0b1 << 7) 
// Receiver Parity Error interrupt (enabled) -#define UART_ISR_MASK_FRAME (0b1 << 6) // Receiver Framing Error interrupt (enabled) -#define UART_ISR_MASK_ROVR (0b1 << 5) // Receiver Overflow Error interrupt (enabled) -#define UART_ISR_MASK_TFUL (0b1 << 4) // Transmitter FIFO Full interrupt (enabled) -#define UART_ISR_MASK_TEMPTY (0b1 << 3) // Transmitter FIFO Empty interrupt (enabled) -#define UART_ISR_MASK_RFUL (0b1 << 2) // Receiver FIFO Full interrupt (enabled) -#define UART_ISR_MASK_REMPTY (0b1 << 1) // Receiver FIFO Empty interrupt (enabled) -#define UART_ISR_MASK_RTRIG (0b1 << 0) // Receiver FIFO Trigger interrupt (enabled) - -/** UART Channel Interrupt Status Register configurations */ - -#define UART_ISR_STATUS_RBRK (0b1 << 13) // Receiver FIFO Break Detect interrupt (interrupt occured) -#define UART_ISR_STATUS_TOVR (0b1 << 12) // Transmitter FIFO Overflow interrupt (interrupt occurred) -#define UART_ISR_STATUS_TNFUL (0b1 << 11) // Transmitter FIFO Nearly Full interrupt (interrupt occurred) -#define UART_ISR_STATUS_TTRIG (0b1 << 10) // Transmitter FIFO Trigger interrupt (interrupt occurred) -#define UART_ISR_STATUS_DMSI (0b1 << 9) // Delta Modem Status Indicator interrupt (interrupt occurred) -#define UART_ISR_STATUS_TIMEOUT (0b1 << 8) // Receiver Timeout Error interrupt (interrupt occurred) -#define UART_ISR_STATUS_PARE (0b1 << 7) // Receiver Parity Error interrupt (interrupt occurred) -#define UART_ISR_STATUS_FRAME (0b1 << 6) // Receiver Framing Error interrupt (interrupt occurred) -#define UART_ISR_STATUS_ROVR (0b1 << 5) // Receiver Overflow Error interrupt (interrupt occurred) -#define UART_ISR_STATUS_TFUL (0b1 << 4) // Transmitter FIFO Full interrupt (interrupt occurred) -#define UART_ISR_STATUS_TEMPTY (0b1 << 3) // Transmitter FIFO Empty interrupt (interrupt occurred) -#define UART_ISR_STATUS_RFUL (0b1 << 2) // Receiver FIFO Full interrupt (interrupt occurred) -#define UART_ISR_STATUS_REMPTY (0b1 << 1) // Receiver FIFO Empty interrupt (interrupt occurred) -#define UART_ISR_STATUS_RTRIG (0b1 << 0) // Receiver FIFO Trigger interrupt (interrupt occurred) - -/** UART Baud Rate Generator Register */ - -#define UART_BR_GEN_DIS (0) // Baud Rate Clock Divisor Value (Disables baud_sample) -#define UART_BR_GEN_BYPASS (1) // Baud Rate Clock Divisor Value (Clock divisor bypass) - -/** UART Receiver Timeout Register */ - -#define UART_RX_TIMEOUT_DIS (0) // Receiver timeout value (Disables receiver timeout counter) - -/** UART Receiver FIFO Trigger Level Register */ - -#define UART_RX_FIFO_TRIG_DIS (0) // RX FIFO trigger level value (Disables RX FIFO trigger level function) - -/** UART Modem Control Register */ - -#define UART_MODEM_CTRL_FCM (0b1 << 5) // Automatic flow control mode (enable) -#define UART_MODEM_CTRL_RTS_FL1 (0b0 << 1) // Request to send output control (EMIOUARTxRTSN output forced to logic 1) -#define UART_MODEM_CTRL_RTS_FL0 (0b1 << 1) // Request to send output control (EMIOUARTxRTSN output forced to logic 0) -#define UART_MODEM_CTRL_DTR_FL1 (0b0 << 0) // Data Terminal Ready (EMIOUARTxDTRN output forced to logic 1) -#define UART_MODEM_CTRL_DTR_FL0 (0b1 << 0) // Data Terminal Ready (EMIOUARTxDTRN output forced to logic 0) - -/** UART Modem Status Register */ - -#define UART_MODEM_STATUS_FCMS (0b1 << 8) // Flow control mode (enabled) -#define UART_MODEM_STATUS_DCD_H (0b0 << 7) // Data Carrier Detect (DCD) input signal from PL(EMIOUARTxDCDN) status (input is high) -#define UART_MODEM_STATUS_DCD_L (0b1 << 7) // Data Carrier Detect (DCD) input signal from PL(EMIOUARTxDCDN) 
status (input is low) -#define UART_MODEM_STATUS_RI_H (0b0 << 6) // Ring Indicator (RI) input signal from PL(EMIOUARTxRIN) status (input is high) -#define UART_MODEM_STATUS_RI_L (0b1 << 6) // Ring Indicator (RI) input signal from PL(EMIOUARTxRIN) status (input is low) -#define UART_MODEM_STATUS_DSR_H (0b0 << 5) // Data Set Ready (DSR) input signal from PL(EMIOUARTxDSRN) status (input is high) -#define UART_MODEM_STATUS_DSR_L (0b1 << 5) // Data Set Ready (DSR) input signal from PL(EMIOUARTxDSRN) status (input is low) -#define UART_MODEM_STATUS_CTS_H (0b0 << 4) // Clear to Send (CTS) input signal from PL(EMIOUARTxCTSN) status (input is high) -#define UART_MODEM_STATUS_CTS_L (0b1 << 4) // Clear to Send (CTS) input signal from PL(EMIOUARTxCTSN) status (input is low) -#define UART_MODEM_STATUS_DDCD (0b1 << 3) // Delta Data Carrier Detect status (change has occurred) -#define UART_MODEM_STATUS_TERI (0b1 << 2) // Trailing Edge Ring Indicator status (Trailing edge has occurred) -#define UART_MODEM_STATUS_DDSR (0b1 << 1) // Delta Data Set Ready status (change has occurred) -#define UART_MODEM_STATUS_DCTS (0b1 << 0) // Delta Clear To Send status (change has occurred) - -/** UART Channel Status Register */ - -#define UART_CH_STATUS_TNFUL (1 << 14) //TX FIFO Nearly Full Status -#define UART_CH_STATUS_TTRIG (1 << 13) //TX FIFO Trigger Status -#define UART_CH_STATUS_FDELT (1 << 12) //RX FIFO fill over flow delay -#define UART_CH_STATUS_TACTIVE (1 << 11) //TX Active -#define UART_CH_STATUS_RACTIVE (1 << 10) //RX Active -#define UART_CH_STATUS_TFUL (1 << 4) //TX FIFO full -#define UART_CH_STATUS_TEMPTY (1 << 3) //TX FIFO empty -#define UART_CH_STATUS_RFUL (1 << 2) //RX FIFO full -#define UART_CH_STATUS_REMPTY (1 << 1) //RX FIFO empty -#define UART_CH_STATUS_RTRIG (1 << 0) //RX FIFO fill over trigger - -/** UART Baud Rate Divider Register */ - -#define UART_BR_DIV_DIS (0) // Baud rate divider value (0-3 ignored) - -/** UART Flow Control Delay Register */ - -#define UART_FLOW_CTRL_DL_DIS (0) // RxFIFO trigger level for Ready To Send (RTS)output signal (EMIOUARTxRTSN) de-assertion (0-3 disable) - -/** UART Transmitter FIFO Trigger Level Register */ - -#define UART_TX_FIFO_TRIG_DIS (0) // TX FIFO trigger level value (Disables TX FIFO trigger level function) - -/** UART Receiver FIFO Byte Status Register */ - -#define UART_RX_BS_BYTE3_BRKE (1 << 11) // Byte3 Break Error -#define UART_RX_BS_BYTE3_FRME (1 << 10) // Byte3 Frame Error -#define UART_RX_BS_BYTE3_PARE (1 << 9) // Byte3 Parity Error -#define UART_RX_BS_BYTE2_BRKE (1 << 8) // Byte2 Break Error -#define UART_RX_BS_BYTE2_FRME (1 << 7) // Byte2 Frame Error -#define UART_RX_BS_BYTE2_PARE (1 << 6) // Byte2 Parity Error -#define UART_RX_BS_BYTE1_BRKE (1 << 5) // Byte1 Break Error -#define UART_RX_BS_BYTE1_FRME (1 << 4) // Byte1 Frame Error -#define UART_RX_BS_BYTE1_PARE (1 << 3) // Byte1 Parity Error -#define UART_RX_BS_BYTE0_BRKE (1 << 2) // Byte0 Break Error -#define UART_RX_BS_BYTE0_FRME (1 << 1) // Byte0 Frame Error -#define UART_RX_BS_BYTE0_PARE (1 << 0) // Byte0 Parity Error - -/** UART Configs (Zynq Ultrascale+ MPSoC) */ - -#define UART_BAUD_RATE 115200 //115.2kbps -#define UART_FREQ_CLK 50000000 //100MHz -#define UART_MAX_ERROR 5 // 0.5% acceptable error (error%/10) -#define UART_RX_TRIGGER_LVL 1 // - -/** UART Configs for 115200 @100MHz */ - -#define UART_BDIV_115200 5 -#define UART_CD_115200 143 - -/** For printk */ - -#define serial_puts(str_buffer) uart_puts(1,(const int8_t *)str_buffer) - -/** Zynq UART register structure */ -struct Uart_Zynq_hw 
{ - /* UART Control register */ - /* 0x0000 */ - volatile uint32_t control; - /* UART Mode Register */ - /* 0x0004 */ - volatile uint32_t mode; - /* UART Interrupt Enable Register */ - /* 0x0008 */ - volatile uint32_t isr_en; - /* UART Interrupt Disable Register */ - /* 0x000C */ - volatile uint32_t isr_dis; - /* UART Interrupt Mask Register */ - /* 0x0010 */ - volatile uint32_t isr_mask; - /* UART Channel Interrupt Status Register */ - /* 0x0014 */ - volatile uint32_t isr_status; - /* UART Baud Rate Generator Register */ - /* 0x0018 */ - volatile uint32_t br_gen; - /* UART Receiver Time out Register */ - /* 0x001C */ - volatile uint32_t rx_timeout; - /* UART Receiver FIFO Trigger Level Register */ - /* 0x0020 */ - volatile uint32_t rx_fifo_trig; - /* UART Modem Control Register */ - /* 0x0024 */ - volatile uint32_t modem_ctrl; - /* UART Modem Status Register */ - /* 0x0028 */ - volatile uint32_t modem_status; - /* UART Channel Status Register */ - /* 0x002C */ - volatile uint32_t ch_status; - /* UART Transmit and Receive FIFO */ - /* 0x0030 */ - volatile uint32_t tx_rx_fifo; - /* UART Baud Rate Divider Register */ - /* 0x0034 */ - volatile uint32_t br_div; - /* UART Flow Control Delay Register */ - /* 0x0038 */ - volatile uint32_t flow_ctrl_dl; - /* Reserved: 2 words (0x8)*/ - /* 0x003C */ - const uint32_t reserved[2]; - /* UART Transmitter FIFO Trigger Level Register */ - /* 0x0044 */ - volatile uint32_t tx_fifo_trig; - /* UART Transmitter FIFO Trigger Level Register */ - /* 0x0048 */ - volatile uint32_t rx_fifo_byte; - -}; - -typedef struct Uart_Zynq_hw bao_uart_t; - -/** Public Zynq UART interfaces */ - -bool uart_init(volatile struct Uart_Zynq_hw* uart); -void uart_enable(volatile struct Uart_Zynq_hw* uart); -void uart_disable(volatile struct Uart_Zynq_hw* uart); -bool uart_set_baud_rate(volatile struct Uart_Zynq_hw* uart, uint32_t baud_rate); -uint32_t uart_getc(volatile struct Uart_Zynq_hw* uart); -void uart_putc(volatile struct Uart_Zynq_hw* uart,int8_t c); - -#endif /* __UART_ZYNQ_H */ +/** + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Bao Project and Contributors. All rights reserved. 
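The isr_en/isr_dis pair in the register block above (with isr_mask reflecting the current state) is a write-one-to-apply pattern, so enabling or disabling an interrupt source never needs a read-modify-write. A hypothetical helper over the defines that follow below, purely illustrative:

#include <stdbool.h>

/* assumes the zynq_uart.h declarations in this diff are in scope */
static void zynq_uart_rx_irq_set(volatile struct Uart_Zynq_hw* uart, bool enable)
{
    if (enable) {
        /* write 1s to enable: receive FIFO trigger + receiver timeout */
        uart->isr_en = UART_ISR_EN_RTRIG | UART_ISR_EN_TIMEOUT;
    } else {
        /* write 1s to disable the same sources */
        uart->isr_dis = UART_ISR_DIS_RTRIG | UART_ISR_DIS_TIMEOUT;
    }
}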
+ */ + +#ifndef __UART_ZYNQ_H +#define __UART_ZYNQ_H + +#include +#include + +/** UART Interrupts ID*/ + +#define UART_0_INTERRUPT (53) +#define UART_1_INTERRUPT (54) + +/** Number of available UARTs */ + +#define NUM_UART (2) + +/** UART Control register configurations */ + +#define UART_CONTROL_STPBRK (0b1 << 8) // Stop transmitter break +#define UART_CONTROL_STTBRK (0b1 << 7) // Start transmitter break +#define UART_CONTROL_RSTTO (0b1 << 6) // Restart receiver timeout counter +#define UART_CONTROL_TXDIS (0b1 << 5) // Transmit disable +#define UART_CONTROL_TXEN (0b1 << 4) // Transmit enable +#define UART_CONTROL_RXDIS (0b1 << 3) // Receive disable +#define UART_CONTROL_RXEN (0b1 << 2) // Receive enable +#define UART_CONTROL_TXRES (0b1 << 1) // Software reset for Tx data path +#define UART_CONTROL_RXRES (0b1 << 0) // Software reset for Rx data path + +/** UART Mode Register configuration */ + +#define UART_MODE_CHMODE_N (0b00 << 8) // Channel mode (Normal) +#define UART_MODE_CHMODE_AE (0b01 << 8) // Channel mode (Automatic Echo) +#define UART_MODE_CHMODE_LL (0b10 << 8) // Channel mode (Local Loopback) +#define UART_MODE_CHMODE_RL (0b11 << 8) // Channel mode (Remote Loopback) +#define UART_MODE_NBSTOP_1 (0b00 << 6) // Number of stop bits (1) +#define UART_MODE_NBSTOP_1_5 (0b01 << 6) // Number of stop bits (1.5) +#define UART_MODE_NBSTOP_2 (0b10 << 6) // Number of stop bits (2) +#define UART_MODE_PAR_EP (0b000 << 3) // Parity type select (Even Parity) +#define UART_MODE_PAR_OP (0b001 << 3) // Parity type select (Odd Parity) +#define UART_MODE_PAR_F0P (0b010 << 3) // Parity type select (forced to 0 Parity, Space Parity Mode) +#define UART_MODE_PAR_F1P (0b011 << 3) // Parity type select (forced to 1 Parity, Mark Parity Mode) +#define UART_MODE_PAR_NP (0b100 << 3) // Parity type select (No Parity) +#define UART_MODE_CHRL_6 (0b11 << 1) // Character length select (6 bits) +#define UART_MODE_CHRL_7 (0b10 << 1) // Character length select (7 bits) +#define UART_MODE_CHRL_8 (0b00 << 1) // Character length select (8 bits) +#define UART_MODE_CHRL_REF (0b0 << 0) // Clock source select (clock source is uart_ref_clk) +#define UART_MODE_CLKS_REF_8 (0b1 << 0) // Clock source select (clock source is uart_ref_clk/8) + +/** UART Interrupt Enable Register configurations */ + +#define UART_ISR_EN_RBRK (0b1 << 13) // Receiver FIFO Break Detect interrupt (enable, clears mask=0) +#define UART_ISR_EN_TOVR (0b1 << 12) // Transmitter FIFO Overflow interrupt (enable, clears mask=0) +#define UART_ISR_EN_TNFUL \ + (0b1 << 11) // Transmitter FIFO Nearly Full interrupt (enable, clears + // mask=0) +#define UART_ISR_EN_TTRIG (0b1 << 10) // Transmitter FIFO Trigger interrupt (enable, clears mask=0) +#define UART_ISR_EN_DMSI \ + (0b1 << 9) // Delta Modem Status Indicator interrupt (enable, clears mask=0) +#define UART_ISR_EN_TIMEOUT (0b1 << 8) // Receiver Timeout Error interrupt (enable, clears mask=0) +#define UART_ISR_EN_PARE (0b1 << 7) // Receiver Parity Error interrupt (enable, clears mask=0) +#define UART_ISR_EN_FRAME (0b1 << 6) // Receiver Framing Error interrupt (enable, clears mask=0) +#define UART_ISR_EN_ROVR (0b1 << 5) // Receiver Overflow Error interrupt (enable, clears mask=0) +#define UART_ISR_EN_TFUL (0b1 << 4) // Transmitter FIFO Full interrupt (enable, clears mask=0) +#define UART_ISR_EN_TEMPTY (0b1 << 3) // Transmitter FIFO Empty interrupt (enable, clears mask=0) +#define UART_ISR_EN_RFUL (0b1 << 2) // Receiver FIFO Full interrupt (enable, clears mask=0) +#define UART_ISR_EN_REMPTY (0b1 << 1) // Receiver 
FIFO Empty interrupt (enable, clears mask=0) +#define UART_ISR_EN_RTRIG (0b1 << 0) // Receiver FIFO Trigger interrupt (enable, clears mask=0) + +/** UART Interrupt Disable Register configurations */ + +#define UART_ISR_DIS_RBRK (0b1 << 13) // Receiver FIFO Break Detect interrupt (disable, sets mask=1) +#define UART_ISR_DIS_TOVR (0b1 << 12) // Transmitter FIFO Overflow interrupt (disable, sets mask=1) +#define UART_ISR_DIS_TNFUL \ + (0b1 << 11) // Transmitter FIFO Nearly Full interrupt (disable, sets mask=1) +#define UART_ISR_DIS_TTRIG (0b1 << 10) // Transmitter FIFO Trigger interrupt (disable, sets mask=1) +#define UART_ISR_DIS_DMSI \ + (0b1 << 9) // Delta Modem Status Indicator interrupt (disable, sets mask=1) +#define UART_ISR_DIS_TIMEOUT (0b1 << 8) // Receiver Timeout Error interrupt (disable, sets mask=1) +#define UART_ISR_DIS_PARE (0b1 << 7) // Receiver Parity Error interrupt (disable, sets mask=1) +#define UART_ISR_DIS_FRAME (0b1 << 6) // Receiver Framing Error interrupt (disable, sets mask=1) +#define UART_ISR_DIS_ROVR (0b1 << 5) // Receiver Overflow Error interrupt (disable, sets mask=1) +#define UART_ISR_DIS_TFUL (0b1 << 4) // Transmitter FIFO Full interrupt (disable, sets mask=1) +#define UART_ISR_DIS_TEMPTY (0b1 << 3) // Transmitter FIFO Empty interrupt (disable, sets mask=1) +#define UART_ISR_DIS_RFUL (0b1 << 2) // Receiver FIFO Full interrupt (disable, sets mask=1) +#define UART_ISR_DIS_REMPTY (0b1 << 1) // Receiver FIFO Empty interrupt (disable, sets mask=1) +#define UART_ISR_DIS_RTRIG (0b1 << 0) // Receiver FIFO Trigger interrupt (disable, sets mask=1) + +/** UART Interrupt Mask Register configurations */ + +#define UART_ISR_MASK_RBRK (0b1 << 13) // Receiver FIFO Break Detect interrupt (enabled) +#define UART_ISR_MASK_TOVR (0b1 << 12) // Transmitter FIFO Overflow interrupt (enabled) +#define UART_ISR_MASK_TNFUL (0b1 << 11) // Transmitter FIFO Nearly Full interrupt (enabled) +#define UART_ISR_MASK_TTRIG (0b1 << 10) // Transmitter FIFO Trigger interrupt (enabled) +#define UART_ISR_MASK_DMSI (0b1 << 9) // Delta Modem Status Indicator interrupt (enabled) +#define UART_ISR_MASK_TIMEOUT (0b1 << 8) // Receiver Timeout Error interrupt (enabled) +#define UART_ISR_MASK_PARE (0b1 << 7) // Receiver Parity Error interrupt (enabled) +#define UART_ISR_MASK_FRAME (0b1 << 6) // Receiver Framing Error interrupt (enabled) +#define UART_ISR_MASK_ROVR (0b1 << 5) // Receiver Overflow Error interrupt (enabled) +#define UART_ISR_MASK_TFUL (0b1 << 4) // Transmitter FIFO Full interrupt (enabled) +#define UART_ISR_MASK_TEMPTY (0b1 << 3) // Transmitter FIFO Empty interrupt (enabled) +#define UART_ISR_MASK_RFUL (0b1 << 2) // Receiver FIFO Full interrupt (enabled) +#define UART_ISR_MASK_REMPTY (0b1 << 1) // Receiver FIFO Empty interrupt (enabled) +#define UART_ISR_MASK_RTRIG (0b1 << 0) // Receiver FIFO Trigger interrupt (enabled) + +/** UART Channel Interrupt Status Register configurations */ + +#define UART_ISR_STATUS_RBRK (0b1 << 13) // Receiver FIFO Break Detect interrupt (interrupt occured) +#define UART_ISR_STATUS_TOVR (0b1 << 12) // Transmitter FIFO Overflow interrupt (interrupt occurred) +#define UART_ISR_STATUS_TNFUL \ + (0b1 << 11) // Transmitter FIFO Nearly Full interrupt (interrupt occurred) +#define UART_ISR_STATUS_TTRIG (0b1 << 10) // Transmitter FIFO Trigger interrupt (interrupt occurred) +#define UART_ISR_STATUS_DMSI \ + (0b1 << 9) // Delta Modem Status Indicator interrupt (interrupt occurred) +#define UART_ISR_STATUS_TIMEOUT (0b1 << 8) // Receiver Timeout Error interrupt (interrupt 
+#define UART_ISR_STATUS_PARE (0b1 << 7) // Receiver Parity Error interrupt (interrupt occurred)
+#define UART_ISR_STATUS_FRAME (0b1 << 6) // Receiver Framing Error interrupt (interrupt occurred)
+#define UART_ISR_STATUS_ROVR (0b1 << 5) // Receiver Overflow Error interrupt (interrupt occurred)
+#define UART_ISR_STATUS_TFUL (0b1 << 4) // Transmitter FIFO Full interrupt (interrupt occurred)
+#define UART_ISR_STATUS_TEMPTY (0b1 << 3) // Transmitter FIFO Empty interrupt (interrupt occurred)
+#define UART_ISR_STATUS_RFUL (0b1 << 2) // Receiver FIFO Full interrupt (interrupt occurred)
+#define UART_ISR_STATUS_REMPTY (0b1 << 1) // Receiver FIFO Empty interrupt (interrupt occurred)
+#define UART_ISR_STATUS_RTRIG (0b1 << 0) // Receiver FIFO Trigger interrupt (interrupt occurred)
+
+/** UART Baud Rate Generator Register */
+
+#define UART_BR_GEN_DIS (0) // Baud Rate Clock Divisor Value (Disables baud_sample)
+#define UART_BR_GEN_BYPASS (1) // Baud Rate Clock Divisor Value (Clock divisor bypass)
+
+/** UART Receiver Timeout Register */
+
+#define UART_RX_TIMEOUT_DIS (0) // Receiver timeout value (Disables receiver timeout counter)
+
+/** UART Receiver FIFO Trigger Level Register */
+
+#define UART_RX_FIFO_TRIG_DIS \
+    (0) // RX FIFO trigger level value (Disables RX FIFO trigger level function)
+
+/** UART Modem Control Register */
+
+#define UART_MODEM_CTRL_FCM (0b1 << 5) // Automatic flow control mode (enable)
+#define UART_MODEM_CTRL_RTS_FL1 \
+    (0b0 << 1) // Request to send output control (EMIOUARTxRTSN output forced to
+               // logic 1)
+#define UART_MODEM_CTRL_RTS_FL0 \
+    (0b1 << 1) // Request to send output control (EMIOUARTxRTSN output forced to
+               // logic 0)
+#define UART_MODEM_CTRL_DTR_FL1 \
+    (0b0 << 0) // Data Terminal Ready (EMIOUARTxDTRN output forced to logic 1)
+#define UART_MODEM_CTRL_DTR_FL0 \
+    (0b1 << 0) // Data Terminal Ready (EMIOUARTxDTRN output forced to logic 0)
+
+/** UART Modem Status Register */
+
+#define UART_MODEM_STATUS_FCMS (0b1 << 8) // Flow control mode (enabled)
+#define UART_MODEM_STATUS_DCD_H \
+    (0b0 << 7) // Data Carrier Detect (DCD) input signal from PL(EMIOUARTxDCDN)
+               // status (input is high)
+#define UART_MODEM_STATUS_DCD_L \
+    (0b1 << 7) // Data Carrier Detect (DCD) input signal from PL(EMIOUARTxDCDN)
+               // status (input is low)
+#define UART_MODEM_STATUS_RI_H \
+    (0b0 << 6) // Ring Indicator (RI) input signal from PL(EMIOUARTxRIN) status
+               // (input is high)
+#define UART_MODEM_STATUS_RI_L \
+    (0b1 << 6) // Ring Indicator (RI) input signal from PL(EMIOUARTxRIN) status
+               // (input is low)
+#define UART_MODEM_STATUS_DSR_H \
+    (0b0 << 5) // Data Set Ready (DSR) input signal from PL(EMIOUARTxDSRN)
+               // status (input is high)
+#define UART_MODEM_STATUS_DSR_L \
+    (0b1 << 5) // Data Set Ready (DSR) input signal from PL(EMIOUARTxDSRN)
+               // status (input is low)
+#define UART_MODEM_STATUS_CTS_H \
+    (0b0 << 4) // Clear to Send (CTS) input signal from PL(EMIOUARTxCTSN) status
+               // (input is high)
+#define UART_MODEM_STATUS_CTS_L \
+    (0b1 << 4) // Clear to Send (CTS) input signal from PL(EMIOUARTxCTSN) status
+               // (input is low)
+#define UART_MODEM_STATUS_DDCD (0b1 << 3) // Delta Data Carrier Detect status (change has occurred)
+#define UART_MODEM_STATUS_TERI \
+    (0b1 << 2) // Trailing Edge Ring Indicator status (Trailing edge has
+               // occurred)
+#define UART_MODEM_STATUS_DDSR (0b1 << 1) // Delta Data Set Ready status (change has occurred)
+#define UART_MODEM_STATUS_DCTS (0b1 << 0) // Delta Clear To Send status (change has occurred)
+
+/** UART Channel Status Register */
+
+#define UART_CH_STATUS_TNFUL (1 << 14) // TX FIFO Nearly Full Status
+#define UART_CH_STATUS_TTRIG (1 << 13) // TX FIFO Trigger Status
+#define UART_CH_STATUS_FDELT (1 << 12) // RX FIFO fill over flow delay
+#define UART_CH_STATUS_TACTIVE (1 << 11) // TX Active
+#define UART_CH_STATUS_RACTIVE (1 << 10) // RX Active
+#define UART_CH_STATUS_TFUL (1 << 4) // TX FIFO full
+#define UART_CH_STATUS_TEMPTY (1 << 3) // TX FIFO empty
+#define UART_CH_STATUS_RFUL (1 << 2) // RX FIFO full
+#define UART_CH_STATUS_REMPTY (1 << 1) // RX FIFO empty
+#define UART_CH_STATUS_RTRIG (1 << 0) // RX FIFO fill over trigger
+
+/** UART Baud Rate Divider Register */
+
+#define UART_BR_DIV_DIS (0) // Baud rate divider value (0-3 ignored)
+
+/** UART Flow Control Delay Register */
+
+#define UART_FLOW_CTRL_DL_DIS \
+    (0) // RxFIFO trigger level for Ready To Send (RTS) output signal
+        // (EMIOUARTxRTSN) de-assertion (0-3 disable)
+
+/** UART Transmitter FIFO Trigger Level Register */
+
+#define UART_TX_FIFO_TRIG_DIS \
+    (0) // TX FIFO trigger level value (Disables TX FIFO trigger level function)
+
+/** UART Receiver FIFO Byte Status Register */
+
+#define UART_RX_BS_BYTE3_BRKE (1 << 11) // Byte3 Break Error
+#define UART_RX_BS_BYTE3_FRME (1 << 10) // Byte3 Frame Error
+#define UART_RX_BS_BYTE3_PARE (1 << 9) // Byte3 Parity Error
+#define UART_RX_BS_BYTE2_BRKE (1 << 8) // Byte2 Break Error
+#define UART_RX_BS_BYTE2_FRME (1 << 7) // Byte2 Frame Error
+#define UART_RX_BS_BYTE2_PARE (1 << 6) // Byte2 Parity Error
+#define UART_RX_BS_BYTE1_BRKE (1 << 5) // Byte1 Break Error
+#define UART_RX_BS_BYTE1_FRME (1 << 4) // Byte1 Frame Error
+#define UART_RX_BS_BYTE1_PARE (1 << 3) // Byte1 Parity Error
+#define UART_RX_BS_BYTE0_BRKE (1 << 2) // Byte0 Break Error
+#define UART_RX_BS_BYTE0_FRME (1 << 1) // Byte0 Frame Error
+#define UART_RX_BS_BYTE0_PARE (1 << 0) // Byte0 Parity Error
+
+/** UART Configs (Zynq Ultrascale+ MPSoC) */
+
+#define UART_BAUD_RATE 115200 // 115.2kbps
+#define UART_FREQ_CLK 100000000 // 100MHz
+#define UART_MAX_ERROR 5 // 0.5% acceptable error (error%/10)
+#define UART_RX_TRIGGER_LVL 1 // Trigger on the first received byte
+
+/** UART Configs for 115200 @100MHz */
+
+#define UART_BDIV_115200 5
+#define UART_CD_115200 143
+
+/** For printk */
+
+#define serial_puts(str_buffer) uart_puts(1, (const int8_t*)str_buffer)
+
+/** Zynq UART register structure */
+struct Uart_Zynq_hw {
+    /* UART Control register */
+    /* 0x0000 */
+    volatile uint32_t control;
+    /* UART Mode Register */
+    /* 0x0004 */
+    volatile uint32_t mode;
+    /* UART Interrupt Enable Register */
+    /* 0x0008 */
+    volatile uint32_t isr_en;
+    /* UART Interrupt Disable Register */
+    /* 0x000C */
+    volatile uint32_t isr_dis;
+    /* UART Interrupt Mask Register */
+    /* 0x0010 */
+    volatile uint32_t isr_mask;
+    /* UART Channel Interrupt Status Register */
+    /* 0x0014 */
+    volatile uint32_t isr_status;
+    /* UART Baud Rate Generator Register */
+    /* 0x0018 */
+    volatile uint32_t br_gen;
+    /* UART Receiver Timeout Register */
+    /* 0x001C */
+    volatile uint32_t rx_timeout;
+    /* UART Receiver FIFO Trigger Level Register */
+    /* 0x0020 */
+    volatile uint32_t rx_fifo_trig;
+    /* UART Modem Control Register */
+    /* 0x0024 */
+    volatile uint32_t modem_ctrl;
+    /* UART Modem Status Register */
+    /* 0x0028 */
+    volatile uint32_t modem_status;
+    /* UART Channel Status Register */
+    /* 0x002C */
+    volatile uint32_t ch_status;
+    /* UART Transmit and Receive FIFO */
+    /* 0x0030 */
+    volatile uint32_t tx_rx_fifo;
+    /* UART Baud Rate Divider Register */
+    /* 0x0034 */
+    volatile uint32_t br_div;
+    /* UART Flow Control Delay Register */
+    /* 0x0038 */
+    volatile uint32_t flow_ctrl_dl;
+    /* Reserved: 2 words (0x8) */
+    /* 0x003C */
+    const uint32_t reserved[2];
+    /* UART Transmitter FIFO Trigger Level Register */
+    /* 0x0044 */
+    volatile uint32_t tx_fifo_trig;
+    /* UART Receiver FIFO Byte Status Register */
+    /* 0x0048 */
+    volatile uint32_t rx_fifo_byte;
+};
+
+typedef struct Uart_Zynq_hw bao_uart_t;
+
+/** Public Zynq UART interfaces */
+
+bool uart_init(volatile struct Uart_Zynq_hw* uart);
+void uart_enable(volatile struct Uart_Zynq_hw* uart);
+void uart_disable(volatile struct Uart_Zynq_hw* uart);
+bool uart_set_baud_rate(volatile struct Uart_Zynq_hw* uart, uint32_t baud_rate);
+uint32_t uart_getc(volatile struct Uart_Zynq_hw* uart);
+void uart_putc(volatile struct Uart_Zynq_hw* uart, int8_t c);
+
+#endif /* __UART_ZYNQ_H */
diff --git a/src/platform/drivers/zynq_uart/zynq_uart.c b/src/platform/drivers/zynq_uart/zynq_uart.c
index 1ae5dd96d..3df565360 100644
--- a/src/platform/drivers/zynq_uart/zynq_uart.c
+++ b/src/platform/drivers/zynq_uart/zynq_uart.c
@@ -1,110 +1,109 @@
-/**
- * SPDX-License-Identifier: Apache-2.0
- * Copyright (c) Bao Project and Contributors. All rights reserved.
- */
-
-#include <drivers/zynq_uart.h>
-
-bool uart_init(volatile struct Uart_Zynq_hw* uart)
-{
-    uint32_t ret;
-
-    ret = uart_set_baud_rate(uart, UART_BAUD_RATE);
-    if (ret == false) {
-        return false;
-    }
-
-    /* Set the level of the RxFIFO trigger level */
-    uart->rx_fifo_trig = UART_RX_TRIGGER_LVL;
-    /* Program the Receiver Timeout Mechanism (Disabled) */
-    uart->rx_timeout = UART_RX_TIMEOUT_DIS;
-
-    /* Clear all the interrupts in Interrupt Status Register */
-    uart->isr_status = 0xFFFFFFFF;
-    /* Enable RxFIFO Trigger Interrupt */
-    uart->isr_en = UART_ISR_EN_RTRIG;
-
-    /** Enable (closer to Reset) the Controller */
-    uart->control |=
-        (UART_CONTROL_STPBRK | UART_CONTROL_RXRES | UART_CONTROL_TXRES);
-
-    return true;
-}
-
-void uart_enable(volatile struct Uart_Zynq_hw* uart)
-{
-    uint32_t ctrl_reg = uart->control;
-
-    ctrl_reg = (UART_CONTROL_STPBRK | UART_CONTROL_TXEN | UART_CONTROL_RXEN |
-                UART_CONTROL_RXRES | UART_CONTROL_TXRES);
-
-    uart->control = ctrl_reg;
-}
-
-void uart_disable(volatile struct Uart_Zynq_hw* uart)
-{
-    uint32_t ctrl_reg = uart->control;
-
-    ctrl_reg = (UART_CONTROL_STPBRK | UART_CONTROL_TXDIS | UART_CONTROL_RXDIS);
-
-    uart->control = ctrl_reg;
-}
-
-bool uart_set_baud_rate(volatile struct Uart_Zynq_hw* uart, uint32_t baud_rate)
-{
-    // uint32_t sel_clk = UART_FREQ_CLK;
-    uint8_t bdiv = 0;
-    uint16_t cd_calc = 0;
-
-    /** Handling corner case */
-    if (baud_rate == 0) {
-        baud_rate = UART_BAUD_RATE;
-    }
-
-    /* baud_rate = sel_clk / (CD * (BDIV+1))
-     * baud_rate -> Baud Rate
-     * sel_clk -> Selected Clock
-     * CD -> Baud Rate Generator
-     * BDIV -> Baud Rate Divider
-     */
-    // TODO - Add support for auto Baud Rate generation */
-    bdiv = UART_BDIV_115200;
-    cd_calc = UART_CD_115200;
-
-    /** Configure the Baud Rate */
-    /* Disable the Rx and Tx path */
-    uart->control = (UART_CONTROL_RXDIS | UART_CONTROL_TXDIS);
-    /* Write the calculated CD value */
-    uart->br_gen = cd_calc;
-    /* Write the calculated BDIV value */
-    uart->br_div = bdiv;
-    /* Reset Tx and Rx paths */
-    uart->control = (UART_CONTROL_TXRES | UART_CONTROL_RXRES);
-    /* Enable the Rx and Tx path */
-    uart->control = (UART_CONTROL_TXEN | UART_CONTROL_RXEN);
-
-    return true;
-}
-
-uint32_t uart_getc(volatile struct Uart_Zynq_hw* uart)
-{
-    uint32_t data = 0;
-
-    // Chose one of the following: (Trigger Level or Not Empty)
-    /* Wait until RxFIFO is filled up to the trigger level */
-    while (!uart->ch_status & UART_CH_STATUS_RTRIG);
-    /* Wait until RxFIFO is not empty */
-    // while(!uart->ch_status & UART_CH_STATUS_REMPTY);
-
-    data = uart->tx_rx_fifo;
-
-    return data;
-}
-
-void uart_putc(volatile struct Uart_Zynq_hw* uart, int8_t c)
-{
-    /* Wait until txFIFO is not full */
-    while (uart->ch_status & UART_CH_STATUS_TFUL);
-
-    uart->tx_rx_fifo = c;
-}
+/**
+ * SPDX-License-Identifier: Apache-2.0
+ * Copyright (c) Bao Project and Contributors. All rights reserved.
+ */
+
+#include <drivers/zynq_uart.h>
+
+bool uart_init(volatile struct Uart_Zynq_hw* uart)
+{
+    bool ret;
+
+    ret = uart_set_baud_rate(uart, UART_BAUD_RATE);
+    if (ret == false) {
+        return false;
+    }
+
+    /* Set the level of the RxFIFO trigger level */
+    uart->rx_fifo_trig = UART_RX_TRIGGER_LVL;
+    /* Program the Receiver Timeout Mechanism (Disabled) */
+    uart->rx_timeout = UART_RX_TIMEOUT_DIS;
+
+    /* Clear all the interrupts in Interrupt Status Register */
+    uart->isr_status = 0xFFFFFFFF;
+    /* Enable RxFIFO Trigger Interrupt */
+    uart->isr_en = UART_ISR_EN_RTRIG;
+
+    /** Enable (closer to Reset) the Controller */
+    uart->control |= (UART_CONTROL_STPBRK | UART_CONTROL_RXRES | UART_CONTROL_TXRES);
+
+    return true;
+}
+
+void uart_enable(volatile struct Uart_Zynq_hw* uart)
+{
+    uint32_t ctrl_reg = uart->control;
+
+    ctrl_reg = (UART_CONTROL_STPBRK | UART_CONTROL_TXEN | UART_CONTROL_RXEN | UART_CONTROL_RXRES |
+        UART_CONTROL_TXRES);
+
+    uart->control = ctrl_reg;
+}
+
+void uart_disable(volatile struct Uart_Zynq_hw* uart)
+{
+    uint32_t ctrl_reg = uart->control;
+
+    ctrl_reg = (UART_CONTROL_STPBRK | UART_CONTROL_TXDIS | UART_CONTROL_RXDIS);
+
+    uart->control = ctrl_reg;
+}
+
+bool uart_set_baud_rate(volatile struct Uart_Zynq_hw* uart, uint32_t baud_rate)
+{
+    // uint32_t sel_clk = UART_FREQ_CLK;
+    uint8_t bdiv = 0;
+    uint16_t cd_calc = 0;
+
+    /** Handling corner case */
+    if (baud_rate == 0) {
+        baud_rate = UART_BAUD_RATE;
+    }
+
+    /* baud_rate = sel_clk / (CD * (BDIV+1))
+     * baud_rate -> Baud Rate
+     * sel_clk -> Selected Clock
+     * CD -> Baud Rate Generator
+     * BDIV -> Baud Rate Divider
+     */
+    // TODO - Add support for auto Baud Rate generation (a sketch follows this patch)
+    bdiv = UART_BDIV_115200;
+    cd_calc = UART_CD_115200;
+
+    /** Configure the Baud Rate */
+    /* Disable the Rx and Tx path */
+    uart->control = (UART_CONTROL_RXDIS | UART_CONTROL_TXDIS);
+    /* Write the calculated CD value */
+    uart->br_gen = cd_calc;
+    /* Write the calculated BDIV value */
+    uart->br_div = bdiv;
+    /* Reset Tx and Rx paths */
+    uart->control = (UART_CONTROL_TXRES | UART_CONTROL_RXRES);
+    /* Enable the Rx and Tx path */
+    uart->control = (UART_CONTROL_TXEN | UART_CONTROL_RXEN);
+
+    return true;
+}
+
+uint32_t uart_getc(volatile struct Uart_Zynq_hw* uart)
+{
+    uint32_t data = 0;
+
+    // Choose one of the following: (Trigger Level or Not Empty)
+    /* Wait until RxFIFO is filled up to the trigger level */
+    while (!(uart->ch_status & UART_CH_STATUS_RTRIG)) { }
+    /* Wait until RxFIFO is not empty */
+    // while (uart->ch_status & UART_CH_STATUS_REMPTY) { }
+
+    data = uart->tx_rx_fifo;
+
+    return data;
+}
+
+void uart_putc(volatile struct Uart_Zynq_hw* uart, int8_t c)
+{
+    /* Wait until txFIFO is not full */
+    while (uart->ch_status & UART_CH_STATUS_TFUL) { }
+
+    uart->tx_rx_fifo = c;
+}
diff --git a/src/platform/fvp-a/fvpa_desc.c b/src/platform/fvp-a/fvpa_desc.c
index 7cad46adc..c0d62f435 100644
--- a/src/platform/fvp-a/fvpa_desc.c
+++ b/src/platform/fvp-a/fvpa_desc.c
@@ -13,12 +13,12 @@ struct platform platform = {
         {
             // DRAM, 0GB-2GB
             .base = 0x80000000,
-            .size = 0x80000000
-        }
+            .size = 0x80000000,
+        },
     },
 
     .console = {
-        .base = 0x1C090000 // UART0 (PL011)
+        .base = 0x1C090000, // UART0 (PL011)
     },
 
     .arch = {
@@ -28,8 +28,8 @@ struct platform platform = {
         .gich_addr = 0x2C010000,
         .gicv_addr = 0x2C02F000,
         .gicr_addr = 0x2F100000,
-        .maintenance_id = 25
+        .maintenance_id = 25,
     },
-    }
+    },
 };
diff --git a/src/platform/fvp-a/inc/plat/psci.h b/src/platform/fvp-a/inc/plat/psci.h
index d9a46fe33..f51686c99 100644
--- a/src/platform/fvp-a/inc/plat/psci.h
+++ b/src/platform/fvp-a/inc/plat/psci.h
@@ -6,11 +6,11 @@
 #ifndef __PLAT_PSCI_H__
 #define __PLAT_PSCI_H__
 
-#define PSCI_POWER_STATE_LVL_0 0x0000000
-#define PSCI_POWER_STATE_LVL_1 0x1000000
-#define PSCI_POWER_STATE_LVL_2 0x2000000
-#define PSCI_STATE_TYPE_STANDBY 0x00000
-#define PSCI_STATE_TYPE_POWERDOWN (0UL << 30) // TBD
-#define PSCI_STATE_TYPE_BIT (0UL << 30) // TBD
+#define PSCI_POWER_STATE_LVL_0    0x0000000
+#define PSCI_POWER_STATE_LVL_1    0x1000000
+#define PSCI_POWER_STATE_LVL_2    0x2000000
+#define PSCI_STATE_TYPE_STANDBY   0x00000
+#define PSCI_STATE_TYPE_POWERDOWN (0UL << 30) // TBD
+#define PSCI_STATE_TYPE_BIT       (0UL << 30) // TBD
 
-#endif // __PLAT_PSCI_H__
+#endif // __PLAT_PSCI_H__
diff --git a/src/platform/fvp-r/fvpr_desc.c b/src/platform/fvp-r/fvpr_desc.c
index 5aae595e1..dde9951f2 100644
--- a/src/platform/fvp-r/fvpr_desc.c
+++ b/src/platform/fvp-r/fvpr_desc.c
@@ -16,12 +16,12 @@ struct platform platform = {
         {
             // DRAM, 0GB-2GB
            .base = 0x00000000,
-            .size = 0x80000000
+            .size = 0x80000000,
         }
     },
 
     .console = {
-        .base = 0x9C090000 // UART0 (PL011)
+        .base = 0x9C090000, // UART0 (PL011)
     },
 
     .arch = {
@@ -31,12 +31,12 @@ struct platform platform = {
         .gich_addr = 0xAC010000,
         .gicv_addr = 0xAC02F000,
         .gicr_addr = 0xAF100000,
-        .maintenance_id = 25
+        .maintenance_id = 25,
     },
 
     .generic_timer = {
-        .base_addr = 0xAA430000
-    }
-    }
+        .base_addr = 0xAA430000,
+    },
+    },
 };
diff --git a/src/platform/fvp-r/inc/plat/psci.h b/src/platform/fvp-r/inc/plat/psci.h
index ca8a64df5..fc196ef54 100644
--- a/src/platform/fvp-r/inc/plat/psci.h
+++ b/src/platform/fvp-r/inc/plat/psci.h
@@ -6,11 +6,11 @@
 #ifndef __PLAT_PSCI_H__
 #define __PLAT_PSCI_H__
 
-#define PSCI_POWER_STATE_LVL_0 0x0000000 // TBD
-#define PSCI_POWER_STATE_LVL_1 0x1000000 // TBD
-#define PSCI_POWER_STATE_LVL_2 0x2000000 // TBD
-#define PSCI_STATE_TYPE_STANDBY 0x00000 // TBD
-#define PSCI_STATE_TYPE_POWERDOWN (0UL << 30) // TBD
-#define PSCI_STATE_TYPE_BIT (0UL << 30) // TBD
+#define PSCI_POWER_STATE_LVL_0    0x0000000   // TBD
+#define PSCI_POWER_STATE_LVL_1    0x1000000   // TBD
+#define PSCI_POWER_STATE_LVL_2    0x2000000   // TBD
+#define PSCI_STATE_TYPE_STANDBY   0x00000     // TBD
+#define PSCI_STATE_TYPE_POWERDOWN (0UL << 30) // TBD
+#define PSCI_STATE_TYPE_BIT       (0UL << 30) // TBD
 
-#endif // __PLAT_PSCI_H__
+#endif // __PLAT_PSCI_H__
diff --git a/src/platform/hikey960/hikey960_desc.c b/src/platform/hikey960/hikey960_desc.c
index 287b76fe0..c0f30d406 100644
--- a/src/platform/hikey960/hikey960_desc.c
+++ b/src/platform/hikey960/hikey960_desc.c
@@ -30,19 +30,19 @@ struct platform platform = {
             .gicc_addr = 0xE82B2000,
             .gich_addr = 0xE82B4000,
             .gicv_addr = 0xE82B6000,
-            .maintenance_id = 25
+            .maintenance_id = 25,
         },
 
         .generic_timer = {
-            .base_addr = 0xFFF08000 /* SYS_CNT */
+            .base_addr = 0xFFF08000, /* SYS_CNT */
         },
 
         .smmu = {
-            .base = 0xE8DC0000
-        }
+            .base = 0xE8DC0000,
+        },
     },
 
     .console = {
-        .base = 0xFFF32000 /* UART 6 */
+        .base = 0xFFF32000, /* UART 6 */
     },
 };
diff --git a/src/platform/hikey960/inc/plat/platform.h b/src/platform/hikey960/inc/plat/platform.h
index 7f31217dd..d6ed66099 100644
--- a/src/platform/hikey960/inc/plat/platform.h
+++ b/src/platform/hikey960/inc/plat/platform.h
@@ -1,5 +1,5 @@
 /**
- * SPDX-License-Identifier: Apache-2.0
+ * SPDX-License-Identifier: Apache-2.0
  * Copyright (c) Bao Project and Contributors. All rights reserved
  */
diff --git a/src/platform/hikey960/inc/plat/psci.h b/src/platform/hikey960/inc/plat/psci.h
index 6fec7c190..26a6207ea 100644
--- a/src/platform/hikey960/inc/plat/psci.h
+++ b/src/platform/hikey960/inc/plat/psci.h
@@ -6,11 +6,11 @@
 #ifndef __PLAT_PSCI_H__
 #define __PLAT_PSCI_H__
 
-#define PSCI_POWER_STATE_LVL_0 0x0000000
-#define PSCI_POWER_STATE_LVL_1 0x1000000
-#define PSCI_POWER_STATE_LVL_2 0x2000000
-#define PSCI_STATE_TYPE_STANDBY 0x00000
-#define PSCI_STATE_TYPE_POWERDOWN (1UL << 16)
-#define PSCI_STATE_TYPE_BIT (1UL << 16)
+#define PSCI_POWER_STATE_LVL_0    0x0000000
+#define PSCI_POWER_STATE_LVL_1    0x1000000
+#define PSCI_POWER_STATE_LVL_2    0x2000000
+#define PSCI_STATE_TYPE_STANDBY   0x00000
+#define PSCI_STATE_TYPE_POWERDOWN (1UL << 16)
+#define PSCI_STATE_TYPE_BIT       (1UL << 16)
 
 #endif // __PLAT_PSCI_H__
diff --git a/src/platform/imx8qm/imx8qm_desc.c b/src/platform/imx8qm/imx8qm_desc.c
index ce8f2e8cf..cf5e7e434 100644
--- a/src/platform/imx8qm/imx8qm_desc.c
+++ b/src/platform/imx8qm/imx8qm_desc.c
@@ -11,23 +11,23 @@ struct platform platform = {
     .regions = (struct mem_region[]) {
         {
             .base = 0x80020000,
-            .size = 0x80000000 - 0x20000 // 2 GiB - 128 KiB (reserved for ATF)
+            .size = 0x80000000 - 0x20000, // 2 GiB - 128 KiB (reserved for ATF)
         },
         {
             .base = 0x880000000,
-            .size = 0x100000000 // 4 GiB
-        }
+            .size = 0x100000000, // 4 GiB
+        },
     },
 
     .console = {
-        .base = 0x5a060000
+        .base = 0x5a060000,
     },
 
     .arch = {
         .clusters = {
            .num = 2,
-            .core_num = (size_t[]) {4, 2}
+            .core_num = (size_t[]) {4, 2},
         },
 
         .gic = {
@@ -36,7 +36,7 @@ struct platform platform = {
             .gicc_addr = 0x52000000,
             .gich_addr = 0x52010000,
             .gicv_addr = 0x52020000,
-            .maintenance_id = 25
+            .maintenance_id = 25,
         },
 
         .smmu = {
@@ -45,5 +45,5 @@ struct platform platform = {
             .global_mask = 0x7fc0,
         },
-    }
+    },
 };
diff --git a/src/platform/imx8qm/inc/plat/psci.h b/src/platform/imx8qm/inc/plat/psci.h
index 71a8115c7..5b1a01691 100644
--- a/src/platform/imx8qm/inc/plat/psci.h
+++ b/src/platform/imx8qm/inc/plat/psci.h
@@ -6,11 +6,11 @@
 #ifndef __PLAT_PSCI_H__
 #define __PLAT_PSCI_H__
 
-#define PSCI_POWER_STATE_LVL_0 0x0000000
-#define PSCI_POWER_STATE_LVL_1 0x1000000
-#define PSCI_POWER_STATE_LVL_2 0x2000000
-#define PSCI_STATE_TYPE_STANDBY 0x00000
-#define PSCI_STATE_TYPE_BIT (1UL << 16)
-#define PSCI_STATE_TYPE_POWERDOWN PSCI_STATE_TYPE_BIT
+#define PSCI_POWER_STATE_LVL_0    0x0000000
+#define PSCI_POWER_STATE_LVL_1    0x1000000
+#define PSCI_POWER_STATE_LVL_2    0x2000000
+#define PSCI_STATE_TYPE_STANDBY   0x00000
+#define PSCI_STATE_TYPE_BIT       (1UL << 16)
+#define PSCI_STATE_TYPE_POWERDOWN PSCI_STATE_TYPE_BIT
 
 #endif // __PLAT_PSCI_H__
diff --git a/src/platform/qemu-aarch64-virt/inc/plat/platform.h b/src/platform/qemu-aarch64-virt/inc/plat/platform.h
index 7f31217dd..d6ed66099 100644
--- a/src/platform/qemu-aarch64-virt/inc/plat/platform.h
+++ b/src/platform/qemu-aarch64-virt/inc/plat/platform.h
@@ -1,5 +1,5 @@
 /**
- * SPDX-License-Identifier: Apache-2.0
+ * SPDX-License-Identifier: Apache-2.0
  * Copyright (c) Bao Project and Contributors. All rights reserved
  */
diff --git a/src/platform/qemu-aarch64-virt/inc/plat/psci.h b/src/platform/qemu-aarch64-virt/inc/plat/psci.h
index 5abb22c66..9f777b4fb 100644
--- a/src/platform/qemu-aarch64-virt/inc/plat/psci.h
+++ b/src/platform/qemu-aarch64-virt/inc/plat/psci.h
@@ -6,11 +6,11 @@
 #ifndef __PLAT_PSCI_H__
 #define __PLAT_PSCI_H__
 
-#define PSCI_POWER_STATE_LVL_0 0x0000000
-#define PSCI_POWER_STATE_LVL_1 0x1000000
-#define PSCI_POWER_STATE_LVL_2 0x2000000
-#define PSCI_STATE_TYPE_STANDBY 0x00000
-#define PSCI_STATE_TYPE_BIT (1UL << 16)
-#define PSCI_STATE_TYPE_POWERDOWN (0x1)
+#define PSCI_POWER_STATE_LVL_0    0x0000000
+#define PSCI_POWER_STATE_LVL_1    0x1000000
+#define PSCI_POWER_STATE_LVL_2    0x2000000
+#define PSCI_STATE_TYPE_STANDBY   0x00000
+#define PSCI_STATE_TYPE_BIT       (1UL << 16)
+#define PSCI_STATE_TYPE_POWERDOWN (0x1)
 
 #endif // __PLAT_PSCI_H__
diff --git a/src/platform/qemu-aarch64-virt/virt_desc.c b/src/platform/qemu-aarch64-virt/virt_desc.c
index 55d877a3a..4a31166d5 100644
--- a/src/platform/qemu-aarch64-virt/virt_desc.c
+++ b/src/platform/qemu-aarch64-virt/virt_desc.c
@@ -12,8 +12,8 @@ struct platform platform = {
     .regions = (struct mem_region[]) {
         {
             .base = 0x40000000,
-            .size = 0x100000000
-        }
+            .size = 0x100000000,
+        },
     },
 
     .console = {
@@ -27,8 +27,8 @@ struct platform platform = {
         .gich_addr = 0x08030000,
         .gicv_addr = 0x08040000,
         .gicr_addr = 0x080A0000,
-        .maintenance_id = 25
+        .maintenance_id = 25,
     },
-    }
+    },
 };
diff --git a/src/platform/qemu-riscv64-virt/inc/plat/platform.h b/src/platform/qemu-riscv64-virt/inc/plat/platform.h
index eb312b356..e62fe25b3 100644
--- a/src/platform/qemu-riscv64-virt/inc/plat/platform.h
+++ b/src/platform/qemu-riscv64-virt/inc/plat/platform.h
@@ -1,5 +1,5 @@
 /**
- * SPDX-License-Identifier: Apache-2.0
+ * SPDX-License-Identifier: Apache-2.0
  * Copyright (c) Bao Project and Contributors. All rights reserved
  */
@@ -8,6 +8,6 @@
 
 #include
 
-#define CPU_EXT_SSTC 1
+#define CPU_EXT_SSTC 1
 
 #endif
diff --git a/src/platform/qemu-riscv64-virt/virt_desc.c b/src/platform/qemu-riscv64-virt/virt_desc.c
index abd03723a..ddc089d91 100644
--- a/src/platform/qemu-riscv64-virt/virt_desc.c
+++ b/src/platform/qemu-riscv64-virt/virt_desc.c
@@ -1,5 +1,5 @@
 /**
- * SPDX-License-Identifier: Apache-2.0
+ * SPDX-License-Identifier: Apache-2.0
  * Copyright (c) Bao Project and Contributors. All rights reserved.
  */
@@ -14,18 +14,18 @@ struct platform platform = {
     .regions = (struct mem_region[]) {
         {
             .base = 0x80200000,
-            .size = 0x100000000 - 0x200000
-        }
+            .size = 0x100000000 - 0x200000,
+        },
     },
 
     .arch = {
-    #if (IRQC == PLIC)
-        .irqc.plic.base = 0xc000000,
-    #elif (IRQC == APLIC)
+#if (IRQC == PLIC)
+        .irqc.plic.base = 0xc000000,
+#elif (IRQC == APLIC)
         .irqc.aia.aplic.base = 0xd000000,
-    #else
-    #error "unknown IRQC type " IRQC
-    #endif
-    }
+#else
+#error "unknown IRQC type " IRQC
+#endif
+    },
 };
diff --git a/src/platform/rpi4/inc/plat/platform.h b/src/platform/rpi4/inc/plat/platform.h
index 4d557d970..508309901 100644
--- a/src/platform/rpi4/inc/plat/platform.h
+++ b/src/platform/rpi4/inc/plat/platform.h
@@ -1,12 +1,12 @@
 /**
- * SPDX-License-Identifier: Apache-2.0
+ * SPDX-License-Identifier: Apache-2.0
  * Copyright (c) Bao Project and Contributors. All rights reserved
  */
 
 #ifndef __PLAT_PLATFORM_H__
 #define __PLAT_PLATFORM_H__
 
-#define UART8250_REG_WIDTH (4)
+#define UART8250_REG_WIDTH   (4)
 #define UART8250_PAGE_OFFSET (0x40)
 
 #include
diff --git a/src/platform/rpi4/inc/plat/psci.h b/src/platform/rpi4/inc/plat/psci.h
index e793fa32f..67241c529 100644
--- a/src/platform/rpi4/inc/plat/psci.h
+++ b/src/platform/rpi4/inc/plat/psci.h
@@ -6,9 +6,9 @@
 #ifndef __PLAT_PSCI_H__
 #define __PLAT_PSCI_H__
 
-#define PSCI_POWER_STATE_LVL_0 0x0000002
-#define PSCI_STATE_TYPE_STANDBY 0x0000000
-#define PSCI_STATE_TYPE_BIT (1UL << 16)
-#define PSCI_STATE_TYPE_POWERDOWN PSCI_STATE_TYPE_BIT
+#define PSCI_POWER_STATE_LVL_0    0x0000002
+#define PSCI_STATE_TYPE_STANDBY   0x0000000
+#define PSCI_STATE_TYPE_BIT       (1UL << 16)
+#define PSCI_STATE_TYPE_POWERDOWN PSCI_STATE_TYPE_BIT
 
 #endif // __PLAT_PSCI_H__
diff --git a/src/platform/rpi4/rpi4_desc.c b/src/platform/rpi4/rpi4_desc.c
index a06b0be62..f6d0bceb3 100644
--- a/src/platform/rpi4/rpi4_desc.c
+++ b/src/platform/rpi4/rpi4_desc.c
@@ -12,20 +12,20 @@ struct platform platform = {
         {
             /*
              * - 0x8000 at the bottom reserved for atf
-             * - 0x4c00000 (76 MiB) at the top reserved for gpu (depends on
-             *   gpu_mem in config.txt. this is the default)
+             * - 0x4c00000 (76 MiB) at the top reserved for gpu (depends on gpu_mem in config.txt.
+             *   this is the default)
              */
             .base = 0x80000,
-            .size = 0x40000000 - 0x80000 - 0x4c00000
+            .size = 0x40000000 - 0x80000 - 0x4c00000,
         },
         {
             .base = 0x40000000,
-            .size = ((RPI4_MEM_GB-1) * 0x40000000ULL) - 0x4000000
-        }
+            .size = ((RPI4_MEM_GB-1) * 0x40000000ULL) - 0x4000000,
+        },
     },
 
     .console = {
-        .base = 0xfe215000
+        .base = 0xfe215000,
     },
 
     .arch = {
@@ -34,7 +34,7 @@ struct platform platform = {
         .gicc_addr = 0xff842000,
         .gich_addr = 0xff844000,
         .gicv_addr = 0xff846000,
-        .maintenance_id = 25
+        .maintenance_id = 25,
     },
-    }
+    },
 };
diff --git a/src/platform/tx2/inc/plat/platform.h b/src/platform/tx2/inc/plat/platform.h
index 0f98ba051..051f95a98 100644
--- a/src/platform/tx2/inc/plat/platform.h
+++ b/src/platform/tx2/inc/plat/platform.h
@@ -6,7 +6,7 @@
 #ifndef __PLAT_PLATFORM_H__
 #define __PLAT_PLATFORM_H__
 
-#define UART8250_REG_WIDTH 4
+#define UART8250_REG_WIDTH 4
 
 #include
diff --git a/src/platform/tx2/inc/plat/psci.h b/src/platform/tx2/inc/plat/psci.h
index b271f9abf..4e2ad4755 100644
--- a/src/platform/tx2/inc/plat/psci.h
+++ b/src/platform/tx2/inc/plat/psci.h
@@ -6,11 +6,11 @@
 #ifndef __PLAT_PSCI_H__
 #define __PLAT_PSCI_H__
 
-#define PSCI_POWER_STATE_LVL_0 0x0000000
-#define PSCI_POWER_STATE_LVL_1 0x1000000
-#define PSCI_POWER_STATE_LVL_2 0x2000000
-#define PSCI_STATE_TYPE_STANDBY 0x00006
-#define PSCI_STATE_TYPE_BIT (1UL << 30)
-#define PSCI_STATE_TYPE_POWERDOWN (0x7 | PSCI_STATE_TYPE_BIT)
+#define PSCI_POWER_STATE_LVL_0    0x0000000
+#define PSCI_POWER_STATE_LVL_1    0x1000000
+#define PSCI_POWER_STATE_LVL_2    0x2000000
+#define PSCI_STATE_TYPE_STANDBY   0x00006
+#define PSCI_STATE_TYPE_BIT       (1UL << 30)
+#define PSCI_STATE_TYPE_POWERDOWN (0x7 | PSCI_STATE_TYPE_BIT)
 
 #endif // __PLAT_PSCI_H__
diff --git a/src/platform/tx2/tx2_desc.c b/src/platform/tx2/tx2_desc.c
index 4f51bdbc5..a9cb4abc3 100644
--- a/src/platform/tx2/tx2_desc.c
+++ b/src/platform/tx2/tx2_desc.c
@@ -11,12 +11,12 @@ struct platform platform = {
     .regions = (struct mem_region[]) {
         {
             .base = 0x80000000,
-            .size = 0x200000000
+            .size = 0x200000000,
         },
     },
 
     .console = {
-        .base = 0x03100000
+        .base = 0x03100000,
     },
 
     .arch = {
@@ -25,7 +25,7 @@ struct platform platform = {
         .gicc_addr = 0x03882000,
         .gich_addr = 0x03884000,
         .gicv_addr = 0x03886000,
-        .maintenance_id = 25
+        .maintenance_id = 25,
     },
 
     .clusters = {
@@ -37,7 +37,7 @@ struct platform platform = {
         .base = 0x12000000,
         .interrupt_id = 187,
         .global_mask = 0x7f80,
-    }
-    }
+    },
+    },
 };
diff --git a/src/platform/ultra96/inc/plat/psci.h b/src/platform/ultra96/inc/plat/psci.h
index 4855665b7..372a7390e 100644
--- a/src/platform/ultra96/inc/plat/psci.h
+++ b/src/platform/ultra96/inc/plat/psci.h
@@ -6,11 +6,11 @@
 #ifndef __PLAT_PSCI_H__
 #define __PLAT_PSCI_H__
 
-#define PSCI_POWER_STATE_LVL_0 0x0000000
-#define PSCI_POWER_STATE_LVL_1 0x1000000
-#define PSCI_POWER_STATE_LVL_2 0x2000000
-#define PSCI_STATE_TYPE_STANDBY 0x00000
-#define PSCI_STATE_TYPE_POWERDOWN (1UL << 30)
-#define PSCI_STATE_TYPE_BIT (1UL << 30)
+#define PSCI_POWER_STATE_LVL_0    0x0000000
+#define PSCI_POWER_STATE_LVL_1    0x1000000
+#define PSCI_POWER_STATE_LVL_2    0x2000000
+#define PSCI_STATE_TYPE_STANDBY   0x00000
+#define PSCI_STATE_TYPE_POWERDOWN (1UL << 30)
+#define PSCI_STATE_TYPE_BIT       (1UL << 30)
 
 #endif // __PLAT_PSCI_H__
diff --git a/src/platform/ultra96/ultra96_desc.c b/src/platform/ultra96/ultra96_desc.c
index 0771fb0a9..1227ec505 100644
--- a/src/platform/ultra96/ultra96_desc.c
+++ b/src/platform/ultra96/ultra96_desc.c
@@ -11,16 +11,16 @@ struct platform platform = {
     .regions = (struct mem_region[]) {
         {
             .base = 0x00000000,
-            .size = 0x100000
+            .size = 0x100000,
         },
         {
            .base = 0x00100000,
-            .size = 0x7FE00000
+            .size = 0x7FE00000,
         },
     },
 
     .console = {
-        .base = 0xFF010000
+        .base = 0xFF010000,
     },
 
     .arch = {
@@ -29,12 +29,12 @@ struct platform platform = {
         .gicc_addr = 0xF902f000,
         .gich_addr = 0xF9040000,
         .gicv_addr = 0xF906f000,
-        .maintenance_id = 25
+        .maintenance_id = 25,
     },
 
     .generic_timer = {
-        .base_addr = 0xFF260000
-    }
-    }
+        .base_addr = 0xFF260000,
+    },
+    },
 };
diff --git a/src/platform/zcu102/inc/plat/platform.h b/src/platform/zcu102/inc/plat/platform.h
index e4af8fcf8..d0f77cbd3 100644
--- a/src/platform/zcu102/inc/plat/platform.h
+++ b/src/platform/zcu102/inc/plat/platform.h
@@ -1,5 +1,5 @@
 /**
- * SPDX-License-Identifier: Apache-2.0
+ * SPDX-License-Identifier: Apache-2.0
  * Copyright (c) Bao Project and Contributors. All rights reserved
  */
diff --git a/src/platform/zcu102/inc/plat/psci.h b/src/platform/zcu102/inc/plat/psci.h
index 4855665b7..372a7390e 100644
--- a/src/platform/zcu102/inc/plat/psci.h
+++ b/src/platform/zcu102/inc/plat/psci.h
@@ -6,11 +6,11 @@
 #ifndef __PLAT_PSCI_H__
 #define __PLAT_PSCI_H__
 
-#define PSCI_POWER_STATE_LVL_0 0x0000000
-#define PSCI_POWER_STATE_LVL_1 0x1000000
-#define PSCI_POWER_STATE_LVL_2 0x2000000
-#define PSCI_STATE_TYPE_STANDBY 0x00000
-#define PSCI_STATE_TYPE_POWERDOWN (1UL << 30)
-#define PSCI_STATE_TYPE_BIT (1UL << 30)
+#define PSCI_POWER_STATE_LVL_0    0x0000000
+#define PSCI_POWER_STATE_LVL_1    0x1000000
+#define PSCI_POWER_STATE_LVL_2    0x2000000
+#define PSCI_STATE_TYPE_STANDBY   0x00000
+#define PSCI_STATE_TYPE_POWERDOWN (1UL << 30)
+#define PSCI_STATE_TYPE_BIT       (1UL << 30)
 
 #endif // __PLAT_PSCI_H__
diff --git a/src/platform/zcu102/zcu102_desc.c b/src/platform/zcu102/zcu102_desc.c
index 69116f4d6..cf599a6fe 100644
--- a/src/platform/zcu102/zcu102_desc.c
+++ b/src/platform/zcu102/zcu102_desc.c
@@ -11,30 +11,28 @@ struct platform platform = {
     .regions = (struct mem_region[]) {
         {
             /**
-             * The Arm Trusted Firmware shipped in the default Xilinx BOOT.BIN
-             * is loaded in a non secure zone, more specifically at the end of
-             * the first memory bank. Being in a non-secure zone means that can
-             * be easily overwritten.
+             * The Arm Trusted Firmware shipped in the default Xilinx BOOT.BIN is loaded in a non
+             * secure zone, more specifically at the end of the first memory bank. Being in a
+             * non-secure zone means that it can be easily overwritten.
              *
-             * The memory size is therefore shrunk to take this into account and
-             * avoid memory corruption.
+             * The memory size is therefore shrunk to take this into account and avoid memory
+             * corruption.
              *
-             * Note that if the ATF is compiled with debug symbols or with a
-             * custom SPD service, then it gets loaded at the *beginning* of the
-             * first memory bank, in that case the base address should be
-             * changed to 0x80000, and the size shrunk accorindgly.
+             * Note that if the ATF is compiled with debug symbols or with a custom SPD service,
+             * then it gets loaded at the *beginning* of the first memory bank; in that case the
+             * base address should be changed to 0x80000, and the size shrunk accordingly.
              */
             .base = 0x00000000,
-            .size = 0x80000000 - 0x16000
+            .size = 0x80000000 - 0x16000,
         },
         {
             .base = 0x800000000,
-            .size = 0x80000000
+            .size = 0x80000000,
         }
     },
 
     .console = {
-        .base = 0xFF000000
+        .base = 0xFF000000,
     },
 
     .arch = {
@@ -43,17 +41,17 @@ struct platform platform = {
         .gicc_addr = 0xF902f000,
         .gich_addr = 0xF9040000,
         .gicv_addr = 0xF906f000,
-        .maintenance_id = 25
+        .maintenance_id = 25,
     },
 
     .smmu = {
         .base = 0xFD800000,
-        .interrupt_id = 187
+        .interrupt_id = 187,
     },
 
     .generic_timer = {
-        .base_addr = 0xFF260000
-    }
-    }
+        .base_addr = 0xFF260000,
+    },
+    },
 };
diff --git a/src/platform/zcu104/inc/plat/psci.h b/src/platform/zcu104/inc/plat/psci.h
index 4855665b7..372a7390e 100644
--- a/src/platform/zcu104/inc/plat/psci.h
+++ b/src/platform/zcu104/inc/plat/psci.h
@@ -6,11 +6,11 @@
 #ifndef __PLAT_PSCI_H__
 #define __PLAT_PSCI_H__
 
-#define PSCI_POWER_STATE_LVL_0 0x0000000
-#define PSCI_POWER_STATE_LVL_1 0x1000000
-#define PSCI_POWER_STATE_LVL_2 0x2000000
-#define PSCI_STATE_TYPE_STANDBY 0x00000
-#define PSCI_STATE_TYPE_POWERDOWN (1UL << 30)
-#define PSCI_STATE_TYPE_BIT (1UL << 30)
+#define PSCI_POWER_STATE_LVL_0    0x0000000
+#define PSCI_POWER_STATE_LVL_1    0x1000000
+#define PSCI_POWER_STATE_LVL_2    0x2000000
+#define PSCI_STATE_TYPE_STANDBY   0x00000
+#define PSCI_STATE_TYPE_POWERDOWN (1UL << 30)
+#define PSCI_STATE_TYPE_BIT       (1UL << 30)
 
 #endif // __PLAT_PSCI_H__
diff --git a/src/platform/zcu104/zcu104_desc.c b/src/platform/zcu104/zcu104_desc.c
index 7b17f2932..4baa5abaa 100644
--- a/src/platform/zcu104/zcu104_desc.c
+++ b/src/platform/zcu104/zcu104_desc.c
@@ -11,26 +11,24 @@ struct platform platform = {
     .regions = (struct mem_region[]) {
         {
             /**
-             * The Arm Trusted Firmware shipped in the default Xilinx BOOT.BIN
-             * is loaded in a non secure zone, more specifically at the end of
-             * the first memory bank. Being in a non-secure zone means that can
-             * be easily overwritten.
+             * The Arm Trusted Firmware shipped in the default Xilinx BOOT.BIN is loaded in a non
+             * secure zone, more specifically at the end of the first memory bank. Being in a
+             * non-secure zone means that it can be easily overwritten.
              *
-             * The memory size is therefore shrunk to take this into account and
-             * avoid memory corruption.
+             * The memory size is therefore shrunk to take this into account and avoid memory
+             * corruption.
              *
-             * Note that if the ATF is compiled with debug symbols or with a
-             * custom SPD service, then it gets loaded at the *beginning* of the
-             * first memory bank, in that case the base address should be
-             * changed to 0x80000, and the size shrunk accorindgly.
+             * Note that if the ATF is compiled with debug symbols or with a custom SPD service,
+             * then it gets loaded at the *beginning* of the first memory bank; in that case the
+             * base address should be changed to 0x80000, and the size shrunk accordingly.
              */
             .base = 0x00080000,
-            .size = 0x7FF00000 - 0x16000
+            .size = 0x7FF00000 - 0x16000,
         }
     },
 
     .console = {
-        .base = 0xFF000000
+        .base = 0xFF000000,
     },
 
     .arch = {
@@ -39,16 +37,16 @@ struct platform platform = {
         .gicc_addr = 0xF902f000,
         .gich_addr = 0xF9040000,
         .gicv_addr = 0xF906f000,
-        .maintenance_id = 25
+        .maintenance_id = 25,
     },
 
     .smmu = {
         .base = 0xFD800000,
-        .interrupt_id = 187
-    },
+        .interrupt_id = 187,
+    },
 
     .generic_timer = {
-        .base_addr = 0xFF260000
-    }
-    }
+        .base_addr = 0xFF260000,
+    },
+    },
 };
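Editor's note on the baud-rate TODO in uart_set_baud_rate() above: the driver hardcodes UART_BDIV_115200 and UART_CD_115200 and leaves automatic CD/BDIV generation as a TODO. Below is a minimal sketch of what that calculation could look like, derived only from the formula documented in the patch, baud_rate = sel_clk / (CD * (BDIV + 1)), and from the UART_MAX_ERROR convention (tenths of a percent). The helper name uart_calc_brgr() is hypothetical and is not part of the driver.

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical helper: search for a CD/BDIV pair such that
     * sel_clk / (cd * (bdiv + 1)) is within 0.5% (UART_MAX_ERROR == 5,
     * in 0.1% units) of the requested baud rate. Assumes baud > 0. */
    static bool uart_calc_brgr(uint32_t sel_clk, uint32_t baud, uint16_t* cd, uint8_t* bdiv)
    {
        for (uint32_t d = 4; d <= 255; d++) {        /* BDIV values 0-3 are ignored by the IP */
            uint32_t c = sel_clk / (baud * (d + 1)); /* candidate clock divisor (CD) */
            if (c == 0 || c > UINT16_MAX) {
                continue; /* CD is a 16-bit field */
            }
            uint32_t actual = sel_clk / (c * (d + 1));
            uint32_t err = (actual > baud) ? (actual - baud) : (baud - actual);
            if (((uint64_t)err * 1000u) / baud <= 5u) { /* 5 -> 0.5%, as UART_MAX_ERROR */
                *cd = (uint16_t)c;
                *bdiv = (uint8_t)d;
                return true;
            }
        }
        return false; /* no divisor pair meets the error budget */
    }

A caller would then program the results into br_gen and br_div exactly as uart_set_baud_rate() already does for the hardcoded 115200 values.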
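The register block added in zynq_uart.h documents its offsets only in comments (0x0000 through 0x0048). A compile-time check keeps the struct layout and those documented offsets honest, which matters here because the reserved[2] gap is what places tx_fifo_trig at 0x44. This is an illustrative sketch, not part of the patch, and assumes a C11 toolchain with _Static_assert; it only restates offsets already listed in the header.

    #include <stddef.h>

    /* Spot-check the documented offsets of struct Uart_Zynq_hw. */
    _Static_assert(offsetof(struct Uart_Zynq_hw, ch_status) == 0x2C, "ch_status must sit at 0x2C");
    _Static_assert(offsetof(struct Uart_Zynq_hw, tx_rx_fifo) == 0x30, "tx_rx_fifo must sit at 0x30");
    _Static_assert(offsetof(struct Uart_Zynq_hw, tx_fifo_trig) == 0x44, "tx_fifo_trig must sit at 0x44");
    _Static_assert(offsetof(struct Uart_Zynq_hw, rx_fifo_byte) == 0x48, "rx_fifo_byte must sit at 0x48");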
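For context on how the public interface fits together, here is a hypothetical polled-console snippet. The base address reuses the console address the ultra96 descriptor above points at (0xFF010000); treat both the address and the mapping as assumptions, since setting up that mapping is the platform code's job, and EXAMPLE_UART_BASE and example_console_banner() are invented for illustration.

    #include <stddef.h>
    #include <drivers/zynq_uart.h> /* assumed include path, as in the driver above */

    /* Hypothetical: the UART base the ultra96 descriptor uses for its console. */
    #define EXAMPLE_UART_BASE 0xFF010000UL

    static void example_console_banner(void)
    {
        volatile struct Uart_Zynq_hw* uart = (volatile struct Uart_Zynq_hw*)EXAMPLE_UART_BASE;

        if (!uart_init(uart)) {
            return; /* baud-rate programming failed */
        }
        uart_enable(uart);

        const char msg[] = "bao: console up\n";
        for (size_t i = 0; msg[i] != '\0'; i++) {
            uart_putc(uart, (int8_t)msg[i]); /* blocks while the Tx FIFO is full */
        }
    }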