2 | 2 |
3 | 3 | .globl _start |
4 | 4 | _start: |
5 | | - MRS X0, MPIDR_EL1 // Check Core Id, we only use one core. |
6 | | - MOV X1, #0XC1000000 |
7 | | - BIC X0, X0, X1 |
8 | | - CBZ X0, master |
9 | | - B hang |
| 5 | + mrs x0, mpidr_el1 // check core id, we only use one core. |
| 6 | + mov x1, #0xc1000000 |
| 7 | + bic x0, x0, x1 |
| 8 | + cbz x0, master |
| 9 | + b hang |
10 | 10 |
11 | 11 | master: |
12 | | - LDR X0, =0X04008000 |
13 | | - MOV SP, X0 // Set EL3 SP |
14 | | - BL el2_main |
| 12 | + ldr x0, =0x1000000 |
| 13 | + mov sp, x0 // set el3 sp |
| 14 | + bl el2_main |
15 | 15 |
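The boot path above gates startup on the core id: mpidr_el1 is read, the res1 (bit 31), u (bit 30) and mt (bit 24) flag bits are cleared via the 0xc1000000 mask, and only the core whose affinity fields are all zero falls through to master, where the el3 stack pointer is set to 0x1000000 before calling el2_main. A minimal C sketch of the same check, assuming a bare-metal aarch64 gcc toolchain (the function name is illustrative, not from this commit):

    static inline unsigned long core_affinity(void) {
        unsigned long mpidr;
        asm volatile("mrs %0, mpidr_el1" : "=r"(mpidr));
        return mpidr & ~0xc1000000UL;  /* keep aff2/aff1/aff0, drop the flag bits */
    }
    /* the boot core is the one with core_affinity() == 0; all others hang */
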
16 | 16 | hang: |
17 | | - B hang |
18 | | - |
| 17 | + b hang |
| 18 | + |
19 | 19 | .globl get_current_el |
20 | 20 | get_current_el: |
21 | | - MRS X0, CURRENTEL |
22 | | - MOV X1, #2 |
23 | | - LSR X0, X0, #2 |
24 | | - RET |
25 | | - |
| 21 | + mrs x0, currentel |
| 22 | + mov x1, #2 |
| 23 | + lsr x0, x0, #2 |
| 24 | + ret |
| 25 | + |
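currentel stores the exception level in bits [3:2], so the right shift by two returns 0-3 (the mov x1, #2 above is leftover and has no effect on the result). A hedged C equivalent, assuming gcc-style inline assembly:

    static inline unsigned long get_current_el(void) {
        unsigned long el;
        asm volatile("mrs %0, CurrentEL" : "=r"(el));
        return el >> 2;   /* 1 = el1, 2 = el2, 3 = el3 */
    }
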
26 | 26 | .globl el1_mmu_activate |
27 | 27 | el1_mmu_activate: |
28 | | - LDR X0, =0X04CC |
29 | | - MSR MAIR_EL1, X0 |
30 | | - ISB |
31 | | - |
32 | | - LDR X1, =0X02000000 |
33 | | - MSR TTBR0_EL1, X1 |
34 | | - ISB |
35 | | - |
36 | | - MRS X2, TCR_EL1 |
37 | | - LDR X3, =0x70040FFBF |
38 | | - BIC X2, X2, X3 |
39 | | - |
40 | | - LDR X3, =0X200803F18 |
41 | | - ORR X2, X2, X3 |
42 | | - MSR TCR_EL1, X2 |
43 | | - ISB |
44 | | - |
45 | | - MRS X3, SCTLR_EL1 |
46 | | - LDR X4, =0X80000 |
47 | | - BIC X3, X3, X4 |
48 | | - |
49 | | - LDR X4, =0X1005 |
50 | | - ORR X3, X3, X4 |
51 | | - MSR SCTLR_EL1, X3 |
52 | | - ISB |
53 | | - RET |
| 28 | + ldr x0, =0x04cc // attr0 = 0xcc: normal, write-back; attr1 = 0x04: device-nGnRE |
| 29 | + msr mair_el1, x0 |
| 30 | + isb |
| 31 | + |
| 32 | + ldr x1, =0x02000000 // translation table base address |
| 33 | + msr ttbr0_el1, x1 |
| 34 | + isb |
| 35 | + |
| 36 | + mrs x2, tcr_el1 |
| 37 | + ldr x3, =0x70040ffbf // field mask: t0sz, epd0, irgn0/orgn0, sh0, tg0, a1, ips |
| 38 | + bic x2, x2, x3 |
| 39 | + |
| 40 | + ldr x3, =0x200803f18 // t0sz=24, write-back, inner shareable, 4kb granule, epd1, 40-bit pa |
| 41 | + orr x2, x2, x3 |
| 42 | + msr tcr_el1, x2 |
| 43 | + isb |
| 44 | + |
| 45 | + mrs x3, sctlr_el1 |
| 46 | + ldr x4, =0x80000 // wxn (bit 19) |
| 47 | + bic x3, x3, x4 |
| 48 | + |
| 49 | + ldr x4, =0x1005 // m + c + i: mmu, d-cache, i-cache on |
| 50 | + orr x3, x3, x4 |
| 51 | + msr sctlr_el1, x3 |
| 52 | + isb |
| 53 | + ret |
54 | 54 |
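el1_mmu_activate programs three registers with magic constants. As a cross-check, the tcr_el1 value 0x200803f18 can be rebuilt from named fields; the macro names below are mine, only the shifts and widths follow from the armv8-a register layout:

    #define TCR_T0SZ_24    (24UL << 0)   /* 2^(64-24): 40-bit va space   */
    #define TCR_IRGN0_WB   (3UL << 8)    /* inner write-back table walks */
    #define TCR_ORGN0_WB   (3UL << 10)   /* outer write-back table walks */
    #define TCR_SH0_INNER  (3UL << 12)   /* inner shareable              */
    #define TCR_TG0_4K     (0UL << 14)   /* 4 kb granule                 */
    #define TCR_EPD1_DIS   (1UL << 23)   /* disable ttbr1_el1 walks      */
    #define TCR_IPS_40BIT  (2UL << 32)   /* 40-bit physical addresses    */
    /* or-ing the above yields 0x200803f18, the value written above */
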
55 | 55 | .globl jump_to_el1 |
56 | 56 | jump_to_el1: |
57 | | - MRS X0, CURRENTEL // Check if already in EL1 |
58 | | - CMP X0, #4 |
59 | | - BEQ 1f |
60 | | - |
61 | | - LDR X0, =0X03C08000 |
62 | | - MSR SP_EL1, X0 // Init the stack of EL1 |
63 | | - |
64 | | - // Disable coprocessor traps |
65 | | - MOV X0, #0X33ff |
66 | | - MSR CPTR_EL2, X0 // Disable coprocessor traps to EL2 |
67 | | - MSR HSTR_EL2, xzr // Disable coprocessor traps to EL2 |
68 | | - MOV X0, #3 << 20 |
69 | | - MSR CPACR_EL1, X0 // Enable FP/SIMD at EL1 |
70 | | - |
71 | | - // Initialize HCR_EL2 |
72 | | - MOV X0, #(1 << 31) |
73 | | - MSR HCR_EL2, X0 // Set EL1 to 64 bit |
74 | | - MOV X0, #0X0800 |
75 | | - MOVK X0, #0X30d0, LSL #16 |
76 | | - MSR SCTLR_EL1, X0 |
77 | | - |
78 | | - // Return to the EL1_SP1 mode from EL2 |
79 | | - MOV X0, #0X3C5 |
80 | | - MSR SPSR_EL2, X0 // EL1_SP0 | D | A | I | F |
81 | | - ADR X0, 1f |
82 | | - MSR ELR_EL2, X0 |
83 | | - ERET |
84 | | - |
| 57 | + mrs x0, currentel // check if already in el1 |
| 58 | + cmp x0, #4 |
| 59 | + beq 1f |
| 60 | + |
| 61 | + ldr x0, =0xf00000 |
| 62 | + msr sp_el1, x0 // init the stack of el1 |
| 63 | + |
| 64 | + // disable coprocessor traps |
| 65 | + mov x0, #0x33ff |
| 66 | + msr cptr_el2, x0 // disable coprocessor traps to el2 |
| 67 | + msr hstr_el2, xzr // disable aarch32 cp15 traps to el2 |
| 68 | + mov x0, #3 << 20 |
| 69 | + msr cpacr_el1, x0 // enable fp/simd at el1 |
| 70 | + |
| 71 | + // initialize hcr_el2 |
| 72 | + mov x0, #(1 << 31) |
| 73 | + msr hcr_el2, x0 // set el1 to 64 bit |
| 74 | + mov x0, #0x0800 |
| 75 | + movk x0, #0x30d0, lsl #16 // 0x30d00800: sctlr_el1 res1 bits, mmu and caches off |
| 76 | + msr sctlr_el1, x0 |
| 77 | + |
| 78 | + // return from el2 to el1h (el1 using sp_el1) |
| 79 | + mov x0, #0x3c5 |
| 80 | + msr spsr_el2, x0 // el1h | d | a | i | f |
| 81 | + adr x0, 1f |
| 82 | + msr elr_el2, x0 |
| 83 | + eret |
| 84 | + |
85 | 85 | 1: |
86 | | - MRS X0, SCTLR_EL1 |
87 | | - ORR X0, X0, #(1 << 12) |
88 | | - MSR SCTLR_EL1, X0 // enable instruction cache |
89 | | - |
90 | | - B main |
91 | | - |
| 86 | + mrs x0, sctlr_el1 |
| 87 | + orr x0, x0, #(1 << 12) |
| 88 | + msr sctlr_el1, x0 // enable instruction cache |
| 89 | + b main |
| 90 | + |
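The eret above drops from el2 to el1 with state described by two more constants. A hedged decode (macro names are mine, not from the source):

    #define SPSR_M_EL1H   0x5UL          /* m[3:0] = 0b0101: el1 using sp_el1 */
    #define SPSR_DAIF     (0xFUL << 6)   /* mask debug, serror, irq and fiq   */
    /* SPSR_DAIF | SPSR_M_EL1H == 0x3c5, the spsr_el2 value written above */

    #define HCR_RW        (1UL << 31)    /* el1 executes in aarch64 state      */
    #define SCTLR_INIT    0x30d00800UL   /* sctlr_el1 res1 bits; mmu/caches off */
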
92 | 91 | .globl tlb_invalidate |
93 | 92 | tlb_invalidate: |
94 | | - DSB ISHST // ensure write has completed |
95 | | - LDR X0, [X0] // load VA from X0 |
96 | | - TLBI VAAE1, X0 // invalidate TLB by VA, All ASID, EL1. |
97 | | - DSB ISH // ensure completion of TLB invalidation |
98 | | - ISB // synchronize context and ensure that no instructions are fetched using the old translation |
99 | | - RET |
100 | | - |
101 | | -.globl PUT32 |
102 | | -PUT32: |
103 | | - STR W1,[X0] |
104 | | - RET |
105 | | - |
106 | | -.globl GET32 |
107 | | -GET32: |
108 | | - LDR W0,[X0] |
109 | | - RET |
| 93 | + dsb ishst // ensure write has completed |
| 94 | + ldr x0, [x0] // load va from x0 (value unused by vmalle1 below) |
| 95 | + tlbi vmalle1 // invalidate all stage 1 el1 tlb entries |
| 96 | + dsb ish // ensure completion of tlb invalidation |
| 97 | + isb // synchronize context and ensure that no instructions |
| 98 | + // are fetched using the old translation |
| 99 | + ret |
| 100 | + |
| 101 | +.globl put32 |
| 102 | +put32: |
| 103 | + str w1,[x0] |
| 104 | + ret |
| 105 | + |
| 106 | +.globl get32 |
| 107 | +get32: |
| 108 | + ldr w0,[x0] |
| 109 | + ret |
110 | 110 |
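put32 and get32 are the usual raw mmio accessors: under the aapcs64 calling convention the address arrives in x0, the store value in w1, and the load result returns in w0. Hedged C-side declarations (the matching header is assumed, not shown in this commit):

    void put32(unsigned long addr, unsigned int val);   /* str w1, [x0] */
    unsigned int get32(unsigned long addr);             /* ldr w0, [x0] */

Routing device accesses through these opaque calls keeps the compiler from reordering or coalescing them, much as a volatile pointer dereference would.
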
111 | 111 | .globl dummy |
112 | 112 | dummy: |
113 | | - RET |
| 113 | + ret |
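dummy is an empty function; because the compiler cannot see through the external call, it is commonly used in bare-metal code like this as a crude delay. A hedged usage sketch:

    extern void dummy(void);
    static void delay(int n) {
        while (n--)
            dummy();   /* opaque call, so the loop is not optimized away */
    }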