+// Copyright © 2020, Oracle and/or its affiliates.
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
@@ -11,6 +12,7 @@ use kvm_bindings::{kvm_fpu, kvm_regs, kvm_sregs};
use kvm_ioctls::VcpuFd;
use vm_memory::{Address, Bytes, GuestAddress, GuestMemory, GuestMemoryMmap};

+use super::super::{BootProtocol, EntryPoint};
use super::gdt::{gdt_entry, kvm_segment_from_gdt};

// Initial pagetables.
@@ -100,20 +102,33 @@ impl fmt::Display for SetupRegistersError {
/// # Errors
///
/// When [`kvm_ioctls::ioctls::vcpu::VcpuFd::set_regs`] errors.
-pub fn setup_regs(vcpu: &VcpuFd, boot_ip: u64) -> std::result::Result<(), SetupRegistersError> {
-    let regs: kvm_regs = kvm_regs {
-        rflags: 0x0000_0000_0000_0002u64,
-        rip: boot_ip,
-        // Frame pointer. It gets a snapshot of the stack pointer (rsp) so that when adjustments are
-        // made to rsp (i.e. reserving space for local variables or pushing values on to the stack),
-        // local variables and function parameters are still accessible from a constant offset from
-        // rbp.
-        rsp: super::layout::BOOT_STACK_POINTER,
-        // Starting stack pointer.
-        rbp: super::layout::BOOT_STACK_POINTER,
-        // Must point to zero page address per Linux ABI. This is x86_64 specific.
-        rsi: super::layout::ZERO_PAGE_START,
-        ..Default::default()
+pub fn setup_regs(
+    vcpu: &VcpuFd,
+    entry_point: EntryPoint,
+) -> std::result::Result<(), SetupRegistersError> {
+    let regs: kvm_regs = match entry_point.protocol {
+        BootProtocol::PvhBoot => kvm_regs {
+            // Configure regs as required by PVH boot protocol.
+            rflags: 0x0000_0000_0000_0002u64,
+            rbx: super::layout::PVH_INFO_START,
+            rip: entry_point.entry_addr.raw_value(),
+            ..Default::default()
+        },
+        BootProtocol::LinuxBoot => kvm_regs {
+            // Configure regs as required by Linux 64-bit boot protocol.
+            rflags: 0x0000_0000_0000_0002u64,
+            rip: entry_point.entry_addr.raw_value(),
+            // Frame pointer. It gets a snapshot of the stack pointer (rsp) so that when adjustments
+            // are made to rsp (i.e. reserving space for local variables or pushing
+            // values on to the stack), local variables and function parameters are
+            // still accessible from a constant offset from rbp.
+            rsp: super::layout::BOOT_STACK_POINTER,
+            // Starting stack pointer.
+            rbp: super::layout::BOOT_STACK_POINTER,
+            // Must point to zero page address per Linux ABI. This is x86_64 specific.
+            rsi: super::layout::ZERO_PAGE_START,
+            ..Default::default()
+        },
    };

    vcpu.set_regs(&regs).map_err(SetupRegistersError)
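
For context, a minimal caller sketch (not part of this patch; the entry address and the `?` error propagation are purely illustrative) showing how the new signature is driven once the loader has produced an `EntryPoint`:

    // Hypothetical call site: program the general-purpose registers of a
    // freshly created vCPU for a guest booting through the PVH entry point.
    let entry_point = EntryPoint {
        entry_addr: GuestAddress(0x100_0000), // illustrative kernel entry address
        protocol: BootProtocol::PvhBoot,
    };
    setup_regs(&vcpu, entry_point)?;
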
@@ -142,6 +157,7 @@ pub enum SetupSpecialRegistersError {
///
/// * `mem` - The memory that will be passed to the guest.
/// * `vcpu` - Structure for the VCPU that holds the VCPU's fd.
+/// * `boot_prot` - The boot protocol being used.
///
/// # Errors
///
@@ -153,14 +169,18 @@ pub enum SetupSpecialRegistersError {
pub fn setup_sregs(
    mem: &GuestMemoryMmap,
    vcpu: &VcpuFd,
+    boot_prot: BootProtocol,
) -> std::result::Result<(), SetupSpecialRegistersError> {
    let mut sregs: kvm_sregs = vcpu
        .get_sregs()
        .map_err(SetupSpecialRegistersError::GetSpecialRegisters)?;

-    configure_segments_and_sregs(mem, &mut sregs)
+    configure_segments_and_sregs(mem, &mut sregs, boot_prot)
        .map_err(SetupSpecialRegistersError::ConfigureSegmentsAndSpecialRegisters)?;
-    setup_page_tables(mem, &mut sregs).map_err(SetupSpecialRegistersError::SetupPageTables)?; // TODO(dgreid) - Can this be done once per system instead?
+    if let BootProtocol::LinuxBoot = boot_prot {
+        setup_page_tables(mem, &mut sregs).map_err(SetupSpecialRegistersError::SetupPageTables)?;
+        // TODO(dgreid) - Can this be done once per system instead?
+    }

    vcpu.set_sregs(&sregs)
        .map_err(SetupSpecialRegistersError::SetSpecialRegisters)
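
A corresponding sketch of the call path (hypothetical; `guest_memory` and `entry_point` stand for values produced by the existing vCPU setup code): the protocol recorded in the `EntryPoint` is forwarded unchanged, so the identity-mapped boot page tables are only written for the Linux 64-bit path, while a PVH guest starts with paging disabled and builds its own.

    // Hypothetical call site: forward the boot protocol chosen by the loader.
    setup_sregs(&guest_memory, &vcpu, entry_point.protocol)?;
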
@@ -175,6 +195,7 @@ const EFER_LMA: u64 = 0x400;
const EFER_LME: u64 = 0x100;

const X86_CR0_PE: u64 = 0x1;
+const X86_CR0_ET: u64 = 0x10;
const X86_CR0_PG: u64 = 0x8000_0000;
const X86_CR4_PAE: u64 = 0x20;

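As a reading aid (not part of the patch), the control-register state each protocol ends up with, expressed with the constants above; the Linux values assume `setup_page_tables` keeps its pre-existing behavior of setting CR3, CR4.PAE and CR0.PG, which this diff does not show:

    // Illustrative constants, not present in the source.
    const PVH_BOOT_CR0: u64 = X86_CR0_PE | X86_CR0_ET; // 32-bit protected mode, paging off
    const LINUX_BOOT_CR0: u64 = X86_CR0_PE | X86_CR0_PG; // PG set later by setup_page_tables
    const LINUX_BOOT_EFER: u64 = EFER_LME | EFER_LMA; // long mode enabled and active
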
@@ -198,13 +219,31 @@ fn write_idt_value(val: u64, guest_mem: &GuestMemoryMmap) -> Result<()> {
        .map_err(|_| Error::WriteIDT)
}

-fn configure_segments_and_sregs(mem: &GuestMemoryMmap, sregs: &mut kvm_sregs) -> Result<()> {
-    let gdt_table: [u64; BOOT_GDT_MAX] = [
-        gdt_entry(0, 0, 0),            // NULL
-        gdt_entry(0xa09b, 0, 0xfffff), // CODE
-        gdt_entry(0xc093, 0, 0xfffff), // DATA
-        gdt_entry(0x808b, 0, 0xfffff), // TSS
-    ];
+fn configure_segments_and_sregs(
+    mem: &GuestMemoryMmap,
+    sregs: &mut kvm_sregs,
+    boot_prot: BootProtocol,
+) -> Result<()> {
+    let gdt_table: [u64; BOOT_GDT_MAX] = match boot_prot {
+        BootProtocol::PvhBoot => {
+            // Configure GDT entries as specified by the PVH boot protocol.
+            [
+                gdt_entry(0, 0, 0),                // NULL
+                gdt_entry(0xc09b, 0, 0xffff_ffff), // CODE
+                gdt_entry(0xc093, 0, 0xffff_ffff), // DATA
+                gdt_entry(0x008b, 0, 0x67),        // TSS
+            ]
+        }
+        BootProtocol::LinuxBoot => {
+            // Configure GDT entries as specified by the Linux 64-bit boot protocol.
+            [
+                gdt_entry(0, 0, 0),            // NULL
+                gdt_entry(0xa09b, 0, 0xfffff), // CODE
+                gdt_entry(0xc093, 0, 0xfffff), // DATA
+                gdt_entry(0x808b, 0, 0xfffff), // TSS
+            ]
+        }
+    };

    let code_seg = kvm_segment_from_gdt(gdt_table[1], 1);
    let data_seg = kvm_segment_from_gdt(gdt_table[2], 2);
@@ -227,9 +266,17 @@ fn configure_segments_and_sregs(mem: &GuestMemoryMmap, sregs: &mut kvm_sregs) ->
    sregs.ss = data_seg;
    sregs.tr = tss_seg;

-    // 64-bit protected mode
-    sregs.cr0 |= X86_CR0_PE;
-    sregs.efer |= EFER_LME | EFER_LMA;
+    match boot_prot {
+        BootProtocol::PvhBoot => {
+            sregs.cr0 = X86_CR0_PE | X86_CR0_ET;
+            sregs.cr4 = 0;
+        }
+        BootProtocol::LinuxBoot => {
+            // 64-bit protected mode
+            sregs.cr0 |= X86_CR0_PE;
+            sregs.efer |= EFER_LME | EFER_LMA;
+        }
+    }

    Ok(())
}
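
The raw descriptor images these tables produce can be cross-checked against the values asserted in `validate_segments_and_sregs` below. A hedged sketch, assuming `gdt_entry(flags, base, limit)` packs a standard x86 segment descriptor as its call sites suggest:

    // PVH CODE: flat 4 GiB 32-bit code segment (access byte 0x9b; flags 0xc -> G=1, D/B=1).
    assert_eq!(gdt_entry(0xc09b, 0, 0xffff_ffff), 0x00cf_9b00_0000_ffff);
    // Linux CODE: 64-bit code segment (flags 0xa -> G=1, L=1).
    assert_eq!(gdt_entry(0xa09b, 0, 0xfffff), 0x00af_9b00_0000_ffff);
    // PVH TSS: byte-granular, limit 0x67 (a 104-byte 32-bit TSS).
    assert_eq!(gdt_entry(0x008b, 0, 0x67), 0x0000_8b00_0000_0067);
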
@@ -287,24 +334,45 @@ mod tests {
        gm.read_obj(read_addr).unwrap()
    }

-    fn validate_segments_and_sregs(gm: &GuestMemoryMmap, sregs: &kvm_sregs) {
+    fn validate_segments_and_sregs(
+        gm: &GuestMemoryMmap,
+        sregs: &kvm_sregs,
+        boot_prot: BootProtocol,
+    ) {
+        if let BootProtocol::LinuxBoot = boot_prot {
+            assert_eq!(0xaf_9b00_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 8));
+            assert_eq!(0xcf_9300_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 16));
+            assert_eq!(0x8f_8b00_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 24));
+
+            assert_eq!(0xffff_ffff, sregs.tr.limit);
+
+            assert!(sregs.cr0 & X86_CR0_PE != 0);
+            assert!(sregs.efer & EFER_LME != 0 && sregs.efer & EFER_LMA != 0);
+        } else {
+            // Validate values that are specific to the PVH boot protocol.
+            assert_eq!(0xcf_9b00_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 8));
+            assert_eq!(0xcf_9300_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 16));
+            assert_eq!(0x00_8b00_0000_0067, read_u64(gm, BOOT_GDT_OFFSET + 24));
+
+            assert_eq!(0x67, sregs.tr.limit);
+            assert_eq!(0, sregs.tr.g);
+
+            assert!(sregs.cr0 & X86_CR0_PE != 0 && sregs.cr0 & X86_CR0_ET != 0);
+            assert_eq!(0, sregs.cr4);
+        }
+
+        // Common settings for both the PVH and Linux boot protocols.
        assert_eq!(0x0, read_u64(gm, BOOT_GDT_OFFSET));
-        assert_eq!(0xaf_9b00_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 8));
-        assert_eq!(0xcf_9300_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 16));
-        assert_eq!(0x8f_8b00_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 24));
        assert_eq!(0x0, read_u64(gm, BOOT_IDT_OFFSET));

        assert_eq!(0, sregs.cs.base);
-        assert_eq!(0xfffff, sregs.ds.limit);
+        assert_eq!(0xffff_ffff, sregs.ds.limit);
        assert_eq!(0x10, sregs.es.selector);
        assert_eq!(1, sregs.fs.present);
        assert_eq!(1, sregs.gs.g);
        assert_eq!(0, sregs.ss.avl);
        assert_eq!(0, sregs.tr.base);
-        assert_eq!(0xfffff, sregs.tr.limit);
        assert_eq!(0, sregs.tr.avl);
-        assert!(sregs.cr0 & X86_CR0_PE != 0);
-        assert!(sregs.efer & EFER_LME != 0 && sregs.efer & EFER_LMA != 0);
    }

    fn validate_page_tables(gm: &GuestMemoryMmap, sregs: &kvm_sregs) {
@@ -356,7 +424,12 @@ mod tests {
            ..Default::default()
        };

-        setup_regs(&vcpu, expected_regs.rip).unwrap();
+        let entry_point: EntryPoint = EntryPoint {
+            entry_addr: GuestAddress(expected_regs.rip),
+            protocol: BootProtocol::LinuxBoot,
+        };
+
+        setup_regs(&vcpu, entry_point).unwrap();

        let actual_regs: kvm_regs = vcpu.get_regs().unwrap();
        assert_eq!(actual_regs, expected_regs);
@@ -369,16 +442,22 @@ mod tests {
        let vcpu = vm.create_vcpu(0).unwrap();
        let gm = create_guest_mem(None);

-        assert!(vcpu.set_sregs(&Default::default()).is_ok());
-        setup_sregs(&gm, &vcpu).unwrap();
-
-        let mut sregs: kvm_sregs = vcpu.get_sregs().unwrap();
-        // for AMD KVM_GET_SREGS returns g = 0 for each kvm_segment.
-        // We set it to 1, otherwise the test will fail.
-        sregs.gs.g = 1;
-
-        validate_segments_and_sregs(&gm, &sregs);
-        validate_page_tables(&gm, &sregs);
+        [BootProtocol::LinuxBoot, BootProtocol::PvhBoot]
+            .iter()
+            .for_each(|boot_prot| {
+                assert!(vcpu.set_sregs(&Default::default()).is_ok());
+                setup_sregs(&gm, &vcpu, *boot_prot).unwrap();
+
+                let mut sregs: kvm_sregs = vcpu.get_sregs().unwrap();
+                // On AMD, KVM_GET_SREGS returns g = 0 for each kvm_segment.
+                // We set it to 1, otherwise the test will fail.
+                sregs.gs.g = 1;
+
+                validate_segments_and_sregs(&gm, &sregs, *boot_prot);
+                if let BootProtocol::LinuxBoot = *boot_prot {
+                    validate_page_tables(&gm, &sregs);
+                }
+            });
    }

    #[test]
@@ -423,9 +502,13 @@ mod tests {
    fn test_configure_segments_and_sregs() {
        let mut sregs: kvm_sregs = Default::default();
        let gm = create_guest_mem(None);
-        configure_segments_and_sregs(&gm, &mut sregs).unwrap();
+        configure_segments_and_sregs(&gm, &mut sregs, BootProtocol::LinuxBoot).unwrap();
+
+        validate_segments_and_sregs(&gm, &sregs, BootProtocol::LinuxBoot);
+
+        configure_segments_and_sregs(&gm, &mut sregs, BootProtocol::PvhBoot).unwrap();

-        validate_segments_and_sregs(&gm, &sregs);
+        validate_segments_and_sregs(&gm, &sregs, BootProtocol::PvhBoot);
    }

    #[test]