
Commit 6e8ceaf

cperciva and aljimenezb committed
pvh/arch-x86_64: Initialize vCPU regs for PVH
Set the initial values of the KVM vCPU registers as specified in the PVH boot ABI:
https://xenbits.xen.org/docs/unstable/misc/pvh.html

Signed-off-by: Colin Percival <[email protected]>
Co-authored-by: Alejandro Jimenez <[email protected]>
1 parent: b389e5f · commit: 6e8ceaf

7 files changed, +197 -67 lines changed


src/arch/src/x86_64/gdt.rs

Lines changed: 30 additions & 2 deletions
@@ -1,3 +1,5 @@
+// Copyright © 2020, Oracle and/or its affiliates.
+//
 // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 // SPDX-License-Identifier: Apache-2.0
 //
@@ -24,8 +26,34 @@ fn get_base(entry: u64) -> u64 {
         | (((entry) & 0x0000_0000_FFFF_0000) >> 16)
 }
 
+// Extract the segment limit from the GDT segment descriptor.
+//
+// In a segment descriptor, the limit field is 20 bits, so it can directly describe
+// a range from 0 to 0xFFFFF (1 MB). When G flag is set (4-KByte page granularity) it
+// scales the value in the limit field by a factor of 2^12 (4 Kbytes), making the effective
+// limit range from 0xFFF (4 KBytes) to 0xFFFF_FFFF (4 GBytes).
+//
+// However, the limit field in the VMCS definition is a 32 bit field, and the limit value is not
+// automatically scaled using the G flag. This means that for a desired range of 4GB for a
+// given segment, its limit must be specified as 0xFFFF_FFFF. Therefore the method of obtaining
+// the limit from the GDT entry is not sufficient, since it only provides 20 bits when 32 bits
+// are necessary. Fortunately, we can check if the G flag is set when extracting the limit since
+// the full GDT entry is passed as an argument, and perform the scaling of the limit value to
+// return the full 32 bit value.
+//
+// The scaling mentioned above is required when using PVH boot, since the guest boots in protected
+// (32-bit) mode and must be able to access the entire 32-bit address space. It does not cause
+// issues for the case of direct boot to 64-bit (long) mode, since in 64-bit mode the processor does
+// not perform runtime limit checking on code or data segments.
 fn get_limit(entry: u64) -> u32 {
-    ((((entry) & 0x000F_0000_0000_0000) >> 32) | ((entry) & 0x0000_0000_0000_FFFF)) as u32
+    let limit: u32 =
+        ((((entry) & 0x000F_0000_0000_0000) >> 32) | ((entry) & 0x0000_0000_0000_FFFF)) as u32;
+
+    // Perform manual limit scaling if G flag is set
+    match get_g(entry) {
+        0 => limit,
+        _ => (limit << 12) | 0xFFF, // G flag is either 0 or 1
+    }
 }
 
 fn get_g(entry: u64) -> u8 {
@@ -109,7 +137,7 @@ mod tests {
         assert_eq!(0xB, seg.type_);
         // base and limit
         assert_eq!(0x10_0000, seg.base);
-        assert_eq!(0xfffff, seg.limit);
+        assert_eq!(0xffff_ffff, seg.limit);
         assert_eq!(0x0, seg.unusable);
     }
 }
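
Note (illustrative, not part of the commit): the G-flag scaling performed by the new get_limit() can be reproduced on the flat 4 GiB PVH CODE descriptor that the updated tests expect (0xcf_9b00_0000_ffff). A minimal standalone sketch in Rust, relying only on the standard x86 descriptor layout:

    // Illustrative sketch: reproduces the limit scaling done by the new get_limit()
    // for the flat 4 GiB PVH CODE descriptor (the value the updated test expects at
    // BOOT_GDT_OFFSET + 8).
    fn main() {
        let entry: u64 = 0x00cf_9b00_0000_ffff;

        // Raw 20-bit limit: bits 0..=15 of the descriptor plus bits 48..=51.
        let raw_limit =
            (((entry & 0x000F_0000_0000_0000) >> 32) | (entry & 0x0000_0000_0000_FFFF)) as u32;
        assert_eq!(0xfffff, raw_limit);

        // The G flag (bit 55) is set, so the limit is counted in 4 KiB pages; scale to bytes.
        let g = (entry & 0x0080_0000_0000_0000) >> 55;
        let limit = if g == 0 { raw_limit } else { (raw_limit << 12) | 0xFFF };
        assert_eq!(0xffff_ffff, limit);

        println!("effective segment limit = {:#010x}", limit);
    }

The stored 20-bit limit is 0xfffff; with G set it scales to 0xffff_ffff, which is exactly what the 32-bit limit field of the vCPU segment state needs for a 4 GiB segment.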

src/arch/src/x86_64/layout.rs

Lines changed: 3 additions & 0 deletions
@@ -27,5 +27,8 @@ pub const IRQ_MAX: u32 = 23;
 /// Address for the TSS setup.
 pub const KVM_TSS_ADDRESS: u64 = 0xfffb_d000;
 
+/// Address of the hvm_start_info struct used in PVH boot
+pub const PVH_INFO_START: u64 = 0x6000;
+
 /// The 'zero page', a.k.a linux kernel bootparams.
 pub const ZERO_PAGE_START: u64 = 0x7000;
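
Note (illustrative, not part of the commit): PVH_INFO_START sits one page below ZERO_PAGE_START, so the hvm_start_info page and the Linux zero page do not overlap, assuming the start_info data fits in a single 4 KiB page (an assumption of this sketch, not something the commit states):

    // Illustrative sketch: the PVH start_info page and the Linux zero page occupy
    // adjacent, non-overlapping 4 KiB pages (assuming hvm_start_info fits in one page).
    const PVH_INFO_START: u64 = 0x6000;
    const ZERO_PAGE_START: u64 = 0x7000;
    const PAGE_SIZE: u64 = 0x1000;

    fn main() {
        assert!(PVH_INFO_START + PAGE_SIZE <= ZERO_PAGE_START);
        println!(
            "start_info page: {:#x}..{:#x}",
            PVH_INFO_START,
            PVH_INFO_START + PAGE_SIZE
        );
    }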

src/arch/src/x86_64/regs.rs

Lines changed: 130 additions & 47 deletions
@@ -1,3 +1,4 @@
+// Copyright © 2020, Oracle and/or its affiliates.
 // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 // SPDX-License-Identifier: Apache-2.0
 //
@@ -11,6 +12,7 @@ use kvm_bindings::{kvm_fpu, kvm_regs, kvm_sregs};
 use kvm_ioctls::VcpuFd;
 use vm_memory::{Address, Bytes, GuestAddress, GuestMemory, GuestMemoryMmap};
 
+use super::super::{BootProtocol, EntryPoint};
 use super::gdt::{gdt_entry, kvm_segment_from_gdt};
 
 // Initial pagetables.
@@ -100,20 +102,33 @@ impl fmt::Display for SetupRegistersError {
 /// # Errors
 ///
 /// When [`kvm_ioctls::ioctls::vcpu::VcpuFd::set_regs`] errors.
-pub fn setup_regs(vcpu: &VcpuFd, boot_ip: u64) -> std::result::Result<(), SetupRegistersError> {
-    let regs: kvm_regs = kvm_regs {
-        rflags: 0x0000_0000_0000_0002u64,
-        rip: boot_ip,
-        // Frame pointer. It gets a snapshot of the stack pointer (rsp) so that when adjustments are
-        // made to rsp (i.e. reserving space for local variables or pushing values on to the stack),
-        // local variables and function parameters are still accessible from a constant offset from
-        // rbp.
-        rsp: super::layout::BOOT_STACK_POINTER,
-        // Starting stack pointer.
-        rbp: super::layout::BOOT_STACK_POINTER,
-        // Must point to zero page address per Linux ABI. This is x86_64 specific.
-        rsi: super::layout::ZERO_PAGE_START,
-        ..Default::default()
+pub fn setup_regs(
+    vcpu: &VcpuFd,
+    entry_point: EntryPoint,
+) -> std::result::Result<(), SetupRegistersError> {
+    let regs: kvm_regs = match entry_point.protocol {
+        BootProtocol::PvhBoot => kvm_regs {
+            // Configure regs as required by PVH boot protocol.
+            rflags: 0x0000_0000_0000_0002u64,
+            rbx: super::layout::PVH_INFO_START,
+            rip: entry_point.entry_addr.raw_value(),
+            ..Default::default()
+        },
+        BootProtocol::LinuxBoot => kvm_regs {
+            // Configure regs as required by Linux 64-bit boot protocol.
+            rflags: 0x0000_0000_0000_0002u64,
+            rip: entry_point.entry_addr.raw_value(),
+            // Frame pointer. It gets a snapshot of the stack pointer (rsp) so that when adjustments
+            // are made to rsp (i.e. reserving space for local variables or pushing
+            // values on to the stack), local variables and function parameters are
+            // still accessible from a constant offset from rbp.
+            rsp: super::layout::BOOT_STACK_POINTER,
+            // Starting stack pointer.
+            rbp: super::layout::BOOT_STACK_POINTER,
+            // Must point to zero page address per Linux ABI. This is x86_64 specific.
+            rsi: super::layout::ZERO_PAGE_START,
+            ..Default::default()
+        },
     };
 
     vcpu.set_regs(&regs).map_err(SetupRegistersError)
@@ -142,6 +157,7 @@ pub enum SetupSpecialRegistersError {
 ///
 /// * `mem` - The memory that will be passed to the guest.
 /// * `vcpu` - Structure for the VCPU that holds the VCPU's fd.
+/// * `boot_prot` - The boot protocol being used.
 ///
 /// # Errors
 ///
@@ -153,14 +169,18 @@ pub enum SetupSpecialRegistersError {
 pub fn setup_sregs(
     mem: &GuestMemoryMmap,
     vcpu: &VcpuFd,
+    boot_prot: BootProtocol,
 ) -> std::result::Result<(), SetupSpecialRegistersError> {
     let mut sregs: kvm_sregs = vcpu
         .get_sregs()
         .map_err(SetupSpecialRegistersError::GetSpecialRegisters)?;
 
-    configure_segments_and_sregs(mem, &mut sregs)
+    configure_segments_and_sregs(mem, &mut sregs, boot_prot)
         .map_err(SetupSpecialRegistersError::ConfigureSegmentsAndSpecialRegisters)?;
-    setup_page_tables(mem, &mut sregs).map_err(SetupSpecialRegistersError::SetupPageTables)?; // TODO(dgreid) - Can this be done once per system instead?
+    if let BootProtocol::LinuxBoot = boot_prot {
+        setup_page_tables(mem, &mut sregs).map_err(SetupSpecialRegistersError::SetupPageTables)?;
+        // TODO(dgreid) - Can this be done once per system instead?
+    }
 
     vcpu.set_sregs(&sregs)
         .map_err(SetupSpecialRegistersError::SetSpecialRegisters)
@@ -175,6 +195,7 @@ const EFER_LMA: u64 = 0x400;
 const EFER_LME: u64 = 0x100;
 
 const X86_CR0_PE: u64 = 0x1;
+const X86_CR0_ET: u64 = 0x10;
 const X86_CR0_PG: u64 = 0x8000_0000;
 const X86_CR4_PAE: u64 = 0x20;
 
@@ -198,13 +219,31 @@ fn write_idt_value(val: u64, guest_mem: &GuestMemoryMmap) -> Result<()> {
         .map_err(|_| Error::WriteIDT)
 }
 
-fn configure_segments_and_sregs(mem: &GuestMemoryMmap, sregs: &mut kvm_sregs) -> Result<()> {
-    let gdt_table: [u64; BOOT_GDT_MAX] = [
-        gdt_entry(0, 0, 0),            // NULL
-        gdt_entry(0xa09b, 0, 0xfffff), // CODE
-        gdt_entry(0xc093, 0, 0xfffff), // DATA
-        gdt_entry(0x808b, 0, 0xfffff), // TSS
-    ];
+fn configure_segments_and_sregs(
+    mem: &GuestMemoryMmap,
+    sregs: &mut kvm_sregs,
+    boot_prot: BootProtocol,
+) -> Result<()> {
+    let gdt_table: [u64; BOOT_GDT_MAX] = match boot_prot {
+        BootProtocol::PvhBoot => {
+            // Configure GDT entries as specified by PVH boot protocol
+            [
+                gdt_entry(0, 0, 0),                // NULL
+                gdt_entry(0xc09b, 0, 0xffff_ffff), // CODE
+                gdt_entry(0xc093, 0, 0xffff_ffff), // DATA
+                gdt_entry(0x008b, 0, 0x67),        // TSS
+            ]
+        }
+        BootProtocol::LinuxBoot => {
+            // Configure GDT entries as specified by Linux 64bit boot protocol
+            [
+                gdt_entry(0, 0, 0),            // NULL
+                gdt_entry(0xa09b, 0, 0xfffff), // CODE
+                gdt_entry(0xc093, 0, 0xfffff), // DATA
+                gdt_entry(0x808b, 0, 0xfffff), // TSS
+            ]
+        }
+    };
 
     let code_seg = kvm_segment_from_gdt(gdt_table[1], 1);
     let data_seg = kvm_segment_from_gdt(gdt_table[2], 2);
@@ -227,9 +266,17 @@ fn configure_segments_and_sregs(mem: &GuestMemoryMmap, sregs: &mut kvm_sregs) ->
     sregs.ss = data_seg;
     sregs.tr = tss_seg;
 
-    // 64-bit protected mode
-    sregs.cr0 |= X86_CR0_PE;
-    sregs.efer |= EFER_LME | EFER_LMA;
+    match boot_prot {
+        BootProtocol::PvhBoot => {
+            sregs.cr0 = X86_CR0_PE | X86_CR0_ET;
+            sregs.cr4 = 0;
+        }
+        BootProtocol::LinuxBoot => {
+            // 64-bit protected mode
+            sregs.cr0 |= X86_CR0_PE;
+            sregs.efer |= EFER_LME | EFER_LMA;
+        }
+    }
 
     Ok(())
 }
@@ -287,24 +334,45 @@ mod tests {
         gm.read_obj(read_addr).unwrap()
     }
 
-    fn validate_segments_and_sregs(gm: &GuestMemoryMmap, sregs: &kvm_sregs) {
+    fn validate_segments_and_sregs(
+        gm: &GuestMemoryMmap,
+        sregs: &kvm_sregs,
+        boot_prot: BootProtocol,
+    ) {
+        if let BootProtocol::LinuxBoot = boot_prot {
+            assert_eq!(0xaf_9b00_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 8));
+            assert_eq!(0xcf_9300_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 16));
+            assert_eq!(0x8f_8b00_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 24));
+
+            assert_eq!(0xffff_ffff, sregs.tr.limit);
+
+            assert!(sregs.cr0 & X86_CR0_PE != 0);
+            assert!(sregs.efer & EFER_LME != 0 && sregs.efer & EFER_LMA != 0);
+        } else {
+            // Validate values that are specific to PVH boot protocol
+            assert_eq!(0xcf_9b00_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 8));
+            assert_eq!(0xcf_9300_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 16));
+            assert_eq!(0x00_8b00_0000_0067, read_u64(gm, BOOT_GDT_OFFSET + 24));
+
+            assert_eq!(0x67, sregs.tr.limit);
+            assert_eq!(0, sregs.tr.g);
+
+            assert!(sregs.cr0 & X86_CR0_PE != 0 && sregs.cr0 & X86_CR0_ET != 0);
+            assert_eq!(0, sregs.cr4);
+        }
+
+        // Common settings for both PVH and Linux boot protocol
         assert_eq!(0x0, read_u64(gm, BOOT_GDT_OFFSET));
-        assert_eq!(0xaf_9b00_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 8));
-        assert_eq!(0xcf_9300_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 16));
-        assert_eq!(0x8f_8b00_0000_ffff, read_u64(gm, BOOT_GDT_OFFSET + 24));
        assert_eq!(0x0, read_u64(gm, BOOT_IDT_OFFSET));
 
        assert_eq!(0, sregs.cs.base);
-        assert_eq!(0xfffff, sregs.ds.limit);
+        assert_eq!(0xffff_ffff, sregs.ds.limit);
        assert_eq!(0x10, sregs.es.selector);
        assert_eq!(1, sregs.fs.present);
        assert_eq!(1, sregs.gs.g);
        assert_eq!(0, sregs.ss.avl);
        assert_eq!(0, sregs.tr.base);
-        assert_eq!(0xfffff, sregs.tr.limit);
        assert_eq!(0, sregs.tr.avl);
-        assert!(sregs.cr0 & X86_CR0_PE != 0);
-        assert!(sregs.efer & EFER_LME != 0 && sregs.efer & EFER_LMA != 0);
     }
 
     fn validate_page_tables(gm: &GuestMemoryMmap, sregs: &kvm_sregs) {
@@ -356,7 +424,12 @@ mod tests {
             ..Default::default()
         };
 
-        setup_regs(&vcpu, expected_regs.rip).unwrap();
+        let entry_point: EntryPoint = EntryPoint {
+            entry_addr: GuestAddress(expected_regs.rip),
+            protocol: BootProtocol::LinuxBoot,
+        };
+
+        setup_regs(&vcpu, entry_point).unwrap();
 
         let actual_regs: kvm_regs = vcpu.get_regs().unwrap();
         assert_eq!(actual_regs, expected_regs);
@@ -369,16 +442,22 @@ mod tests {
        let vcpu = vm.create_vcpu(0).unwrap();
        let gm = create_guest_mem(None);
 
-        assert!(vcpu.set_sregs(&Default::default()).is_ok());
-        setup_sregs(&gm, &vcpu).unwrap();
-
-        let mut sregs: kvm_sregs = vcpu.get_sregs().unwrap();
-        // for AMD KVM_GET_SREGS returns g = 0 for each kvm_segment.
-        // We set it to 1, otherwise the test will fail.
-        sregs.gs.g = 1;
-
-        validate_segments_and_sregs(&gm, &sregs);
-        validate_page_tables(&gm, &sregs);
+        [BootProtocol::LinuxBoot, BootProtocol::PvhBoot]
+            .iter()
+            .for_each(|boot_prot| {
+                assert!(vcpu.set_sregs(&Default::default()).is_ok());
+                setup_sregs(&gm, &vcpu, *boot_prot).unwrap();
+
+                let mut sregs: kvm_sregs = vcpu.get_sregs().unwrap();
+                // for AMD KVM_GET_SREGS returns g = 0 for each kvm_segment.
+                // We set it to 1, otherwise the test will fail.
+                sregs.gs.g = 1;
+
+                validate_segments_and_sregs(&gm, &sregs, *boot_prot);
+                if let BootProtocol::LinuxBoot = *boot_prot {
+                    validate_page_tables(&gm, &sregs);
+                }
+            });
     }
 
     #[test]
@@ -423,9 +502,13 @@ mod tests {
     fn test_configure_segments_and_sregs() {
        let mut sregs: kvm_sregs = Default::default();
        let gm = create_guest_mem(None);
-        configure_segments_and_sregs(&gm, &mut sregs).unwrap();
+        configure_segments_and_sregs(&gm, &mut sregs, BootProtocol::LinuxBoot).unwrap();
+
+        validate_segments_and_sregs(&gm, &sregs, BootProtocol::LinuxBoot);
+
+        configure_segments_and_sregs(&gm, &mut sregs, BootProtocol::PvhBoot).unwrap();
 
-        validate_segments_and_sregs(&gm, &sregs);
+        validate_segments_and_sregs(&gm, &sregs, BootProtocol::PvhBoot);
     }
 
     #[test]
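
Note (illustrative, not part of the commit): the PvhBoot arm of setup_regs() leaves the vCPU in the state the PVH boot ABI asks for: rbx holds the guest-physical address of hvm_start_info, rflags has only its always-set reserved bit 1, rip is the kernel's PVH entry point, and everything else stays zero. A self-contained sketch using the kvm-bindings kvm_regs type; the helper name and the entry address are arbitrary example values:

    use kvm_bindings::kvm_regs;

    const PVH_INFO_START: u64 = 0x6000;

    // Mirrors the PvhBoot arm of setup_regs() above: only rflags, rbx and rip carry
    // meaning for PVH; the remaining registers are left at their zeroed defaults.
    fn pvh_boot_regs(entry_addr: u64) -> kvm_regs {
        kvm_regs {
            rflags: 0x0000_0000_0000_0002u64,
            rbx: PVH_INFO_START,
            rip: entry_addr,
            ..Default::default()
        }
    }

    fn main() {
        // Arbitrary example entry address; the real one comes from the kernel's PVH ELF note.
        let regs = pvh_boot_regs(0x0100_0000);
        assert_eq!(PVH_INFO_START, regs.rbx);
        assert_eq!(0x2, regs.rflags);
        assert_eq!(0, regs.rsi); // unlike Linux boot, no zero-page pointer in rsi
    }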

src/vmm/src/builder.rs

Lines changed: 4 additions & 4 deletions
@@ -385,7 +385,7 @@ pub fn build_microvm_for_boot(
         &vmm,
         vcpus.as_mut(),
         vcpu_config,
-        entry_point.entry_addr,
+        entry_point,
         &initrd,
         boot_cmdline,
     )?;
@@ -849,7 +849,7 @@ pub fn configure_system_for_boot(
     vmm: &Vmm,
     vcpus: &mut [Vcpu],
     vcpu_config: VcpuConfig,
-    entry_addr: GuestAddress,
+    entry_point: EntryPoint,
     initrd: &Option<InitrdConfig>,
     boot_cmdline: LoaderKernelCmdline,
 ) -> std::result::Result<(), StartMicrovmError> {
@@ -860,7 +860,7 @@ pub fn configure_system_for_boot(
         vcpu.kvm_vcpu
             .configure(
                 vmm.guest_memory(),
-                entry_addr,
+                entry_point,
                 &vcpu_config,
                 vmm.vm.supported_cpuid().clone(),
             )
@@ -893,7 +893,7 @@ pub fn configure_system_for_boot(
     {
         for vcpu in vcpus.iter_mut() {
             vcpu.kvm_vcpu
-                .configure(vmm.guest_memory(), entry_addr)
+                .configure(vmm.guest_memory(), entry_point.entry_addr)
                 .map_err(Error::VcpuConfigure)
                 .map_err(Internal)?;
         }
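
Note (illustrative): the builder change is pure plumbing: configure_system_for_boot() now receives the whole EntryPoint, the x86_64 path forwards it to each vCPU, and the non-x86_64 path keeps using only entry_point.entry_addr. Since the value is passed by value once per vCPU in the loop above, the type presumably implements Copy. A hedged sketch of the assumed shape (the real definition lives in the arch crate and may differ):

    use vm_memory::{Address, GuestAddress};

    // Assumed definitions, inferred from how builder.rs and the regs.rs tests use them.
    #[derive(Clone, Copy, Debug)]
    pub enum BootProtocol {
        LinuxBoot,
        PvhBoot,
    }

    #[derive(Clone, Copy, Debug)]
    pub struct EntryPoint {
        pub entry_addr: GuestAddress,
        pub protocol: BootProtocol,
    }

    fn main() {
        // Example value only; the real entry address comes from the kernel loader.
        let entry_point = EntryPoint {
            entry_addr: GuestAddress(0x0100_0000),
            protocol: BootProtocol::PvhBoot,
        };

        // x86_64 consumes the whole struct; other targets only need the raw address.
        println!(
            "{:?} boot, entry at {:#x}",
            entry_point.protocol,
            entry_point.entry_addr.raw_value()
        );
    }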
