Commit 8da221c

Merge pull request #103 from tiala/secure_avic_ohcl
Linux AMD Secure AVIC function support for OpenHCL kernel
2 parents: f655e8e + 0c73b72

27 files changed: +866 additions, −81 deletions

Microsoft/x64-cvm.config

Lines changed: 1 addition & 0 deletions
@@ -3,5 +3,6 @@ CONFIG_VIRT_DRIVERS=y
 CONFIG_TDX_GUEST_DRIVER=y
 CONFIG_SEV_GUEST=y
 CONFIG_AMD_MEM_ENCRYPT=y
+CONFIG_AMD_SECURE_AVIC=y
 CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_LIB_AES=y

arch/x86/Kconfig

Lines changed: 13 additions & 0 deletions
@@ -473,6 +473,19 @@ config X86_X2APIC
 
       If you don't know what to do here, say N.
 
+config AMD_SECURE_AVIC
+    bool "AMD Secure AVIC"
+    depends on AMD_MEM_ENCRYPT && X86_X2APIC
+    help
+      Enable this to get AMD Secure AVIC support on guests that have this feature.
+
+      AMD Secure AVIC provides hardware acceleration for performance sensitive
+      APIC accesses and support for managing guest owned APIC state for SEV-SNP
+      guests. Secure AVIC does not support xapic mode. It has functional
+      dependency on x2apic being enabled in the guest.
+
+      If you don't know what to do here, say N.
+
 config X86_POSTED_MSI
     bool "Enable MSI and MSI-x delivery by posted interrupts"
     depends on X86_64 && IRQ_REMAP

arch/x86/boot/compressed/sev.c

Lines changed: 1 addition & 0 deletions
@@ -357,6 +357,7 @@ void do_boot_stage2_vc(struct pt_regs *regs, unsigned long exit_code)
                  MSR_AMD64_SNP_VMSA_REG_PROT |    \
                  MSR_AMD64_SNP_RESERVED_BIT13 |   \
                  MSR_AMD64_SNP_RESERVED_BIT15 |   \
+                 MSR_AMD64_SNP_SECURE_AVIC |      \
                  MSR_AMD64_SNP_RESERVED_MASK)
 
 /*

arch/x86/coco/core.c

Lines changed: 3 additions & 0 deletions
@@ -100,6 +100,9 @@ static bool noinstr amd_cc_platform_has(enum cc_attr attr)
     case CC_ATTR_HOST_SEV_SNP:
         return cc_flags.host_sev_snp;
 
+    case CC_ATTR_SNP_SECURE_AVIC:
+        return sev_status & MSR_AMD64_SNP_SECURE_AVIC;
+
     default:
         return false;
     }
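
Note: with the new CC_ATTR_SNP_SECURE_AVIC attribute wired into amd_cc_platform_has(), other code in this series can gate APIC-related setup on it at runtime. A minimal sketch of that call pattern, assuming kernel context (the function name below is hypothetical; the real callers appear in the Hyper-V hunks further down):

#include <linux/cc_platform.h>

/* Hypothetical caller: skip paravirtualized APIC setup when the guest
 * owns its APIC state via Secure AVIC. */
static void example_apic_setup(void)
{
    if (cc_platform_has(CC_ATTR_SNP_SECURE_AVIC))
        return;

    /* ... hypercall-based APIC acceleration would be configured here ... */
}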

arch/x86/coco/sev/core.c

Lines changed: 113 additions & 9 deletions
@@ -78,6 +78,7 @@ static const char * const sev_status_feat_names[] = {
     [MSR_AMD64_SNP_IBS_VIRT_BIT]      = "IBSVirt",
     [MSR_AMD64_SNP_VMSA_REG_PROT_BIT] = "VMSARegProt",
     [MSR_AMD64_SNP_SMT_PROT_BIT]      = "SMTProt",
+    [MSR_AMD64_SNP_SECURE_AVIC_BIT]   = "SecureAVIC",
 };
 
 /* For early boot hypervisor communication in SEV-ES enabled guests */

@@ -1181,6 +1182,9 @@ static int wakeup_cpu_via_vmgexit(u32 apic_id, unsigned long start_ip, unsigned
     vmsa->x87_ftw = AP_INIT_X87_FTW_DEFAULT;
     vmsa->x87_fcw = AP_INIT_X87_FCW_DEFAULT;
 
+    if (cc_platform_has(CC_ATTR_SNP_SECURE_AVIC))
+        vmsa->vintr_ctrl |= (V_GIF_MASK | V_NMI_ENABLE_MASK);
+
     /* SVME must be set. */
     vmsa->efer = EFER_SVME;
 

@@ -1322,18 +1326,14 @@ int __init sev_es_efi_map_ghcbs(pgd_t *pgd)
     return 0;
 }
 
-static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+static enum es_result sev_es_ghcb_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt, bool write)
 {
     struct pt_regs *regs = ctxt->regs;
     enum es_result ret;
-    u64 exit_info_1;
-
-    /* Is it a WRMSR? */
-    exit_info_1 = (ctxt->insn.opcode.bytes[1] == 0x30) ? 1 : 0;
 
     if (regs->cx == MSR_SVSM_CAA) {
         /* Writes to the SVSM CAA msr are ignored */
-        if (exit_info_1)
+        if (write)
             return ES_OK;
 
         regs->ax = lower_32_bits(this_cpu_read(svsm_caa_pa));

@@ -1343,21 +1343,26 @@ static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
     }
 
     ghcb_set_rcx(ghcb, regs->cx);
-    if (exit_info_1) {
+    if (write) {
         ghcb_set_rax(ghcb, regs->ax);
         ghcb_set_rdx(ghcb, regs->dx);
     }
 
-    ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, exit_info_1, 0);
+    ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, write, 0);
 
-    if ((ret == ES_OK) && (!exit_info_1)) {
+    if ((ret == ES_OK) && (!write)) {
         regs->ax = ghcb->save.rax;
         regs->dx = ghcb->save.rdx;
     }
 
     return ret;
 }
 
+static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+{
+    return sev_es_ghcb_handle_msr(ghcb, ctxt, ctxt->insn.opcode.bytes[1] == 0x30);
+}
+
 static void snp_register_per_cpu_ghcb(void)
 {
     struct sev_es_runtime_data *data;

@@ -2066,6 +2071,105 @@ static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_co
     return ret;
 }
 
+u64 savic_ghcb_msr_read(u32 reg)
+{
+    u64 msr = APIC_BASE_MSR + (reg >> 4);
+    struct pt_regs regs = { .cx = msr };
+    struct es_em_ctxt ctxt = { .regs = &regs };
+    struct ghcb_state state;
+    enum es_result res;
+    struct ghcb *ghcb;
+
+    guard(irqsave)();
+
+    ghcb = __sev_get_ghcb(&state);
+    vc_ghcb_invalidate(ghcb);
+
+    res = sev_es_ghcb_handle_msr(ghcb, &ctxt, false);
+    if (res != ES_OK) {
+        pr_err("Secure AVIC msr (0x%llx) read returned error (%d)\n", msr, res);
+        /* MSR read failures are treated as fatal errors */
+        snp_abort();
+    }
+
+    __sev_put_ghcb(&state);
+
+    return regs.ax | regs.dx << 32;
+}
+
+void savic_ghcb_msr_write(u32 reg, u64 value)
+{
+    u64 msr = APIC_BASE_MSR + (reg >> 4);
+    struct pt_regs regs = {
+        .cx = msr,
+        .ax = lower_32_bits(value),
+        .dx = upper_32_bits(value)
+    };
+    struct es_em_ctxt ctxt = { .regs = &regs };
+    struct ghcb_state state;
+    enum es_result res;
+    struct ghcb *ghcb;
+
+    guard(irqsave)();
+
+    ghcb = __sev_get_ghcb(&state);
+    vc_ghcb_invalidate(ghcb);
+
+    res = sev_es_ghcb_handle_msr(ghcb, &ctxt, true);
+    if (res != ES_OK) {
+        pr_err("Secure AVIC msr (0x%llx) write returned error (%d)\n", msr, res);
+        /* MSR writes should never fail. Any failure is fatal error for SNP guest */
+        snp_abort();
+    }
+
+    __sev_put_ghcb(&state);
+}
+
+enum es_result savic_register_gpa(u64 gpa)
+{
+    struct ghcb_state state;
+    struct es_em_ctxt ctxt;
+    enum es_result res;
+    struct ghcb *ghcb;
+
+    guard(irqsave)();
+
+    ghcb = __sev_get_ghcb(&state);
+    vc_ghcb_invalidate(ghcb);
+
+    ghcb_set_rax(ghcb, SVM_VMGEXIT_SAVIC_SELF_GPA);
+    ghcb_set_rbx(ghcb, gpa);
+    res = sev_es_ghcb_hv_call(ghcb, &ctxt, SVM_VMGEXIT_SAVIC,
+                              SVM_VMGEXIT_SAVIC_REGISTER_GPA, 0);
+
+    __sev_put_ghcb(&state);
+
+    return res;
+}
+
+enum es_result savic_unregister_gpa(u64 *gpa)
+{
+    struct ghcb_state state;
+    struct es_em_ctxt ctxt;
+    enum es_result res;
+    struct ghcb *ghcb;
+
+    guard(irqsave)();
+
+    ghcb = __sev_get_ghcb(&state);
+    vc_ghcb_invalidate(ghcb);
+
+    ghcb_set_rax(ghcb, SVM_VMGEXIT_SAVIC_SELF_GPA);
+    res = sev_es_ghcb_hv_call(ghcb, &ctxt, SVM_VMGEXIT_SAVIC,
+                              SVM_VMGEXIT_SAVIC_UNREGISTER_GPA, 0);
+    if (gpa && res == ES_OK)
+        *gpa = ghcb->save.rbx;
+
+    __sev_put_ghcb(&state);
+
+    return res;
+}
+
 static __always_inline bool vc_is_db(unsigned long error_code)
 {
     return error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB;
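
Note: the four helpers above form the guest-side interface an APIC driver could build on: savic_register_gpa()/savic_unregister_gpa() tell the hypervisor which guest page backs this vCPU's APIC state, while savic_ghcb_msr_read()/savic_ghcb_msr_write() fall back to the GHCB MSR protocol for registers that stay hypervisor-emulated. A rough usage sketch, assuming kernel context; the wrapper function and the choice of register taking the GHCB path are illustrative, not taken from this diff:

/* Hypothetical per-CPU setup: 'backing_page' is assumed to be a
 * page-aligned, suitably shared allocation owned by the APIC driver. */
static int example_savic_cpu_init(void *backing_page)
{
    if (savic_register_gpa(__pa(backing_page)) != ES_OK)
        return -ENODEV;

    /* A read of a host-emulated register goes out via the GHCB MSR protocol;
     * 0x20 is the APIC ID register offset, used here purely for illustration. */
    pr_debug("APIC ID: 0x%llx\n", savic_ghcb_msr_read(0x20));

    return 0;
}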

arch/x86/hyperv/hv_apic.c

Lines changed: 9 additions & 0 deletions
@@ -26,6 +26,7 @@
 #include <linux/hyperv.h>
 #include <linux/slab.h>
 #include <linux/cpuhotplug.h>
+#include <linux/cc_platform.h>
 #include <asm/hypervisor.h>
 #include <asm/mshyperv.h>
 #include <asm/apic.h>

@@ -53,6 +54,11 @@ static void hv_apic_icr_write(u32 low, u32 id)
     wrmsrl(HV_X64_MSR_ICR, reg_val);
 }
 
+void hv_enable_coco_interrupt(unsigned int cpu, unsigned int vector, bool set)
+{
+    apic_update_vector(cpu, vector, set);
+}
+
 static u32 hv_apic_read(u32 reg)
 {
     u32 reg_val, hi;

@@ -288,6 +294,9 @@ static void hv_send_ipi_self(int vector)
 
 void __init hv_apic_init(void)
 {
+    if (cc_platform_has(CC_ATTR_SNP_SECURE_AVIC))
+        return;
+
     if (ms_hyperv.hints & HV_X64_CLUSTER_IPI_RECOMMENDED) {
         pr_info("Hyper-V: Using IPI hypercalls\n");
         /*
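
Note: hv_enable_coco_interrupt() is a thin wrapper over apic_update_vector() that lets Hyper-V code declare which vectors the hypervisor may inject on a given CPU; hv_init.c below uses the same mechanism directly for the stimer vector. A hedged sketch of how a caller might pair the enable with a matching disable (the caller itself is hypothetical):

/* Hypothetical driver flow: allow the hypervisor to inject 'vector' on
 * 'cpu' while the device is bound, and revoke it when it goes away. */
static void example_bind_vector(unsigned int cpu, unsigned int vector)
{
    hv_enable_coco_interrupt(cpu, vector, true);
}

static void example_unbind_vector(unsigned int cpu, unsigned int vector)
{
    hv_enable_coco_interrupt(cpu, vector, false);
}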

arch/x86/hyperv/hv_init.c

Lines changed: 30 additions & 1 deletion
@@ -40,6 +40,7 @@
 void *hv_hypercall_pg;
 EXPORT_SYMBOL_GPL(hv_hypercall_pg);
 
+void *hv_vp_early_input_arg;
 union hv_ghcb * __percpu *hv_ghcb_pg;
 
 /* Storage to save the hypercall page temporarily for hibernation */

@@ -84,6 +85,10 @@ static int hv_cpu_init(unsigned int cpu)
     if (ret)
         return ret;
 
+    /* Allow Hyper-V stimer vector to be injected from Hypervisor. */
+    if (ms_hyperv.misc_features & HV_STIMER_DIRECT_MODE_AVAILABLE)
+        apic_update_vector(cpu, HYPERV_STIMER0_VECTOR, true);
+
     return hyperv_init_ghcb();
 }
 

@@ -191,6 +196,9 @@ static int hv_cpu_die(unsigned int cpu)
         *ghcb_va = NULL;
     }
 
+    if (ms_hyperv.misc_features & HV_STIMER_DIRECT_MODE_AVAILABLE)
+        apic_update_vector(cpu, HYPERV_STIMER0_VECTOR, false);
+
     hv_common_cpu_die(cpu);
 
     if (hv_reenlightenment_cb == NULL)

@@ -357,13 +365,30 @@ void __init hyperv_init(void)
     u64 guest_id;
     union hv_x64_msr_hypercall_contents hypercall_msr;
     int cpuhp;
+    int ret;
 
     if (x86_hyper_type != X86_HYPER_MS_HYPERV)
         return;
 
     if (hv_common_init())
         return;
 
+    if (cc_platform_has(CC_ATTR_SNP_SECURE_AVIC)) {
+        hv_vp_early_input_arg = kcalloc(num_possible_cpus(),
+                                        PAGE_SIZE,
+                                        GFP_KERNEL);
+        if (hv_vp_early_input_arg) {
+            ret = set_memory_decrypted((u64)hv_vp_early_input_arg,
+                                       num_possible_cpus());
+            if (ret) {
+                kfree(hv_vp_early_input_arg);
+                goto common_free;
+            }
+        } else {
+            goto common_free;
+        }
+    }
+
     /*
      * The VP assist page is useless to a TDX guest: the only use we
      * would have for it is lazy EOI, which can not be used with TDX.

@@ -378,7 +403,7 @@ void __init hyperv_init(void)
         ms_hyperv.hints &= ~HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
 
         if (!hv_isolation_type_tdx())
-            goto common_free;
+            goto free_vp_early_input_arg;
     }
 
     if (ms_hyperv.paravisor_present && hv_isolation_type_snp()) {

@@ -538,6 +563,10 @@ void __init hyperv_init(void)
 free_vp_assist_page:
     kfree(hv_vp_assist_page);
     hv_vp_assist_page = NULL;
+free_vp_early_input_arg:
+    set_memory_encrypted((u64)hv_vp_early_input_arg, num_possible_cpus());
+    kfree(hv_vp_early_input_arg);
+    hv_vp_early_input_arg = NULL;
 common_free:
     hv_common_free();
 }
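
Note: hyperv_init() sizes hv_vp_early_input_arg at one page per possible CPU and marks the whole buffer decrypted, so it can carry hypercall input that the host must be able to read. A small sketch of the per-CPU slot arithmetic, mirroring the indexing used by hv_set_savic_backing_page() in ivm.c below (the helper name is hypothetical, kernel context assumed):

/* Hypothetical helper: locate this CPU's page-sized slot in the shared
 * (host-visible) early input buffer. */
static void *example_early_input_slot(void)
{
    return hv_vp_early_input_arg + smp_processor_id() * PAGE_SIZE;
}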

arch/x86/hyperv/ivm.c

Lines changed: 38 additions & 0 deletions
@@ -289,6 +289,44 @@ static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
     free_page((unsigned long)vmsa);
 }
 
+enum es_result hv_set_savic_backing_page(u64 gfn)
+{
+    u64 control = HV_HYPERCALL_REP_COMP_1 | HVCALL_SET_VP_REGISTERS;
+    struct hv_set_vp_registers_input *input
+        = hv_vp_early_input_arg + smp_processor_id() * PAGE_SIZE;
+    union hv_x64_register_sev_gpa_page value;
+    unsigned long flags;
+    int retry = 5;
+    u64 ret;
+
+    local_irq_save(flags);
+
+    value.enabled = 1;
+    value.reserved = 0;
+    value.pagenumber = gfn;
+
+    memset(input, 0, struct_size(input, element, 1));
+    input->header.partitionid = HV_PARTITION_ID_SELF;
+    input->header.vpindex = HV_VP_INDEX_SELF;
+    input->header.inputvtl = ms_hyperv.vtl;
+    input->element[0].name = HV_X64_REGISTER_SEV_AVIC_GPA;
+    input->element[0].value.reg64 = value.u64;
+
+    do {
+        ret = hv_do_hypercall(control, input, NULL);
+    } while (ret == HV_STATUS_TIME_OUT && retry--);
+
+    if (!hv_result_success(ret))
+        pr_err("Failed to set secure AVIC backing page %llx.\n", ret);
+
+    local_irq_restore(flags);
+
+    if (hv_result_success(ret))
+        return ES_OK;
+    else
+        return ES_VMM_ERROR;
+}
+
 int hv_snp_boot_ap(u32 apic_id, unsigned long start_ip, unsigned int cpu)
 {
     struct sev_es_save_area *vmsa = (struct sev_es_save_area *)
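
Note: hv_set_savic_backing_page() registers the Secure AVIC backing page with Hyper-V through HVCALL_SET_VP_REGISTERS, retrying on HV_STATUS_TIME_OUT, and complements the GHCB-side savic_register_gpa() from sev/core.c. A hedged sketch of how the two registrations could be combined for one CPU; the call site, ordering, and error handling here are assumptions, not taken from this commit:

/* Hypothetical bring-up step for one vCPU's APIC backing page. */
static int example_register_backing_page(void *backing_page)
{
    u64 gpa = __pa(backing_page);

    /* GHCB VMGEXIT path introduced in sev/core.c. */
    if (savic_register_gpa(gpa) != ES_OK)
        return -ENODEV;

    /* Hyper-V path: report the same page to the hypervisor, by frame number. */
    if (hv_set_savic_backing_page(gpa >> PAGE_SHIFT) != ES_OK)
        return -ENODEV;

    return 0;
}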
