1 change: 1 addition & 0 deletions arch/x86/include/asm/cpufeatures.h
@@ -444,6 +444,7 @@
#define X86_FEATURE_VM_PAGE_FLUSH (19*32+ 2) /* VM Page Flush MSR is supported */
#define X86_FEATURE_SEV_ES (19*32+ 3) /* "sev_es" Secure Encrypted Virtualization - Encrypted State */
#define X86_FEATURE_SEV_SNP (19*32+ 4) /* "sev_snp" Secure Encrypted Virtualization - Secure Nested Paging */
#define X86_FEATURE_SNP_SECURE_TSC (19*32+ 8) /* SEV-SNP Secure TSC */
#define X86_FEATURE_V_TSC_AUX (19*32+ 9) /* Virtual TSC_AUX */
#define X86_FEATURE_SME_COHERENT (19*32+10) /* hardware-enforced cache coherency */
#define X86_FEATURE_RESTRICTED_INJECTION (19*32+12) /* AMD SEV Restricted Injection */
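
Note: word 19 of this feature table is CPUID leaf 0x8000001F EAX, so the new bit (19*32 + 8) corresponds to Fn8000_001F_EAX[8] (SecureTsc). A minimal userspace probe, as a hedged sketch (the leaf/bit mapping follows the AMD SEV-SNP documentation and is not part of this diff):

/* Hedged sketch: detect SNP Secure TSC support from userspace. */
#include <cpuid.h>
#include <stdbool.h>

static bool snp_secure_tsc_supported(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Fn8000_001F reports SEV capabilities; EAX bit 8 is SecureTsc. */
	if (!__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx))
		return false;

	return eax & (1u << 8);
}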
1 change: 1 addition & 0 deletions arch/x86/include/asm/svm.h
@@ -332,6 +332,7 @@ static_assert((X2AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == X2AVIC_
#define SVM_SEV_FEAT_RESTRICTED_INJECTION BIT(3)
#define SVM_SEV_FEAT_ALTERNATE_INJECTION BIT(4)
#define SVM_SEV_FEAT_DEBUG_SWAP BIT(5)
#define SVM_SEV_FEAT_SECURE_TSC BIT(9)

#define VMCB_ALLOWED_SEV_FEATURES_VALID BIT_ULL(63)

108 changes: 78 additions & 30 deletions arch/x86/kvm/svm/sev.c
@@ -38,7 +38,6 @@
#include "lapic.h"

#define GHCB_VERSION_MAX 2ULL
#define GHCB_VERSION_DEFAULT 2ULL
#define GHCB_VERSION_MIN 1ULL

#define GHCB_HV_FT_SUPPORTED (GHCB_HV_FT_SNP | \
@@ -154,6 +153,14 @@ static bool sev_vcpu_has_debug_swap(struct vcpu_svm *svm)
return svm->sev_es.vmsa_features & SVM_SEV_FEAT_DEBUG_SWAP;
}

static bool snp_is_secure_tsc_enabled(struct kvm *kvm)
{
struct kvm_sev_info *sev = to_kvm_sev_info(kvm);

return (sev->vmsa_features & SVM_SEV_FEAT_SECURE_TSC) &&
!WARN_ON_ONCE(!sev_snp_guest(kvm));
}

/* Must be called with the sev_bitmap_lock held */
static bool __sev_recycle_asids(unsigned int min_asid, unsigned int max_asid)
{
@@ -413,6 +420,7 @@ static int __sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp,
struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
struct sev_platform_init_args init_args = {0};
bool es_active = vm_type != KVM_X86_SEV_VM;
bool snp_active = vm_type == KVM_X86_SNP_VM;
u64 valid_vmsa_features = es_active ? sev_supported_vmsa_features : 0;
int ret;

@@ -422,12 +430,26 @@ static int __sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp,
if (data->flags)
return -EINVAL;

if (!snp_active)
valid_vmsa_features &= ~SVM_SEV_FEAT_SECURE_TSC;

if (data->vmsa_features & ~valid_vmsa_features)
return -EINVAL;

if (data->ghcb_version > GHCB_VERSION_MAX || (!es_active && data->ghcb_version))
return -EINVAL;

/*
* KVM supports the full range of mandatory features defined by version
* 2 of the GHCB protocol, so default to that for SEV-ES guests created
* via KVM_SEV_INIT2 (KVM_SEV_INIT forces version 1).
*/
if (es_active && !data->ghcb_version)
data->ghcb_version = 2;

if (snp_active && data->ghcb_version < 2)
return -EINVAL;

if (unlikely(sev->active))
return -EINVAL;

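For userspace, the upshot of the hunk above: Secure TSC is requested via the vmsa_features field of KVM_SEV_INIT2, and an SNP guest must end up on GHCB protocol version 2 or later (explicitly, or by leaving ghcb_version zero and taking the SEV-ES default of 2). A hedged sketch of the init call, with the struct layout as in the kvm uapi headers and BIT(9) mirroring the kernel-internal SVM_SEV_FEAT_SECURE_TSC:

/* Hedged sketch: initialize an SNP VM with Secure TSC opted in. */
struct kvm_sev_init init = {
	.vmsa_features	= 1ULL << 9,	/* SVM_SEV_FEAT_SECURE_TSC */
	.ghcb_version	= 2,		/* SNP requires >= 2 */
};
struct kvm_sev_cmd cmd = {
	.id	= KVM_SEV_INIT2,
	.data	= (unsigned long)&init,
	.sev_fd	= sev_fd,		/* open fd for /dev/sev */
};

if (ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd))
	err(1, "KVM_SEV_INIT2 (fw error %u)", cmd.error);
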
@@ -436,15 +458,7 @@ static int __sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp,
sev->vmsa_features = data->vmsa_features;
sev->ghcb_version = data->ghcb_version;

/*
* Currently KVM supports the full range of mandatory features defined
* by version 2 of the GHCB protocol, so default to that for SEV-ES
* guests created via KVM_SEV_INIT2.
*/
if (sev->es_active && !sev->ghcb_version)
sev->ghcb_version = GHCB_VERSION_DEFAULT;

if (vm_type == KVM_X86_SNP_VM)
if (snp_active)
sev->vmsa_features |= SVM_SEV_FEAT_SNP_ACTIVE;

ret = sev_asid_new(sev);
@@ -462,7 +476,7 @@ static int __sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp,
}

/* This needs to happen after SEV/SNP firmware initialization. */
if (vm_type == KVM_X86_SNP_VM) {
if (snp_active) {
ret = snp_guest_req_init(kvm);
if (ret)
goto e_free;
@@ -1974,7 +1988,7 @@ static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
kvm_for_each_vcpu(i, dst_vcpu, dst_kvm) {
dst_svm = to_svm(dst_vcpu);

sev_init_vmcb(dst_svm);
sev_init_vmcb(dst_svm, false);

if (!dst->es_active)
continue;
@@ -2186,6 +2200,13 @@ static int snp_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
if (!(params.policy & SNP_POLICY_MASK_RSVD_MBO))
return -EINVAL;

if (snp_is_secure_tsc_enabled(kvm)) {
if (WARN_ON_ONCE(!kvm->arch.default_tsc_khz))
return -EINVAL;

start.desired_tsc_khz = kvm->arch.default_tsc_khz;
}

sev->policy = params.policy;

sev->snp_context = snp_context_create(kvm, argp);
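
Note that kvm->arch.default_tsc_khz normally holds the host TSC frequency and can be overridden with the VM-scoped KVM_SET_TSC_KHZ (gated by KVM_CAP_VM_TSC_CONTROL); the WARN_ON_ONCE above guards the invariant that it is non-zero here, which sev_hardware_setup() already ensures by refusing to advertise Secure TSC when tsc_khz is zero. A hedged usage sketch for giving the guest a custom pinned frequency before launch:

/* Hedged sketch: pin the guest TSC frequency before KVM_SEV_SNP_LAUNCH_START
 * so that desired_tsc_khz is populated from kvm->arch.default_tsc_khz. */
long tsc_khz = 2000000;	/* 2.0 GHz, purely illustrative */

if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_VM_TSC_CONTROL) <= 0)
	errx(1, "VM-scoped TSC control not supported");

if (ioctl(vm_fd, KVM_SET_TSC_KHZ, tsc_khz))
	err(1, "KVM_SET_TSC_KHZ");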
@@ -2194,6 +2215,7 @@ static int snp_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)

start.gctx_paddr = __psp_pa(sev->snp_context);
start.policy = params.policy;

memcpy(start.gosvw, params.gosvw, sizeof(params.gosvw));
rc = __sev_issue_cmd(argp->sev_fd, SEV_CMD_SNP_LAUNCH_START, &start, &argp->error);
if (rc) {
@@ -3124,6 +3146,9 @@

if (sev_snp_restricted_injection_enabled)
sev_supported_vmsa_features |= SVM_SEV_FEAT_RESTRICTED_INJECTION;

if (sev_snp_enabled && tsc_khz && cpu_feature_enabled(X86_FEATURE_SNP_SECURE_TSC))
sev_supported_vmsa_features |= SVM_SEV_FEAT_SECURE_TSC;
}

void sev_hardware_unsetup(void)
@@ -3948,17 +3973,14 @@ static int snp_begin_psc(struct vcpu_svm *svm, struct psc_buffer *psc)
/*
* Invoked as part of svm_vcpu_reset() processing of an init event.
*/
void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu)
static void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct kvm_memory_slot *slot;
struct page *page;
kvm_pfn_t pfn;
gfn_t gfn;

if (!sev_snp_guest(vcpu->kvm))
return;

guard(mutex)(&svm->sev_es.snp_vmsa_mutex);

if (!svm->sev_es.snp_ap_waiting_for_reset)
@@ -4878,6 +4900,9 @@ void sev_es_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
!guest_cpu_cap_has(vcpu, X86_FEATURE_RDTSCP) &&
!guest_cpu_cap_has(vcpu, X86_FEATURE_RDPID));

svm_set_intercept_for_msr(vcpu, MSR_AMD64_GUEST_TSC_FREQ, MSR_TYPE_R,
!snp_is_secure_tsc_enabled(vcpu->kvm));

/*
* For SEV-ES, accesses to MSR_IA32_XSS should not be intercepted if
* the host/guest supports its use.
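
With the read intercept disabled, a Secure TSC guest can read its own TSC frequency without a #VC/GHCB round trip. As a hedged guest-side sketch (MSR_AMD64_GUEST_TSC_FREQ is 0xc0010134 and, per the APM, reports the guest-effective frequency in MHz):

/* Hedged sketch (guest context): read the Secure TSC guest frequency. */
static inline unsigned long long guest_tsc_freq(void)
{
	unsigned int lo, hi;

	asm volatile("rdmsr" : "=a" (lo), "=d" (hi) : "c" (0xc0010134));

	return ((unsigned long long)hi << 32) | lo;
}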
@@ -4915,8 +4940,9 @@ static void sev_snp_init_vmcb(struct vcpu_svm *svm)
svm->vmcb->control.int_ctl &= ~V_NMI_ENABLE_MASK;
}

static void sev_es_init_vmcb(struct vcpu_svm *svm)
static void sev_es_init_vmcb(struct vcpu_svm *svm, bool init_event)
{
struct kvm_sev_info *sev = to_kvm_sev_info(svm->vcpu.kvm);
struct vmcb *vmcb = svm->vmcb01.ptr;

svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
@@ -4978,10 +5004,21 @@

if (sev_snp_guest(svm->vcpu.kvm))
sev_snp_init_vmcb(svm);

/*
* Set the GHCB MSR value as per the GHCB specification when emulating
* vCPU RESET for an SEV-ES guest.
*/
if (!init_event)
set_ghcb_msr(svm, GHCB_MSR_SEV_INFO((__u64)sev->ghcb_version,
GHCB_VERSION_MIN,
sev_enc_bit));
}

void sev_init_vmcb(struct vcpu_svm *svm)
void sev_init_vmcb(struct vcpu_svm *svm, bool init_event)
{
struct kvm_vcpu *vcpu = &svm->vcpu;

svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
clr_exception_intercept(svm, UD_VECTOR);

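Moving set_ghcb_msr() here, out of the old sev_es_vcpu_reset(), ties the SEV_INFO advertisement to RESET emulation; it is skipped on INIT (init_event == true), presumably so a GHCB MSR value the guest has already negotiated is not clobbered. For reference, a hedged sketch of how GHCB_MSR_SEV_INFO packs its arguments, per the GHCB specification's SEV information response:

/* Hedged sketch of the SEV_INFO response layout:
 * [63:48] max protocol version, [47:32] min protocol version,
 * [31:24] guest page-encryption (C) bit position, [11:0] info code 0x001. */
#define SEV_INFO_RESP(max, min, cbit)			\
	(((unsigned long long)(max)  << 48) |		\
	 ((unsigned long long)(min)  << 32) |		\
	 ((unsigned long long)(cbit) << 24) | 0x001ULL)
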
@@ -4991,25 +5028,36 @@ void sev_init_vmcb(struct vcpu_svm *svm)
*/
clr_exception_intercept(svm, GP_VECTOR);

if (sev_es_guest(svm->vcpu.kvm))
sev_es_init_vmcb(svm);
if (init_event && sev_snp_guest(vcpu->kvm))
sev_snp_init_protected_guest_state(vcpu);

if (sev_es_guest(vcpu->kvm))
sev_es_init_vmcb(svm, init_event);
}

void sev_es_vcpu_reset(struct vcpu_svm *svm)
int sev_vcpu_create(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu *vcpu = &svm->vcpu;
struct kvm_sev_info *sev = to_kvm_sev_info(vcpu->kvm);
struct vcpu_svm *svm = to_svm(vcpu);
struct page *vmsa_page;

mutex_init(&svm->sev_es.snp_vmsa_mutex);

if (!sev_es_guest(vcpu->kvm))
return 0;

/*
* Set the GHCB MSR value as per the GHCB specification when emulating
* vCPU RESET for an SEV-ES guest.
* SEV-ES guests require a separate (from the VMCB) VMSA page used to
* contain the encrypted register state of the guest.
*/
set_ghcb_msr(svm, GHCB_MSR_SEV_INFO((__u64)sev->ghcb_version,
GHCB_VERSION_MIN,
sev_enc_bit));
vmsa_page = snp_safe_alloc_page();
if (!vmsa_page)
return -ENOMEM;

mutex_init(&svm->sev_es.snp_vmsa_mutex);
svm->sev_es.hvdb_gpa = INVALID_PAGE;
svm->sev_es.vmsa = page_address(vmsa_page);

vcpu->arch.guest_tsc_protected = snp_is_secure_tsc_enabled(vcpu->kvm);

return 0;
}

void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_area *hostsa)
36 changes: 10 additions & 26 deletions arch/x86/kvm/svm/svm.c
@@ -1086,7 +1086,7 @@ static void svm_recalc_intercepts_after_set_cpuid(struct kvm_vcpu *vcpu)
svm_recalc_msr_intercepts(vcpu);
}

static void init_vmcb(struct kvm_vcpu *vcpu)
static void init_vmcb(struct kvm_vcpu *vcpu, bool init_event)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb *vmcb = svm->vmcb01.ptr;
@@ -1224,7 +1224,7 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
svm_set_intercept(svm, INTERCEPT_BUSLOCK);

if (sev_guest(vcpu->kvm))
sev_init_vmcb(svm);
sev_init_vmcb(svm, init_event);

svm_hv_init_vmcb(vmcb);

@@ -1247,9 +1247,6 @@ static void __svm_vcpu_reset(struct kvm_vcpu *vcpu)

svm->nmi_masked = false;
svm->awaiting_iret_completion = false;

if (sev_es_guest(vcpu->kvm))
sev_es_vcpu_reset(svm);
}

static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
@@ -1259,10 +1256,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
svm->spec_ctrl = 0;
svm->virt_spec_ctrl = 0;

if (init_event)
sev_snp_init_protected_guest_state(vcpu);

init_vmcb(vcpu);
init_vmcb(vcpu, init_event);

if (!init_event)
__svm_vcpu_reset(vcpu);
@@ -1278,7 +1272,6 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm;
struct page *vmcb01_page;
struct page *vmsa_page = NULL;
int err;

BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);
@@ -1292,26 +1285,21 @@
if (sev_es_guest(vcpu->kvm)) {
struct kvm_sev_info *sev = to_kvm_sev_info(vcpu->kvm);

/*
* SEV-ES guests require a separate VMSA page used to contain
* the encrypted register state of the guest.
*/
vmsa_page = snp_safe_alloc_page();
if (!vmsa_page)
goto error_free_vmcb_page;

/* Sync VM SEV_FEATURES to VCPU - Might be overwritten later */
svm->sev_es.vmsa_features = sev->vmsa_features;
}
err = sev_vcpu_create(vcpu);
if (err)
goto error_free_vmcb_page;

err = avic_init_vcpu(svm);
if (err)
goto error_free_vmsa_page;
goto error_free_sev;

svm->msrpm = svm_vcpu_alloc_msrpm();
if (!svm->msrpm) {
err = -ENOMEM;
goto error_free_vmsa_page;
goto error_free_sev;
}

svm->x2avic_msrs_intercepted = true;
@@ -1320,16 +1308,12 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu)
svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT);
svm_switch_vmcb(svm, &svm->vmcb01);

if (vmsa_page)
svm->sev_es.vmsa = page_address(vmsa_page);

svm->guest_state_loaded = false;

return 0;

error_free_vmsa_page:
if (vmsa_page)
__free_page(vmsa_page);
error_free_sev:
sev_free_vcpu(vcpu);
error_free_vmcb_page:
__free_page(vmcb01_page);
out:
7 changes: 3 additions & 4 deletions arch/x86/kvm/svm/svm.h
@@ -840,10 +840,9 @@ void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu);
/* sev.c */

int pre_sev_run(struct vcpu_svm *svm, int cpu);
void sev_init_vmcb(struct vcpu_svm *svm);
void sev_init_vmcb(struct vcpu_svm *svm, bool init_event);
void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_vcpu_reset(struct vcpu_svm *svm);
void sev_es_recalc_msr_intercepts(struct kvm_vcpu *vcpu);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_area *hostsa);
@@ -868,6 +867,7 @@ static inline struct page *snp_safe_alloc_page(void)
return snp_safe_alloc_page_node(numa_node_id(), GFP_KERNEL_ACCOUNT);
}

int sev_vcpu_create(struct kvm_vcpu *vcpu);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
void sev_vm_destroy(struct kvm *kvm);
void __init sev_set_cpu_caps(void);
@@ -877,7 +877,6 @@ int sev_cpu_init(struct svm_cpu_data *sd);
int sev_dev_get_attr(u32 group, u64 attr, u64 *val);
extern unsigned int max_sev_asid;
void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code);
void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu);
int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn);
@@ -906,6 +905,7 @@ static inline struct page *snp_safe_alloc_page(void)
return snp_safe_alloc_page_node(numa_node_id(), GFP_KERNEL_ACCOUNT);
}

static inline int sev_vcpu_create(struct kvm_vcpu *vcpu) { return 0; }
static inline void sev_free_vcpu(struct kvm_vcpu *vcpu) {}
static inline void sev_vm_destroy(struct kvm *kvm) {}
static inline void __init sev_set_cpu_caps(void) {}
@@ -915,7 +915,6 @@ static inline int sev_cpu_init(struct svm_cpu_data *sd) { return 0; }
static inline int sev_dev_get_attr(u32 group, u64 attr, u64 *val) { return -ENXIO; }
#define max_sev_asid 0
static inline void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code) {}
static inline void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu) {}
static inline int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
{
return 0;