kvm: x86: replace kvm_spec_ctrl_valid_bits with runtime test on the host

commit 841c2be09fe4f495fe5224952a419bd8c7e5b455 upstream.

To avoid complex and in some cases incorrect logic in
kvm_spec_ctrl_valid_bits, just try the guest's given value on the host
processor instead, and if it doesn't #GP, allow the guest to set it.

One such case is when the host CPU supports the STIBP mitigation
but doesn't support IBRS (as is the case with some Zen 2 AMD CPUs);
in that case we were giving the guest a #GP when it tried to use STIBP.

The reason we can do the host test is that the IA32_SPEC_CTRL MSR is
passed through to the guest after the guest sets it to a non-zero value
for the first time (for performance reasons), and as a result of this,
it is pointless to emulate the #GP condition on this first access in a
different way than the host CPU does.
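
In code, the probe amounts to the following (a standalone sketch that uses
the same kernel MSR helpers as the kvm_spec_ctrl_test_value() function added
below; the spec_ctrl_probe_value() wrapper name here is illustrative only):

#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/msr.h>
#include <asm/msr-index.h>

/* Return 0 if 'value' can be written to IA32_SPEC_CTRL on this host. */
static int spec_ctrl_probe_value(u64 value)
{
	u64 saved_value;
	unsigned long flags;
	int ret = 0;

	/* Keep the read/probe/restore sequence atomic on this CPU. */
	local_irq_save(flags);

	if (rdmsrl_safe(MSR_IA32_SPEC_CTRL, &saved_value))
		ret = 1;	/* MSR is not readable on this host */
	else if (wrmsrl_safe(MSR_IA32_SPEC_CTRL, value))
		ret = 1;	/* the host faults on this value */
	else
		wrmsrl(MSR_IA32_SPEC_CTRL, saved_value);	/* undo the probe */

	local_irq_restore(flags);

	return ret;
}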

This is based on a patch from Sean Christopherson, who suggested this idea.

Fixes: 6441fa6178f5 ("KVM: x86: avoid incorrect writes to host MSR_IA32_SPEC_CTRL")
Cc: stable@vger.kernel.org
Suggested-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20200708115731.180097-1-mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
4 changed files with 24 additions and 20 deletions

@@ -4327,7 +4327,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 		    !guest_has_spec_ctrl_msr(vcpu))
 			return 1;
 
-		if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
+		if (kvm_spec_ctrl_test_value(data))
 			return 1;
 
 		svm->spec_ctrl = data;

@@ -1974,7 +1974,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		    !guest_has_spec_ctrl_msr(vcpu))
 			return 1;
 
-		if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
+		if (kvm_spec_ctrl_test_value(data))
 			return 1;
 
 		vmx->spec_ctrl = data;

@@ -10374,28 +10374,32 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
 
-u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu)
+int kvm_spec_ctrl_test_value(u64 value)
 {
-	uint64_t bits = SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD;
+	/*
+	 * test that setting IA32_SPEC_CTRL to given value
+	 * is allowed by the host processor
+	 */
 
-	/* The STIBP bit doesn't fault even if it's not advertised */
-	if (!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
-	    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
-		bits &= ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP);
-	if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL) &&
-	    !boot_cpu_has(X86_FEATURE_AMD_IBRS))
-		bits &= ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP);
+	u64 saved_value;
+	unsigned long flags;
+	int ret = 0;
 
-	if (!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL_SSBD) &&
-	    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
-		bits &= ~SPEC_CTRL_SSBD;
-	if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
-	    !boot_cpu_has(X86_FEATURE_AMD_SSBD))
-		bits &= ~SPEC_CTRL_SSBD;
+	local_irq_save(flags);
 
-	return bits;
+	if (rdmsrl_safe(MSR_IA32_SPEC_CTRL, &saved_value))
+		ret = 1;
+	else if (wrmsrl_safe(MSR_IA32_SPEC_CTRL, value))
+		ret = 1;
+	else
+		wrmsrl(MSR_IA32_SPEC_CTRL, saved_value);
+
+	local_irq_restore(flags);
+
+	return ret;
 }
-EXPORT_SYMBOL_GPL(kvm_spec_ctrl_valid_bits);
+EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value);
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);

@@ -368,6 +368,6 @@ static inline bool kvm_pat_valid(u64 data)
 
 void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
 void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);
-u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu);
+int kvm_spec_ctrl_test_value(u64 value);
 
 #endif