[pve-devel] [RFC kernel] revert problematic TSC multiplier commit

Fiona Ebner f.ebner at proxmox.com
Wed Aug 31 15:28:03 CEST 2022


This reverts commit 1ab9287add5e ("KVM: X86: Add vendor callbacks for
writing the TSC multiplier"), which might be responsible for several
issues reported in the community forum [0][1].

In my case, loading a VM snapshot that originally was taken on
a CPU from a different vendor often caused problems in other VMs(!).
In particular, it often led to RCU stalls (with similar messages as in
[1]) or slowdowns, and sometimes clock jumps far into the future (like
in [0]). With this revert applied, everything seems to run smoothly
even after loading the "bad" snapshot 10 times.

[0] https://forum.proxmox.com/threads/112756/
[1] https://forum.proxmox.com/threads/111494/

Signed-off-by: Fiona Ebner <f.ebner at proxmox.com>
---

This needed to be built with an ABI bump, AFAICT because of the removed
KVM_X86_OP(write_tsc_multiplier) — arguably it is even an ABI break.
If this is not ok, we can either
A) try to revert while keeping the ABI intact
B) try to backport d1021a1bc47668aa715b7f2121086c9598bffcbf instead

That's the one reason it's RFC. The other is that I haven't had much
time to look at the actual code yet.

 ...dd-vendor-callbacks-for-writing-the-.patch | 247 ++++++++++++++++++
 1 file changed, 247 insertions(+)
 create mode 100644 patches/kernel/0032-Revert-KVM-X86-Add-vendor-callbacks-for-writing-the-.patch

diff --git a/patches/kernel/0032-Revert-KVM-X86-Add-vendor-callbacks-for-writing-the-.patch b/patches/kernel/0032-Revert-KVM-X86-Add-vendor-callbacks-for-writing-the-.patch
new file mode 100644
index 0000000..d2f7796
--- /dev/null
+++ b/patches/kernel/0032-Revert-KVM-X86-Add-vendor-callbacks-for-writing-the-.patch
@@ -0,0 +1,247 @@
+From 0747f18be1da2033a20c2a355678b9c278ce4603 Mon Sep 17 00:00:00 2001
+From: Fiona Ebner <f.ebner at proxmox.com>
+Date: Wed, 31 Aug 2022 13:49:55 +0200
+Subject: [PATCH] Revert "KVM: X86: Add vendor callbacks for writing the TSC
+ multiplier"
+
+This reverts commit 1ab9287add5e265352d18517551abf6d01d004fd until commit
+d1021a1bc47668aa715b7f2121086c9598bffcbf, which fixes it, can be cleanly
+applied.
+
+Signed-off-by: Fiona Ebner <f.ebner at proxmox.com>
+---
+ arch/x86/include/asm/kvm-x86-ops.h |  1 -
+ arch/x86/include/asm/kvm_host.h    |  1 -
+ arch/x86/kvm/svm/svm.c             |  6 ------
+ arch/x86/kvm/vmx/nested.c          |  9 +++++----
+ arch/x86/kvm/vmx/vmx.c             | 11 +++++------
+ arch/x86/kvm/vmx/vmx.h             |  8 ++++++++
+ arch/x86/kvm/x86.c                 | 30 +++++++-----------------------
+ 7 files changed, 25 insertions(+), 41 deletions(-)
+
+diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
+index 9e50da3ed01a..7b5901433b6f 100644
+--- a/arch/x86/include/asm/kvm-x86-ops.h
++++ b/arch/x86/include/asm/kvm-x86-ops.h
+@@ -90,7 +90,6 @@ KVM_X86_OP_NULL(has_wbinvd_exit)
+ KVM_X86_OP(get_l2_tsc_offset)
+ KVM_X86_OP(get_l2_tsc_multiplier)
+ KVM_X86_OP(write_tsc_offset)
+-KVM_X86_OP(write_tsc_multiplier)
+ KVM_X86_OP(get_exit_info)
+ KVM_X86_OP(check_intercept)
+ KVM_X86_OP(handle_exit_irqoff)
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 0362d3fba42a..1d19a54ad986 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1406,7 +1406,6 @@ struct kvm_x86_ops {
+ 	u64 (*get_l2_tsc_offset)(struct kvm_vcpu *vcpu);
+ 	u64 (*get_l2_tsc_multiplier)(struct kvm_vcpu *vcpu);
+ 	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
+-	void (*write_tsc_multiplier)(struct kvm_vcpu *vcpu, u64 multiplier);
+ 
+ 	/*
+ 	 * Retrieve somewhat arbitrary exit information.  Intended to be used
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 938b9b24f0ee..42933c7d3948 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -1126,11 +1126,6 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+ 	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
+ }
+ 
+-static void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
+-{
+-	wrmsrl(MSR_AMD64_TSC_RATIO, multiplier);
+-}
+-
+ /* Evaluate instruction intercepts that depend on guest CPUID features. */
+ static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu,
+ 					      struct vcpu_svm *svm)
+@@ -4645,7 +4640,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
+ 	.get_l2_tsc_offset = svm_get_l2_tsc_offset,
+ 	.get_l2_tsc_multiplier = svm_get_l2_tsc_multiplier,
+ 	.write_tsc_offset = svm_write_tsc_offset,
+-	.write_tsc_multiplier = svm_write_tsc_multiplier,
+ 
+ 	.load_mmu_pgd = svm_load_mmu_pgd,
+ 
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index 5f91aa62bdca..91db9e92a794 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -2539,8 +2539,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
+ 			vmx_get_l2_tsc_multiplier(vcpu));
+ 
+ 	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
++
+ 	if (kvm_has_tsc_control)
+-		vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
++		decache_tsc_multiplier(vmx);
+ 
+ 	nested_vmx_transition_tlb_flush(vcpu, vmcs12, true);
+ 
+@@ -4597,12 +4598,12 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
+ 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
+ 	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
+ 	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
+-	if (kvm_has_tsc_control)
+-		vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
+-
+ 	if (vmx->nested.l1_tpr_threshold != -1)
+ 		vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold);
+ 
++	if (kvm_has_tsc_control)
++		decache_tsc_multiplier(vmx);
++
+ 	if (vmx->nested.change_vmcs01_virtual_apic_mode) {
+ 		vmx->nested.change_vmcs01_virtual_apic_mode = false;
+ 		vmx_set_virtual_apic_mode(vcpu);
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index a45a43bcc844..deb9d4b73922 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -1363,6 +1363,11 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
+ 
+ 		vmx->loaded_vmcs->cpu = cpu;
+ 	}
++
++	/* Setup TSC multiplier */
++	if (kvm_has_tsc_control &&
++	    vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
++		decache_tsc_multiplier(vmx);
+ }
+ 
+ /*
+@@ -1784,11 +1789,6 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+ 	vmcs_write64(TSC_OFFSET, offset);
+ }
+ 
+-static void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
+-{
+-	vmcs_write64(TSC_MULTIPLIER, multiplier);
+-}
+-
+ /*
+  * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
+  * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
+@@ -7759,7 +7759,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
+ 	.get_l2_tsc_offset = vmx_get_l2_tsc_offset,
+ 	.get_l2_tsc_multiplier = vmx_get_l2_tsc_multiplier,
+ 	.write_tsc_offset = vmx_write_tsc_offset,
+-	.write_tsc_multiplier = vmx_write_tsc_multiplier,
+ 
+ 	.load_mmu_pgd = vmx_load_mmu_pgd,
+ 
+diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
+index eb4568a3814a..7b5ddfa3d80e 100644
+--- a/arch/x86/kvm/vmx/vmx.h
++++ b/arch/x86/kvm/vmx/vmx.h
+@@ -316,6 +316,8 @@ struct vcpu_vmx {
+ 	/* apic deadline value in host tsc */
+ 	u64 hv_deadline_tsc;
+ 
++	u64 current_tsc_ratio;
++
+ 	unsigned long host_debugctlmsr;
+ 
+ 	/*
+@@ -563,6 +565,12 @@ static inline struct vmcs *alloc_vmcs(bool shadow)
+ 			      GFP_KERNEL_ACCOUNT);
+ }
+ 
++static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
++{
++	vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
++	vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
++}
++
+ static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
+ {
+ 	return secondary_exec_controls_get(vmx) &
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 48aaff0ce3b9..6284baf0ea14 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -2235,15 +2235,14 @@ static u32 adjust_tsc_khz(u32 khz, s32 ppm)
+ 	return v;
+ }
+ 
+-static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier);
+-
+ static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
+ {
+ 	u64 ratio;
+ 
+ 	/* Guest TSC same frequency as host TSC? */
+ 	if (!scale) {
+-		kvm_vcpu_write_tsc_multiplier(vcpu, kvm_default_tsc_scaling_ratio);
++		vcpu->arch.l1_tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
++		vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
+ 		return 0;
+ 	}
+ 
+@@ -2269,7 +2268,7 @@ static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
+ 		return -1;
+ 	}
+ 
+-	kvm_vcpu_write_tsc_multiplier(vcpu, ratio);
++	vcpu->arch.l1_tsc_scaling_ratio = vcpu->arch.tsc_scaling_ratio = ratio;
+ 	return 0;
+ }
+ 
+@@ -2281,7 +2280,8 @@ static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
+ 	/* tsc_khz can be zero if TSC calibration fails */
+ 	if (user_tsc_khz == 0) {
+ 		/* set tsc_scaling_ratio to a safe value */
+-		kvm_vcpu_write_tsc_multiplier(vcpu, kvm_default_tsc_scaling_ratio);
++		vcpu->arch.l1_tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
++		vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
+ 		return -1;
+ 	}
+ 
+@@ -2439,23 +2439,6 @@ static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset)
+ 	static_call(kvm_x86_write_tsc_offset)(vcpu, vcpu->arch.tsc_offset);
+ }
+ 
+-static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier)
+-{
+-	vcpu->arch.l1_tsc_scaling_ratio = l1_multiplier;
+-
+-	/* Userspace is changing the multiplier while L2 is active */
+-	if (is_guest_mode(vcpu))
+-		vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(
+-			l1_multiplier,
+-			static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu));
+-	else
+-		vcpu->arch.tsc_scaling_ratio = l1_multiplier;
+-
+-	if (kvm_has_tsc_control)
+-		static_call(kvm_x86_write_tsc_multiplier)(
+-			vcpu, vcpu->arch.tsc_scaling_ratio);
+-}
+-
+ static inline bool kvm_check_tsc_unstable(void)
+ {
+ #ifdef CONFIG_X86_64
+@@ -10628,6 +10611,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
+ 	else
+ 		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
+ 
++	kvm_set_tsc_khz(vcpu, max_tsc_khz);
++
+ 	r = kvm_mmu_create(vcpu);
+ 	if (r < 0)
+ 		return r;
+@@ -10704,7 +10689,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
+ 	vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
+ 	kvm_vcpu_mtrr_init(vcpu);
+ 	vcpu_load(vcpu);
+-	kvm_set_tsc_khz(vcpu, max_tsc_khz);
+ 	kvm_vcpu_reset(vcpu, false);
+ 	kvm_init_mmu(vcpu);
+ 	vcpu_put(vcpu);
+-- 
+2.30.2
+
-- 
2.30.2






More information about the pve-devel mailing list