svn commit: r284712 - head/sys/amd64/vmm/amd
Neel Natu
neel at FreeBSD.org
Tue Jun 23 02:17:25 UTC 2015
Author: neel
Date: Tue Jun 23 02:17:23 2015
New Revision: 284712
URL: https://svnweb.freebsd.org/changeset/base/284712
Log:
Restore the host's GS.base before returning from 'svm_launch()'.
Previously this was done by the caller of 'svm_launch()' after it returned.
This worked fine as long as no code that depends on pcpu data was
executed in the interim.
The dtrace probe 'fbt:vmm:svm_launch:return' broke this assumption because
it calls 'dtrace_probe()' which in turn relies on pcpu data.
Reported by: avg
MFC after: 1 week
Modified:
head/sys/amd64/vmm/amd/svm.c
head/sys/amd64/vmm/amd/svm.h
head/sys/amd64/vmm/amd/svm_genassym.c
head/sys/amd64/vmm/amd/svm_support.S
Modified: head/sys/amd64/vmm/amd/svm.c
==============================================================================
--- head/sys/amd64/vmm/amd/svm.c Mon Jun 22 22:16:06 2015 (r284711)
+++ head/sys/amd64/vmm/amd/svm.c Tue Jun 23 02:17:23 2015 (r284712)
@@ -1916,7 +1916,6 @@ svm_vmrun(void *arg, int vcpu, register_
struct vlapic *vlapic;
struct vm *vm;
uint64_t vmcb_pa;
- u_int thiscpu;
int handled;
svm_sc = arg;
@@ -1928,19 +1927,10 @@ svm_vmrun(void *arg, int vcpu, register_
vmexit = vm_exitinfo(vm, vcpu);
vlapic = vm_lapic(vm, vcpu);
- /*
- * Stash 'curcpu' on the stack as 'thiscpu'.
- *
- * The per-cpu data area is not accessible until MSR_GSBASE is restored
- * after the #VMEXIT. Since VMRUN is executed inside a critical section
- * 'curcpu' and 'thiscpu' are guaranteed to identical.
- */
- thiscpu = curcpu;
-
gctx = svm_get_guest_regctx(svm_sc, vcpu);
vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;
- if (vcpustate->lastcpu != thiscpu) {
+ if (vcpustate->lastcpu != curcpu) {
/*
* Force new ASID allocation by invalidating the generation.
*/
@@ -1961,7 +1951,7 @@ svm_vmrun(void *arg, int vcpu, register_
* This works for now but any new side-effects of vcpu
* migration should take this case into account.
*/
- vcpustate->lastcpu = thiscpu;
+ vcpustate->lastcpu = curcpu;
vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1);
}
@@ -2007,14 +1997,14 @@ svm_vmrun(void *arg, int vcpu, register_
svm_inj_interrupts(svm_sc, vcpu, vlapic);
- /* Activate the nested pmap on 'thiscpu' */
- CPU_SET_ATOMIC_ACQ(thiscpu, &pmap->pm_active);
+ /* Activate the nested pmap on 'curcpu' */
+ CPU_SET_ATOMIC_ACQ(curcpu, &pmap->pm_active);
/*
* Check the pmap generation and the ASID generation to
* ensure that the vcpu does not use stale TLB mappings.
*/
- check_asid(svm_sc, vcpu, pmap, thiscpu);
+ check_asid(svm_sc, vcpu, pmap, curcpu);
ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty;
vcpustate->dirty = 0;
@@ -2022,23 +2012,9 @@ svm_vmrun(void *arg, int vcpu, register_
/* Launch Virtual Machine. */
VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip);
- svm_launch(vmcb_pa, gctx);
-
- CPU_CLR_ATOMIC(thiscpu, &pmap->pm_active);
+ svm_launch(vmcb_pa, gctx, &__pcpu[curcpu]);
- /*
- * Restore MSR_GSBASE to point to the pcpu data area.
- *
- * Note that accesses done via PCPU_GET/PCPU_SET will work
- * only after MSR_GSBASE is restored.
- *
- * Also note that we don't bother restoring MSR_KGSBASE
- * since it is not used in the kernel and will be restored
- * when the VMRUN ioctl returns to userspace.
- */
- wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[thiscpu]);
- KASSERT(curcpu == thiscpu, ("thiscpu/curcpu (%u/%u) mismatch",
- thiscpu, curcpu));
+ CPU_CLR_ATOMIC(curcpu, &pmap->pm_active);
/*
* The host GDTR and IDTR is saved by VMRUN and restored
Modified: head/sys/amd64/vmm/amd/svm.h
==============================================================================
--- head/sys/amd64/vmm/amd/svm.h Mon Jun 22 22:16:06 2015 (r284711)
+++ head/sys/amd64/vmm/amd/svm.h Tue Jun 23 02:17:23 2015 (r284712)
@@ -29,6 +29,8 @@
#ifndef _SVM_H_
#define _SVM_H_
+struct pcpu;
+
/*
* Guest register state that is saved outside the VMCB.
*/
@@ -49,6 +51,6 @@ struct svm_regctx {
register_t sctx_r15;
};
-void svm_launch(uint64_t pa, struct svm_regctx *);
+void svm_launch(uint64_t pa, struct svm_regctx *gctx, struct pcpu *pcpu);
#endif /* _SVM_H_ */
Modified: head/sys/amd64/vmm/amd/svm_genassym.c
==============================================================================
--- head/sys/amd64/vmm/amd/svm_genassym.c Mon Jun 22 22:16:06 2015 (r284711)
+++ head/sys/amd64/vmm/amd/svm_genassym.c Tue Jun 23 02:17:23 2015 (r284712)
@@ -29,6 +29,7 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/assym.h>
+#include <x86/specialreg.h>
#include "svm.h"
@@ -46,3 +47,4 @@ ASSYM(SCTX_R12, offsetof(struct svm_regc
ASSYM(SCTX_R13, offsetof(struct svm_regctx, sctx_r13));
ASSYM(SCTX_R14, offsetof(struct svm_regctx, sctx_r14));
ASSYM(SCTX_R15, offsetof(struct svm_regctx, sctx_r15));
+ASSYM(MSR_GSBASE, MSR_GSBASE);
Modified: head/sys/amd64/vmm/amd/svm_support.S
==============================================================================
--- head/sys/amd64/vmm/amd/svm_support.S Mon Jun 22 22:16:06 2015 (r284711)
+++ head/sys/amd64/vmm/amd/svm_support.S Tue Jun 23 02:17:23 2015 (r284712)
@@ -42,13 +42,17 @@
#define VMSAVE .byte 0x0f, 0x01, 0xdb
/*
- * svm_launch(uint64_t vmcb, struct svm_regctx *gctx)
+ * svm_launch(uint64_t vmcb, struct svm_regctx *gctx, struct pcpu *pcpu)
* %rdi: physical address of VMCB
* %rsi: pointer to guest context
+ * %rdx: pointer to the pcpu data
*/
ENTRY(svm_launch)
VENTER
+ /* save pointer to the pcpu data */
+ push %rdx
+
/*
* Host register state saved across a VMRUN.
*
@@ -116,6 +120,13 @@ ENTRY(svm_launch)
pop %r12
pop %rbx
+ /* Restore %GS.base to point to the host's pcpu data */
+ pop %rdx
+ mov %edx, %eax
+ shr $32, %rdx
+ mov $MSR_GSBASE, %ecx
+ wrmsr
+
VLEAVE
ret
END(svm_launch)
More information about the svn-src-all
mailing list