svn commit: r253336 - in projects/bhyve_npt_pmap/sys/amd64: include vmm vmm/amd vmm/intel
Neel Natu
neel at FreeBSD.org
Sun Jul 14 04:42:37 UTC 2013
Author: neel
Date: Sun Jul 14 04:42:35 2013
New Revision: 253336
URL: http://svnweb.freebsd.org/changeset/base/253336
Log:
Stash the 'pmap' and 'eptp' for the VM in each vcpu's vmxctx since they are
accessed in the vmresume fast path.
Set the bit corresponding to 'curcpu' in the nested pmap's 'pm_active' on
vm entry. This allows the TLB shootdown code on the host to know which cpus
need to participate.
Invalidate the guest physical mappings using 'invept' if 'vmxctx->eptgen'
is out-of-date with respect to the nested pmap's 'pm_eptgen'. If the 'invept'
instruction fails then vmx_launch() or vmx_resume() may return with an error
code of 'VMX_RETURN_INVEPT'.
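For orientation, a rough, self-contained C sketch of the per-entry logic this
change adds (mock types and a do_invept() placeholder stand in for the kernel's
cpuset and the raw 'invept' instruction; this is not the committed code, only
the shape of what the vmx_support.S macros do on each VM entry):

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-ins for the kernel structures touched by this change. */
	struct mock_pmap {
		uint64_t pm_active;	/* one bit per host cpu */
		long	 pm_eptgen;	/* bumped on each EPT invalidation request */
	};

	struct mock_vmxctx {
		long		  eptgen;	/* cached copy of pm_eptgen */
		uint64_t	  eptp;		/* EPT pointer for this VM */
		struct mock_pmap *pmap;		/* nested pmap backing the VM */
	};

	/* Placeholder for a single-context 'invept'; returns 0 on success. */
	static int
	do_invept(uint64_t eptp)
	{
		printf("invept(single-context, eptp=%#lx)\n", (unsigned long)eptp);
		return (0);
	}

	/* Mirrors VMX_SET_PM_ACTIVE + VMX_CHECK_EPTGEN from vmx_support.S. */
	static int
	vm_enter_sketch(struct mock_vmxctx *ctx, int curcpu)
	{
		/* The real code uses a locked btsl on pm_active. */
		ctx->pmap->pm_active |= 1UL << curcpu;

		if (ctx->eptgen != ctx->pmap->pm_eptgen) {
			ctx->eptgen = ctx->pmap->pm_eptgen;	/* refresh cached generation */
			if (do_invept(ctx->eptp) != 0)
				return (5);	/* VMX_RETURN_INVEPT */
		}
		return (0);
	}

	int
	main(void)
	{
		struct mock_pmap pmap = { .pm_active = 0, .pm_eptgen = 1 };
		struct mock_vmxctx ctx = { .eptgen = 0, .eptp = 0x1aa000, .pmap = &pmap };

		return (vm_enter_sketch(&ctx, 2));
	}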
Modified:
projects/bhyve_npt_pmap/sys/amd64/include/vmm.h
projects/bhyve_npt_pmap/sys/amd64/vmm/amd/amdv.c
projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmx.c
projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmx.h
projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmx_genassym.c
projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmx_support.S
projects/bhyve_npt_pmap/sys/amd64/vmm/vmm.c
Modified: projects/bhyve_npt_pmap/sys/amd64/include/vmm.h
==============================================================================
--- projects/bhyve_npt_pmap/sys/amd64/include/vmm.h Sun Jul 14 03:55:31 2013 (r253335)
+++ projects/bhyve_npt_pmap/sys/amd64/include/vmm.h Sun Jul 14 04:42:35 2013 (r253336)
@@ -48,7 +48,8 @@ enum x2apic_state;
typedef int (*vmm_init_func_t)(void);
typedef int (*vmm_cleanup_func_t)(void);
typedef void * (*vmi_init_func_t)(struct vm *vm, struct pmap *pmap);
-typedef int (*vmi_run_func_t)(void *vmi, int vcpu, register_t rip);
+typedef int (*vmi_run_func_t)(void *vmi, int vcpu, register_t rip,
+ struct pmap *pmap);
typedef void (*vmi_cleanup_func_t)(void *vmi);
typedef int (*vmi_get_register_t)(void *vmi, int vcpu, int num,
uint64_t *retval);
Modified: projects/bhyve_npt_pmap/sys/amd64/vmm/amd/amdv.c
==============================================================================
--- projects/bhyve_npt_pmap/sys/amd64/vmm/amd/amdv.c Sun Jul 14 03:55:31 2013 (r253335)
+++ projects/bhyve_npt_pmap/sys/amd64/vmm/amd/amdv.c Sun Jul 14 04:42:35 2013 (r253336)
@@ -62,7 +62,7 @@ amdv_vminit(struct vm *vm, struct pmap *
}
static int
-amdv_vmrun(void *arg, int vcpu, register_t rip)
+amdv_vmrun(void *arg, int vcpu, register_t rip, struct pmap *pmap)
{
printf("amdv_vmrun: not implemented\n");
Modified: projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmx.c
==============================================================================
--- projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmx.c Sun Jul 14 03:55:31 2013 (r253335)
+++ projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmx.c Sun Jul 14 04:42:35 2013 (r253336)
@@ -791,6 +791,9 @@ vmx_vminit(struct vm *vm, pmap_t pmap)
error = vmx_setup_cr4_shadow(&vmx->vmcs[i]);
if (error != 0)
panic("vmx_setup_cr4_shadow %d", error);
+
+ vmx->ctx[i].pmap = pmap;
+ vmx->ctx[i].eptp = vmx->eptp;
}
return (vmx);
@@ -1384,7 +1387,7 @@ vmx_exit_process(struct vmx *vmx, int vc
}
static int
-vmx_run(void *arg, int vcpu, register_t rip)
+vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap)
{
int error, vie, rc, handled, astpending;
uint32_t exit_reason;
@@ -1392,7 +1395,7 @@ vmx_run(void *arg, int vcpu, register_t
struct vmxctx *vmxctx;
struct vmcs *vmcs;
struct vm_exit *vmexit;
-
+
vmx = arg;
vmcs = &vmx->vmcs[vcpu];
vmxctx = &vmx->ctx[vcpu];
@@ -1401,6 +1404,11 @@ vmx_run(void *arg, int vcpu, register_t
astpending = 0;
vmexit = vm_exitinfo(vmx->vm, vcpu);
+ KASSERT(vmxctx->pmap == pmap,
+ ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));
+ KASSERT(vmxctx->eptp == vmx->eptp,
+ ("eptp %#lx different than ctx eptp %#lx", vmx->eptp, vmxctx->eptp));
+
/*
* XXX Can we avoid doing this every time we do a vm run?
*/
@@ -1463,6 +1471,9 @@ vmx_run(void *arg, int vcpu, register_t
vmxctx->launch_error, vie);
#endif
goto err_exit;
+ case VMX_RETURN_INVEPT:
+ panic("vm %s:%d invept error %d",
+ vm_name(vmx->vm), vcpu, vmxctx->launch_error);
default:
panic("vmx_setjmp returned %d", rc);
}
Modified: projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmx.h
==============================================================================
--- projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmx.h Sun Jul 14 03:55:31 2013 (r253335)
+++ projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmx.h Sun Jul 14 04:42:35 2013 (r253336)
@@ -31,6 +31,8 @@
#include "vmcs.h"
+struct pmap;
+
#define GUEST_MSR_MAX_ENTRIES 64 /* arbitrary */
struct vmxctx {
@@ -68,6 +70,15 @@ struct vmxctx {
int launched; /* vmcs launch state */
int launch_error;
+
+ long eptgen; /* cached pmap->pm_eptgen */
+
+ /*
+ * The 'eptp' and the 'pmap' do not change during the lifetime of
+ * the VM so it is safe to keep a copy in each vcpu's vmxctx.
+ */
+ vm_paddr_t eptp;
+ struct pmap *pmap;
};
struct vmxcap {
@@ -100,6 +111,7 @@ CTASSERT((offsetof(struct vmx, guest_msr
#define VMX_RETURN_VMRESUME 2
#define VMX_RETURN_VMLAUNCH 3
#define VMX_RETURN_AST 4
+#define VMX_RETURN_INVEPT 5
/*
* vmx_setjmp() returns:
* - 0 when it returns directly
@@ -107,6 +119,7 @@ CTASSERT((offsetof(struct vmx, guest_msr
* - 2 when it returns from vmx_resume (which would only be in the error case)
* - 3 when it returns from vmx_launch (which would only be in the error case)
* - 4 when it returns from vmx_resume or vmx_launch because of AST pending
+ * - 5 when it returns from vmx_launch/vmx_resume because of invept error
*/
int vmx_setjmp(struct vmxctx *ctx);
void vmx_longjmp(void); /* returns via vmx_setjmp */
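The comment block above describes a setjmp/longjmp-style protocol: vmx_setjmp()
returns 0 on the direct call and returns again later, via vmx_return, with one
of the VMX_RETURN_* codes, now including VMX_RETURN_INVEPT. A rough userland
analogue, purely illustrative and using the standard setjmp/longjmp rather than
the vmx primitives:

	#include <setjmp.h>
	#include <stdio.h>

	#define MOCK_RETURN_DIRECT	0
	#define MOCK_RETURN_INVEPT	5	/* mirrors VMX_RETURN_INVEPT */

	static jmp_buf ctx;

	static void
	mock_guest_entry(void)
	{
		/* e.g. the invept check failed; unwind back to the dispatcher */
		longjmp(ctx, MOCK_RETURN_INVEPT);
	}

	int
	main(void)
	{
		switch (setjmp(ctx)) {
		case MOCK_RETURN_DIRECT:
			mock_guest_entry();	/* does not return normally */
			break;
		case MOCK_RETURN_INVEPT:
			printf("invept failed; handle or panic\n");
			break;
		}
		return (0);
	}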
Modified: projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmx_genassym.c
==============================================================================
--- projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmx_genassym.c Sun Jul 14 03:55:31 2013 (r253335)
+++ projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmx_genassym.c Sun Jul 14 04:42:35 2013 (r253336)
@@ -72,6 +72,10 @@ ASSYM(VMXCTX_HOST_RBX, offsetof(struct v
ASSYM(VMXCTX_HOST_RIP, offsetof(struct vmxctx, host_rip));
ASSYM(VMXCTX_LAUNCH_ERROR, offsetof(struct vmxctx, launch_error));
+ASSYM(VMXCTX_EPTGEN, offsetof(struct vmxctx, eptgen));
+
+ASSYM(VMXCTX_PMAP, offsetof(struct vmxctx, pmap));
+ASSYM(VMXCTX_EPTP, offsetof(struct vmxctx, eptp));
ASSYM(VM_SUCCESS, VM_SUCCESS);
ASSYM(VM_FAIL_INVALID, VM_FAIL_INVALID);
@@ -82,8 +86,13 @@ ASSYM(VMX_RETURN_LONGJMP, VMX_RETURN_LON
ASSYM(VMX_RETURN_VMRESUME, VMX_RETURN_VMRESUME);
ASSYM(VMX_RETURN_VMLAUNCH, VMX_RETURN_VMLAUNCH);
ASSYM(VMX_RETURN_AST, VMX_RETURN_AST);
+ASSYM(VMX_RETURN_INVEPT, VMX_RETURN_INVEPT);
ASSYM(TDF_ASTPENDING, TDF_ASTPENDING);
ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED);
ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread));
+ASSYM(PC_CPUID, offsetof(struct pcpu, pc_cpuid));
+
+ASSYM(PM_ACTIVE, offsetof(struct pmap, pm_active));
+ASSYM(PM_EPTGEN, offsetof(struct pmap, pm_eptgen));
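These ASSYM() entries exist so the assembly in vmx_support.S can address the
new vmxctx and pmap fields by the same offsets the C compiler chose. A minimal
illustration of the idea, using a mock struct rather than the real vmxctx and
plain offsetof() in place of the genassym machinery:

	#include <stddef.h>
	#include <stdio.h>

	struct mock_vmxctx {		/* stand-in, not the real struct vmxctx */
		long	eptgen;
		void	*pmap;
		unsigned long eptp;
	};

	int
	main(void)
	{
		/* genassym emits these as assembler constants (VMXCTX_EPTGEN, ...) */
		printf("VMXCTX_EPTGEN = %zu\n", offsetof(struct mock_vmxctx, eptgen));
		printf("VMXCTX_PMAP   = %zu\n", offsetof(struct mock_vmxctx, pmap));
		printf("VMXCTX_EPTP   = %zu\n", offsetof(struct mock_vmxctx, eptp));
		return (0);
	}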
Modified: projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmx_support.S
==============================================================================
--- projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmx_support.S Sun Jul 14 03:55:31 2013 (r253335)
+++ projects/bhyve_npt_pmap/sys/amd64/vmm/intel/vmx_support.S Sun Jul 14 04:42:35 2013 (r253336)
@@ -30,6 +30,12 @@
#include "vmx_assym.s"
+#ifdef SMP
+#define LK lock ;
+#else
+#define LK
+#endif
+
/*
* Disable interrupts before updating %rsp in VMX_CHECK_AST or
* VMX_GUEST_RESTORE.
@@ -96,6 +102,58 @@
2: movl $VM_SUCCESS,reg; \
3: movl reg,VMXCTX_LAUNCH_ERROR(%rsp)
+/*
+ * set or clear the appropriate bit in 'pm_active'
+ * %rdi = vmxctx
+ * %rax, %r11 = scratch registers
+ */
+#define VMX_SET_PM_ACTIVE \
+ movq VMXCTX_PMAP(%rdi), %r11; \
+ movl PCPU(CPUID), %eax; \
+ LK btsl %eax, PM_ACTIVE(%r11)
+
+#define VMX_CLEAR_PM_ACTIVE \
+ movq VMXCTX_PMAP(%rdi), %r11; \
+ movl PCPU(CPUID), %eax; \
+ LK btrl %eax, PM_ACTIVE(%r11)
+
+/*
+ * If 'vmxctx->eptgen' is not identical to 'pmap->pm_eptgen' then
+ * we must invalidate all mappings associated with this eptp.
+ *
+ * %rdi = vmxctx
+ * %rax, %r11 = scratch registers
+ */
+#define VMX_CHECK_EPTGEN \
+ movq VMXCTX_PMAP(%rdi), %r11; \
+ movq PM_EPTGEN(%r11), %rax; \
+ cmpq %rax, VMXCTX_EPTGEN(%rdi); \
+ je 9f; \
+ \
+ /* Refresh 'vmxctx->eptgen' */ \
+ movq %rax, VMXCTX_EPTGEN(%rdi); \
+ \
+ /* Setup the invept descriptor at the top of tmpstk */ \
+ mov %rdi, %r11; \
+ addq $VMXCTX_TMPSTKTOP, %r11; \
+ movq VMXCTX_EPTP(%rdi), %rax; \
+ movq %rax, -16(%r11); \
+ movq $0x0, -8(%r11); \
+ mov $0x1, %eax; /* Single context invalidate */ \
+ invept -16(%r11), %rax; \
+ \
+ /* Check for invept error */ \
+ VM_INSTRUCTION_ERROR(%eax); \
+ testl %eax, %eax; \
+ jz 9f; \
+ \
+ /* Return via vmx_setjmp with retval of VMX_RETURN_INVEPT */ \
+ movq $VMX_RETURN_INVEPT, %rsi; \
+ movq %rdi,%rsp; \
+ addq $VMXCTX_TMPSTKTOP, %rsp; \
+ callq vmx_return; \
+9: ;
+
.text
/*
* int vmx_setjmp(ctxp)
@@ -129,6 +187,9 @@ END(vmx_setjmp)
* Return to vmm context through vmx_setjmp() with a value of 'retval'.
*/
ENTRY(vmx_return)
+ /* The pmap is no longer active on the host cpu */
+ VMX_CLEAR_PM_ACTIVE
+
/* Restore host context. */
movq VMXCTX_HOST_R15(%rdi),%r15
movq VMXCTX_HOST_R14(%rdi),%r14
@@ -193,6 +254,10 @@ ENTRY(vmx_resume)
VMX_CHECK_AST
+ VMX_SET_PM_ACTIVE /* This vcpu is now active on the host cpu */
+
+ VMX_CHECK_EPTGEN /* Check if we have to invalidate TLB */
+
/*
* Restore guest state that is not automatically loaded from the vmcs.
*/
@@ -225,6 +290,10 @@ ENTRY(vmx_launch)
VMX_CHECK_AST
+ VMX_SET_PM_ACTIVE /* This vcpu is now active on the host cpu */
+
+ VMX_CHECK_EPTGEN /* Check if we have to invalidate TLB */
+
/*
* Restore guest state that is not automatically loaded from the vmcs.
*/
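The descriptor that VMX_CHECK_EPTGEN builds at tmpstk-16 follows the INVEPT
operand layout from the Intel SDM: the EPTP in the low quadword and a reserved
quadword that must be zero, with the register operand (here 1) selecting a
single-context invalidation. A small stand-alone illustration of that layout
(the EPTP value is made up):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/*
	 * Layout of the descriptor written at tmpstk-16 by VMX_CHECK_EPTGEN:
	 * EPTP followed by a reserved quadword that must be zero.
	 */
	struct invept_desc {
		uint64_t eptp;
		uint64_t reserved;	/* must be zero */
	};

	int
	main(void)
	{
		struct invept_desc desc;

		memset(&desc, 0, sizeof(desc));
		desc.eptp = 0x1aa01e;	/* illustrative EPTP value */

		/* The real code then executes: invept -16(%r11), %rax  (type 1). */
		printf("invept type=1 desc={eptp=%#lx, rsvd=%#lx}\n",
		    (unsigned long)desc.eptp, (unsigned long)desc.reserved);
		return (0);
	}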
Modified: projects/bhyve_npt_pmap/sys/amd64/vmm/vmm.c
==============================================================================
--- projects/bhyve_npt_pmap/sys/amd64/vmm/vmm.c Sun Jul 14 03:55:31 2013 (r253335)
+++ projects/bhyve_npt_pmap/sys/amd64/vmm/vmm.c Sun Jul 14 04:42:35 2013 (r253336)
@@ -126,8 +126,8 @@ static struct vmm_ops *ops;
#define VMM_CLEANUP() (ops != NULL ? (*ops->cleanup)() : 0)
#define VMINIT(vm, pmap) (ops != NULL ? (*ops->vminit)(vm, pmap): NULL)
-#define VMRUN(vmi, vcpu, rip) \
- (ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip) : ENXIO)
+#define VMRUN(vmi, vcpu, rip, pmap) \
+ (ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip, pmap) : ENXIO)
#define VMCLEANUP(vmi) (ops != NULL ? (*ops->vmcleanup)(vmi) : NULL)
#define VMSPACE_ALLOC(min, max) \
(ops != NULL ? (*ops->vmspace_alloc)(min, max) : NULL)
@@ -759,7 +759,6 @@ vm_handle_paging(struct vm *vm, int vcpu
return (0);
}
-
static int
vm_handle_inst_emul(struct vm *vm, int vcpuid, boolean_t *retu)
{
@@ -814,18 +813,23 @@ vm_run(struct vm *vm, struct vm_run *vmr
uint64_t tscval, rip;
struct vm_exit *vme;
boolean_t retu;
+ pmap_t pmap;
vcpuid = vmrun->cpuid;
if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
return (EINVAL);
+ pmap = vmspace_pmap(vm->vmspace);
vcpu = &vm->vcpu[vcpuid];
vme = &vcpu->exitinfo;
rip = vmrun->rip;
restart:
critical_enter();
+ KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
+ ("vm_run: absurd pm_active"));
+
tscval = rdtsc();
pcb = PCPU_GET(curpcb);
@@ -836,7 +840,7 @@ restart:
vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
vcpu->hostcpu = curcpu;
- error = VMRUN(vm->cookie, vcpuid, rip);
+ error = VMRUN(vm->cookie, vcpuid, rip, pmap);
vcpu->hostcpu = NOCPU;
vcpu_require_state(vm, vcpuid, VCPU_FROZEN);
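This commit covers only the VM-entry side of the protocol. The host-side
invalidation path is not part of this diff, but the intent stated in the log is
that it bumps pm_eptgen and uses pm_active to pick which cpus must participate.
A hypothetical sketch of that other half, with mock types in place of the
kernel's pmap and IPI machinery:

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-ins only; not the kernel's pmap. */
	struct mock_pmap {
		uint64_t pm_active;	/* cpus currently running a guest on this pmap */
		long	 pm_eptgen;	/* generation compared against vmxctx->eptgen */
	};

	static void
	mock_ipi_cpu(int cpu)
	{
		printf("IPI cpu %d to force an eptgen check\n", cpu);
	}

	/*
	 * Hypothetical host-side invalidation: bump the generation so every
	 * cached vmxctx->eptgen becomes stale, then notify only the cpus whose
	 * bits are set in pm_active, since only they can hold stale
	 * guest-physical TLB entries for this EPTP.
	 */
	static void
	mock_ept_invalidate(struct mock_pmap *pmap)
	{
		pmap->pm_eptgen++;
		for (int cpu = 0; cpu < 64; cpu++) {
			if (pmap->pm_active & (1UL << cpu))
				mock_ipi_cpu(cpu);
		}
	}

	int
	main(void)
	{
		struct mock_pmap pmap = { .pm_active = (1UL << 2) | (1UL << 5),
		    .pm_eptgen = 7 };

		mock_ept_invalidate(&pmap);
		return (0);
	}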