socsvn commit: r268995 - in soc2014/mihai/bhyve-icache-head/sys/amd64: include vmm
mihai at FreeBSD.org
Tue Jun 3 08:42:08 UTC 2014
Author: mihai
Date: Tue Jun 3 08:42:06 2014
New Revision: 268995
URL: http://svnweb.FreeBSD.org/socsvn/?view=rev&rev=268995
Log:
sys: amd64: vmm: vmm_instruction_cache.c: write-protect each page-table page used to translate the cached rip
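In outline: instead of write-protecting only the guest-physical page that holds the decoded instruction, vm_inst_cache_add() now walks the guest page tables (32-bit, PAE, and long mode) for the cached RIP and records the guest-physical base and size mask of every page-table page visited, plus the final instruction page. Each recorded page is write-protected with vm_map_protect(), so a guest write to any level of the translation, not just the instruction bytes, will fault and allow vm_inst_cache_delete() to invalidate the stale entry.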
Modified:
soc2014/mihai/bhyve-icache-head/sys/amd64/include/vmm_instruction_cache.h
soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm.c
soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm_instruction_cache.c
Modified: soc2014/mihai/bhyve-icache-head/sys/amd64/include/vmm_instruction_cache.h
==============================================================================
--- soc2014/mihai/bhyve-icache-head/sys/amd64/include/vmm_instruction_cache.h Tue Jun 3 07:42:51 2014 (r268994)
+++ soc2014/mihai/bhyve-icache-head/sys/amd64/include/vmm_instruction_cache.h Tue Jun 3 08:42:06 2014 (r268995)
@@ -29,11 +29,12 @@
struct vm;
struct vie;
+enum vie_paging_mode;
int vmm_inst_cache_init(void);
int vmm_inst_cache_cleanup(void);
int vm_inst_cache_add(struct vm *vm, uint64_t rip, uint64_t cr3,
- struct vie *vie);
+ enum vie_paging_mode paging_mode, struct vie *vie);
int vm_inst_cache_lookup(struct vm *vm, uint64_t rip, uint64_t cr3,
struct vie *vie);
int vm_inst_cache_delete(struct vm *vm, uint64_t rip, uint64_t cr3);
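For context, the header only names the paging-mode enum; its definition lives with the instruction emulation code. A sketch of the shape assumed here (the actual enumerators are declared in vmm_instruction_emul.h; the names below are the ones the walk in vmm_instruction_cache.c tests):

    enum vie_paging_mode {
            PAGING_MODE_FLAT,       /* paging disabled: nothing to protect */
            PAGING_MODE_32,         /* two-level, 4-byte PTEs */
            PAGING_MODE_PAE,        /* 32-byte PDPT plus two 64-bit levels */
            PAGING_MODE_64,         /* four-level long mode */
    };

Note that `enum vie_paging_mode;` as a forward declaration is a compiler extension: ISO C has no incomplete enum types, though GCC and Clang both accept the usage in a prototype like this.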
Modified: soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm.c
==============================================================================
--- soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm.c Tue Jun 3 07:42:51 2014 (r268994)
+++ soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm.c Tue Jun 3 08:42:06 2014 (r268995)
@@ -1142,7 +1142,7 @@
struct vcpu *vcpu;
struct vm_exit *vme;
int error, inst_length;
- uint64_t rip, gla, gpa, cr3, gpa_page;
+ uint64_t rip, gla, gpa, cr3;
enum vie_cpu_mode cpu_mode;
enum vie_paging_mode paging_mode;
mem_region_read_t mread;
@@ -1173,13 +1173,8 @@
if (vmm_decode_instruction(vm, vcpuid, gla, cpu_mode, vie) != 0)
return (EFAULT);
- /* Write protect the page associated with gpa */
- gpa_page = trunc_page(gpa);
- vm_map_protect(&vm->vmspace->vm_map, gpa_page , gpa_page + PAGE_SIZE,
- VM_PROT_READ | VM_PROT_EXECUTE, 0);
-
/* Cache decoded instruction for further use */
- vm_inst_cache_add(vm, rip, cr3, vie);
+ vm_inst_cache_add(vm, rip, cr3, paging_mode, vie);
}
/* return to userland unless this is an in-kernel emulated device */
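The exit path no longer write-protects just trunc_page(gpa). Protecting only the page with the instruction bytes misses the case where the guest remaps the RIP by editing a page-table entry at any level, leaving stale bytes in the cache. The protection therefore moves into vm_inst_cache_add(), and a later write fault on any recorded page invalidates the entry with a mask test of the following shape (illustrative fragment, matching the check in vm_inst_cache_delete() below):

    /* A recorded page at base pages[i] with pages_mask[i] covering its
     * size (0xfff for a 4 KB page): any guest write landing inside it
     * matches, and the cached entry must be dropped. */
    if ((fault_address & ~vie_cached->pages_mask[i]) == vie_cached->pages[i])
            /* unlink and free the cached entry */;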
Modified: soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm_instruction_cache.c
==============================================================================
--- soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm_instruction_cache.c Tue Jun 3 07:42:51 2014 (r268994)
+++ soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm_instruction_cache.c Tue Jun 3 08:42:06 2014 (r268995)
@@ -40,11 +40,15 @@
#include <vm/vm.h>
#include <vm/pmap.h>
+#include <vm/vm_map.h>
#include <machine/vmparam.h>
#include <machine/vmm.h>
#include <machine/vmm_instruction_cache.h>
+#define PT_NLEVELS 10
+#define GB (1024 * 1024 * 1024)
+
/* Instruction caching */
struct vie_cached;
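PT_NLEVELS bounds the pages[]/pages_mask[] arrays added below; a long-mode walk records at most five entries (four table levels plus the final instruction page), so 10 leaves ample headroom. GB is used by the walk to reject a PG_PS mapping whose page size would exceed 1 GB, i.e. a page-size bit at the 512 GB level, which x86 does not define as a valid superpage.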
@@ -56,6 +60,10 @@
struct vm *vm;
uint64_t rip;
uint64_t cr3;
+ /* pages to be write-protected */
+ uint64_t pages[PT_NLEVELS];
+ uint64_t pages_mask[PT_NLEVELS];
+ uint8_t pages_count;
/* value */
struct vie vie;
LIST_ENTRY(vie_cached) vie_link;
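Each cache entry now carries the guest-physical pages backing the RIP's translation: pages[i] is a page base and pages_mask[i] covers that page's size (0xfff for a 4 KB table page, 0x1f for the 32-byte PAE PDPT, larger for superpages). A worked example of the match performed on a later write fault, under illustrative values:

    #include <assert.h>
    #include <stdint.h>

    int
    main(void)
    {
            uint64_t base  = 0x7c3000;        /* recorded 4 KB page-table page */
            uint64_t mask  = (1 << 12) - 1;   /* 0xfff */
            uint64_t fault = 0x7c3a38;        /* guest write into that page */

            /* Clearing the offset bits recovers the page base. */
            assert((fault & ~mask) == base);
            return (0);
    }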
@@ -143,9 +151,145 @@
SYSCTL_PROC(_hw_vmm, OID_AUTO, instruction_cache, CTLTYPE_INT | CTLFLAG_RW,
0, 0, sysctl_vmm_cached_instruction, "I", "");
+static int
+inst_cache_protect(struct vm *vm, uint64_t rip, uint64_t ptpphys,
+ enum vie_paging_mode paging_mode,
+ struct vie_cached *vie_cached)
+{
+ int nlevels, ptpshift, ptpindex;
+ uint64_t *ptpbase, pte, pgsize;
+ uint32_t *ptpbase32, pte32, i;
+ void *cookie;
+ uint64_t gla;
+
+ gla = rip;
+
+ if (paging_mode == PAGING_MODE_FLAT) {
+ /* If paging is not active we cannot protect the caching */
+ goto error;
+ }
+
+ if (paging_mode == PAGING_MODE_32) {
+ nlevels = 2;
+ while (--nlevels >= 0) {
+ /* Zero out the lower 12 bits. */
+ ptpphys &= ~0xfff;
+
+ vie_cached->pages_mask[vie_cached->pages_count] = (1 << 12) - 1;
+ vie_cached->pages[vie_cached->pages_count++] = ptpphys;
+
+ ptpbase32 = vm_gpa_hold(vm, ptpphys, PAGE_SIZE,
+ VM_PROT_READ, &cookie);
+
+ if (ptpbase32 == NULL)
+ goto error;
+
+ ptpshift = PAGE_SHIFT + nlevels * 10;
+ ptpindex = (gla >> ptpshift) & 0x3FF;
+ pgsize = 1UL << ptpshift;
+
+ pte32 = ptpbase32[ptpindex];
+
+ vm_gpa_release(cookie);
+
+ if ((pte32 & PG_V) == 0)
+ goto error;
+
+ if (pte32 & PG_PS)
+ break;
+
+ ptpphys = pte32;
+ }
+
+ /* Zero out the lower 'ptpshift' bits */
+ pte32 >>= ptpshift; pte32 <<= ptpshift;
+
+ vie_cached->pages_mask[vie_cached->pages_count] = (1 << ptpshift) - 1;
+ vie_cached->pages[vie_cached->pages_count++] = pte32;
+
+ goto protect;
+ }
+
+ if (paging_mode == PAGING_MODE_PAE) {
+ /* Zero out the lower 5 bits and the upper 12 bits */
+ ptpphys >>= 5; ptpphys <<= 17; ptpphys >>= 12;
+
+ vie_cached->pages_mask[vie_cached->pages_count] = (1 << 5) - 1;
+ vie_cached->pages[vie_cached->pages_count++] = ptpphys;
+
+ ptpbase = vm_gpa_hold(vm, ptpphys, sizeof(*ptpbase) * 4,
+ VM_PROT_READ, &cookie);
+ if (ptpbase == NULL)
+ goto error;
+
+ ptpindex = (gla >> 30) & 0x3;
+
+ pte = ptpbase[ptpindex];
+
+ vm_gpa_release(cookie);
+
+ if ((pte & PG_V) == 0)
+ goto error;
+
+ ptpphys = pte;
+
+ nlevels = 2;
+ } else
+ nlevels = 4;
+ while (--nlevels >= 0) {
+ /* Zero out the lower 12 bits and the upper 12 bits */
+ ptpphys >>= 12; ptpphys <<= 24; ptpphys >>= 12;
+ vie_cached->pages_mask[vie_cached->pages_count] = (1 << 12) - 1;
+ vie_cached->pages[vie_cached->pages_count++] = ptpphys;
+
+ ptpbase = vm_gpa_hold(vm, ptpphys, PAGE_SIZE, VM_PROT_READ,
+ &cookie);
+ if (ptpbase == NULL)
+ goto error;
+
+ ptpshift = PAGE_SHIFT + nlevels * 9;
+ ptpindex = (gla >> ptpshift) & 0x1FF;
+ pgsize = 1UL << ptpshift;
+
+ pte = ptpbase[ptpindex];
+
+ vm_gpa_release(cookie);
+
+ if ((pte & PG_V) == 0)
+ goto error;
+
+ if (pte & PG_PS) {
+ if (pgsize > 1 * GB)
+ goto error;
+ else
+ break;
+ }
+
+ ptpphys = pte;
+ }
+
+ /* Zero out the lower 'ptpshift' bits and the upper 12 bits */
+ pte >>= ptpshift; pte <<= (ptpshift + 12); pte >>= 12;
+
+ vie_cached->pages_mask[vie_cached->pages_count] = (1 << ptpshift) - 1;
+ vie_cached->pages[vie_cached->pages_count++] = ptpphys;
+
+protect:
+ i=0;
+ for (i = 0; i < vie_cached->pages_count; i++) {
+ vm_map_protect(&(vm_get_vmspace(vm)->vm_map), vie_cached->pages[i],
+ vie_cached->pages[i] + vie_cached->pages_mask[i] + 1,
+ VM_PROT_READ | VM_PROT_EXECUTE, 0);
+ }
+ return (0);
+
+error:
+ return (-1);
+}
int
vm_inst_cache_add(struct vm *vm, uint64_t rip, uint64_t cr3,
+ enum vie_paging_mode paging_mode,
struct vie *vie)
{
struct vie_cached *vie_cached;
@@ -162,11 +306,17 @@
vie_cached->cr3 = cr3;
bcopy(vie, &vie_cached->vie, sizeof(struct vie));
- hash = jenkins_hash(&vm, sizeof(struct vm *), 0) & VIE_CACHE_HASH_MASK;
+ if (inst_cache_protect(vm, rip, cr3, paging_mode, vie_cached)) {
+ free(vie_cached, M_VIECACHED);
+ return -1;
+ }
+
+ hash = jenkins_hash(&vm, sizeof(struct vm *), 0) & VIE_CACHE_HASH_MASK;
rm_wlock(&vie_cached_hash[hash].vie_cached_lock);
LIST_INSERT_HEAD(&vie_cached_hash[hash].vie_cached_head, vie_cached, vie_link);
rm_wunlock(&vie_cached_hash[hash].vie_cached_lock);
+
return (0);
}
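For reference, the long-mode portion of the walk above derives each table index from the guest-linear address in 9-bit strides; a standalone sketch of the arithmetic, mirroring ptpshift/ptpindex in inst_cache_protect() (PAGE_SHIFT is 12):

    /* Illustrative only: 3 = PML4, 2 = PDPT, 1 = PD, 0 = PT. */
    static void
    walk_indices(uint64_t gla)
    {
            int level;

            for (level = 3; level >= 0; level--) {
                    int shift = 12 + level * 9;         /* 39, 30, 21, 12 */
                    int index = (gla >> shift) & 0x1ff; /* 512 entries per table */
                    /* ptpbase[index] would be the entry followed here. */
                    (void)index;
            }
    }

If the walk cannot complete (paging disabled, a non-present entry, or a malformed superpage), inst_cache_protect() returns -1 and vm_inst_cache_add() frees the entry instead of caching it.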
@@ -201,17 +351,15 @@
}
int
-vm_inst_cache_delete(struct vm *vm, uint64_t rip, uint64_t cr3)
+vm_inst_cache_delete(struct vm *vm, uint64_t fault_address, uint64_t cr3)
{
struct vie_cached *vie_cached;
- int hash;
- uint64_t rip_page;
+ int hash, i;
/* Check to see if caching is enabled */
if (!vmm_cached_instruction_enable)
return (0);
- rip_page = trunc_page(rip);
hash = jenkins_hash(&vm, sizeof(struct vm *), 0) & VIE_CACHE_HASH_MASK;
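The delete path now takes the guest-physical address of the faulting write rather than a RIP: instead of comparing trunc_page(rip), it scans every entry for the matching vm/cr3 pair and drops those whose recorded translation pages contain the fault address, as the next hunk shows.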
@@ -219,16 +367,24 @@
LIST_FOREACH(vie_cached, &vie_cached_hash[hash].vie_cached_head, vie_link) {
if (vie_cached->vm == vm &&
- trunc_page(vie_cached->rip) == rip_page &&
vie_cached->cr3 == cr3)
{
- /* Remove the RIP found and continue searching */
- LIST_REMOVE(vie_cached, vie_link);
- /* Free the removed node */
- free(vie_cached, M_VIECACHED);
+ for (i = 0; i < vie_cached->pages_count; i++)
+ {
+ if ((fault_address & ~(vie_cached->pages_mask[i]))
+ == vie_cached->pages[i])
+ {
+ /* Remove the RIP found and continue searching */
+ LIST_REMOVE(vie_cached, vie_link);
+ /* Free the removed node */
+ free(vie_cached, M_VIECACHED);
+ break;
+ }
+ }
}
}
rm_wunlock(&vie_cached_hash[hash].vie_cached_lock);
+
return (0);
}
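One caveat in the loop above: the entry is freed inside LIST_FOREACH, and the macro then advances by reading the freed node's link field, a use-after-free. queue(3) provides LIST_FOREACH_SAFE for exactly this pattern; a sketch of the safe form, assuming a hypothetical inst_cache_page_match() helper that wraps the mask comparison over pages[]/pages_mask[]:

    struct vie_cached *vie_cached, *tmp;

    LIST_FOREACH_SAFE(vie_cached, &vie_cached_hash[hash].vie_cached_head,
        vie_link, tmp) {
            if (vie_cached->vm == vm && vie_cached->cr3 == cr3 &&
                inst_cache_page_match(vie_cached, fault_address)) {
                    LIST_REMOVE(vie_cached, vie_link);
                    free(vie_cached, M_VIECACHED);
            }
    }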