socsvn commit: r287668 - in soc2015/mihai/bhyve-on-arm-head/sys: arm/arm arm/vmm modules/vmm-arm
mihai at FreeBSD.org
mihai at FreeBSD.org
Sat Jun 27 13:50:14 UTC 2015
Author: mihai
Date: Sat Jun 27 13:50:08 2015
New Revision: 287668
URL: http://svnweb.FreeBSD.org/socsvn/?view=rev&rev=287668
Log:
soc2015: mihai: bhyve-on-arm: sys: arm: vmm: added support for VMM init and clean-up. Also enabled HYP MMU
Added:
soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/hyp.h
Modified:
soc2015/mihai/bhyve-on-arm-head/sys/arm/arm/hypervisor-stub.S
soc2015/mihai/bhyve-on-arm-head/sys/arm/arm/locore-v6.S
soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/arm.c
soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/arm.h
soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/hyp.S
soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/mmu.c
soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/mmu.h
soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/vmm.c
soc2015/mihai/bhyve-on-arm-head/sys/modules/vmm-arm/Makefile
Modified: soc2015/mihai/bhyve-on-arm-head/sys/arm/arm/hypervisor-stub.S
==============================================================================
--- soc2015/mihai/bhyve-on-arm-head/sys/arm/arm/hypervisor-stub.S Sat Jun 27 12:37:09 2015 (r287667)
+++ soc2015/mihai/bhyve-on-arm-head/sys/arm/arm/hypervisor-stub.S Sat Jun 27 13:50:08 2015 (r287668)
@@ -6,7 +6,7 @@
#include <machine/sysreg.h>
#include <machine/cpuconf.h>
-ASENTRY_NP(_hypervisor_stub_vect_install)
+ASENTRY_NP(hypervisor_stub_vect_install)
/* If we are not in SVC mode than return */
mrs r0, cpsr
@@ -15,7 +15,7 @@
movne pc, lr
/* Install hypervisor stub vectors. */
- ldr r0, =_hypervisor_stub_vect
+ ldr r0, =hypervisor_stub_vect
mcr p15, 4, r0, c12, c0, 0 @ set HVBAR
/* Disable all the traps in the hypervisor. */
@@ -31,9 +31,9 @@
mcr p15, 4, r0, c1, c1, 1 @ HDCR
mov pc, lr
-END(_hypervisor_stub_vect_install)
+END(hypervisor_stub_vect_install)
-ASENTRY_NP(_hypervisor_stub_trap)
+ASENTRY_NP(hypervisor_stub_trap)
/*
* If the first parameter is -1 than return the
* exception vector (HVBAR), otherwise set it to
@@ -43,15 +43,15 @@
mrceq p15, 4, r0, c12, c0, 0 @ get HVBAR
mcrne p15, 4, r0, c12, c0, 0 @ set HVBAR
eret
-END(_hypervisor_stub_trap)
+END(hypervisor_stub_trap)
-
-_C_LABEL(_hypervisor_stub_vect):
+ .globl hypervisor_stub_vect
+_C_LABEL(hypervisor_stub_vect):
.word 0 /* Reset */
.word 0 /* undev */
.word 0 /* SMC */
.word 0 /* PABT */
.word 0 /* DABT */
- b _hypervisor_stub_trap /* HYP-Mode */
+ b hypervisor_stub_trap /* HYP-Mode */
.word 0 /* FIQ */
.word 0 /* IRQ */
Modified: soc2015/mihai/bhyve-on-arm-head/sys/arm/arm/locore-v6.S
==============================================================================
--- soc2015/mihai/bhyve-on-arm-head/sys/arm/arm/locore-v6.S Sat Jun 27 12:37:09 2015 (r287667)
+++ soc2015/mihai/bhyve-on-arm-head/sys/arm/arm/locore-v6.S Sat Jun 27 13:50:08 2015 (r287668)
@@ -69,7 +69,7 @@
STOP_UNWINDING /* Can't unwind into the bootloader! */
# If HYP-MODE is active, install an exception vector stub
- bl _hypervisor_stub_vect_install
+ bl hypervisor_stub_vect_install
# Return to SVC
mrs r0, cpsr
Modified: soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/arm.c
==============================================================================
--- soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/arm.c Sat Jun 27 12:37:09 2015 (r287667)
+++ soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/arm.c Sat Jun 27 13:50:08 2015 (r287668)
@@ -17,50 +17,67 @@
#include "mmu.h"
#include "arm.h"
-static MALLOC_DEFINE(M_HYP, "ARM VMM HYP", "ARM VMM HYP");
+MALLOC_DEFINE(M_HYP, "ARM VMM HYP", "ARM VMM HYP");
-lpae_pd_entry_t hyp_l1pd[2 * LPAE_L1_ENTRIES];
-extern void _hypervisor_stub_trap(void *vect_addr);
-extern void* _init_hyp_vector;
+extern char init_hyp_vector[];
+extern char hyp_vector[];
+extern char hyp_code_start[];
+extern char hypervisor_stub_vect[];
+lpae_pd_entry_t *hyp_l1pd;
char *stack;
-char *hyp_code;
static int
arm_init(int ipinum)
{
char *stack_top;
- uint64_t *phys_hyp_l1pd;
+ lpae_vm_paddr_t phys_hyp_l1pd;
- stack = malloc(PAGE_SIZE, M_HYP);
+ stack = malloc(PAGE_SIZE, M_HYP, M_WAITOK | M_ZERO);
stack_top = stack + PAGE_SIZE;
- lpae_vmmmap_set(NULL, stack, ptophys(stack), PAGE_SIZE,
+ hyp_l1pd = malloc(2 * LPAE_L1_ENTRIES * sizeof(lpae_pd_entry_t), M_HYP, M_WAITOK | M_ZERO);
+
+ lpae_vmmmap_set(NULL,
+ (lpae_vm_vaddr_t)stack,
+ (lpae_vm_paddr_t)vtophys(stack),
+ PAGE_SIZE,
VM_PROT_READ | VM_PROT_WRITE);
- printf("%s _hyp_code_start: %p\n", __func__, (void*) _hyp_code_start);
+ printf("%s hyp_code_start: %p, phys_hyp_code_start: %p\n", __func__, (void*) hyp_code_start, (void*)vtophys(hyp_code_start));
- /* Create two mappings:
+ /*
+ * Create two mappings:
* - one identity - VA == PA
* - one normal mappings to HYP pagetable
*/
- lpae_vmmmap_set(NULL, hyp_code, ptophys(_hyp_code_start), PAGE_SIZE,
+ lpae_vmmmap_set(NULL,
+ (lpae_vm_vaddr_t)hyp_code_start,
+ (lpae_vm_paddr_t)vtophys(hyp_code_start),
+ PAGE_SIZE,
VM_PROT_READ | VM_PROT_WRITE);
- lpae_vmmmap_set(NULL, ptophys(_hyp_code_start), ptophys(_hyp_code_start), PAGE_SIZE,
+
+ lpae_vmmmap_set(NULL,
+ (lpae_vm_vaddr_t)vtophys(hyp_code_start),
+ (lpae_vm_paddr_t)vtophys(hyp_code_start),
+ PAGE_SIZE,
VM_PROT_READ | VM_PROT_WRITE);
- /*
- * Flush all caches to be sure we have the
- * code and tables in physical memory
+ /*
+	 * Flush all caches to be sure we have the tables in physical memory
*/
cpu_idcache_wbinv_all();
cpu_l2cache_wbinv_all();
+ dump_lpae_mapping(NULL);
+
/*
* Install the temporary vector from which
* will do the initialization part of VMM
*/
- _hypervisor_stub_trap(vtophys(_init_hyp_vector));
+ printf("%s init_hyp_vector: %p\n", __func__, (void*) &init_hyp_vector[0]);
+
+ vmm_call_hyp((void *)vtophys(&init_hyp_vector[0]));
/*
* Special init call to activate the MMU
@@ -70,23 +87,42 @@
* - r2 - lower 32 bits for the HTTBR
* - r3 - upper 32 bits for the HTTBR
*/
- phys_hyp_l1pd = vtophys(&hyp_l1pd[0]);
- vmm_call_hyp(NULL, stack_top, LOW(phys_hyp_l1pd), HIGH(phys_hyp_l1pd);
+
+ phys_hyp_l1pd = (lpae_vm_paddr_t)vtophys(hyp_l1pd);
+
+ printf("%s hyp_l1pd: %p, phys_hyp_l1pd %p\n", __func__, (void*) hyp_l1pd, (void*)phys_hyp_l1pd);
+ vmm_call_hyp(&hyp_vector[0], stack_top, LOW(phys_hyp_l1pd), HIGH(phys_hyp_l1pd));
return 0;
}
-static void
-arm_cleanup(void *arg)
+static int
+arm_cleanup(void)
{
+ printf("%s before vmm_call_hyp\n", __func__);
+
+ vmm_call_hyp((void *) vtophys(vmm_stub_install), (void *)vtophys(&hypervisor_stub_vect[0]));
+ printf("%s after vmm_call_hyp\n", __func__);
+
+ printf("%s before freestack\n", __func__);
+
+ free(stack, M_HYP);
+ printf("%s before lpae_vmcleanup\n", __func__);
+
lpae_vmcleanup(NULL);
+ printf("%s before vmm_call_hyp\n", __func__);
+
+ free(hyp_l1pd, M_HYP);
+
+
+ return 0;
}
static void
arm_restore(void)
{
- arm_enable(NULL);
+ ;
}
static void *
Modified: soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/arm.h
==============================================================================
--- soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/arm.h Sat Jun 27 12:37:09 2015 (r287667)
+++ soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/arm.h Sat Jun 27 13:50:08 2015 (r287668)
@@ -7,5 +7,7 @@
uint64_t vmm_call_hyp(void *hyp_func_addr, ...);
-#define HIGH(x) (x >> 32)
-#define LOW(x) (x & ((1 << 32) - 1))
+void vmm_stub_install(void *hypervisor_stub_vect);
+
+#define LOW(x) (x & 0xFFFFFFFF)
+#define HIGH(x) LOW(x >> 32)
Modified: soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/hyp.S
==============================================================================
--- soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/hyp.S Sat Jun 27 12:37:09 2015 (r287667)
+++ soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/hyp.S Sat Jun 27 13:50:08 2015 (r287668)
@@ -6,28 +6,65 @@
#include <machine/sysreg.h>
#include <machine/cpuconf.h>
-#define SYS_WRITE0 4
+#include "hyp.h"
+
.text
- .globl _hyp_code_start
- .globl _hyp_code_end
- .globl _hyp_vector
- .globl _init_hyp_vector
+ .globl hyp_code_start
+ .globl hyp_code_end
+ .globl hyp_vector
+ .globl init_hyp_vector
.p2align 12
-_hyp_code_start:
+hyp_code_start:
__semi_call:
svc 0x123456
mov pc, lr
-ASENTRY_NP(kvm_call_hyp)
+ENTRY(vmm_call_hyp)
hvc #0
bx lr
-END(kvm_call_hyp)
+END(vmm_call_hyp)
+
+/*
+ * void vmm_stub_install(void *hypervisor_stub_vect);
+ * - r0 - the pointer to the stub vector
+ */
+ENTRY(vmm_stub_install)
+
+ /* Install hypervisor stub vectors. */
+ mcr p15, 4, r0, c12, c0, 0 @ set HVBAR
+
+ /* Disable all the traps in the hypervisor. */
+ mov r0, #0
+ mcr p15, 4, r0, c1, c1, 0 @ HCR
+ mcr p15, 4, r0, c1, c1, 2 @ HCPTR
+ mcr p15, 4, r0, c1, c1, 3 @ HSTR
+ mcr p15, 4, r0, c1, c0, 0 @ HSCTLR
+
+ /* Don't disable access to perf-mon from PL0,1 */
+ mrc p15, 4, r0, c1, c1, 1 @ HDCR
+ and r0, #0x1f @ Preserve HPMN
+ mcr p15, 4, r0, c1, c1, 1 @ HDCR
+
+ eret
+END(vmm_stub_install)
+
+ENTRY(vmm_set_get_hvbar)
+ /*
+	 * If the first parameter is -1 then return the
+ * exception vector (HVBAR), otherwise set it to
+ * the value of it.
+ */
+ cmp r0, #-1
+ mrceq p15, 4, r0, c12, c0, 0 @ get HVBAR
+ mcrne p15, 4, r0, c12, c0, 0 @ set HVBAR
+ bx lr
+END(vmm_set_get_hvbar)
.align 5
-_init_hyp_vector:
+init_hyp_vector:
.word 0 /* Reset */
.word 0 /* undev */
.word 0 /* SVC */
@@ -38,29 +75,56 @@
.word 0 /* IRQ */
hyp_init_hvc:
- mov sp, r1 /* r1 contains the stack pointer */
-
- /* Find the offset between the two vectors */
- adr r0, _init_hyp_vector
- adr r1, _hyp_vector
- sub r1, r1, r0
-
- mrc p15, 4, r0, c12, c0, 0 @ get current HVBAR
- add r0, r0, r1 @ find the address of the _hyp_vector
- mcr p15, 4, r0, c12, c0, 0 @ set HVBAR to the new vector
-
- mcrr p15, 4, r2, r3, c2 @ set the HTTBR (r2 is the low word, r3 is the low word)
+ mcr p15, 4, r0, c12, c0, 0 @ set HVBAR to the new vector
+ mov sp, r1 @ set SP. r1 contains the stack pointer
+	mcrr p15, 4, r2, r3, c2	@ set the HTTBR (r2 is the low word, r3 is the high word)
isb
- @ Flush the TLB of this page
- adr r1, _hyp_code_start
- mcr p15, 4, r0, c8, c7, 0 @ TLBIALLH
+ @ Set HTCR.T0SZ=0 so x=5 (ARM man: B4.1.76)
+ @ Set HTCR.ORGN0/.IRGN0/.SH0 to 0 to disable cacheability and shareability
+ @ HTCR_MASK contains all the above bits
+ mrc p15, 4, r0, c2, c0, 2 @ HTCR
+ ldr r1,=HTCR_MASK
+ bic r0, r0, r1
+ mcr p15, 4, r0, c2, c0, 2 @ HTCR
+
+ @ TODO configure the VTCR for supporting 32/40 bit IPA
+/*
+ mrc p15, 4, r1, c2, c1, 2 @ VTCR
+ ldr r2, =VTCR_MASK
+ bic r1, r1, r2
+ bic r0, r0, #(~VTCR_HTCR_SH) @ clear non-reusable HTCR bits
+ orr r1, r0, r1
+ orr r1, r1, #(VTCR_SL_L1 | VTCR_T0SZ | KVM_VTCR_S)
+ mcr p15, 4, r1, c2, c1, 2 @ VTCR
+*/
+ @ Set the HMAIR0/1 (same as MAIR0/1) registers for AttrIndx[2:0]
+ ldr r0, =HMAIR0
+ mcr p15, 4, r0, c10, c2, 0
+ ldr r0, =HMAIR1
+ mcr p15, 4, r0, c10, c2, 1
+
+ @ Flush the TLB entries from Hyp-Mode
+ mcr p15, 4, r0, c8, c7, 0 @ TLBIALLH
dsb ish
+ mrc p15, 4, r0, c1, c0, 0 @ Read current HSCTLR
+ ldr r2, =HSCTLR_MASK
+ bic r0, r0, r2
+
+ mrc p15, 0, r1, c1, c0, 0 @ Read the current SCTLR
+ ldr r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
+ and r1, r1, r2
+ ldr r2, =(HSCTLR_M | HSCTLR_A)
+ orr r1, r1, r2
+ orr r0, r0, r1
+ isb
+ mcr p15, 4, r0, c1, c0, 0 @ Set the new HSCTLR
+
eret
.align 5
-_hyp_vector:
+hyp_vector:
b hyp_reset /* Reset */
b hyp_undef /* undef */
b hyp_svc /* SVC */
@@ -69,8 +133,8 @@
b hyp_hvc /* HYP-Mode */
b hyp_fiq /* FIQ */
b hyp_irq /* IRQ */
-
.align
+
hyp_reset:
b loop
@@ -81,6 +145,8 @@
bl __semi_call
mrs r0, ELR_hyp
b loop
+und_die_str:
+ .ascii "unexpected undefined exception in Hyp mode at r0: %#08x\n"
.align
hyp_svc:
@@ -89,16 +155,23 @@
bl __semi_call
mrs r0, ELR_hyp
b loop
- .align
+svc_die_str:
+ .ascii "unexpected HVC/SVC trap in Hyp mode at r0: %#08x\n"
+
+ .align
hyp_pabt:
mov r0, #SYS_WRITE0
adr r1, pabt_die_str
+ mov r2, lr
bl __semi_call
- mrs r0, ELR_hyp
- mrc p15, 4, r1, c5, c2, 0 @ HSR (syndrome register)
- mrc p15, 4, r2, c6, c0, 2 @ HIFAR (hyp instruction fault address)
- b loop
+// mrs r0, ELR_hyp
+// mrc p15, 4, r1, c5, c2, 0 @ HSR (syndrome register)
+// mrc p15, 4, r2, c6, c0, 2 @ HIFAR (hyp instruction fault address)
+ mov pc, r2
+# b loop
+pabt_die_str:
+ .ascii "unexpected prefetch abort in Hyp mode at r0: %#08x\n"
.align
hyp_dabt:
@@ -106,13 +179,44 @@
adr r1, dabt_die_str
bl __semi_call
mrs r0, ELR_hyp
- mrc p15, 4, r1, c5, c2, 0 @ HSR (syndrome register)
- mrc p15, 4, r2, c6, c0, 0 @ HDFAR (hyp data fault address)
+ mrc p15, 4, r1, c5, c2, 0 @ HSR (syndrome register)
+ mrc p15, 4, r2, c6, c0, 0 @ HDFAR (hyp data fault address)
b loop
+dabt_die_str:
+ .ascii "unexpected data abort in Hyp mode at r0: %#08x\n"
.align
hyp_hvc:
-
+ push {r0, r1, r2} @ Save registers in order to use them
+ mrc p15, 4, r1, c5, c2, 0 @ Check HSR for explicit HVC call
+ lsr r0, r1, #HSR_EC_SHIFT
+ cmp r0, #HSR_EC_HVC
+ bne guest_trap
+
+ mrrc p15, 6, r0, r1, c2 @ Check VMID=0 to be sure that host called HVC
+ lsr r1, r1, #VTTBR_VMID_SHIFT
+ and r1, r1, #VTTBR_VMID_MASK
+ cmp r1, #0
+ bne guest_trap
+
+host_called_hyp:
+ pop {r0, r1, r2} @ Restore registers
+ push {lr}
+ mrs lr, SPSR
+ push {lr}
+
+ mov lr, r0
+ mov r0, r1
+ mov r1, r2
+ mov r2, r3
+ blx lr
+ pop {lr}
+ msr SPSR_csxf, lr
+ pop {lr}
+ eret
+
+guest_trap:
+ b loop
.align
hyp_fiq:
b loop
@@ -123,13 +227,4 @@
loop:
b loop
-und_die_str:
- .ascii "unexpected undefined exception in Hyp mode at r0: %#08x\n"
-pabt_die_str:
- .ascii "unexpected prefetch abort in Hyp mode at r0: %#08x\n"
-dabt_die_str:
- .ascii "unexpected data abort in Hyp mode at r0: %#08x\n"
-svc_die_str:
- .ascii "unexpected HVC/SVC trap in Hyp mode at r0: %#08x\n"
-
-_hyp_code_end:
+hyp_code_end:
Added: soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/hyp.h
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/hyp.h Sat Jun 27 13:50:08 2015 (r287668)
@@ -0,0 +1,73 @@
+
+#define SYS_WRITE0 4
+
+#define HSR_EC_SHIFT 26
+#define HSR_EC_HVC 0x12
+#define VTTBR_VMID_SHIFT 16
+#define VTTBR_VMID_MASK 0xff
+
+/* Hyp System Control Register (HSCTLR) bits */
+#define HSCTLR_TE (1 << 30)
+#define HSCTLR_EE (1 << 25)
+#define HSCTLR_FI (1 << 21)
+#define HSCTLR_WXN (1 << 19)
+#define HSCTLR_I (1 << 12)
+#define HSCTLR_C (1 << 2)
+#define HSCTLR_A (1 << 1)
+#define HSCTLR_M (1 << 0)
+#define HSCTLR_MASK (HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I | HSCTLR_WXN | HSCTLR_FI | HSCTLR_EE | HSCTLR_TE)
+
+/* TTBCR and HTCR Registers bits */
+#define TTBCR_EAE (1 << 31)
+#define TTBCR_IMP (1 << 30)
+#define TTBCR_SH1 (3 << 28)
+#define TTBCR_ORGN1 (3 << 26)
+#define TTBCR_IRGN1 (3 << 24)
+#define TTBCR_EPD1 (1 << 23)
+#define TTBCR_A1 (1 << 22)
+#define TTBCR_T1SZ (7 << 16)
+#define TTBCR_SH0 (3 << 12)
+#define TTBCR_ORGN0 (3 << 10)
+#define TTBCR_IRGN0 (3 << 8)
+#define TTBCR_EPD0 (1 << 7)
+#define TTBCR_T0SZ (7 << 0)
+#define HTCR_MASK (TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0)
+
+/* Virtualization Translation Control Register (VTCR) bits */
+#define VTCR_SH0 (3 << 12)
+#define VTCR_ORGN0 (3 << 10)
+#define VTCR_IRGN0 (3 << 8)
+#define VTCR_SL0 (3 << 6)
+#define VTCR_S (1 << 4)
+#define VTCR_T0SZ (0xf)
+#define VTCR_MASK (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0 | VTCR_SL0 | VTCR_S | VTCR_T0SZ)
+#define VTCR_HTCR_SH (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0)
+#define VTCR_SL_L2 (0 << 6) /* Starting-level: 2 */
+#define VTCR_SL_L1 (1 << 6) /* Starting-level: 1 */
+
+
+/*
+ * Memory region attributes for LPAE (defined in pgtable-3level.h):
+ *
+ * n = AttrIndx[2:0]
+ *
+ * n MAIR
+ * UNCACHED 000 00000000
+ * BUFFERABLE 001 01000100
+ * DEV_WC 001 01000100
+ * WRITETHROUGH 010 10101010
+ * WRITEBACK 011 11101110
+ * DEV_CACHED 011 11101110
+ * DEV_SHARED 100 00000100
+ * DEV_NONSHARED 100 00000100
+ * unused 101
+ * unused 110
+ * WRITEALLOC 111 11111111
+ */
+#define MAIR0 0xeeaa4400
+#define MAIR1 0xff000004
+#define HMAIR0 MAIR0
+#define HMAIR1 MAIR1
+
+
+
Modified: soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/mmu.c
==============================================================================
--- soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/mmu.c Sat Jun 27 12:37:09 2015 (r287667)
+++ soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/mmu.c Sat Jun 27 13:50:08 2015 (r287668)
@@ -19,7 +19,7 @@
#include "arm.h"
MALLOC_DECLARE(M_HYP);
-extern lpae_pd_entry_t hyp_l1pd[];
+extern lpae_pd_entry_t *hyp_l1pd;
/*
* create_lpae_mapping
* - l1pd - the level 1 address of the PD (NULL for the HYP mode PD)
@@ -57,12 +57,12 @@
l3_index = (virt_start >> LPAE_L3_SHIFT) & LPAE_L3_INDEX_MASK;
if ((virt_start & LPAE_L1_B_ADDR_MASK) == virt_start) {
- if (len > LPAE_L1_SIZE) {
+ if (len >= LPAE_L1_SIZE) {
mapped_size = LPAE_L1_SIZE;
}
}
if(!mapped_size && (virt_start & LPAE_L2_B_ADDR_MASK) == virt_start) {
- if (len > LPAE_L2_SIZE) {
+ if (len >= LPAE_L2_SIZE) {
mapped_size = LPAE_L2_SIZE;
}
}
@@ -119,7 +119,11 @@
set_prot:
if (prot != VM_PROT_NONE) {
*pd = phys_start;
- *pd |= LPAE_TYPE_BLOCK;
+ *pd |= LPAE_AF;
+ if (mapped_size == LPAE_L3_SIZE)
+ *pd |= LPAE_L3_TYPE_BLOCK;
+ else
+ *pd |= LPAE_L12_TYPE_BLOCK;
if (is_hyp_pd) { /* PL-2 stage-1 table */
if (prot & (VM_PROT_READ | VM_PROT_WRITE))
@@ -139,6 +143,41 @@
return mapped_size;
}
+void dump_lpae_mapping(void *arg)
+{
+ int i, j, k;
+ struct hyp *vm_hyp;
+ lpae_pd_entry_t *l1pd, *l1pd_shadow, *l2pd, *l2pd_shadow, *l3pd;
+
+ vm_hyp = arg;
+
+ if (arg)
+ l1pd = &vm_hyp->l1pd[0];
+ else
+ l1pd = &hyp_l1pd[0];
+
+ l1pd_shadow = &l1pd[LPAE_L1_ENTRIES];
+
+ printf("l1pd = %x\n", vtophys(l1pd));
+
+ for (i = 0; i < LPAE_L1_ENTRIES; i++) {
+ if(l1pd_shadow[i]) {
+ printf("\t %d: l2pd = %llx\n", i, l1pd[i]);
+ l2pd = (lpae_pd_entry_t *) l1pd_shadow[i];
+ l2pd_shadow = &l2pd[LPAE_L2_ENTRIES];
+ for (j = 0; j < LPAE_L2_ENTRIES; j++) {
+ if (l2pd_shadow[j]) {
+ printf("\t\t %d: l3pd = %llx\n", j, l2pd[j]);
+ l3pd = (lpae_pd_entry_t *) l2pd_shadow[j];
+ for (k = 0; k < LPAE_L3_ENTRIES; k++) {
+ if (l3pd[k])
+ printf("\t\t\t %d: l3_entry = %llx\n", k, l3pd[k]);
+ }
+ }
+ }
+ }
+ }
+}
int lpae_vmmmap_set(void *arg,
lpae_vm_vaddr_t virt_start,
lpae_vm_paddr_t phys_start,
Modified: soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/mmu.h
==============================================================================
--- soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/mmu.h Sat Jun 27 12:37:09 2015 (r287667)
+++ soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/mmu.h Sat Jun 27 13:50:08 2015 (r287668)
@@ -14,6 +14,7 @@
vm_prot_t prot);
void lpae_vmcleanup(void *arg);
+void dump_lpae_mapping(void *arg);
#define LPAE_NLEVELS 3
@@ -42,13 +43,17 @@
#define LPAE_L3_SIZE (1 << 12)
#define LPAE_L3_INDEX_MASK 0x1FF
-#define LPAE_TYPE_LINK 0x03
-#define LPAE_TYPE_BLOCK 0x01
-#define LPAE_TYPE_MASK 0x03 /* mask of type bits */
+#define LPAE_TYPE_LINK 0x03
+#define LPAE_L12_TYPE_BLOCK 0x01
+#define LPAE_L3_TYPE_BLOCK 0x03
+#define LPAE_TYPE_MASK 0x03 /* mask of type bits */
#define LPAE_AP_HYP_RW (0x01 << 6) /* RW permissions for PL-2 stage 1*/
#define LPAE_AP_HYP_RDONLY (0x03 << 6) /* RD permissions for PL-2 stage 1 */
#define LPAE_HAP_READ (0x01 << 6) /* read permissions for stage 2 */
#define LPAE_HAP_WRITE (0x02 << 6) /* write permissions for stage 2*/
+
+#define LPAE_AF (0x1 << 10) /* Access Flag */
+
#endif
Modified: soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/vmm.c
==============================================================================
--- soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/vmm.c Sat Jun 27 12:37:09 2015 (r287667)
+++ soc2015/mihai/bhyve-on-arm-head/sys/arm/vmm/vmm.c Sat Jun 27 13:50:08 2015 (r287668)
@@ -198,7 +198,7 @@
static int
vmm_init(void)
{
- ops = NULL;
+ ops = &vmm_ops_arm;
return (VMM_INIT(0));
}
@@ -218,7 +218,9 @@
case MOD_UNLOAD:
error = vmmdev_cleanup();
if (error == 0) {
- vmm_initialized = 0;
+ error = VMM_CLEANUP();
+ if (error)
+ vmm_initialized = 0;
}
break;
default:
Modified: soc2015/mihai/bhyve-on-arm-head/sys/modules/vmm-arm/Makefile
==============================================================================
--- soc2015/mihai/bhyve-on-arm-head/sys/modules/vmm-arm/Makefile Sat Jun 27 12:37:09 2015 (r287667)
+++ soc2015/mihai/bhyve-on-arm-head/sys/modules/vmm-arm/Makefile Sat Jun 27 13:50:08 2015 (r287668)
@@ -11,6 +11,7 @@
vmm_dev.c \
mmu.c \
vmm_stat.c \
+ arm.c \
hyp.S
More information about the svn-soc-all
mailing list