svn commit: r316761 - head/sys/arm64/arm64
Andrew Turner
andrew at FreeBSD.org
Thu Apr 13 15:03:05 UTC 2017
Author: andrew
Date: Thu Apr 13 15:03:03 2017
New Revision: 316761
URL: https://svnweb.freebsd.org/changeset/base/316761
Log:
Set the arm64 Execute-never bits in more places.
We need to set the Execute-never bits when mapping device memory as the
hardware may perform speculative instruction fetches.
Set the Privileged Execute-never bit on userspace memory so the kernel
faults if it is tricked into executing it.
Reviewed by: kib
Sponsored by: DARPA, AFRL
Differential Revision: https://reviews.freebsd.org/D10382
Modified:
head/sys/arm64/arm64/pmap.c
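For reference, AArch64 keeps the execute-never controls in the upper
attribute bits of a stage 1 page or block descriptor: UXN (unprivileged
execute-never) is bit 54 and PXN (privileged execute-never) is bit 53.
A minimal sketch of the macros the diff relies on; the authoritative
definitions live in sys/arm64/include/pte.h, so treat the exact
spellings below as assumptions:

	/* Sketch only; see sys/arm64/include/pte.h for the real macros. */
	#define	ATTR_UXN	(1UL << 54)	/* no instruction fetch at EL0 */
	#define	ATTR_PXN	(1UL << 53)	/* no instruction fetch at EL1 */
	#define	ATTR_XN		(ATTR_UXN | ATTR_PXN)	/* no fetch at any EL */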
Modified: head/sys/arm64/arm64/pmap.c
==============================================================================
--- head/sys/arm64/arm64/pmap.c Thu Apr 13 14:51:34 2017 (r316760)
+++ head/sys/arm64/arm64/pmap.c Thu Apr 13 15:03:03 2017 (r316761)
@@ -1127,7 +1127,7 @@ static void
pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode)
{
pd_entry_t *pde;
- pt_entry_t *pte;
+ pt_entry_t *pte, attr;
vm_offset_t va;
int lvl;
@@ -1138,6 +1138,10 @@ pmap_kenter(vm_offset_t sva, vm_size_t s
KASSERT((size & PAGE_MASK) == 0,
("pmap_kenter: Mapping is not page-sized"));
+ attr = ATTR_DEFAULT | ATTR_IDX(mode) | L3_PAGE;
+ if (mode == DEVICE_MEMORY)
+ attr |= ATTR_XN;
+
va = sva;
while (size != 0) {
pde = pmap_pde(kernel_pmap, va, &lvl);
@@ -1146,8 +1150,7 @@ pmap_kenter(vm_offset_t sva, vm_size_t s
KASSERT(lvl == 2, ("pmap_kenter: Invalid level %d", lvl));
pte = pmap_l2_to_l3(pde, va);
- pmap_load_store(pte, (pa & ~L3_OFFSET) | ATTR_DEFAULT |
- ATTR_IDX(mode) | L3_PAGE);
+ pmap_load_store(pte, (pa & ~L3_OFFSET) | attr);
PTE_SYNC(pte);
va += PAGE_SIZE;
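Pieced together from the two pmap_kenter() hunks above, the attribute
word is now computed once, ahead of the mapping loop; a sketch with the
unchanged KASSERTs elided and the loop-advance lines reconstructed from
the surrounding code:

	attr = ATTR_DEFAULT | ATTR_IDX(mode) | L3_PAGE;
	if (mode == DEVICE_MEMORY)
		attr |= ATTR_XN;	/* devices must never be fetched from */

	va = sva;
	while (size != 0) {
		pde = pmap_pde(kernel_pmap, va, &lvl);
		pte = pmap_l2_to_l3(pde, va);
		pmap_load_store(pte, (pa & ~L3_OFFSET) | attr);
		PTE_SYNC(pte);
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

Hoisting the computation also keeps the DEVICE_MEMORY test out of the
per-page loop.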
@@ -1259,6 +1262,8 @@ pmap_qenter(vm_offset_t sva, vm_page_t *
m = ma[i];
pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT | ATTR_AP(ATTR_AP_RW) |
ATTR_IDX(m->md.pv_memattr) | L3_PAGE;
+ if (m->md.pv_memattr == DEVICE_MEMORY)
+ pa |= ATTR_XN;
pte = pmap_l2_to_l3(pde, va);
pmap_load_store(pte, pa);
PTE_SYNC(pte);
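pmap_qenter() is the transient kernel-mapping path, used to map an
array of pages into a contiguous kernel VA range; after this hunk, any
page whose pv_memattr is DEVICE_MEMORY picks up ATTR_XN there as well.
A hedged usage sketch, with kva and ma as illustrative names:

	vm_page_t ma[4];	/* pages to map; may be device memory */
	vm_offset_t kva;	/* preallocated kernel VA range */

	pmap_qenter(kva, ma, 4);	/* device pages now map execute-never */
	/* ... access the pages through kva ... */
	pmap_qremove(kva, 4);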
@@ -2719,12 +2724,12 @@ pmap_enter(pmap_t pmap, vm_offset_t va,
L3_PAGE);
if ((prot & VM_PROT_WRITE) == 0)
new_l3 |= ATTR_AP(ATTR_AP_RO);
- if ((prot & VM_PROT_EXECUTE) == 0)
+ if ((prot & VM_PROT_EXECUTE) == 0 || m->md.pv_memattr == DEVICE_MEMORY)
new_l3 |= ATTR_XN;
if ((flags & PMAP_ENTER_WIRED) != 0)
new_l3 |= ATTR_SW_WIRED;
if ((va >> 63) == 0)
- new_l3 |= ATTR_AP(ATTR_AP_USER);
+ new_l3 |= ATTR_AP(ATTR_AP_USER) | ATTR_PXN;
CTR2(KTR_PMAP, "pmap_enter: %.16lx -> %.16lx", va, pa);
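On arm64 the top address bit selects the translation base register
(TTBR0 for user addresses, TTBR1 for kernel), so the (va >> 63) == 0
test above identifies user mappings. Adding ATTR_PXN to every user
mapping gives an SMEP-like guarantee: the kernel faults rather than
executes if it is redirected into user-controlled memory. An annotated
restatement of the new rule:

	if ((prot & VM_PROT_EXECUTE) == 0 || m->md.pv_memattr == DEVICE_MEMORY)
		new_l3 |= ATTR_XN;	/* nothing may fetch from this page */
	if ((va >> 63) == 0)		/* user VA: top bit clear (TTBR0) */
		new_l3 |= ATTR_AP(ATTR_AP_USER) | ATTR_PXN;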
@@ -3127,8 +3132,10 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT | ATTR_IDX(m->md.pv_memattr) |
ATTR_AP(ATTR_AP_RO) | L3_PAGE;
- if ((prot & VM_PROT_EXECUTE) == 0)
+ if ((prot & VM_PROT_EXECUTE) == 0 || m->md.pv_memattr == DEVICE_MEMORY)
pa |= ATTR_XN;
+ else if (va < VM_MAXUSER_ADDRESS)
+ pa |= ATTR_PXN;
/*
* Now validate mapping with RO protection
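The same execute-never rule now appears in both pmap_enter() and
pmap_enter_quick_locked(). A hypothetical helper that captures it in
one place (pmap_xn_bits is an invented name; the tree keeps this logic
inline):

	static pt_entry_t
	pmap_xn_bits(vm_offset_t va, vm_prot_t prot, int memattr)
	{
		pt_entry_t bits = 0;

		if ((prot & VM_PROT_EXECUTE) == 0 || memattr == DEVICE_MEMORY)
			bits |= ATTR_XN;	/* not executable at all */
		else if (va < VM_MAXUSER_ADDRESS)
			bits |= ATTR_PXN;	/* user-executable, never kernel */
		return (bits);
	}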
@@ -4263,6 +4270,8 @@ pmap_change_attr_locked(vm_offset_t va,
l3 = pmap_load(pte);
l3 &= ~ATTR_IDX_MASK;
l3 |= ATTR_IDX(mode);
+ if (mode == DEVICE_MEMORY)
+ l3 |= ATTR_XN;
pmap_update_entry(kernel_pmap, pte, l3, tmpva,
PAGE_SIZE);
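pmap_change_attr_locked() covers existing kernel mappings, so a range
retyped as device memory becomes execute-never too. A hedged usage
sketch; the assumption here is that the public entry point takes a
VM_MEMATTR_* mode that corresponds to the DEVICE_MEMORY index used
inside pmap.c:

	/* Illustrative: retype an existing kernel mapping as device memory. */
	error = pmap_change_attr(va, size, VM_MEMATTR_DEVICE);
	if (error != 0)
		printf("pmap_change_attr failed: %d\n", error);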