svn commit: r344202 - head/sys/powerpc/booke
Justin Hibbits
jhibbits at FreeBSD.org
Sat Feb 16 04:16:11 UTC 2019
Author: jhibbits
Date: Sat Feb 16 04:16:10 2019
New Revision: 344202
URL: https://svnweb.freebsd.org/changeset/base/344202
Log:
powerpc/booke: Use DMAP where possible for page copy and zeroing
This avoids several lock acquisitions and pmap_kenter() calls, improving
performance marginally.
MFC after: 2 weeks
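The pattern repeated throughout the diff below: when the direct map is
available (hw_direct_map), every physical page already has a permanent
kernel virtual address at PHYS_TO_DMAP(pa), so the old scheme of a shared
scratch VA, serialized by a mutex and mapped/unmapped around each operation
with mmu_booke_kenter()/mmu_booke_kremove(), can be bypassed entirely.
A minimal sketch of the fast path, assuming hw_direct_map is set; the
wrapper function is illustrative, the calls are the ones the diff uses:

    /* Sketch: zero a page through the direct map, no lock, no kenter. */
    static void
    dmap_zero_page(vm_page_t m)
    {
        vm_offset_t va;

        va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));  /* permanent mapping */
        bzero((void *)va, PAGE_SIZE);
    }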
Modified:
head/sys/powerpc/booke/pmap.c
Modified: head/sys/powerpc/booke/pmap.c
==============================================================================
--- head/sys/powerpc/booke/pmap.c Sat Feb 16 03:49:48 2019 (r344201)
+++ head/sys/powerpc/booke/pmap.c Sat Feb 16 04:16:10 2019 (r344202)
@@ -2973,14 +2973,19 @@ mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int o
/* XXX KASSERT off and size are within a single page? */
- mtx_lock(&zero_page_mutex);
- va = zero_page_va;
+ if (hw_direct_map) {
+ va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
+ bzero((caddr_t)va + off, size);
+ } else {
+ mtx_lock(&zero_page_mutex);
+ va = zero_page_va;
- mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
- bzero((caddr_t)va + off, size);
- mmu_booke_kremove(mmu, va);
+ mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
+ bzero((caddr_t)va + off, size);
+ mmu_booke_kremove(mmu, va);
- mtx_unlock(&zero_page_mutex);
+ mtx_unlock(&zero_page_mutex);
+ }
}
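The XXX comment in this hunk asks for an assertion that the range stays
within a single page. A sketch of what that check could look like; it is
not part of this commit:

    /* Sketch only: the check the XXX comment requests. */
    KASSERT(off + size <= PAGE_SIZE,
        ("mmu_booke_zero_page_area: range crosses a page boundary"));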
/*
@@ -2991,15 +2996,23 @@ mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
{
vm_offset_t off, va;
- mtx_lock(&zero_page_mutex);
- va = zero_page_va;
+ if (hw_direct_map) {
+ va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
+ } else {
+ va = zero_page_va;
+ mtx_lock(&zero_page_mutex);
- mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
+ mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
+ }
+
for (off = 0; off < PAGE_SIZE; off += cacheline_size)
__asm __volatile("dcbz 0,%0" :: "r"(va + off));
- mmu_booke_kremove(mmu, va);
- mtx_unlock(&zero_page_mutex);
+ if (!hw_direct_map) {
+ mmu_booke_kremove(mmu, va);
+
+ mtx_unlock(&zero_page_mutex);
+ }
}
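Note that mmu_booke_zero_page() zeroes with the PowerPC dcbz instruction
rather than bzero(): dcbz establishes a cache block in the data cache and
zeroes it without first reading the line from memory, one cache block per
iteration. The loop from the diff, with comments added:

    /*
     * Zero one cache block per iteration; dcbz allocates and zeroes
     * the block in the data cache, avoiding a read from memory.
     */
    for (off = 0; off < PAGE_SIZE; off += cacheline_size)
        __asm __volatile("dcbz 0,%0" :: "r"(va + off));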
/*
@@ -3015,13 +3028,20 @@ mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t
sva = copy_page_src_va;
dva = copy_page_dst_va;
- mtx_lock(&copy_page_mutex);
- mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
- mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
+ if (hw_direct_map) {
+ sva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(sm));
+ dva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dm));
+ } else {
+ mtx_lock(&copy_page_mutex);
+ mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
+ mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
+ }
memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
- mmu_booke_kremove(mmu, dva);
- mmu_booke_kremove(mmu, sva);
- mtx_unlock(&copy_page_mutex);
+ if (!hw_direct_map) {
+ mmu_booke_kremove(mmu, dva);
+ mmu_booke_kremove(mmu, sva);
+ mtx_unlock(&copy_page_mutex);
+ }
}
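For the page copy, the mutex exists only to serialize use of the single
pair of scratch VAs (copy_page_src_va/copy_page_dst_va); with the direct
map both pages are always mapped, so the whole operation reduces to one
memcpy(). A sketch of the hw_direct_map branch, using only calls that
appear in the diff:

    /* Sketch: direct-map page copy, no scratch mappings required. */
    sva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(sm));
    dva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dm));
    memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);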
static inline void
@@ -3032,26 +3052,31 @@ mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offs
vm_offset_t a_pg_offset, b_pg_offset;
int cnt;
- mtx_lock(&copy_page_mutex);
- while (xfersize > 0) {
- a_pg_offset = a_offset & PAGE_MASK;
- cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
- mmu_booke_kenter(mmu, copy_page_src_va,
- VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
- a_cp = (char *)copy_page_src_va + a_pg_offset;
- b_pg_offset = b_offset & PAGE_MASK;
- cnt = min(cnt, PAGE_SIZE - b_pg_offset);
- mmu_booke_kenter(mmu, copy_page_dst_va,
- VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
- b_cp = (char *)copy_page_dst_va + b_pg_offset;
- bcopy(a_cp, b_cp, cnt);
- mmu_booke_kremove(mmu, copy_page_dst_va);
- mmu_booke_kremove(mmu, copy_page_src_va);
- a_offset += cnt;
- b_offset += cnt;
- xfersize -= cnt;
+ if (hw_direct_map) {
+ bcopy((caddr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*ma)) + a_offset,
+ (caddr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*mb)), xfersize);
+ } else {
+ mtx_lock(&copy_page_mutex);
+ while (xfersize > 0) {
+ a_pg_offset = a_offset & PAGE_MASK;
+ cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+ mmu_booke_kenter(mmu, copy_page_src_va,
+ VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
+ a_cp = (char *)copy_page_src_va + a_pg_offset;
+ b_pg_offset = b_offset & PAGE_MASK;
+ cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+ mmu_booke_kenter(mmu, copy_page_dst_va,
+ VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
+ b_cp = (char *)copy_page_dst_va + b_pg_offset;
+ bcopy(a_cp, b_cp, cnt);
+ mmu_booke_kremove(mmu, copy_page_dst_va);
+ mmu_booke_kremove(mmu, copy_page_src_va);
+ a_offset += cnt;
+ b_offset += cnt;
+ xfersize -= cnt;
+ }
+ mtx_unlock(&copy_page_mutex);
}
- mtx_unlock(&copy_page_mutex);
}
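One thing to watch in the copy_pages fast path: the direct-map branch adds
a_offset to the source but never adds b_offset to the destination, and it
issues a single bcopy() of xfersize bytes starting from the first page of
each array, which matches the per-page loop only when the backing pages
are physically contiguous. A hedged sketch of a more general fast path;
this is my reading, not the committed code:

    /*
     * Sketch (not the committed code): apply both offsets.  A single
     * bcopy() across page boundaries still assumes the pages in each
     * array are physically contiguous, since the direct map is only
     * virtually contiguous where the physical pages are.
     */
    bcopy((caddr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*ma)) + a_offset,
        (caddr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*mb)) + b_offset, xfersize);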
static vm_offset_t
@@ -3064,6 +3089,9 @@ mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
paddr = VM_PAGE_TO_PHYS(m);
+ if (hw_direct_map)
+ return (PHYS_TO_DMAP(paddr));
+
flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
flags |= tlb_calc_wimg(paddr, pmap_page_get_memattr(m)) << PTE_MAS2_SHIFT;
flags |= PTE_PS_4KB;
@@ -3096,6 +3124,9 @@ static void
mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr)
{
pte_t *pte;
+
+ if (hw_direct_map)
+ return;
pte = pte_find(mmu, kernel_pmap, addr);
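These last two hunks back the machine-independent pmap_quick_enter_page()
and pmap_quick_remove_page() KPI: with the direct map, "entering" a page
is just an address computation and removal is a no-op. A minimal
caller-side sketch of that KPI, not taken from this commit:

    /* Sketch: typical short-lived mapping via the quick-enter KPI. */
    vm_offset_t qva;

    qva = pmap_quick_enter_page(m);     /* DMAP case: PHYS_TO_DMAP(pa) */
    bzero((void *)qva, PAGE_SIZE);      /* brief CPU access to the page */
    pmap_quick_remove_page(qva);        /* DMAP case: returns immediately */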