svn commit: r325628 - head/sys/powerpc/booke
Justin Hibbits
jhibbits at FreeBSD.org
Fri Nov 10 04:14:50 UTC 2017
Author: jhibbits
Date: Fri Nov 10 04:14:48 2017
New Revision: 325628
URL: https://svnweb.freebsd.org/changeset/base/325628
Log:
  Book-E pmap_mapdev_attr() improvements

  * Check TLB1 in all mapdev cases, in case the memattr matches an existing
    mapping (it doesn't need to be VM_MEMATTR_DEFAULT).
  * Fix mapping when the starting address is not a multiple of the widest
    mapping size.  For instance, it will now properly map 0xffffef000, size
    0x11000 using 2 TLB entries, basing it at 0x****f000 instead of
    0x***00000.

  MFC after:	2 weeks
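
For context: Book-E TLB1 entries come in power-of-4 sizes (4KB, 16KB, 64KB,
...) and must be size-aligned in both VA and PA.  A sketch of the arithmetic
behind the example above (the numbers are from the log; the breakdown is
illustrative, not text from the commit):

    pa = 0xffffef000, size = 0x11000 (68KB)

    0xffffef000 is only 4KB-aligned, so the largest entry that can start
    there is 4KB, covering 0xffffef000..0xffffeffff.  The remainder begins
    at 0xfffff0000, which is 64KB-aligned, so the remaining 0x10000 bytes
    fit a single 64KB entry: 2 entries total, based at an address ending in
    0xf000.  The old code rounded the VA up to a 1 << flsl(size) boundary
    (ending in 0x00000), which could not line up with pa's low bits.
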
Modified:
head/sys/powerpc/booke/pmap.c
Modified: head/sys/powerpc/booke/pmap.c
==============================================================================
--- head/sys/powerpc/booke/pmap.c Fri Nov 10 02:09:37 2017 (r325627)
+++ head/sys/powerpc/booke/pmap.c Fri Nov 10 04:14:48 2017 (r325628)
@@ -3471,16 +3471,17 @@ mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_siz
 	 * check whether a sequence of TLB1 entries exist that match the
 	 * requirement, but now only checks the easy case.
 	 */
-	if (ma == VM_MEMATTR_DEFAULT) {
-		for (i = 0; i < TLB1_ENTRIES; i++) {
-			tlb1_read_entry(&e, i);
-			if (!(e.mas1 & MAS1_VALID))
-				continue;
-			if (pa >= e.phys &&
-			    (pa + size) <= (e.phys + e.size))
-				return (void *)(e.virt +
-				    (vm_offset_t)(pa - e.phys));
-		}
+	for (i = 0; i < TLB1_ENTRIES; i++) {
+		tlb1_read_entry(&e, i);
+		if (!(e.mas1 & MAS1_VALID))
+			continue;
+		if (pa >= e.phys &&
+		    (pa + size) <= (e.phys + e.size) &&
+		    (ma == VM_MEMATTR_DEFAULT ||
+		    tlb_calc_wimg(pa, ma) ==
+		    (e.mas2 & (MAS2_WIMGE_MASK & ~_TLB_ENTRY_SHARED))))
+			return (void *)(e.virt +
+			    (vm_offset_t)(pa - e.phys));
 	}
 
 	size = roundup(size, PAGE_SIZE);
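
The loop above now reuses an existing mapping for any memattr, not just
VM_MEMATTR_DEFAULT.  A minimal standalone sketch of the same check, pulled
into a hypothetical helper (tlb1_find_existing() is made up for
illustration; the types and other names are the ones used in the hunk):

	/*
	 * Return a pointer into an existing TLB1 mapping that covers
	 * [pa, pa + size) with matching WIMG bits, or NULL if none does.
	 * _TLB_ENTRY_SHARED is masked out of the WIMGE comparison so the
	 * shared (coherence) bit alone does not defeat the match.
	 */
	static void *
	tlb1_find_existing(vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
	{
		tlb_entry_t e;
		int i;

		for (i = 0; i < TLB1_ENTRIES; i++) {
			tlb1_read_entry(&e, i);
			if (!(e.mas1 & MAS1_VALID))
				continue;
			if (pa < e.phys || (pa + size) > (e.phys + e.size))
				continue;
			if (ma != VM_MEMATTR_DEFAULT &&
			    tlb_calc_wimg(pa, ma) !=
			    (e.mas2 & (MAS2_WIMGE_MASK & ~_TLB_ENTRY_SHARED)))
				continue;
			return ((void *)(e.virt + (vm_offset_t)(pa - e.phys)));
		}
		return (NULL);
	}
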
@@ -3494,10 +3495,19 @@ mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_siz
 	 * With a sparse mapdev, align to the largest starting region. This
 	 * could feasibly be optimized for a 'best-fit' alignment, but that
 	 * calculation could be very costly.
+	 * Align to the smaller of:
+	 * - first set bit in overlap of (pa & size mask)
+	 * - largest size envelope
+	 *
+	 * It's possible the device mapping may start at a PA that's not larger
+	 * than the size mask, so we need to offset in to maximize the TLB entry
+	 * range and minimize the number of used TLB entries.
 	 */
 	do {
 		tmpva = tlb1_map_base;
-		va = roundup(tlb1_map_base, 1 << flsl(size));
+		sz = ffsl(((1 << flsl(size-1)) - 1) & pa);
+		sz = sz ? min(roundup(sz + 3, 4), flsl(size) - 1) : flsl(size) - 1;
+		va = roundup(tlb1_map_base, 1 << sz) | (((1 << sz) - 1) & pa);
 #ifdef __powerpc64__
 	} while (!atomic_cmpset_long(&tlb1_map_base, tmpva, va + size));
 #else
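
Evaluating the new alignment computation with the numbers from the log
message (an illustrative trace, not code from the commit; ffsl() and flsl()
return 1-based bit positions):

	/* pa = 0xffffef000, size = 0x11000 */
	flsl(size - 1) = 17		/* size mask: (1 << 17) - 1 = 0x1ffff */
	pa & 0x1ffff   = 0xf000		/* pa's low bits inside the size mask */
	ffsl(0xf000)   = 13		/* lowest set bit of that overlap */
	sz = min(roundup(13 + 3, 4), flsl(size) - 1) = min(16, 16) = 16
	va = roundup(tlb1_map_base, 1 << 16) | (0xffff & pa)

	The resulting va is 64KB-aligned plus 0xf000, so its low 16 bits match
	pa's and size-aligned TLB1 entries can cover both.
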
@@ -3514,6 +3524,13 @@ mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_siz
 
 	do {
 		sz = 1 << (ilog2(size) & ~1);
+		/* Align size to PA */
+		if (pa % sz != 0) {
+			do {
+				sz >>= 2;
+			} while (pa % sz != 0);
+		}
+		/* Now align from there to VA */
 		if (va % sz != 0) {
 			do {
 				sz >>= 2;
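
Tracing the same example through the sizing loop above (a sketch; ilog2()
is floor(log2), and the "& ~1" keeps the exponent even because TLB1 sizes
are powers of 4):

	/* pass 1: size = 0x11000, pa = 0xffffef000, va ends in 0xf000 */
	sz = 1 << (ilog2(0x11000) & ~1)		/* = 0x10000 (64KB) */
	pa % 0x10000 = 0xf000, so sz shrinks: 0x4000, then 0x1000 (4KB)
	va % 0x1000 == 0, so a 4KB entry is wired at 0xffffef000

	/* pass 2: size = 0x10000, pa = 0xfffff0000, va ends in 0x0000 */
	sz = 1 << (ilog2(0x10000) & ~1)		/* = 0x10000; pa, va aligned */
	one 64KB entry is wired: 2 entries cover the whole region
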
@@ -3522,8 +3539,9 @@ mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_siz
 		if (bootverbose)
 			printf("Wiring VA=%lx to PA=%jx (size=%lx)\n",
 			    va, (uintmax_t)pa, sz);
-		tlb1_set_entry(va, pa, sz,
-		    _TLB_ENTRY_SHARED | tlb_calc_wimg(pa, ma));
+		if (tlb1_set_entry(va, pa, sz,
+		    _TLB_ENTRY_SHARED | tlb_calc_wimg(pa, ma)) < 0)
+			return (NULL);
 		size -= sz;
 		pa += sz;
 		va += sz;
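
With the last hunk, mmu_booke_mapdev_attr() now returns NULL when
tlb1_set_entry() cannot find a free TLB1 slot, so the failure is visible to
the caller instead of yielding a va that was never fully wired.  A
hypothetical caller sketch (the error handling is illustrative;
pmap_mapdev_attr() is the MI wrapper that reaches this code):

	void *va;

	va = pmap_mapdev_attr(pa, size, VM_MEMATTR_UNCACHEABLE);
	if (va == NULL)
		return (ENOMEM);	/* no free TLB1 entries */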