git: c5f477a637c2 - main - arm: replace tailq pointers with iterators
Date: Mon, 21 Apr 2025 04:13:06 UTC
The branch main has been updated by dougm:

URL: https://cgit.FreeBSD.org/src/commit/?id=c5f477a637c28a28b13b91e6d8dac6015277c8e6

commit c5f477a637c28a28b13b91e6d8dac6015277c8e6
Author:     Doug Moore <dougm@FreeBSD.org>
AuthorDate: 2025-04-21 04:10:17 +0000
Commit:     Doug Moore <dougm@FreeBSD.org>
CommitDate: 2025-04-21 04:10:17 +0000

    arm: replace tailq pointers with iterators

    Change architecture-specific code to use iterators rather than tailq
    pointers.

    Reviewed by:            kib
    Differential Revision:  https://reviews.freebsd.org/D49924
---
 sys/arm/arm/pmap-v6.c | 29 +++++++++++++++++------------
 1 file changed, 17 insertions(+), 12 deletions(-)

diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c
index bb2aced34c94..fafa8908dcd1 100644
--- a/sys/arm/arm/pmap-v6.c
+++ b/sys/arm/arm/pmap-v6.c
@@ -124,6 +124,7 @@
 #include <vm/vm_pageout.h>
 #include <vm/vm_phys.h>
 #include <vm/vm_extern.h>
+#include <vm/vm_radix.h>
 #include <vm/vm_reserv.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
@@ -4782,29 +4783,32 @@ void
 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
     vm_page_t m_start, vm_prot_t prot)
 {
+	struct pctrie_iter pages;
 	vm_offset_t va;
 	vm_page_t m, mpt2pg;
-	vm_pindex_t diff, psize;
 
 	PDEBUG(6, printf("%s: pmap %p start %#x end %#x m %p prot %#x\n",
 	    __func__, pmap, start, end, m_start, prot));
 
 	VM_OBJECT_ASSERT_LOCKED(m_start->object);
-	psize = atop(end - start);
+
 	mpt2pg = NULL;
-	m = m_start;
+	vm_page_iter_limit_init(&pages, m_start->object,
+	    m_start->pindex + atop(end - start));
+	m = vm_radix_iter_lookup(&pages, m_start->pindex);
 	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
-	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
-		va = start + ptoa(diff);
+	while (m != NULL) {
+		va = start + ptoa(m->pindex - m_start->pindex);
 		if ((va & PTE1_OFFSET) == 0 && va + PTE1_SIZE <= end &&
 		    m->psind == 1 && sp_enabled &&
-		    pmap_enter_1mpage(pmap, va, m, prot))
-			m = &m[PTE1_SIZE / PAGE_SIZE - 1];
-		else
+		    pmap_enter_1mpage(pmap, va, m, prot)) {
+			m = vm_radix_iter_jump(&pages, NBPDR / PAGE_SIZE);
+		} else {
 			mpt2pg = pmap_enter_quick_locked(pmap, va, m, prot,
 			    mpt2pg);
-		m = TAILQ_NEXT(m, listq);
+			m = vm_radix_iter_step(&pages);
+		}
 	}
 	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
@@ -4819,6 +4823,7 @@ void
 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
     vm_pindex_t pindex, vm_size_t size)
 {
+	struct pctrie_iter pages;
 	pt1_entry_t *pte1p;
 	vm_paddr_t pa, pte2_pa;
 	vm_page_t p;
@@ -4831,7 +4836,8 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
 	if ((addr & PTE1_OFFSET) == 0 && (size & PTE1_OFFSET) == 0) {
 		if (!vm_object_populate(object, pindex, pindex + atop(size)))
 			return;
-		p = vm_page_lookup(object, pindex);
+		vm_page_iter_init(&pages, object);
+		p = vm_radix_iter_lookup(&pages, pindex);
 		KASSERT(p->valid == VM_PAGE_BITS_ALL,
 		    ("%s: invalid page %p", __func__, p));
 		pat_mode = p->md.pat_mode;
@@ -4849,15 +4855,14 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
 		 * the pages are not physically contiguous or have differing
 		 * memory attributes.
 		 */
-		p = TAILQ_NEXT(p, listq);
 		for (pa = pte2_pa + PAGE_SIZE; pa < pte2_pa + size;
 		    pa += PAGE_SIZE) {
+			p = vm_radix_iter_next(&pages);
 			KASSERT(p->valid == VM_PAGE_BITS_ALL,
 			    ("%s: invalid page %p", __func__, p));
 			if (pa != VM_PAGE_TO_PHYS(p) ||
 			    pat_mode != p->md.pat_mode)
 				return;
-			p = TAILQ_NEXT(p, listq);
 		}
 
 		/*
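The conversion above follows the same pattern as the rest of this series: instead of chaining through the object's page list with TAILQ_NEXT(m, listq), the loop drives a bounded pctrie iterator over the object's radix trie and advances it with the vm_radix_iter_*() helpers that appear in the diff. Below is a minimal sketch of that loop shape, assuming the FreeBSD VM headers and using only the calls shown above; walk_object_pages() and do_page() are hypothetical names for illustration, not part of the commit.

static void
walk_object_pages(vm_object_t object, vm_pindex_t first, vm_pindex_t npages)
{
	struct pctrie_iter pages;
	vm_page_t m;

	/* Bound the iterator so it stops past the last pindex of interest. */
	vm_page_iter_limit_init(&pages, object, first + npages);

	/* Position the iterator at the first page (NULL if not resident). */
	m = vm_radix_iter_lookup(&pages, first);
	while (m != NULL) {
		do_page(m);		/* hypothetical per-page work */
		/* Advance to the next resident page below the limit. */
		m = vm_radix_iter_step(&pages);
	}
}

Bounding the iterator up front is what lets pmap_enter_object() drop the old psize/diff arithmetic: once the iterator passes the limit it returns NULL, and vm_radix_iter_jump() gives the superpage case a way to skip over the pages covered by a 1 MB mapping in one step.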