git: 4961faaacc07 - main - pmap_{un}map_io_transient: Use bool instead of boolean_t.
Date: Thu, 04 May 2023 19:34:20 UTC
The branch main has been updated by jhb:

URL: https://cgit.FreeBSD.org/src/commit/?id=4961faaacc07d5bc71f28131229b1184d0d5ca9f

commit 4961faaacc07d5bc71f28131229b1184d0d5ca9f
Author:     John Baldwin <jhb@FreeBSD.org>
AuthorDate: 2023-05-04 19:29:48 +0000
Commit:     John Baldwin <jhb@FreeBSD.org>
CommitDate: 2023-05-04 19:29:48 +0000

    pmap_{un}map_io_transient: Use bool instead of boolean_t.

    Reviewed by:            imp, kib
    Differential Revision:  https://reviews.freebsd.org/D39920
---
 sys/amd64/amd64/pmap.c            | 24 ++++++++++++------------
 sys/amd64/amd64/uio_machdep.c     | 12 ++++++------
 sys/amd64/include/pmap.h          |  4 ++--
 sys/arm64/arm64/pmap.c            | 22 +++++++++++-----------
 sys/arm64/arm64/uio_machdep.c     | 12 ++++++------
 sys/arm64/include/pmap.h          |  4 ++--
 sys/dev/cxgbe/cxgbei/icl_cxgbei.c | 12 ++++++------
 sys/riscv/include/pmap.h          |  4 ++--
 sys/riscv/riscv/pmap.c            | 22 +++++++++++-----------
 sys/riscv/riscv/uio_machdep.c     | 12 ++++++------
 10 files changed, 64 insertions(+), 64 deletions(-)

diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 1009736472dc..273c768559e1 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -10365,19 +10365,19 @@ done:
  * \param vaddr     On return contains the kernel virtual memory address
  *                  of the pages passed in the page parameter.
  * \param count     Number of pages passed in.
- * \param can_fault TRUE if the thread using the mapped pages can take
- *                  page faults, FALSE otherwise.
+ * \param can_fault true if the thread using the mapped pages can take
+ *                  page faults, false otherwise.
  *
- * \returns TRUE if the caller must call pmap_unmap_io_transient when
- *          finished or FALSE otherwise.
+ * \returns true if the caller must call pmap_unmap_io_transient when
+ *          finished or false otherwise.
  *
  */
-boolean_t
+bool
 pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
-    boolean_t can_fault)
+    bool can_fault)
 {
 	vm_paddr_t paddr;
-	boolean_t needs_mapping;
+	bool needs_mapping;
 	pt_entry_t *pte;
 	int cache_bits, error __unused, i;
 
@@ -10385,14 +10385,14 @@ pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
 	 * Allocate any KVA space that we need, this is done in a separate
 	 * loop to prevent calling vmem_alloc while pinned.
 	 */
-	needs_mapping = FALSE;
+	needs_mapping = false;
 	for (i = 0; i < count; i++) {
 		paddr = VM_PAGE_TO_PHYS(page[i]);
 		if (__predict_false(paddr >= dmaplimit)) {
 			error = vmem_alloc(kernel_arena, PAGE_SIZE,
 			    M_BESTFIT | M_WAITOK, &vaddr[i]);
 			KASSERT(error == 0, ("vmem_alloc failed: %d", error));
-			needs_mapping = TRUE;
+			needs_mapping = true;
 		} else {
 			vaddr[i] = PHYS_TO_DMAP(paddr);
 		}
@@ -10400,7 +10400,7 @@ pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
 
 	/* Exit early if everything is covered by the DMAP */
 	if (!needs_mapping)
-		return (FALSE);
+		return (false);
 
 	/*
 	 * NB: The sequence of updating a page table followed by accesses
@@ -10426,7 +10426,7 @@ pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
 		} else {
 			pte = vtopte(vaddr[i]);
 			cache_bits = pmap_cache_bits(kernel_pmap,
-			    page[i]->md.pat_mode, 0);
+			    page[i]->md.pat_mode, false);
 			pte_store(pte, paddr | X86_PG_RW | X86_PG_V |
 			    cache_bits);
 			pmap_invlpg(kernel_pmap, vaddr[i]);
@@ -10439,7 +10439,7 @@ pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
 
 void
 pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
-    boolean_t can_fault)
+    bool can_fault)
 {
 	vm_paddr_t paddr;
 	int i;
diff --git a/sys/amd64/amd64/uio_machdep.c b/sys/amd64/amd64/uio_machdep.c
index 786844dda7ce..97e5fdc0a05e 100644
--- a/sys/amd64/amd64/uio_machdep.c
+++ b/sys/amd64/amd64/uio_machdep.c
@@ -67,7 +67,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
 	size_t cnt;
 	int error = 0;
 	int save = 0;
-	boolean_t mapped;
+	bool mapped;
 
 	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
 	    ("uiomove_fromphys: mode"));
@@ -75,7 +75,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
 	    ("uiomove_fromphys proc"));
 	save = td->td_pflags & TDP_DEADLKTREAT;
 	td->td_pflags |= TDP_DEADLKTREAT;
-	mapped = FALSE;
+	mapped = false;
 	while (n > 0 && uio->uio_resid) {
 		iov = uio->uio_iov;
 		cnt = iov->iov_len;
@@ -88,7 +88,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
 		cnt = min(cnt, PAGE_SIZE - page_offset);
 		if (uio->uio_segflg != UIO_NOCOPY) {
 			mapped = pmap_map_io_transient(
-			    &ma[offset >> PAGE_SHIFT], &vaddr, 1, TRUE);
+			    &ma[offset >> PAGE_SHIFT], &vaddr, 1, true);
 			cp = (char *)vaddr + page_offset;
 		}
 		switch (uio->uio_segflg) {
@@ -112,8 +112,8 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
 		}
 		if (__predict_false(mapped)) {
 			pmap_unmap_io_transient(&ma[offset >> PAGE_SHIFT],
-			    &vaddr, 1, TRUE);
-			mapped = FALSE;
+			    &vaddr, 1, true);
+			mapped = false;
 		}
 		iov->iov_base = (char *)iov->iov_base + cnt;
 		iov->iov_len -= cnt;
@@ -127,7 +127,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
 out:
 	if (__predict_false(mapped))
 		pmap_unmap_io_transient(&ma[offset >> PAGE_SHIFT], &vaddr, 1,
-		    TRUE);
+		    true);
 	if (save == 0)
 		td->td_pflags &= ~TDP_DEADLKTREAT;
 	return (error);
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
index 1b3aeb248ebb..cf41855a7c59 100644
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -481,8 +481,8 @@ void	pmap_invalidate_cache_pages(vm_page_t *pages, int count);
 void	pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
 void	pmap_force_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
 void	pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num);
-boolean_t pmap_map_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
-void	pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
+bool	pmap_map_io_transient(vm_page_t *, vm_offset_t *, int, bool);
+void	pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, bool);
 void	pmap_map_delete(pmap_t, vm_offset_t, vm_offset_t);
 void	pmap_pti_add_kva(vm_offset_t sva, vm_offset_t eva, bool exec);
 void	pmap_pti_remove_kva(vm_offset_t sva, vm_offset_t eva);
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index 1d1986b34f66..69073c5c56db 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -7619,33 +7619,33 @@ pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
  * \param vaddr     On return contains the kernel virtual memory address
  *                  of the pages passed in the page parameter.
  * \param count     Number of pages passed in.
- * \param can_fault TRUE if the thread using the mapped pages can take
- *                  page faults, FALSE otherwise.
+ * \param can_fault true if the thread using the mapped pages can take
+ *                  page faults, false otherwise.
 *
- * \returns TRUE if the caller must call pmap_unmap_io_transient when
- *          finished or FALSE otherwise.
+ * \returns true if the caller must call pmap_unmap_io_transient when
+ *          finished or false otherwise.
 *
  */
-boolean_t
+bool
 pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
-    boolean_t can_fault)
+    bool can_fault)
 {
 	vm_paddr_t paddr;
-	boolean_t needs_mapping;
+	bool needs_mapping;
 	int error __diagused, i;
 
 	/*
 	 * Allocate any KVA space that we need, this is done in a separate
 	 * loop to prevent calling vmem_alloc while pinned.
 	 */
-	needs_mapping = FALSE;
+	needs_mapping = false;
 	for (i = 0; i < count; i++) {
 		paddr = VM_PAGE_TO_PHYS(page[i]);
 		if (__predict_false(!PHYS_IN_DMAP(paddr))) {
 			error = vmem_alloc(kernel_arena, PAGE_SIZE,
 			    M_BESTFIT | M_WAITOK, &vaddr[i]);
 			KASSERT(error == 0, ("vmem_alloc failed: %d", error));
-			needs_mapping = TRUE;
+			needs_mapping = true;
 		} else {
 			vaddr[i] = PHYS_TO_DMAP(paddr);
 		}
@@ -7653,7 +7653,7 @@ pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
 
 	/* Exit early if everything is covered by the DMAP */
 	if (!needs_mapping)
-		return (FALSE);
+		return (false);
 
 	if (!can_fault)
 		sched_pin();
@@ -7670,7 +7670,7 @@ pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
 
 void
 pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
-    boolean_t can_fault)
+    bool can_fault)
 {
 	vm_paddr_t paddr;
 	int i;
diff --git a/sys/arm64/arm64/uio_machdep.c b/sys/arm64/arm64/uio_machdep.c
index 11ed239fa9dd..6e92046f614b 100644
--- a/sys/arm64/arm64/uio_machdep.c
+++ b/sys/arm64/arm64/uio_machdep.c
@@ -65,7 +65,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
 	size_t cnt;
 	int error = 0;
 	int save = 0;
-	boolean_t mapped;
+	bool mapped;
 
 	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
 	    ("uiomove_fromphys: mode"));
@@ -73,7 +73,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
 	    ("uiomove_fromphys proc"));
 	save = td->td_pflags & TDP_DEADLKTREAT;
 	td->td_pflags |= TDP_DEADLKTREAT;
-	mapped = FALSE;
+	mapped = false;
 	while (n > 0 && uio->uio_resid) {
 		iov = uio->uio_iov;
 		cnt = iov->iov_len;
@@ -88,7 +88,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
 		cnt = min(cnt, PAGE_SIZE - page_offset);
 		if (uio->uio_segflg != UIO_NOCOPY) {
 			mapped = pmap_map_io_transient(
-			    &ma[offset >> PAGE_SHIFT], &vaddr, 1, TRUE);
+			    &ma[offset >> PAGE_SHIFT], &vaddr, 1, true);
 			cp = (char *)vaddr + page_offset;
 		}
 		switch (uio->uio_segflg) {
@@ -112,8 +112,8 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
 		}
 		if (__predict_false(mapped)) {
 			pmap_unmap_io_transient(&ma[offset >> PAGE_SHIFT],
-			    &vaddr, 1, TRUE);
-			mapped = FALSE;
+			    &vaddr, 1, true);
+			mapped = false;
 		}
 		iov->iov_base = (char *)iov->iov_base + cnt;
 		iov->iov_len -= cnt;
@@ -126,7 +126,7 @@ out:
 	if (__predict_false(mapped)) {
 		panic("ARM64TODO: uiomove_fromphys");
 		pmap_unmap_io_transient(&ma[offset >> PAGE_SHIFT], &vaddr, 1,
-		    TRUE);
+		    true);
 	}
 	if (save == 0)
 		td->td_pflags &= ~TDP_DEADLKTREAT;
diff --git a/sys/arm64/include/pmap.h b/sys/arm64/include/pmap.h
index ad77450f0233..b855a811d3c0 100644
--- a/sys/arm64/include/pmap.h
+++ b/sys/arm64/include/pmap.h
@@ -162,8 +162,8 @@ void	*pmap_mapbios(vm_paddr_t, vm_size_t);
 void	pmap_unmapdev(void *, vm_size_t);
 void	pmap_unmapbios(void *, vm_size_t);
 
-boolean_t pmap_map_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
-void	pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
+bool	pmap_map_io_transient(vm_page_t *, vm_offset_t *, int, bool);
+void	pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, bool);
 
 bool	pmap_get_tables(pmap_t, vm_offset_t, pd_entry_t **, pd_entry_t **,
 	    pd_entry_t **, pt_entry_t **);
diff --git a/sys/dev/cxgbe/cxgbei/icl_cxgbei.c b/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
index 82201b358e91..a406634e96b8 100644
--- a/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
+++ b/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
@@ -625,7 +625,7 @@ icl_cxgbei_conn_pdu_append_bio(struct icl_conn *ic, struct icl_pdu *ip,
 	struct mbuf *m, *m_tail;
 	vm_offset_t vaddr;
 	size_t page_offset, todo, mtodo;
-	boolean_t mapped;
+	bool mapped;
 	int i;
 
 	MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE);
@@ -712,7 +712,7 @@ icl_cxgbei_conn_pdu_append_bio(struct icl_conn *ic, struct icl_pdu *ip,
 		todo = MIN(len, PAGE_SIZE - page_offset);
 
 		mapped = pmap_map_io_transient(bp->bio_ma + i, &vaddr, 1,
-		    FALSE);
+		    false);
 
 		do {
 			mtodo = min(todo, M_SIZE(m) - m->m_len);
@@ -727,7 +727,7 @@ icl_cxgbei_conn_pdu_append_bio(struct icl_conn *ic, struct icl_pdu *ip,
 
 		if (__predict_false(mapped))
 			pmap_unmap_io_transient(bp->bio_ma + 1, &vaddr, 1,
-			    FALSE);
+			    false);
 
 		page_offset = 0;
 		len -= todo;
@@ -813,7 +813,7 @@ icl_cxgbei_conn_pdu_get_bio(struct icl_conn *ic, struct icl_pdu *ip,
 	struct icl_cxgbei_pdu *icp = ip_to_icp(ip);
 	vm_offset_t vaddr;
 	size_t page_offset, todo;
-	boolean_t mapped;
+	bool mapped;
 	int i;
 
 	if (icp->icp_flags & ICPF_RX_DDP)
@@ -834,12 +834,12 @@ icl_cxgbei_conn_pdu_get_bio(struct icl_conn *ic, struct icl_pdu *ip,
 		todo = MIN(len, PAGE_SIZE - page_offset);
 
 		mapped = pmap_map_io_transient(bp->bio_ma + i, &vaddr, 1,
-		    FALSE);
+		    false);
 		m_copydata(ip->ip_data_mbuf, pdu_off, todo,
 		    (char *)vaddr + page_offset);
 		if (__predict_false(mapped))
 			pmap_unmap_io_transient(bp->bio_ma + 1, &vaddr, 1,
-			    FALSE);
+			    false);
 
 		page_offset = 0;
 		pdu_off += todo;
diff --git a/sys/riscv/include/pmap.h b/sys/riscv/include/pmap.h
index 88579e6448b5..5ca6dc411e7b 100644
--- a/sys/riscv/include/pmap.h
+++ b/sys/riscv/include/pmap.h
@@ -154,8 +154,8 @@ void	*pmap_mapbios(vm_paddr_t, vm_size_t);
 void	pmap_unmapdev(void *, vm_size_t);
 void	pmap_unmapbios(void *, vm_size_t);
 
-boolean_t pmap_map_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
-void	pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
+bool	pmap_map_io_transient(vm_page_t *, vm_offset_t *, int, bool);
+void	pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, bool);
 
 bool	pmap_get_tables(pmap_t, vm_offset_t, pd_entry_t **, pd_entry_t **,
 	    pt_entry_t **);
diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c
index 1b4eaa60a1f4..8d34dbc3637d 100644
--- a/sys/riscv/riscv/pmap.c
+++ b/sys/riscv/riscv/pmap.c
@@ -4736,33 +4736,33 @@ pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
  * \param vaddr     On return contains the kernel virtual memory address
  *                  of the pages passed in the page parameter.
  * \param count     Number of pages passed in.
- * \param can_fault TRUE if the thread using the mapped pages can take
- *                  page faults, FALSE otherwise.
+ * \param can_fault true if the thread using the mapped pages can take
+ *                  page faults, false otherwise.
 *
- * \returns TRUE if the caller must call pmap_unmap_io_transient when
- *          finished or FALSE otherwise.
+ * \returns true if the caller must call pmap_unmap_io_transient when
+ *          finished or false otherwise.
 *
  */
-boolean_t
+bool
 pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
-    boolean_t can_fault)
+    bool can_fault)
 {
 	vm_paddr_t paddr;
-	boolean_t needs_mapping;
+	bool needs_mapping;
 	int error __diagused, i;
 
 	/*
 	 * Allocate any KVA space that we need, this is done in a separate
 	 * loop to prevent calling vmem_alloc while pinned.
 	 */
-	needs_mapping = FALSE;
+	needs_mapping = false;
 	for (i = 0; i < count; i++) {
 		paddr = VM_PAGE_TO_PHYS(page[i]);
 		if (__predict_false(paddr >= DMAP_MAX_PHYSADDR)) {
 			error = vmem_alloc(kernel_arena, PAGE_SIZE,
 			    M_BESTFIT | M_WAITOK, &vaddr[i]);
 			KASSERT(error == 0, ("vmem_alloc failed: %d", error));
-			needs_mapping = TRUE;
+			needs_mapping = true;
 		} else {
 			vaddr[i] = PHYS_TO_DMAP(paddr);
 		}
@@ -4770,7 +4770,7 @@ pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
 
 	/* Exit early if everything is covered by the DMAP */
 	if (!needs_mapping)
-		return (FALSE);
+		return (false);
 
 	if (!can_fault)
 		sched_pin();
@@ -4787,7 +4787,7 @@ pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
 
 void
 pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
-    boolean_t can_fault)
+    bool can_fault)
 {
 	vm_paddr_t paddr;
 	int i;
diff --git a/sys/riscv/riscv/uio_machdep.c b/sys/riscv/riscv/uio_machdep.c
index e6f6d39f02be..108824d5b8b4 100644
--- a/sys/riscv/riscv/uio_machdep.c
+++ b/sys/riscv/riscv/uio_machdep.c
@@ -65,7 +65,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
 	size_t cnt;
 	int error = 0;
 	int save = 0;
-	boolean_t mapped;
+	bool mapped;
 
 	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
 	    ("uiomove_fromphys: mode"));
@@ -73,7 +73,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
 	    ("uiomove_fromphys proc"));
 	save = td->td_pflags & TDP_DEADLKTREAT;
 	td->td_pflags |= TDP_DEADLKTREAT;
-	mapped = FALSE;
+	mapped = false;
 	while (n > 0 && uio->uio_resid) {
 		iov = uio->uio_iov;
 		cnt = iov->iov_len;
@@ -88,7 +88,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
 		cnt = min(cnt, PAGE_SIZE - page_offset);
 		if (uio->uio_segflg != UIO_NOCOPY) {
 			mapped = pmap_map_io_transient(
-			    &ma[offset >> PAGE_SHIFT], &vaddr, 1, TRUE);
+			    &ma[offset >> PAGE_SHIFT], &vaddr, 1, true);
 			cp = (char *)vaddr + page_offset;
 		}
 		switch (uio->uio_segflg) {
@@ -112,8 +112,8 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
 		}
 		if (__predict_false(mapped)) {
 			pmap_unmap_io_transient(&ma[offset >> PAGE_SHIFT],
-			    &vaddr, 1, TRUE);
-			mapped = FALSE;
+			    &vaddr, 1, true);
+			mapped = false;
 		}
 		iov->iov_base = (char *)iov->iov_base + cnt;
 		iov->iov_len -= cnt;
@@ -126,7 +126,7 @@ out:
 	if (__predict_false(mapped)) {
 		panic("TODO 3");
 		pmap_unmap_io_transient(&ma[offset >> PAGE_SHIFT], &vaddr, 1,
-		    TRUE);
+		    true);
 	}
 	if (save == 0)
 		td->td_pflags &= ~TDP_DEADLKTREAT;
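
The calling convention is unchanged by this commit, only the spelling of the
type: pmap_map_io_transient() returns true only when at least one page fell
outside the direct map and a transient KVA mapping had to be created, and only
in that case must the caller invoke pmap_unmap_io_transient(). A minimal
sketch of that pattern, modeled on the uiomove_fromphys() callers in the diff
above; the helper example_copy_page() is hypothetical and not part of this
commit:

#include <sys/param.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

/* Hypothetical helper: copy one page of 'm' into 'dst'. */
static void
example_copy_page(vm_page_t m, char *dst)
{
	vm_offset_t vaddr;
	bool mapped;	/* was boolean_t before this commit */

	/*
	 * can_fault == true: this thread may take page faults while the
	 * transient mapping is live, so the pmap layer cannot rely on
	 * the thread staying pinned to one CPU.
	 */
	mapped = pmap_map_io_transient(&m, &vaddr, 1, true);
	memcpy(dst, (char *)vaddr, PAGE_SIZE);
	if (__predict_false(mapped))
		pmap_unmap_io_transient(&m, &vaddr, 1, true);
}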