git: 10ebd230f4d9 - main - intel_idpgtbl.c: rename domain_* functions to dmar_*
Date: Fri, 27 Sep 2024 17:34:44 UTC
The branch main has been updated by kib:

URL: https://cgit.FreeBSD.org/src/commit/?id=10ebd230f4d9ed5c6406037972776666a2241107

commit 10ebd230f4d9ed5c6406037972776666a2241107
Author:     Konstantin Belousov <kib@FreeBSD.org>
AuthorDate: 2024-09-25 01:38:42 +0000
Commit:     Konstantin Belousov <kib@FreeBSD.org>
CommitDate: 2024-09-27 17:34:23 +0000

    intel_idpgtbl.c: rename domain_* functions to dmar_*

    Sponsored by:   Advanced Micro Devices (AMD)
    Sponsored by:   The FreeBSD Foundation
    MFC after:      1 week
---
 sys/x86/iommu/intel_ctx.c     |  6 ++--
 sys/x86/iommu/intel_dmar.h    |  6 ++--
 sys/x86/iommu/intel_idpgtbl.c | 66 +++++++++++++++++++++----------------------
 3 files changed, 39 insertions(+), 39 deletions(-)

diff --git a/sys/x86/iommu/intel_ctx.c b/sys/x86/iommu/intel_ctx.c
index 5047acd283e9..659d5c8a35e6 100644
--- a/sys/x86/iommu/intel_ctx.c
+++ b/sys/x86/iommu/intel_ctx.c
@@ -399,7 +399,7 @@ dmar_domain_alloc(struct dmar_unit *dmar, bool id_mapped)
 
 	if (id_mapped) {
 		if ((dmar->hw_ecap & DMAR_ECAP_PT) == 0) {
-			domain->pgtbl_obj = domain_get_idmap_pgtbl(domain,
+			domain->pgtbl_obj = dmar_get_idmap_pgtbl(domain,
 			    domain->iodom.end);
 		}
 		domain->iodom.flags |= IOMMU_DOMAIN_IDMAP;
@@ -864,7 +864,7 @@ dmar_domain_unload_entry(struct iommu_map_entry *entry, bool free,
 			iommu_domain_free_entry(entry, false);
 		}
 	} else {
-		domain_flush_iotlb_sync(domain, entry->start, entry->end -
+		dmar_flush_iotlb_sync(domain, entry->start, entry->end -
 		    entry->start);
 		iommu_domain_free_entry(entry, free);
 	}
@@ -899,7 +899,7 @@ dmar_domain_unload(struct iommu_domain *iodom,
 		    entry->start, cansleep ? IOMMU_PGF_WAITOK : 0);
 		KASSERT(error == 0, ("unmap %p error %d", domain, error));
 		if (!unit->qi_enabled) {
-			domain_flush_iotlb_sync(domain, entry->start,
+			dmar_flush_iotlb_sync(domain, entry->start,
 			    entry->end - entry->start);
 			TAILQ_REMOVE(entries, entry, dmamap_link);
 			iommu_domain_free_entry(entry, true);
diff --git a/sys/x86/iommu/intel_dmar.h b/sys/x86/iommu/intel_dmar.h
index 188e40dec36c..fcdc915abcfd 100644
--- a/sys/x86/iommu/intel_dmar.h
+++ b/sys/x86/iommu/intel_dmar.h
@@ -219,10 +219,10 @@ void dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit);
 void dmar_qi_invalidate_iec_glob(struct dmar_unit *unit);
 void dmar_qi_invalidate_iec(struct dmar_unit *unit, u_int start, u_int cnt);
 
-vm_object_t domain_get_idmap_pgtbl(struct dmar_domain *domain,
+vm_object_t dmar_get_idmap_pgtbl(struct dmar_domain *domain,
     iommu_gaddr_t maxaddr);
-void put_idmap_pgtbl(vm_object_t obj);
-void domain_flush_iotlb_sync(struct dmar_domain *domain, iommu_gaddr_t base,
+void dmar_put_idmap_pgtbl(vm_object_t obj);
+void dmar_flush_iotlb_sync(struct dmar_domain *domain, iommu_gaddr_t base,
     iommu_gaddr_t size);
 int dmar_domain_alloc_pgtbl(struct dmar_domain *domain);
 void dmar_domain_free_pgtbl(struct dmar_domain *domain);
diff --git a/sys/x86/iommu/intel_idpgtbl.c b/sys/x86/iommu/intel_idpgtbl.c
index fbc0e9e97b64..7def178146d1 100644
--- a/sys/x86/iommu/intel_idpgtbl.c
+++ b/sys/x86/iommu/intel_idpgtbl.c
@@ -67,7 +67,7 @@
 #include <x86/iommu/x86_iommu.h>
 #include <x86/iommu/intel_dmar.h>
 
-static int domain_unmap_buf_locked(struct dmar_domain *domain,
+static int dmar_unmap_buf_locked(struct dmar_domain *domain,
     iommu_gaddr_t base, iommu_gaddr_t size, int flags);
 
 /*
@@ -106,7 +106,7 @@ static MALLOC_DEFINE(M_DMAR_IDPGTBL, "dmar_idpgtbl",
  * mapped by the page table page.
  */
 static void
-domain_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx,
+dmar_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx,
     iommu_gaddr_t addr)
 {
 	vm_page_t m1;
@@ -125,7 +125,7 @@ domain_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx,
 	pg_sz = pglvl_page_size(tbl->pglvl, lvl);
 	if (lvl != tbl->leaf) {
 		for (i = 0, f = addr; i < IOMMU_NPTEPG; i++, f += pg_sz)
-			domain_idmap_nextlvl(tbl, lvl + 1, base + i, f);
+			dmar_idmap_nextlvl(tbl, lvl + 1, base + i, f);
 	}
 	VM_OBJECT_WUNLOCK(tbl->pgtbl_obj);
 	pte = iommu_map_pgtbl(tbl->pgtbl_obj, idx, IOMMU_PGF_WAITOK, &sf);
@@ -147,7 +147,7 @@ domain_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx,
 			    VM_PAGE_TO_PHYS(m1)) | DMAR_PTE_R | DMAR_PTE_W;
 		}
 	}
-	/* domain_get_idmap_pgtbl flushes CPU cache if needed. */
+	/* dmar_get_idmap_pgtbl flushes CPU cache if needed. */
 	iommu_unmap_pgtbl(sf);
 	VM_OBJECT_WLOCK(tbl->pgtbl_obj);
 }
@@ -161,7 +161,7 @@ domain_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx,
  * maxaddr is typically mapped.
  */
 vm_object_t
-domain_get_idmap_pgtbl(struct dmar_domain *domain, iommu_gaddr_t maxaddr)
+dmar_get_idmap_pgtbl(struct dmar_domain *domain, iommu_gaddr_t maxaddr)
 {
 	struct dmar_unit *unit;
 	struct idpgtbl *tbl;
@@ -231,7 +231,7 @@ domain_get_idmap_pgtbl(struct dmar_domain *domain, iommu_gaddr_t maxaddr)
 	tbl->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL,
 	    IDX_TO_OFF(pglvl_max_pages(tbl->pglvl)), 0, 0, NULL);
 	VM_OBJECT_WLOCK(tbl->pgtbl_obj);
-	domain_idmap_nextlvl(tbl, 0, 0, 0);
+	dmar_idmap_nextlvl(tbl, 0, 0, 0);
 	VM_OBJECT_WUNLOCK(tbl->pgtbl_obj);
 	LIST_INSERT_HEAD(&idpgtbls, tbl, link);
 	res = tbl->pgtbl_obj;
@@ -273,7 +273,7 @@ end:
  * Return a reference to the identity mapping page table to the cache.
  */
 void
-put_idmap_pgtbl(vm_object_t obj)
+dmar_put_idmap_pgtbl(vm_object_t obj)
 {
 	struct idpgtbl *tbl, *tbl1;
 	vm_object_t rmobj;
@@ -317,7 +317,7 @@ put_idmap_pgtbl(vm_object_t obj)
  */
 
 static iommu_pte_t *
-domain_pgtbl_map_pte(struct dmar_domain *domain, iommu_gaddr_t base, int lvl,
+dmar_pgtbl_map_pte(struct dmar_domain *domain, iommu_gaddr_t base, int lvl,
     int flags, vm_pindex_t *idxp, struct sf_buf **sf)
 {
 	vm_page_t m;
@@ -360,7 +360,7 @@ retry:
 			vm_page_wire(m);
 
 			sfp = NULL;
-			ptep = domain_pgtbl_map_pte(domain, base, lvl - 1,
+			ptep = dmar_pgtbl_map_pte(domain, base, lvl - 1,
 			    flags, &idx1, &sfp);
 			if (ptep == NULL) {
 				KASSERT(m->pindex != 0,
@@ -385,7 +385,7 @@ retry:
 }
 
 static int
-domain_map_buf_locked(struct dmar_domain *domain, iommu_gaddr_t base,
+dmar_map_buf_locked(struct dmar_domain *domain, iommu_gaddr_t base,
    iommu_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags)
 {
 	iommu_pte_t *pte;
@@ -439,13 +439,13 @@ domain_map_buf_locked(struct dmar_domain *domain, iommu_gaddr_t base,
 		    ("mapping loop overflow %p %jx %jx %jx", domain,
 		    (uintmax_t)base, (uintmax_t)size, (uintmax_t)pg_sz));
 		KASSERT(pg_sz > 0, ("pg_sz 0 lvl %d", lvl));
-		pte = domain_pgtbl_map_pte(domain, base, lvl, flags, &idx, &sf);
+		pte = dmar_pgtbl_map_pte(domain, base, lvl, flags, &idx, &sf);
 		if (pte == NULL) {
 			KASSERT((flags & IOMMU_PGF_WAITOK) == 0,
 			    ("failed waitable pte alloc %p", domain));
 			if (sf != NULL)
 				iommu_unmap_pgtbl(sf);
-			domain_unmap_buf_locked(domain, base1, base - base1,
+			dmar_unmap_buf_locked(domain, base1, base - base1,
 			    flags);
 			TD_PINNED_ASSERT;
 			return (ENOMEM);
@@ -462,7 +462,7 @@ domain_map_buf_locked(struct dmar_domain *domain, iommu_gaddr_t base,
 }
 
 static int
-domain_map_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
+dmar_map_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
     iommu_gaddr_t size, vm_page_t *ma, uint64_t eflags, int flags)
 {
 	struct dmar_domain *domain;
@@ -513,13 +513,13 @@ domain_map_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
 	KASSERT((flags & ~IOMMU_PGF_WAITOK) == 0, ("invalid flags %x", flags));
 
 	DMAR_DOMAIN_PGLOCK(domain);
-	error = domain_map_buf_locked(domain, base, size, ma, pflags, flags);
+	error = dmar_map_buf_locked(domain, base, size, ma, pflags, flags);
 	DMAR_DOMAIN_PGUNLOCK(domain);
 	if (error != 0)
 		return (error);
 
 	if ((unit->hw_cap & DMAR_CAP_CM) != 0)
-		domain_flush_iotlb_sync(domain, base, size);
+		dmar_flush_iotlb_sync(domain, base, size);
 	else if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) {
 		/* See 11.1 Write Buffer Flushing. */
 		DMAR_LOCK(unit);
@@ -529,12 +529,12 @@ domain_map_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
 	return (0);
 }
 
-static void domain_unmap_clear_pte(struct dmar_domain *domain,
+static void dmar_unmap_clear_pte(struct dmar_domain *domain,
     iommu_gaddr_t base, int lvl, int flags, iommu_pte_t *pte,
     struct sf_buf **sf, bool free_fs);
 
 static void
-domain_free_pgtbl_pde(struct dmar_domain *domain, iommu_gaddr_t base,
+dmar_free_pgtbl_pde(struct dmar_domain *domain, iommu_gaddr_t base,
     int lvl, int flags)
 {
 	struct sf_buf *sf;
@@ -542,12 +542,12 @@ domain_free_pgtbl_pde(struct dmar_domain *domain, iommu_gaddr_t base,
 	vm_pindex_t idx;
 
 	sf = NULL;
-	pde = domain_pgtbl_map_pte(domain, base, lvl, flags, &idx, &sf);
-	domain_unmap_clear_pte(domain, base, lvl, flags, pde, &sf, true);
+	pde = dmar_pgtbl_map_pte(domain, base, lvl, flags, &idx, &sf);
+	dmar_unmap_clear_pte(domain, base, lvl, flags, pde, &sf, true);
 }
 
 static void
-domain_unmap_clear_pte(struct dmar_domain *domain, iommu_gaddr_t base, int lvl,
+dmar_unmap_clear_pte(struct dmar_domain *domain, iommu_gaddr_t base, int lvl,
     int flags, iommu_pte_t *pte, struct sf_buf **sf, bool free_sf)
 {
 	vm_page_t m;
@@ -568,14 +568,14 @@ domain_unmap_clear_pte(struct dmar_domain *domain, iommu_gaddr_t base, int lvl,
 	    ("lost reference (idx) on root pg domain %p base %jx lvl %d",
 	    domain, (uintmax_t)base, lvl));
 	iommu_pgfree(domain->pgtbl_obj, m->pindex, flags);
-	domain_free_pgtbl_pde(domain, base, lvl - 1, flags);
+	dmar_free_pgtbl_pde(domain, base, lvl - 1, flags);
 }
 
 /*
  * Assumes that the unmap is never partial.
  */
 static int
-domain_unmap_buf_locked(struct dmar_domain *domain, iommu_gaddr_t base,
+dmar_unmap_buf_locked(struct dmar_domain *domain, iommu_gaddr_t base,
     iommu_gaddr_t size, int flags)
 {
 	iommu_pte_t *pte;
@@ -619,14 +619,14 @@ domain_unmap_buf_locked(struct dmar_domain *domain, iommu_gaddr_t base,
 		pg_sz = domain_page_size(domain, lvl);
 		if (pg_sz > size)
 			continue;
-		pte = domain_pgtbl_map_pte(domain, base, lvl, flags,
+		pte = dmar_pgtbl_map_pte(domain, base, lvl, flags,
 		    &idx, &sf);
 		KASSERT(pte != NULL,
 		    ("sleeping or page missed %p %jx %d 0x%x",
 		    domain, (uintmax_t)base, lvl, flags));
 		if ((pte->pte & DMAR_PTE_SP) != 0 ||
 		    lvl == domain->pglvl - 1) {
-			domain_unmap_clear_pte(domain, base, lvl,
+			dmar_unmap_clear_pte(domain, base, lvl,
 			    flags, pte, &sf, false);
 			break;
 		}
@@ -647,7 +647,7 @@ domain_unmap_buf_locked(struct dmar_domain *domain, iommu_gaddr_t base,
 }
 
 static int
-domain_unmap_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
+dmar_unmap_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
     iommu_gaddr_t size, int flags)
 {
 	struct dmar_domain *domain;
@@ -656,7 +656,7 @@ domain_unmap_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
 
 	domain = IODOM2DOM(iodom);
 	DMAR_DOMAIN_PGLOCK(domain);
-	error = domain_unmap_buf_locked(domain, base, size, flags);
+	error = dmar_unmap_buf_locked(domain, base, size, flags);
 	DMAR_DOMAIN_PGUNLOCK(domain);
 	return (error);
 }
@@ -700,7 +700,7 @@ dmar_domain_free_pgtbl(struct dmar_domain *domain)
 	domain->pgtbl_obj = NULL;
 
 	if ((domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0) {
-		put_idmap_pgtbl(obj);
+		dmar_put_idmap_pgtbl(obj);
 		domain->iodom.flags &= ~IOMMU_DOMAIN_IDMAP;
 		return;
 	}
@@ -716,7 +716,7 @@ dmar_domain_free_pgtbl(struct dmar_domain *domain)
 }
 
 static inline uint64_t
-domain_wait_iotlb_flush(struct dmar_unit *unit, uint64_t wt, int iro)
+dmar_wait_iotlb_flush(struct dmar_unit *unit, uint64_t wt, int iro)
 {
 	uint64_t iotlbr;
 
@@ -732,7 +732,7 @@ domain_wait_iotlb_flush(struct dmar_unit *unit, uint64_t wt, int iro)
 }
 
 void
-domain_flush_iotlb_sync(struct dmar_domain *domain, iommu_gaddr_t base,
+dmar_flush_iotlb_sync(struct dmar_domain *domain, iommu_gaddr_t base,
     iommu_gaddr_t size)
 {
 	struct dmar_unit *unit;
@@ -746,7 +746,7 @@ domain_flush_iotlb_sync(struct dmar_domain *domain, iommu_gaddr_t base,
 	iro = DMAR_ECAP_IRO(unit->hw_ecap) * 16;
 	DMAR_LOCK(unit);
 	if ((unit->hw_cap & DMAR_CAP_PSI) == 0 || size > 2 * 1024 * 1024) {
-		iotlbr = domain_wait_iotlb_flush(unit, DMAR_IOTLB_IIRG_DOM |
+		iotlbr = dmar_wait_iotlb_flush(unit, DMAR_IOTLB_IIRG_DOM |
 		    DMAR_IOTLB_DID(domain->domain), iro);
 		KASSERT((iotlbr & DMAR_IOTLB_IAIG_MASK) !=
 		    DMAR_IOTLB_IAIG_INVLD,
@@ -756,7 +756,7 @@ domain_flush_iotlb_sync(struct dmar_domain *domain, iommu_gaddr_t base,
 		for (; size > 0; base += isize, size -= isize) {
 			am = calc_am(unit, base, size, &isize);
 			dmar_write8(unit, iro, base | am);
-			iotlbr = domain_wait_iotlb_flush(unit,
+			iotlbr = dmar_wait_iotlb_flush(unit,
 			    DMAR_IOTLB_IIRG_PAGE |
 			    DMAR_IOTLB_DID(domain->domain), iro);
 			KASSERT((iotlbr & DMAR_IOTLB_IAIG_MASK) !=
@@ -778,6 +778,6 @@ domain_flush_iotlb_sync(struct dmar_domain *domain, iommu_gaddr_t base,
 }
 
 const struct iommu_domain_map_ops dmar_domain_map_ops = {
-	.map = domain_map_buf,
-	.unmap = domain_unmap_buf,
+	.map = dmar_map_buf,
+	.unmap = dmar_unmap_buf,
 };
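A note on the final hunk: the static dmar_map_buf/dmar_unmap_buf entry
points are only ever reached through the iommu_domain_map_ops
function-pointer table, so their rename needs no caller changes at all,
while the extern functions (dmar_get_idmap_pgtbl, dmar_flush_iotlb_sync,
dmar_put_idmap_pgtbl) also required the header and intel_ctx.c updates
seen above.  Below is a minimal standalone C sketch of that
table-dispatch pattern; the names in it are illustrative stand-ins, not
the FreeBSD API.

/*
 * Illustrative sketch only.  It mimics how an ops table such as
 * iommu_domain_map_ops decouples callers from the static
 * implementations behind it: renaming the functions touches just
 * their definitions and the table initializer.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gaddr_t;	/* hypothetical stand-in for iommu_gaddr_t */

struct map_ops {
	int (*map)(gaddr_t base, gaddr_t size);
	int (*unmap)(gaddr_t base, gaddr_t size);
};

static int
dmar_map_buf_sketch(gaddr_t base, gaddr_t size)
{
	printf("map   [%#jx, %#jx)\n", (uintmax_t)base,
	    (uintmax_t)(base + size));
	return (0);
}

static int
dmar_unmap_buf_sketch(gaddr_t base, gaddr_t size)
{
	printf("unmap [%#jx, %#jx)\n", (uintmax_t)base,
	    (uintmax_t)(base + size));
	return (0);
}

/* Callers dispatch through the table and never name the functions. */
static const struct map_ops map_ops_sketch = {
	.map = dmar_map_buf_sketch,
	.unmap = dmar_unmap_buf_sketch,
};

int
main(void)
{
	map_ops_sketch.map(0x1000, 0x2000);
	map_ops_sketch.unmap(0x1000, 0x2000);
	return (0);
}

Renaming the two sketch functions would require touching only their
definitions and the initializer of map_ops_sketch, which is exactly the
shape of the change in the commit.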