svn commit: r254638 - in projects/bhyve_npt_pmap: lib/libproc share/man/man4 share/man/man9 sys/amd64/amd64 sys/arm/arm sys/arm/s3c2xx0 sys/dev/drm2/ttm sys/dev/virtio/balloon sys/i386/i386 sys/ker...
Neel Natu
neel at FreeBSD.org
Thu Aug 22 05:03:43 UTC 2013
Author: neel
Date: Thu Aug 22 05:03:41 2013
New Revision: 254638
URL: http://svnweb.freebsd.org/changeset/base/254638
Log:
IFC @r254182
Modified:
projects/bhyve_npt_pmap/lib/libproc/proc_sym.c
projects/bhyve_npt_pmap/share/man/man4/rsu.4
projects/bhyve_npt_pmap/share/man/man9/mutex.9
projects/bhyve_npt_pmap/share/man/man9/vm_page_busy.9
projects/bhyve_npt_pmap/sys/amd64/amd64/pmap.c
projects/bhyve_npt_pmap/sys/arm/arm/bus_space_generic.c
projects/bhyve_npt_pmap/sys/arm/s3c2xx0/s3c2xx0_space.c
projects/bhyve_npt_pmap/sys/dev/drm2/ttm/ttm_page_alloc.c
projects/bhyve_npt_pmap/sys/dev/virtio/balloon/virtio_balloon.c
projects/bhyve_npt_pmap/sys/i386/i386/pmap.c
projects/bhyve_npt_pmap/sys/kern/init_main.c
projects/bhyve_npt_pmap/sys/kern/kern_malloc.c
projects/bhyve_npt_pmap/sys/kern/kern_synch.c
projects/bhyve_npt_pmap/sys/sys/proc.h
projects/bhyve_npt_pmap/sys/vm/device_pager.c
projects/bhyve_npt_pmap/sys/vm/memguard.c
projects/bhyve_npt_pmap/sys/vm/sg_pager.c
projects/bhyve_npt_pmap/sys/vm/uma_core.c
projects/bhyve_npt_pmap/sys/vm/uma_int.h
projects/bhyve_npt_pmap/sys/vm/vm_page.c
projects/bhyve_npt_pmap/sys/vm/vm_page.h
projects/bhyve_npt_pmap/sys/vm/vm_pageout.c
projects/bhyve_npt_pmap/sys/vm/vm_param.h
projects/bhyve_npt_pmap/sys/vm/vm_phys.c
projects/bhyve_npt_pmap/usr.sbin/watchdogd/watchdogd.c
Directory Properties:
projects/bhyve_npt_pmap/ (props changed)
projects/bhyve_npt_pmap/share/man/man4/ (props changed)
projects/bhyve_npt_pmap/sys/ (props changed)
Modified: projects/bhyve_npt_pmap/lib/libproc/proc_sym.c
==============================================================================
--- projects/bhyve_npt_pmap/lib/libproc/proc_sym.c Thu Aug 22 04:36:15 2013 (r254637)
+++ projects/bhyve_npt_pmap/lib/libproc/proc_sym.c Thu Aug 22 05:03:41 2013 (r254638)
@@ -299,6 +299,7 @@ proc_addr2sym(struct proc_handle *p, uin
* the function.
*/
symcopy->st_value = rsym;
+ error = 0;
goto out;
}
}
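The added "error = 0" makes the early "goto out" path return success once a matching symbol has been found and symcopy->st_value adjusted, instead of whatever value error happened to hold. A hedged, self-contained usage sketch follows; the prototype is assumed to match <libproc.h> on this branch, and obtaining the proc_handle (e.g. via proc_attach()) is left out, so treat those details as assumptions rather than part of the commit.

/*
 * Usage sketch (not part of the diff): with the fix above, a zero return
 * from proc_addr2sym() reliably means "name" and "sym" were filled in.
 */
#include <libproc.h>
#include <gelf.h>
#include <stdint.h>
#include <stdio.h>

static void
print_sym(struct proc_handle *phdl, uintptr_t addr)
{
	char name[256];
	GElf_Sym sym;

	if (proc_addr2sym(phdl, addr, name, sizeof(name), &sym) == 0)
		printf("%#jx -> %s (st_value %#jx)\n", (uintmax_t)addr,
		    name, (uintmax_t)sym.st_value);
	else
		printf("%#jx -> no symbol found\n", (uintmax_t)addr);
}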
Modified: projects/bhyve_npt_pmap/share/man/man4/rsu.4
==============================================================================
--- projects/bhyve_npt_pmap/share/man/man4/rsu.4 Thu Aug 22 04:36:15 2013 (r254637)
+++ projects/bhyve_npt_pmap/share/man/man4/rsu.4 Thu Aug 22 05:03:41 2013 (r254638)
@@ -22,7 +22,6 @@
.Nm rsu
.Nd Realtek RTL8188SU/RTL8192SU USB IEEE 802.11b/g/n wireless network device
.Sh SYNOPSIS
-.\.Cd "rsu* at uhub? port ?"
To compile this driver into the kernel,
place the following lines in your kernel configuration file:
.Bd -ragged -offset indent
@@ -120,6 +119,7 @@ The following adapters should work:
.It Sweex LW154
.It TRENDnet TEW-648UB
.It TRENDnet TEW-649UB
+.El
.Sh EXAMPLES
Join an existing BSS network (i.e., connect to an access point):
.Bd -literal -offset indent
@@ -149,14 +149,14 @@ The driver will reset the hardware.
This should not happen.
.El
.Sh SEE ALSO
-.Xr arp 8 ,
+.Xr hostname 1 ,
.Xr intro 1 ,
-.Xr netintro 4 ,
.Xr usb 3 ,
-.Xr hostname 1 ,
-.Xr ifconfig 8,
+.Xr netintro 4 ,
+.Xr rsufw 4 ,
.Xr wlan 4 ,
-.Xr rsufw 4
+.Xr arp 8 ,
+.Xr ifconfig 8
.Sh HISTORY
The
.Nm
Modified: projects/bhyve_npt_pmap/share/man/man9/mutex.9
==============================================================================
--- projects/bhyve_npt_pmap/share/man/man9/mutex.9 Thu Aug 22 04:36:15 2013 (r254637)
+++ projects/bhyve_npt_pmap/share/man/man9/mutex.9 Thu Aug 22 05:03:41 2013 (r254638)
@@ -226,7 +226,7 @@ lock, respectively, and also accept a
.Fa flags
argument.
In both cases, the only flags presently available for lock acquires are
-.Dv MTX_QUIET
+.Dv MTX_QUIET
and
.Dv MTX_RECURSE .
If the
@@ -243,7 +243,6 @@ bit is turned on in the
.Fa flags
argument, then the mutex can be acquired recursively.
.Pp
-.Pp
The
.Fn mtx_trylock
attempts to acquire the
Modified: projects/bhyve_npt_pmap/share/man/man9/vm_page_busy.9
==============================================================================
--- projects/bhyve_npt_pmap/share/man/man9/vm_page_busy.9 Thu Aug 22 04:36:15 2013 (r254637)
+++ projects/bhyve_npt_pmap/share/man/man9/vm_page_busy.9 Thu Aug 22 05:03:41 2013 (r254638)
@@ -25,7 +25,7 @@
.\"
.\" $FreeBSD$
.Dd August 07, 2013
-.Dt vm_page_busy 9
+.Dt VM_PAGE_BUSY 9
.Os
.Sh NAME
.Nm vm_page_busied ,
Modified: projects/bhyve_npt_pmap/sys/amd64/amd64/pmap.c
==============================================================================
--- projects/bhyve_npt_pmap/sys/amd64/amd64/pmap.c Thu Aug 22 04:36:15 2013 (r254637)
+++ projects/bhyve_npt_pmap/sys/amd64/amd64/pmap.c Thu Aug 22 05:03:41 2013 (r254638)
@@ -392,13 +392,12 @@ static boolean_t pmap_protect_pde(pmap_t
vm_prot_t prot);
static void pmap_pte_attr(pt_entry_t *pte, int cache_bits, int mask);
static int pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
- vm_page_t *free, struct rwlock **lockp);
-static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq,
- vm_offset_t sva, pd_entry_t ptepde, vm_page_t *free,
- struct rwlock **lockp);
+ struct spglist *free, struct rwlock **lockp);
+static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
+ pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp);
static void pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte);
static void pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
- vm_page_t *free);
+ struct spglist *free);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
vm_page_t m, struct rwlock **lockp);
static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
@@ -413,8 +412,8 @@ static vm_page_t pmap_allocpte(pmap_t pm
struct rwlock **lockp);
static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m,
- vm_page_t *free);
-static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, vm_page_t *);
+ struct spglist *free);
+static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);
static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
/*
@@ -1818,14 +1817,12 @@ pmap_qremove(vm_offset_t sva, int count)
* Page table page management routines.....
***************************************************/
static __inline void
-pmap_free_zero_pages(vm_page_t free)
+pmap_free_zero_pages(struct spglist *free)
{
vm_page_t m;
- while (free != NULL) {
- m = free;
- free = (void *)m->object;
- m->object = NULL;
+ while ((m = SLIST_FIRST(free)) != NULL) {
+ SLIST_REMOVE_HEAD(free, plinks.s.ss);
/* Preserve the page's PG_ZERO setting. */
vm_page_free_toq(m);
}
@@ -1837,15 +1834,15 @@ pmap_free_zero_pages(vm_page_t free)
* physical memory manager after the TLB has been updated.
*/
static __inline void
-pmap_add_delayed_free_list(vm_page_t m, vm_page_t *free, boolean_t set_PG_ZERO)
+pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
+ boolean_t set_PG_ZERO)
{
if (set_PG_ZERO)
m->flags |= PG_ZERO;
else
m->flags &= ~PG_ZERO;
- m->object = (void *)*free;
- *free = m;
+ SLIST_INSERT_HEAD(free, m, plinks.s.ss);
}
/*
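The two hunks above are the heart of the mechanical change repeated throughout both pmap.c files: the old trick of chaining to-be-freed page table pages through the page's object pointer is replaced by a real singly linked list head, struct spglist, threaded through the page's plinks.s.ss entry. Below is a minimal, self-contained sketch of the new idiom; the stripped-down vm_page stand-in exists only so the snippet compiles on its own with <sys/queue.h>, and only the list handling mirrors the kernel code.

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct vm_page {				/* simplified stand-in */
	int pindex;
	struct {
		struct {
			SLIST_ENTRY(vm_page) ss; /* mirrors plinks.s.ss */
		} s;
	} plinks;
};

SLIST_HEAD(spglist, vm_page);			/* head type, as in vm_page.h */

/* Same drain loop as the new pmap_free_zero_pages(). */
static void
drain_spglist(struct spglist *spgl)
{
	struct vm_page *m;

	while ((m = SLIST_FIRST(spgl)) != NULL) {
		SLIST_REMOVE_HEAD(spgl, plinks.s.ss);
		printf("freeing page %d\n", m->pindex);
		free(m);			/* stands in for vm_page_free_toq() */
	}
}

int
main(void)
{
	struct spglist spgl;
	struct vm_page *m;
	int i;

	SLIST_INIT(&spgl);			/* replaces "free = NULL" */
	for (i = 0; i < 3; i++) {
		if ((m = calloc(1, sizeof(*m))) == NULL)
			break;
		m->pindex = i;
		/* Replaces the old "m->object = (void *)*free; *free = m;". */
		SLIST_INSERT_HEAD(&spgl, m, plinks.s.ss);
	}
	drain_spglist(&spgl);
	return (0);
}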
@@ -1895,7 +1892,7 @@ pmap_remove_pt_page(pmap_t pmap, vm_page
* page table page was unmapped and FALSE otherwise.
*/
static inline boolean_t
-pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t *free)
+pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
{
--m->wire_count;
@@ -1907,7 +1904,7 @@ pmap_unwire_ptp(pmap_t pmap, vm_offset_t
}
static void
-_pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t *free)
+_pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
{
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -1965,7 +1962,8 @@ _pmap_unwire_ptp(pmap_t pmap, vm_offset_
* conditionally free the page, and manage the hold/wire counts.
*/
static int
-pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde, vm_page_t *free)
+pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
+ struct spglist *free)
{
vm_page_t mpte;
@@ -2481,7 +2479,8 @@ reclaim_pv_chunk(pmap_t locked_pmap, str
pt_entry_t *pte, tpte, PG_G, PG_A, PG_M;
pv_entry_t pv;
vm_offset_t va;
- vm_page_t free, m, m_pc;
+ vm_page_t m, m_pc;
+ struct spglist free;
uint64_t inuse;
int bit, field, freed;
@@ -2489,10 +2488,11 @@ reclaim_pv_chunk(pmap_t locked_pmap, str
PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
pmap = NULL;
- free = m_pc = NULL;
+ m_pc = NULL;
+ SLIST_INIT(&free);
TAILQ_INIT(&new_tail);
mtx_lock(&pv_chunks_mutex);
- while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && free == NULL) {
+ while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && SLIST_EMPTY(&free)) {
TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
mtx_unlock(&pv_chunks_mutex);
if (pmap != pc->pc_pmap) {
@@ -2557,7 +2557,7 @@ reclaim_pv_chunk(pmap_t locked_pmap, str
}
}
pc->pc_map[field] |= 1UL << bit;
- pmap_unuse_pt(pmap, va, *pde, &free);
+ pmap_unuse_pt(pmap, va, *pde, &free);
freed++;
}
}
@@ -2597,15 +2597,14 @@ reclaim_pv_chunk(pmap_t locked_pmap, str
if (pmap != locked_pmap)
PMAP_UNLOCK(pmap);
}
- if (m_pc == NULL && free != NULL) {
- m_pc = free;
- free = (void *)m_pc->object;
- m_pc->object = NULL;
+ if (m_pc == NULL && !SLIST_EMPTY(&free)) {
+ m_pc = SLIST_FIRST(&free);
+ SLIST_REMOVE_HEAD(&free, plinks.s.ss);
/* Recycle a freed page table page. */
m_pc->wire_count = 1;
atomic_add_int(&cnt.v_wire_count, 1);
}
- pmap_free_zero_pages(free);
+ pmap_free_zero_pages(&free);
return (m_pc);
}
@@ -3054,7 +3053,8 @@ pmap_demote_pde_locked(pmap_t pmap, pd_e
pd_entry_t newpde, oldpde;
pt_entry_t *firstpte, newpte, PG_G, PG_A, PG_M;
vm_paddr_t mptepa;
- vm_page_t free, mpte;
+ vm_page_t mpte;
+ struct spglist free;
int PG_PTE_CACHE;
PG_G = pmap_global_bit(pmap);
@@ -3090,11 +3090,11 @@ pmap_demote_pde_locked(pmap_t pmap, pd_e
pmap_pde_pindex(va), (va >= DMAP_MIN_ADDRESS && va <
DMAP_MAX_ADDRESS ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
- free = NULL;
+ SLIST_INIT(&free);
pmap_remove_pde(pmap, pde, trunc_2mpage(va), &free,
lockp);
pmap_invalidate_page(pmap, trunc_2mpage(va));
- pmap_free_zero_pages(free);
+ pmap_free_zero_pages(&free);
CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#lx"
" in pmap %p", va, pmap);
return (FALSE);
@@ -3217,7 +3217,7 @@ pmap_remove_kernel_pde(pmap_t pmap, pd_e
*/
static int
pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
- vm_page_t *free, struct rwlock **lockp)
+ struct spglist *free, struct rwlock **lockp)
{
struct md_page *pvh;
pd_entry_t oldpde;
@@ -3284,7 +3284,7 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t
*/
static int
pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
- pd_entry_t ptepde, vm_page_t *free, struct rwlock **lockp)
+ pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp)
{
struct md_page *pvh;
pt_entry_t oldpte, PG_A, PG_M;
@@ -3323,7 +3323,8 @@ pmap_remove_pte(pmap_t pmap, pt_entry_t
* Remove a single page from a process address space
*/
static void
-pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, vm_page_t *free)
+pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
+ struct spglist *free)
{
struct rwlock *lock;
pt_entry_t *pte;
@@ -3356,7 +3357,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva
pdp_entry_t *pdpe;
pd_entry_t ptpaddr, *pde;
pt_entry_t *pte, PG_G;
- vm_page_t free = NULL;
+ struct spglist free;
int anyvalid;
PG_G = pmap_global_bit(pmap);
@@ -3368,6 +3369,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva
return;
anyvalid = 0;
+ SLIST_INIT(&free);
rw_rlock(&pvh_global_lock);
PMAP_LOCK(pmap);
@@ -3486,7 +3488,7 @@ out:
pmap_invalidate_all(pmap);
rw_runlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
- pmap_free_zero_pages(free);
+ pmap_free_zero_pages(&free);
}
/*
@@ -3511,11 +3513,11 @@ pmap_remove_all(vm_page_t m)
pt_entry_t *pte, tpte, PG_A, PG_M;
pd_entry_t *pde;
vm_offset_t va;
- vm_page_t free;
+ struct spglist free;
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_remove_all: page %p is not managed", m));
- free = NULL;
+ SLIST_INIT(&free);
rw_wlock(&pvh_global_lock);
if ((m->flags & PG_FICTITIOUS) != 0)
goto small_mappings;
@@ -3562,7 +3564,7 @@ small_mappings:
}
vm_page_aflag_clear(m, PGA_WRITEABLE);
rw_wunlock(&pvh_global_lock);
- pmap_free_zero_pages(free);
+ pmap_free_zero_pages(&free);
}
/*
@@ -4165,7 +4167,8 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t
struct rwlock **lockp)
{
pd_entry_t *pde, newpde;
- vm_page_t free, mpde;
+ vm_page_t mpde;
+ struct spglist free;
rw_assert(&pvh_global_lock, RA_LOCKED);
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -4203,10 +4206,10 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t
*/
if (!pmap_pv_insert_pde(pmap, va, VM_PAGE_TO_PHYS(m),
lockp)) {
- free = NULL;
+ SLIST_INIT(&free);
if (pmap_unwire_ptp(pmap, va, mpde, &free)) {
pmap_invalidate_page(pmap, va);
- pmap_free_zero_pages(free);
+ pmap_free_zero_pages(&free);
}
CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
" in pmap %p", va, pmap);
@@ -4310,7 +4313,7 @@ static vm_page_t
pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
{
- vm_page_t free;
+ struct spglist free;
pt_entry_t *pte;
vm_paddr_t pa;
@@ -4389,10 +4392,10 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
if ((m->oflags & VPO_UNMANAGED) == 0 &&
!pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
if (mpte != NULL) {
- free = NULL;
+ SLIST_INIT(&free);
if (pmap_unwire_ptp(pmap, va, mpte, &free)) {
pmap_invalidate_page(pmap, va);
- pmap_free_zero_pages(free);
+ pmap_free_zero_pages(&free);
}
mpte = NULL;
}
@@ -4594,7 +4597,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pm
vm_offset_t src_addr)
{
struct rwlock *lock;
- vm_page_t free;
+ struct spglist free;
vm_offset_t addr;
vm_offset_t end_addr = src_addr + len;
vm_offset_t va_next;
@@ -4710,13 +4713,13 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pm
*dst_pte = ptetemp & ~(PG_W | PG_M |
PG_A);
pmap_resident_count_inc(dst_pmap, 1);
- } else {
- free = NULL;
+ } else {
+ SLIST_INIT(&free);
if (pmap_unwire_ptp(dst_pmap, addr,
dstmpte, &free)) {
- pmap_invalidate_page(dst_pmap,
- addr);
- pmap_free_zero_pages(free);
+ pmap_invalidate_page(dst_pmap,
+ addr);
+ pmap_free_zero_pages(&free);
}
goto out;
}
@@ -4733,10 +4736,10 @@ out:
rw_runlock(&pvh_global_lock);
PMAP_UNLOCK(src_pmap);
PMAP_UNLOCK(dst_pmap);
-}
+}
/*
- * pmap_zero_page zeros the specified hardware page by mapping
+ * pmap_zero_page zeros the specified hardware page by mapping
* the page into KVM and using bzero to clear its contents.
*/
void
@@ -4951,7 +4954,7 @@ pmap_remove_pages(pmap_t pmap)
{
pd_entry_t ptepde;
pt_entry_t *pte, tpte, PG_M;
- vm_page_t free = NULL;
+ struct spglist free;
vm_page_t m, mpte, mt;
pv_entry_t pv;
struct md_page *pvh;
@@ -4971,6 +4974,7 @@ pmap_remove_pages(pmap_t pmap)
lock = NULL;
PG_M = pmap_modified_bit(pmap);
+ SLIST_INIT(&free);
rw_rlock(&pvh_global_lock);
PMAP_LOCK(pmap);
TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
@@ -5109,7 +5113,7 @@ pmap_remove_pages(pmap_t pmap)
pmap_invalidate_all(pmap);
rw_runlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
- pmap_free_zero_pages(free);
+ pmap_free_zero_pages(&free);
}
/*
@@ -5333,10 +5337,11 @@ pmap_ts_referenced(vm_page_t m)
pt_entry_t *pte, PG_A;
vm_offset_t va;
int rtval, pvh_gen, md_gen;
- vm_page_t free = NULL;
+ struct spglist free;
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_ts_referenced: page %p is not managed", m));
+ SLIST_INIT(&free);
rw_rlock(&pvh_global_lock);
lock = VM_PAGE_TO_PV_LIST_LOCK(m);
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
@@ -5456,7 +5461,7 @@ small_mappings:
out:
rw_wunlock(lock);
rw_runlock(&pvh_global_lock);
- pmap_free_zero_pages(free);
+ pmap_free_zero_pages(&free);
return (rtval);
}
@@ -5573,10 +5578,11 @@ pmap_clear_reference(vm_page_t m)
pd_entry_t oldpde, *pde;
pt_entry_t *pte, PG_A;
vm_offset_t va;
- vm_page_t free = NULL;
+ struct spglist free;
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_clear_reference: page %p is not managed", m));
+ SLIST_INIT(&free);
rw_wlock(&pvh_global_lock);
if ((m->flags & PG_FICTITIOUS) != 0)
goto small_mappings;
@@ -5633,7 +5639,7 @@ small_mappings:
PMAP_UNLOCK(pmap);
}
rw_wunlock(&pvh_global_lock);
- pmap_free_zero_pages(free);
+ pmap_free_zero_pages(&free);
}
/*
Modified: projects/bhyve_npt_pmap/sys/arm/arm/bus_space_generic.c
==============================================================================
--- projects/bhyve_npt_pmap/sys/arm/arm/bus_space_generic.c Thu Aug 22 04:36:15 2013 (r254637)
+++ projects/bhyve_npt_pmap/sys/arm/arm/bus_space_generic.c Thu Aug 22 05:03:41 2013 (r254638)
@@ -104,21 +104,21 @@ generic_bs_alloc(void *t, bus_addr_t rst
void
generic_bs_unmap(void *t, bus_space_handle_t h, bus_size_t size)
{
- vm_offset_t va, endva;
+ vm_offset_t va, endva, origva;
- if (pmap_devmap_find_va((vm_offset_t)t, size) != NULL) {
+ if (pmap_devmap_find_va((vm_offset_t)h, size) != NULL) {
/* Device was statically mapped; nothing to do. */
return;
}
- endva = round_page((vm_offset_t)t + size);
- va = trunc_page((vm_offset_t)t);
+ endva = round_page((vm_offset_t)h + size);
+ origva = va = trunc_page((vm_offset_t)h);
while (va < endva) {
pmap_kremove(va);
va += PAGE_SIZE;
}
- kva_free(va, endva - va);
+ kva_free(origva, endva - origva);
}
void
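The unmap fix above corrects two things: the devmap lookup and the page arithmetic used the bus space tag t where they should have used the handle h, and kva_free() was called only after the loop had advanced va to endva, so it always freed a zero-length range. The small, self-contained illustration below shows just the second point; PAGE_SIZE, the example address, and the open-coded round_page()/trunc_page() are assumptions for the sketch, not kernel values.

#include <stdio.h>

#define PAGE_SIZE	4096UL		/* assumed page size for illustration */
#define trunc_page(x)	((x) & ~(PAGE_SIZE - 1))
#define round_page(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int
main(void)
{
	unsigned long h = 0x12345678UL;	/* hypothetical mapped handle */
	unsigned long size = 3 * PAGE_SIZE;
	unsigned long endva = round_page(h + size);
	unsigned long va = trunc_page(h);
	unsigned long origva = va;

	while (va < endva)
		va += PAGE_SIZE;	/* stands in for pmap_kremove(va) */

	printf("old length: %lu bytes\n", endva - va);		/* 0: nothing freed */
	printf("new length: %lu bytes\n", endva - origva);	/* whole mapping */
	return (0);
}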
Modified: projects/bhyve_npt_pmap/sys/arm/s3c2xx0/s3c2xx0_space.c
==============================================================================
--- projects/bhyve_npt_pmap/sys/arm/s3c2xx0/s3c2xx0_space.c Thu Aug 22 04:36:15 2013 (r254637)
+++ projects/bhyve_npt_pmap/sys/arm/s3c2xx0/s3c2xx0_space.c Thu Aug 22 05:03:41 2013 (r254638)
@@ -200,21 +200,21 @@ s3c2xx0_bs_map(void *t, bus_addr_t bpa,
void
s3c2xx0_bs_unmap(void *t, bus_space_handle_t h, bus_size_t size)
{
- vm_offset_t va, endva;
+ vm_offset_t va, endva, origva;
- if (pmap_devmap_find_va((vm_offset_t)t, size) != NULL) {
+ if (pmap_devmap_find_va((vm_offset_t)h, size) != NULL) {
/* Device was statically mapped; nothing to do. */
return;
}
- endva = round_page((vm_offset_t)t + size);
- va = trunc_page((vm_offset_t)t);
+ endva = round_page((vm_offset_t)h + size);
+ origva = va = trunc_page((vm_offset_t)h);
while (va < endva) {
pmap_kremove(va);
va += PAGE_SIZE;
}
- kva_free(va, endva - va);
+ kva_free(origva, endva - origva);
}
int
Modified: projects/bhyve_npt_pmap/sys/dev/drm2/ttm/ttm_page_alloc.c
==============================================================================
--- projects/bhyve_npt_pmap/sys/dev/drm2/ttm/ttm_page_alloc.c Thu Aug 22 04:36:15 2013 (r254637)
+++ projects/bhyve_npt_pmap/sys/dev/drm2/ttm/ttm_page_alloc.c Thu Aug 22 05:03:41 2013 (r254638)
@@ -330,7 +330,7 @@ static int ttm_page_pool_free(struct ttm
restart:
mtx_lock(&pool->lock);
- TAILQ_FOREACH_REVERSE_SAFE(p, &pool->list, pglist, pageq, p1) {
+ TAILQ_FOREACH_REVERSE_SAFE(p, &pool->list, pglist, plinks.q, p1) {
if (freed_pages >= npages_to_free)
break;
@@ -338,7 +338,7 @@ restart:
/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
if (freed_pages >= NUM_PAGES_TO_ALLOC) {
/* remove range of pages from the pool */
- TAILQ_REMOVE(&pool->list, p, pageq);
+ TAILQ_REMOVE(&pool->list, p, plinks.q);
ttm_pool_update_free_locked(pool, freed_pages);
/**
@@ -373,7 +373,7 @@ restart:
/* remove range of pages from the pool */
if (freed_pages) {
- TAILQ_REMOVE(&pool->list, p, pageq);
+ TAILQ_REMOVE(&pool->list, p, plinks.q);
ttm_pool_update_free_locked(pool, freed_pages);
nr_free -= freed_pages;
@@ -470,7 +470,7 @@ static void ttm_handle_caching_state_fai
unsigned i;
/* Failed pages have to be freed */
for (i = 0; i < cpages; ++i) {
- TAILQ_REMOVE(pages, failed_pages[i], pageq);
+ TAILQ_REMOVE(pages, failed_pages[i], plinks.q);
ttm_vm_page_free(failed_pages[i]);
}
}
@@ -545,7 +545,7 @@ static int ttm_alloc_new_pages(struct pg
}
}
- TAILQ_INSERT_HEAD(pages, p, pageq);
+ TAILQ_INSERT_HEAD(pages, p, plinks.q);
}
if (cpages) {
@@ -600,16 +600,16 @@ static void ttm_page_pool_fill_locked(st
mtx_lock(&pool->lock);
if (!r) {
- TAILQ_CONCAT(&pool->list, &new_pages, pageq);
+ TAILQ_CONCAT(&pool->list, &new_pages, plinks.q);
++pool->nrefills;
pool->npages += alloc_size;
} else {
printf("[TTM] Failed to fill pool (%p)\n", pool);
/* If we have any pages left put them to the pool. */
- TAILQ_FOREACH(p, &pool->list, pageq) {
+ TAILQ_FOREACH(p, &pool->list, plinks.q) {
++cpages;
}
- TAILQ_CONCAT(&pool->list, &new_pages, pageq);
+ TAILQ_CONCAT(&pool->list, &new_pages, plinks.q);
pool->npages += cpages;
}
@@ -636,15 +636,15 @@ static unsigned ttm_page_pool_get_pages(
if (count >= pool->npages) {
/* take all pages from the pool */
- TAILQ_CONCAT(pages, &pool->list, pageq);
+ TAILQ_CONCAT(pages, &pool->list, plinks.q);
count -= pool->npages;
pool->npages = 0;
goto out;
}
for (i = 0; i < count; i++) {
p = TAILQ_FIRST(&pool->list);
- TAILQ_REMOVE(&pool->list, p, pageq);
- TAILQ_INSERT_TAIL(pages, p, pageq);
+ TAILQ_REMOVE(&pool->list, p, plinks.q);
+ TAILQ_INSERT_TAIL(pages, p, plinks.q);
}
pool->npages -= count;
count = 0;
@@ -674,7 +674,7 @@ static void ttm_put_pages(vm_page_t *pag
mtx_lock(&pool->lock);
for (i = 0; i < npages; i++) {
if (pages[i]) {
- TAILQ_INSERT_TAIL(&pool->list, pages[i], pageq);
+ TAILQ_INSERT_TAIL(&pool->list, pages[i], plinks.q);
pages[i] = NULL;
pool->npages++;
}
@@ -735,13 +735,13 @@ static int ttm_get_pages(vm_page_t *page
TAILQ_INIT(&plist);
npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
count = 0;
- TAILQ_FOREACH(p, &plist, pageq) {
+ TAILQ_FOREACH(p, &plist, plinks.q) {
pages[count++] = p;
}
/* clear the pages coming from the pool if requested */
if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
- TAILQ_FOREACH(p, &plist, pageq) {
+ TAILQ_FOREACH(p, &plist, plinks.q) {
pmap_zero_page(p);
}
}
@@ -754,7 +754,7 @@ static int ttm_get_pages(vm_page_t *page
TAILQ_INIT(&plist);
r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate,
npages);
- TAILQ_FOREACH(p, &plist, pageq) {
+ TAILQ_FOREACH(p, &plist, plinks.q) {
pages[count++] = p;
}
if (r) {
Modified: projects/bhyve_npt_pmap/sys/dev/virtio/balloon/virtio_balloon.c
==============================================================================
--- projects/bhyve_npt_pmap/sys/dev/virtio/balloon/virtio_balloon.c Thu Aug 22 04:36:15 2013 (r254637)
+++ projects/bhyve_npt_pmap/sys/dev/virtio/balloon/virtio_balloon.c Thu Aug 22 05:03:41 2013 (r254638)
@@ -334,7 +334,7 @@ vtballoon_inflate(struct vtballoon_softc
KASSERT(m->queue == PQ_NONE,
("%s: allocated page %p on queue", __func__, m));
- TAILQ_INSERT_TAIL(&sc->vtballoon_pages, m, pageq);
+ TAILQ_INSERT_TAIL(&sc->vtballoon_pages, m, plinks.q);
}
if (i > 0)
@@ -362,8 +362,8 @@ vtballoon_deflate(struct vtballoon_softc
sc->vtballoon_page_frames[i] =
VM_PAGE_TO_PHYS(m) >> VIRTIO_BALLOON_PFN_SHIFT;
- TAILQ_REMOVE(&sc->vtballoon_pages, m, pageq);
- TAILQ_INSERT_TAIL(&free_pages, m, pageq);
+ TAILQ_REMOVE(&sc->vtballoon_pages, m, plinks.q);
+ TAILQ_INSERT_TAIL(&free_pages, m, plinks.q);
}
if (i > 0) {
@@ -371,7 +371,7 @@ vtballoon_deflate(struct vtballoon_softc
vtballoon_send_page_frames(sc, vq, i);
while ((m = TAILQ_FIRST(&free_pages)) != NULL) {
- TAILQ_REMOVE(&free_pages, m, pageq);
+ TAILQ_REMOVE(&free_pages, m, plinks.q);
vtballoon_free_page(sc, m);
}
}
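The ttm and balloon hunks are the other half of the vm_page relinking: list membership that used to go through the page's pageq TAILQ entry now goes through plinks.q, while private free lists (as in the pmap changes) use plinks.s.ss. Below is a self-contained sketch of the plinks.q/TAILQ side, modeled on the vtballoon_deflate() drain loop above; the cut-down vm_page and single-member union are illustrative stand-ins for the real definitions in vm_page.h.

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct vm_page {				/* simplified stand-in */
	int pindex;
	union {
		TAILQ_ENTRY(vm_page) q;		/* mirrors plinks.q */
	} plinks;
};

TAILQ_HEAD(pglist, vm_page);			/* pglist head, as declared in vm_page.h */

int
main(void)
{
	struct pglist free_pages;
	struct vm_page *m;
	int i;

	TAILQ_INIT(&free_pages);
	for (i = 0; i < 3; i++) {
		if ((m = calloc(1, sizeof(*m))) == NULL)
			break;
		m->pindex = i;
		TAILQ_INSERT_TAIL(&free_pages, m, plinks.q);
	}
	/* Same drain pattern as vtballoon_deflate() above. */
	while ((m = TAILQ_FIRST(&free_pages)) != NULL) {
		TAILQ_REMOVE(&free_pages, m, plinks.q);
		printf("returning page %d\n", m->pindex);
		free(m);
	}
	return (0);
}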
Modified: projects/bhyve_npt_pmap/sys/i386/i386/pmap.c
==============================================================================
--- projects/bhyve_npt_pmap/sys/i386/i386/pmap.c Thu Aug 22 04:36:15 2013 (r254637)
+++ projects/bhyve_npt_pmap/sys/i386/i386/pmap.c Thu Aug 22 05:03:41 2013 (r254638)
@@ -317,12 +317,12 @@ static boolean_t pmap_protect_pde(pmap_t
vm_prot_t prot);
static void pmap_pte_attr(pt_entry_t *pte, int cache_bits);
static void pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
- vm_page_t *free);
+ struct spglist *free);
static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
- vm_page_t *free);
+ struct spglist *free);
static void pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte);
static void pmap_remove_page(struct pmap *pmap, vm_offset_t va,
- vm_page_t *free);
+ struct spglist *free);
static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
vm_offset_t va);
static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
@@ -335,10 +335,10 @@ static void pmap_update_pde_invalidate(v
static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags);
-static void _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, vm_page_t *free);
+static void _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free);
static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
static void pmap_pte_release(pt_entry_t *pte);
-static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t *);
+static int pmap_unuse_pt(pmap_t, vm_offset_t, struct spglist *);
#ifdef PAE
static void *pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
#endif
@@ -1568,14 +1568,12 @@ pmap_qremove(vm_offset_t sva, int count)
* Page table page management routines.....
***************************************************/
static __inline void
-pmap_free_zero_pages(vm_page_t free)
+pmap_free_zero_pages(struct spglist *free)
{
vm_page_t m;
- while (free != NULL) {
- m = free;
- free = (void *)m->object;
- m->object = NULL;
+ while ((m = SLIST_FIRST(free)) != NULL) {
+ SLIST_REMOVE_HEAD(free, plinks.s.ss);
/* Preserve the page's PG_ZERO setting. */
vm_page_free_toq(m);
}
@@ -1587,15 +1585,15 @@ pmap_free_zero_pages(vm_page_t free)
* physical memory manager after the TLB has been updated.
*/
static __inline void
-pmap_add_delayed_free_list(vm_page_t m, vm_page_t *free, boolean_t set_PG_ZERO)
+pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
+ boolean_t set_PG_ZERO)
{
if (set_PG_ZERO)
m->flags |= PG_ZERO;
else
m->flags &= ~PG_ZERO;
- m->object = (void *)*free;
- *free = m;
+ SLIST_INSERT_HEAD(free, m, plinks.s.ss);
}
/*
@@ -1645,7 +1643,7 @@ pmap_remove_pt_page(pmap_t pmap, vm_page
* page table page was unmapped and FALSE otherwise.
*/
static inline boolean_t
-pmap_unwire_ptp(pmap_t pmap, vm_page_t m, vm_page_t *free)
+pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free)
{
--m->wire_count;
@@ -1657,7 +1655,7 @@ pmap_unwire_ptp(pmap_t pmap, vm_page_t m
}
static void
-_pmap_unwire_ptp(pmap_t pmap, vm_page_t m, vm_page_t *free)
+_pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free)
{
vm_offset_t pteva;
@@ -1693,7 +1691,7 @@ _pmap_unwire_ptp(pmap_t pmap, vm_page_t
* conditionally free the page, and manage the hold/wire counts.
*/
static int
-pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t *free)
+pmap_unuse_pt(pmap_t pmap, vm_offset_t va, struct spglist *free)
{
pd_entry_t ptepde;
vm_page_t mpte;
@@ -2193,16 +2191,18 @@ pmap_pv_reclaim(pmap_t locked_pmap)
pt_entry_t *pte, tpte;
pv_entry_t pv;
vm_offset_t va;
- vm_page_t free, m, m_pc;
+ vm_page_t m, m_pc;
+ struct spglist free;
uint32_t inuse;
int bit, field, freed;
PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
pmap = NULL;
- free = m_pc = NULL;
+ m_pc = NULL;
+ SLIST_INIT(&free);
TAILQ_INIT(&newtail);
while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && (pv_vafree == 0 ||
- free == NULL)) {
+ SLIST_EMPTY(&free))) {
TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
if (pmap != pc->pc_pmap) {
if (pmap != NULL) {
@@ -2307,15 +2307,14 @@ out:
if (pmap != locked_pmap)
PMAP_UNLOCK(pmap);
}
- if (m_pc == NULL && pv_vafree != 0 && free != NULL) {
- m_pc = free;
- free = (void *)m_pc->object;
- m_pc->object = NULL;
+ if (m_pc == NULL && pv_vafree != 0 && SLIST_EMPTY(&free)) {
+ m_pc = SLIST_FIRST(&free);
+ SLIST_REMOVE_HEAD(&free, plinks.s.ss);
/* Recycle a freed page table page. */
m_pc->wire_count = 1;
atomic_add_int(&cnt.v_wire_count, 1);
}
- pmap_free_zero_pages(free);
+ pmap_free_zero_pages(&free);
return (m_pc);
}
@@ -2636,7 +2635,8 @@ pmap_demote_pde(pmap_t pmap, pd_entry_t
pd_entry_t newpde, oldpde;
pt_entry_t *firstpte, newpte;
vm_paddr_t mptepa;
- vm_page_t free, mpte;
+ vm_page_t mpte;
+ struct spglist free;
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
oldpde = *pde;
@@ -2658,10 +2658,10 @@ pmap_demote_pde(pmap_t pmap, pd_entry_t
if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc(NULL,
va >> PDRSHIFT, VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL |
VM_ALLOC_WIRED)) == NULL) {
- free = NULL;
+ SLIST_INIT(&free);
pmap_remove_pde(pmap, pde, trunc_4mpage(va), &free);
pmap_invalidate_page(pmap, trunc_4mpage(va));
- pmap_free_zero_pages(free);
+ pmap_free_zero_pages(&free);
CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#x"
" in pmap %p", va, pmap);
return (FALSE);
@@ -2814,7 +2814,7 @@ pmap_remove_kernel_pde(pmap_t pmap, pd_e
*/
static void
pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
- vm_page_t *free)
+ struct spglist *free)
{
struct md_page *pvh;
pd_entry_t oldpde;
@@ -2870,7 +2870,8 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t
* pmap_remove_pte: do the things to unmap a page in a process
*/
static int
-pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, vm_page_t *free)
+pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va,
+ struct spglist *free)
{
pt_entry_t oldpte;
vm_page_t m;
@@ -2904,7 +2905,7 @@ pmap_remove_pte(pmap_t pmap, pt_entry_t
* Remove a single page from a process address space
*/
static void
-pmap_remove_page(pmap_t pmap, vm_offset_t va, vm_page_t *free)
+pmap_remove_page(pmap_t pmap, vm_offset_t va, struct spglist *free)
{
pt_entry_t *pte;
@@ -2929,7 +2930,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva
vm_offset_t pdnxt;
pd_entry_t ptpaddr;
pt_entry_t *pte;
- vm_page_t free = NULL;
+ struct spglist free;
int anyvalid;
/*
@@ -2939,6 +2940,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva
return;
anyvalid = 0;
+ SLIST_INIT(&free);
rw_wlock(&pvh_global_lock);
sched_pin();
@@ -3031,7 +3033,7 @@ out:
pmap_invalidate_all(pmap);
rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
- pmap_free_zero_pages(free);
+ pmap_free_zero_pages(&free);
}
/*
@@ -3056,11 +3058,11 @@ pmap_remove_all(vm_page_t m)
pt_entry_t *pte, tpte;
pd_entry_t *pde;
vm_offset_t va;
- vm_page_t free;
+ struct spglist free;
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_remove_all: page %p is not managed", m));
- free = NULL;
+ SLIST_INIT(&free);
rw_wlock(&pvh_global_lock);
sched_pin();
if ((m->flags & PG_FICTITIOUS) != 0)
@@ -3105,7 +3107,7 @@ small_mappings:
vm_page_aflag_clear(m, PGA_WRITEABLE);
sched_unpin();
rw_wunlock(&pvh_global_lock);
- pmap_free_zero_pages(free);
+ pmap_free_zero_pages(&free);
}
/*
@@ -3769,7 +3771,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
{
pt_entry_t *pte;
vm_paddr_t pa;
- vm_page_t free;
+ struct spglist free;
KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
(m->oflags & VPO_UNMANAGED) != 0,
@@ -3838,10 +3840,10 @@ pmap_enter_quick_locked(pmap_t pmap, vm_
if ((m->oflags & VPO_UNMANAGED) == 0 &&
!pmap_try_insert_pv_entry(pmap, va, m)) {
if (mpte != NULL) {
- free = NULL;
+ SLIST_INIT(&free);
if (pmap_unwire_ptp(pmap, mpte, &free)) {
pmap_invalidate_page(pmap, va);
- pmap_free_zero_pages(free);
+ pmap_free_zero_pages(&free);
}
mpte = NULL;
@@ -4024,7 +4026,7 @@ void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
vm_offset_t src_addr)
{
- vm_page_t free;
+ struct spglist free;
vm_offset_t addr;
vm_offset_t end_addr = src_addr + len;
vm_offset_t pdnxt;
@@ -4107,12 +4109,12 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pm
PG_A);
dst_pmap->pm_stats.resident_count++;
} else {
- free = NULL;
+ SLIST_INIT(&free);
if (pmap_unwire_ptp(dst_pmap, dstmpte,
&free)) {
pmap_invalidate_page(dst_pmap,
addr);
- pmap_free_zero_pages(free);
+ pmap_free_zero_pages(&free);
}
goto out;
}
@@ -4419,11 +4421,11 @@ void
pmap_remove_pages(pmap_t pmap)
{
pt_entry_t *pte, tpte;
- vm_page_t free = NULL;
vm_page_t m, mpte, mt;
pv_entry_t pv;
struct md_page *pvh;
struct pv_chunk *pc, *npc;
+ struct spglist free;
int field, idx;
int32_t bit;
uint32_t inuse, bitmask;
@@ -4433,6 +4435,7 @@ pmap_remove_pages(pmap_t pmap)
printf("warning: pmap_remove_pages called with non-current pmap\n");
return;
}
+ SLIST_INIT(&free);
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
sched_pin();
@@ -4541,7 +4544,7 @@ pmap_remove_pages(pmap_t pmap)
pmap_invalidate_all(pmap);
rw_wunlock(&pvh_global_lock);
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***