PERFORCE change 94917 for review
George V. Neville-Neil
gnn@FreeBSD.org
Mon Apr 10 15:35:13 UTC 2006
http://perforce.freebsd.org/chv.cgi?CH=94917
Change 94917 by gnn@fast_ipsec_integ on 2006/04/10 15:34:49
Integrate from HEAD
Affected files ...
.. //depot/projects/fast_ipsec/src/sys/alpha/alpha/pmap.c#4 integrate
.. //depot/projects/fast_ipsec/src/sys/alpha/include/kdb.h#2 integrate
.. //depot/projects/fast_ipsec/src/sys/amd64/amd64/gdb_machdep.c#3 integrate
.. //depot/projects/fast_ipsec/src/sys/amd64/amd64/io_apic.c#5 integrate
.. //depot/projects/fast_ipsec/src/sys/amd64/amd64/pmap.c#7 integrate
.. //depot/projects/fast_ipsec/src/sys/amd64/include/gdb_machdep.h#2 integrate
.. //depot/projects/fast_ipsec/src/sys/amd64/include/kdb.h#2 integrate
.. //depot/projects/fast_ipsec/src/sys/amd64/include/pmap.h#4 integrate
.. //depot/projects/fast_ipsec/src/sys/arm/arm/cpufunc.c#2 integrate
.. //depot/projects/fast_ipsec/src/sys/arm/arm/pmap.c#6 integrate
.. //depot/projects/fast_ipsec/src/sys/arm/arm/swtch.S#3 integrate
.. //depot/projects/fast_ipsec/src/sys/arm/arm/trap.c#4 integrate
.. //depot/projects/fast_ipsec/src/sys/arm/arm/undefined.c#4 integrate
.. //depot/projects/fast_ipsec/src/sys/arm/at91/at91.c#3 integrate
.. //depot/projects/fast_ipsec/src/sys/arm/at91/at91_twi.c#2 integrate
.. //depot/projects/fast_ipsec/src/sys/arm/at91/at91_usartreg.h#2 integrate
.. //depot/projects/fast_ipsec/src/sys/arm/at91/if_ate.c#3 integrate
.. //depot/projects/fast_ipsec/src/sys/arm/at91/uart_dev_at91usart.c#3 integrate
.. //depot/projects/fast_ipsec/src/sys/arm/include/pmap.h#5 integrate
.. //depot/projects/fast_ipsec/src/sys/arm/sa11x0/uart_dev_sa1110.c#2 integrate
.. //depot/projects/fast_ipsec/src/sys/arm/xscale/i80321/i80321_timer.c#5 integrate
.. //depot/projects/fast_ipsec/src/sys/conf/options#8 integrate
.. //depot/projects/fast_ipsec/src/sys/conf/options.arm#3 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/aac/aac.c#6 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/acpi_support/acpi_asus.c#3 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/acpica/Osd/OsdHardware.c#3 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/amr/amr.c#5 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/amr/amr_pci.c#5 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/amr/amrvar.h#5 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/ath/if_ath.c#5 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/bfe/if_bfe.c#6 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/drm/drm-preprocess.sh#2 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/drm/drm.h#3 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/drm/drm_agpsupport.c#4 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/drm/drm_pciids.h#4 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/drm/i915_dma.c#3 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/drm/i915_drm.h#3 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/drm/i915_drv.h#3 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/drm/i915_irq.c#3 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/drm/i915_mem.c#3 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/drm/mga_drv.h#3 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/drm/r300_cmdbuf.c#3 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/drm/r300_reg.h#3 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/drm/radeon_cp.c#3 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/drm/radeon_drm.h#3 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/drm/radeon_drv.h#3 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/drm/radeon_state.c#4 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/drm/savage_bci.c#3 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/drm/savage_state.c#3 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/drm/sis_ds.c#3 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/drm/sis_ds.h#3 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/drm/sis_mm.c#3 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/em/LICENSE#2 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/em/README#3 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/em/if_em.c#7 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/em/if_em_hw.c#3 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/em/if_em_hw.h#4 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/hwpmc/hwpmc_x86.c#3 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/iicbus/if_ic.c#2 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/iicbus/iic.c#2 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/iicbus/iicbb.c#2 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/iicbus/iicsmb.c#2 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/mfi/mfi.c#2 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/scc/scc_bfe.h#2 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/scc/scc_core.c#2 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/sound/pci/ich.c#6 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/uart/uart_dev_z8530.c#4 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/usb/usbdevs#5 integrate
.. //depot/projects/fast_ipsec/src/sys/dev/usb/uscanner.c#4 integrate
.. //depot/projects/fast_ipsec/src/sys/geom/eli/g_eli.c#4 integrate
.. //depot/projects/fast_ipsec/src/sys/geom/geom.h#2 integrate
.. //depot/projects/fast_ipsec/src/sys/geom/geom_bsd.c#3 integrate
.. //depot/projects/fast_ipsec/src/sys/geom/geom_ccd.c#2 integrate
.. //depot/projects/fast_ipsec/src/sys/geom/geom_ctl.c#3 integrate
.. //depot/projects/fast_ipsec/src/sys/geom/geom_disk.c#4 integrate
.. //depot/projects/fast_ipsec/src/sys/geom/geom_gpt.c#4 integrate
.. //depot/projects/fast_ipsec/src/sys/geom/geom_slice.c#4 integrate
.. //depot/projects/fast_ipsec/src/sys/geom/geom_subr.c#3 integrate
.. //depot/projects/fast_ipsec/src/sys/geom/mirror/g_mirror.c#6 integrate
.. //depot/projects/fast_ipsec/src/sys/geom/mirror/g_mirror.h#5 integrate
.. //depot/projects/fast_ipsec/src/sys/geom/mirror/g_mirror_ctl.c#5 integrate
.. //depot/projects/fast_ipsec/src/sys/geom/raid3/g_raid3.c#6 integrate
.. //depot/projects/fast_ipsec/src/sys/geom/raid3/g_raid3.h#5 integrate
.. //depot/projects/fast_ipsec/src/sys/geom/raid3/g_raid3_ctl.c#5 integrate
.. //depot/projects/fast_ipsec/src/sys/i386/i386/exception.s#3 integrate
.. //depot/projects/fast_ipsec/src/sys/i386/i386/io_apic.c#5 integrate
.. //depot/projects/fast_ipsec/src/sys/i386/i386/machdep.c#7 integrate
.. //depot/projects/fast_ipsec/src/sys/i386/i386/pmap.c#7 integrate
.. //depot/projects/fast_ipsec/src/sys/i386/include/kdb.h#2 integrate
.. //depot/projects/fast_ipsec/src/sys/i386/isa/npx.c#3 integrate
.. //depot/projects/fast_ipsec/src/sys/ia64/ia64/interrupt.c#4 integrate
.. //depot/projects/fast_ipsec/src/sys/ia64/ia64/pmap.c#4 integrate
.. //depot/projects/fast_ipsec/src/sys/ia64/include/kdb.h#2 integrate
.. //depot/projects/fast_ipsec/src/sys/kern/kern_event.c#4 integrate
.. //depot/projects/fast_ipsec/src/sys/kern/kern_exec.c#8 integrate
.. //depot/projects/fast_ipsec/src/sys/kern/kern_exit.c#7 integrate
.. //depot/projects/fast_ipsec/src/sys/kern/kern_shutdown.c#4 integrate
.. //depot/projects/fast_ipsec/src/sys/kern/subr_kdb.c#4 integrate
.. //depot/projects/fast_ipsec/src/sys/kern/subr_witness.c#7 integrate
.. //depot/projects/fast_ipsec/src/sys/kern/uipc_proto.c#3 integrate
.. //depot/projects/fast_ipsec/src/sys/kern/vfs_bio.c#7 integrate
.. //depot/projects/fast_ipsec/src/sys/kern/vfs_subr.c#9 integrate
.. //depot/projects/fast_ipsec/src/sys/modules/ath/Makefile#3 integrate
.. //depot/projects/fast_ipsec/src/sys/net/if_media.h#4 integrate
.. //depot/projects/fast_ipsec/src/sys/net/raw_usrreq.c#3 integrate
.. //depot/projects/fast_ipsec/src/sys/netgraph/ng_socket.c#5 integrate
.. //depot/projects/fast_ipsec/src/sys/netinet/in_pcb.c#4 integrate
.. //depot/projects/fast_ipsec/src/sys/netinet/in_pcb.h#4 integrate
.. //depot/projects/fast_ipsec/src/sys/netinet/tcp_input.c#6 integrate
.. //depot/projects/fast_ipsec/src/sys/netinet/tcp_sack.c#4 integrate
.. //depot/projects/fast_ipsec/src/sys/netinet/tcp_subr.c#6 integrate
.. //depot/projects/fast_ipsec/src/sys/netinet/tcp_usrreq.c#5 integrate
.. //depot/projects/fast_ipsec/src/sys/netinet6/ip6_mroute.c#5 integrate
.. //depot/projects/fast_ipsec/src/sys/netinet6/udp6_usrreq.c#4 integrate
.. //depot/projects/fast_ipsec/src/sys/netipsec/ipsec.c#3 integrate
.. //depot/projects/fast_ipsec/src/sys/netipsec/ipsec.h#3 integrate
.. //depot/projects/fast_ipsec/src/sys/netipsec/keysock.c#5 integrate
.. //depot/projects/fast_ipsec/src/sys/netipsec/xform_ah.c#3 integrate
.. //depot/projects/fast_ipsec/src/sys/netipsec/xform_esp.c#3 integrate
.. //depot/projects/fast_ipsec/src/sys/netnatm/natm.c#3 integrate
.. //depot/projects/fast_ipsec/src/sys/nfsclient/nfs_bio.c#3 integrate
.. //depot/projects/fast_ipsec/src/sys/nfsclient/nfs_vnops.c#5 integrate
.. //depot/projects/fast_ipsec/src/sys/nfsclient/nfsnode.h#2 integrate
.. //depot/projects/fast_ipsec/src/sys/nfsserver/nfs_srvsock.c#5 integrate
.. //depot/projects/fast_ipsec/src/sys/posix4/ksched.c#2 integrate
.. //depot/projects/fast_ipsec/src/sys/posix4/p1003_1b.c#2 integrate
.. //depot/projects/fast_ipsec/src/sys/powerpc/conf/GENERIC#4 integrate
.. //depot/projects/fast_ipsec/src/sys/powerpc/powerpc/mmu_if.m#2 integrate
.. //depot/projects/fast_ipsec/src/sys/powerpc/powerpc/pmap_dispatch.c#2 integrate
.. //depot/projects/fast_ipsec/src/sys/security/mac/mac_vfs.c#3 integrate
.. //depot/projects/fast_ipsec/src/sys/sparc64/include/kdb.h#2 integrate
.. //depot/projects/fast_ipsec/src/sys/sparc64/sparc64/machdep.c#5 integrate
.. //depot/projects/fast_ipsec/src/sys/sparc64/sparc64/pmap.c#4 integrate
.. //depot/projects/fast_ipsec/src/sys/sparc64/sparc64/trap.c#5 integrate
.. //depot/projects/fast_ipsec/src/sys/sys/filedesc.h#3 integrate
.. //depot/projects/fast_ipsec/src/sys/sys/mac.h#3 integrate
.. //depot/projects/fast_ipsec/src/sys/sys/mac_policy.h#3 integrate
.. //depot/projects/fast_ipsec/src/sys/sys/umtx.h#4 integrate
.. //depot/projects/fast_ipsec/src/sys/ufs/ffs/ffs_softdep.c#6 integrate
.. //depot/projects/fast_ipsec/src/sys/ufs/ufs/ufsmount.h#4 integrate
.. //depot/projects/fast_ipsec/src/sys/vm/pmap.h#3 integrate
.. //depot/projects/fast_ipsec/src/sys/vm/swap_pager.c#5 integrate
.. //depot/projects/fast_ipsec/src/sys/vm/swap_pager.h#2 integrate
Differences ...
==== //depot/projects/fast_ipsec/src/sys/alpha/alpha/pmap.c#4 (text+ko) ====
@@ -148,7 +148,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/alpha/alpha/pmap.c,v 1.187 2005/12/02 18:02:54 alc Exp $");
+__FBSDID("$FreeBSD: src/sys/alpha/alpha/pmap.c,v 1.188 2006/04/03 21:16:07 peter Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -2125,9 +2125,7 @@
* in the case of running down an entire address space.
*/
void
-pmap_remove_pages(pmap, sva, eva)
- pmap_t pmap;
- vm_offset_t sva, eva;
+pmap_remove_pages(pmap_t pmap)
{
pt_entry_t *pte, tpte;
vm_page_t m;
@@ -2146,11 +2144,6 @@
pv;
pv = npv) {
- if (pv->pv_va >= eva || pv->pv_va < sva) {
- npv = TAILQ_NEXT(pv, pv_plist);
- continue;
- }
-
#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
pte = vtopte(pv->pv_va);
#else
==== //depot/projects/fast_ipsec/src/sys/alpha/include/kdb.h#2 (text+ko) ====
@@ -23,7 +23,7 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * $FreeBSD: src/sys/alpha/include/kdb.h,v 1.2 2005/01/05 20:05:50 imp Exp $
+ * $FreeBSD: src/sys/alpha/include/kdb.h,v 1.3 2006/04/03 22:51:46 marcel Exp $
*/
#ifndef _MACHINE_KDB_H_
@@ -31,6 +31,8 @@
#include <machine/frame.h>
+#define KDB_STOPPEDPCB(pc) &stoppcbs[pc->pc_cpuid]
+
static __inline void
kdb_cpu_clear_singlestep(void)
{
==== //depot/projects/fast_ipsec/src/sys/amd64/amd64/gdb_machdep.c#3 (text+ko) ====
@@ -25,7 +25,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/amd64/amd64/gdb_machdep.c,v 1.5 2005/09/27 21:10:10 peter Exp $");
+__FBSDID("$FreeBSD: src/sys/amd64/amd64/gdb_machdep.c,v 1.6 2006/04/04 03:00:20 marcel Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -91,3 +91,27 @@
kdb_frame->tf_rip = *(register_t *)val;
}
}
+
+int
+gdb_cpu_signal(int type, int code)
+{
+
+ switch (type & ~T_USER) {
+ case 0: return (SIGFPE); /* Divide by zero. */
+ case 1: return (SIGTRAP); /* Debug exception. */
+ case 3: return (SIGTRAP); /* Breakpoint. */
+ case 4: return (SIGSEGV); /* into instr. (overflow). */
+ case 5: return (SIGURG); /* bound instruction. */
+ case 6: return (SIGILL); /* Invalid opcode. */
+ case 7: return (SIGFPE); /* Coprocessor not present. */
+ case 8: return (SIGEMT); /* Double fault. */
+ case 9: return (SIGSEGV); /* Coprocessor segment overrun. */
+ case 10: return (SIGTRAP); /* Invalid TSS (also single-step). */
+ case 11: return (SIGSEGV); /* Segment not present. */
+ case 12: return (SIGSEGV); /* Stack exception. */
+ case 13: return (SIGSEGV); /* General protection. */
+ case 14: return (SIGSEGV); /* Page fault. */
+ case 16: return (SIGEMT); /* Coprocessor error. */
+ }
+ return (SIGEMT);
+}
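
[Note: the gdb_machdep.c hunk above adds gdb_cpu_signal(), which translates amd64 trap
vectors into the signal numbers that the remote GDB protocol reports in its stop replies
(for example "S05" for SIGTRAP after a breakpoint). The following stand-alone C sketch
only illustrates that idea; none of these names are part of the committed change.]

#include <signal.h>
#include <stdio.h>

/*
 * Illustrative trap-to-signal mapping; vector numbers follow the usual
 * x86 exception assignments, as in the gdb_cpu_signal() hunk above.
 */
static int
trap_to_signal(int trapno)
{
	switch (trapno) {
	case 0:			/* divide error */
		return (SIGFPE);
	case 1:			/* debug exception (single step) */
	case 3:			/* breakpoint */
		return (SIGTRAP);
	case 6:			/* invalid opcode */
		return (SIGILL);
	case 13:		/* general protection */
	case 14:		/* page fault */
		return (SIGSEGV);
	default:
		return (SIGILL);	/* conservative fallback */
	}
}

static void
send_stop_reply(char *buf, size_t len, int trapno)
{
	/* "S" plus two hex digits of the signal, per the GDB remote protocol. */
	snprintf(buf, len, "S%02x", trap_to_signal(trapno));
}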
==== //depot/projects/fast_ipsec/src/sys/amd64/amd64/io_apic.c#5 (text+ko) ====
@@ -28,7 +28,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/amd64/amd64/io_apic.c,v 1.23 2006/03/20 19:39:07 jhb Exp $");
+__FBSDID("$FreeBSD: src/sys/amd64/amd64/io_apic.c,v 1.24 2006/04/05 20:43:19 jhb Exp $");
#include "opt_atpic.h"
#include "opt_isa.h"
@@ -89,6 +89,7 @@
u_int io_edgetrigger:1;
u_int io_masked:1;
int io_bus:4;
+ uint32_t io_lowreg;
};
struct ioapic {
@@ -207,9 +208,7 @@
mtx_lock_spin(&icu_lock);
if (intpin->io_masked) {
- flags = ioapic_read(io->io_addr,
- IOAPIC_REDTBL_LO(intpin->io_intpin));
- flags &= ~(IOART_INTMASK);
+ flags = intpin->io_lowreg & ~IOART_INTMASK;
ioapic_write(io->io_addr, IOAPIC_REDTBL_LO(intpin->io_intpin),
flags);
intpin->io_masked = 0;
@@ -226,9 +225,7 @@
mtx_lock_spin(&icu_lock);
if (!intpin->io_masked && !intpin->io_edgetrigger) {
- flags = ioapic_read(io->io_addr,
- IOAPIC_REDTBL_LO(intpin->io_intpin));
- flags |= IOART_INTMSET;
+ flags = intpin->io_lowreg | IOART_INTMSET;
ioapic_write(io->io_addr, IOAPIC_REDTBL_LO(intpin->io_intpin),
flags);
intpin->io_masked = 1;
@@ -313,6 +310,7 @@
/* Write the values to the APIC. */
mtx_lock_spin(&icu_lock);
+ intpin->io_lowreg = low;
ioapic_write(io->io_addr, IOAPIC_REDTBL_LO(intpin->io_intpin), low);
value = ioapic_read(io->io_addr, IOAPIC_REDTBL_HI(intpin->io_intpin));
value &= ~IOART_DEST;
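
[Note: the io_apic.c change above caches the last value programmed into each pin's low
redirection-table register in io_lowreg, so that masking and unmasking become a single
register write instead of a read-modify-write of the slow I/O APIC. A minimal sketch of
that caching pattern follows; all names here are hypothetical, not the committed code.]

#include <stdint.h>

#define INTMASK		0x00010000u	/* illustrative mask bit */

struct pin {
	uint32_t lowreg;		/* cached low redirection-table image */
	int	 masked;
	volatile uint32_t *reg;		/* hypothetical hardware register */
};

/*
 * Derive the masked/unmasked value from the cached image, mirroring how
 * the hunks above replace ioapic_read()+modify with "io_lowreg | ..."
 * so the APIC is only written, never read, on these paths.
 */
static void
pin_mask(struct pin *p)
{
	if (!p->masked) {
		*p->reg = p->lowreg | INTMASK;
		p->masked = 1;
	}
}

static void
pin_unmask(struct pin *p)
{
	if (p->masked) {
		*p->reg = p->lowreg & ~INTMASK;
		p->masked = 0;
	}
}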
==== //depot/projects/fast_ipsec/src/sys/amd64/amd64/pmap.c#7 (text+ko) ====
@@ -77,7 +77,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/amd64/amd64/pmap.c,v 1.540 2006/04/02 05:45:05 alc Exp $");
+__FBSDID("$FreeBSD: src/sys/amd64/amd64/pmap.c,v 1.543 2006/04/04 20:17:35 peter Exp $");
/*
* Manages physical address maps.
@@ -158,6 +158,13 @@
#define PMAP_INLINE
#endif
+#define PV_STATS
+#ifdef PV_STATS
+#define PV_STAT(x) do { x ; } while (0)
+#else
+#define PV_STAT(x) do { } while (0)
+#endif
+
struct pmap kernel_pmap_store;
vm_paddr_t avail_start; /* PA of first available physical page */
@@ -182,7 +189,6 @@
/*
* Data for the pv entry allocation mechanism
*/
-static uma_zone_t pvzone;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
static int shpgperproc = PMAP_SHPGPERPROC;
@@ -198,8 +204,8 @@
*/
static caddr_t crashdumpmap;
-static PMAP_INLINE void free_pv_entry(pv_entry_t pv);
-static pv_entry_t get_pv_entry(pmap_t locked_pmap);
+static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
+static pv_entry_t get_pv_entry(pmap_t locked_pmap, int try);
static void pmap_clear_ptes(vm_page_t m, long bit);
static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq,
@@ -511,7 +517,7 @@
PMAP_LOCK_INIT(kernel_pmap);
kernel_pmap->pm_pml4 = (pdp_entry_t *) (KERNBASE + KPML4phys);
kernel_pmap->pm_active = -1; /* don't allow deactivation */
- TAILQ_INIT(&kernel_pmap->pm_pvlist);
+ TAILQ_INIT(&kernel_pmap->pm_pvchunk);
nkpt = NKPT;
/*
@@ -571,8 +577,6 @@
* high water mark so that the system can recover from excessive
* numbers of pv entries.
*/
- pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
- NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
@@ -1065,7 +1069,7 @@
PMAP_LOCK_INIT(pmap);
pmap->pm_pml4 = (pml4_entry_t *)(KERNBASE + KPML4phys);
pmap->pm_active = 0;
- TAILQ_INIT(&pmap->pm_pvlist);
+ TAILQ_INIT(&pmap->pm_pvchunk);
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
}
@@ -1102,7 +1106,7 @@
pmap->pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pml4pg) | PG_V | PG_RW | PG_A | PG_M;
pmap->pm_active = 0;
- TAILQ_INIT(&pmap->pm_pvlist);
+ TAILQ_INIT(&pmap->pm_pvchunk);
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
}
@@ -1439,61 +1443,79 @@
* page management routines.
***************************************************/
-/*
- * free the pv_entry back to the free list
- */
-static PMAP_INLINE void
-free_pv_entry(pv_entry_t pv)
+CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
+CTASSERT(_NPCM == 3);
+CTASSERT(_NPCPV == 168);
+
+static __inline struct pv_chunk *
+pv_to_chunk(pv_entry_t pv)
{
- pv_entry_count--;
- uma_zfree(pvzone, pv);
+
+ return (struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK);
}
+#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
+
+#define PC_FREE0 0xfffffffffffffffful
+#define PC_FREE1 0xfffffffffffffffful
+#define PC_FREE2 0x000000fffffffffful
+
+static uint64_t pc_freemask[3] = { PC_FREE0, PC_FREE1, PC_FREE2 };
+
+#ifdef PV_STATS
+static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
+
+SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
+ "Current number of pv entry chunks");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
+ "Current number of pv entry chunks allocated");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
+ "Current number of pv entry chunks frees");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
+ "Number of times tried to get a chunk page but failed.");
+
+static long pv_entry_frees, pv_entry_allocs;
+static int pv_entry_spare;
+
+SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
+ "Current number of pv entries");
+SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
+ "Current number of pv entry frees");
+SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
+ "Current number of pv entry allocs");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
+ "Current number of spare pv entries");
+
+static int pmap_collect_inactive, pmap_collect_active;
+
+SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_inactive, CTLFLAG_RD, &pmap_collect_inactive, 0,
+ "Current number times pmap_collect called on inactive queue");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_active, CTLFLAG_RD, &pmap_collect_active, 0,
+ "Current number times pmap_collect called on active queue");
+#endif
+
/*
- * get a new pv_entry, allocating a block from the system
- * when needed.
+ * We are in a serious low memory condition. Resort to
+ * drastic measures to free some pages so we can allocate
+ * another pv entry chunk. This is normally called to
+ * unmap inactive pages, and if necessary, active pages.
*/
-static pv_entry_t
-get_pv_entry(pmap_t locked_pmap)
+static void
+pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
{
- static const struct timeval printinterval = { 60, 0 };
- static struct timeval lastprint;
- struct vpgqueues *vpq;
pd_entry_t ptepde;
pmap_t pmap;
pt_entry_t *pte, tpte;
- pv_entry_t allocated_pv, next_pv, pv;
+ pv_entry_t next_pv, pv;
vm_offset_t va;
vm_page_t m;
- PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
- allocated_pv = uma_zalloc(pvzone, M_NOWAIT);
- if (allocated_pv != NULL) {
- pv_entry_count++;
- if (pv_entry_count > pv_entry_high_water)
- pagedaemon_wakeup();
- else
- return (allocated_pv);
- }
-
- /*
- * Reclaim pv entries: At first, destroy mappings to inactive
- * pages. After that, if a pv entry is still needed, destroy
- * mappings to active pages.
- */
- if (ratecheck(&lastprint, &printinterval))
- printf("Approaching the limit on PV entries, consider "
- "increasing sysctl vm.pmap.shpgperproc or "
- "vm.pmap.pv_entry_max\n");
- vpq = &vm_page_queues[PQ_INACTIVE];
-retry:
TAILQ_FOREACH(m, &vpq->pl, pageq) {
if (m->hold_count || m->busy || (m->flags & PG_BUSY))
continue;
TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {
va = pv->pv_va;
- pmap = pv->pv_pmap;
+ pmap = PV_PMAP(pv);
/* Avoid deadlock and lock recursion. */
if (pmap > locked_pmap)
PMAP_LOCK(pmap);
@@ -1503,18 +1525,17 @@
pte = pmap_pte_pde(pmap, va, &ptepde);
tpte = pte_load_clear(pte);
KASSERT((tpte & PG_W) == 0,
- ("get_pv_entry: wired pte %#lx", tpte));
+ ("pmap_collect: wired pte %#lx", tpte));
if (tpte & PG_A)
vm_page_flag_set(m, PG_REFERENCED);
if (tpte & PG_M) {
KASSERT((tpte & PG_RW),
- ("get_pv_entry: modified page not writable: va: %#lx, pte: %#lx",
+ ("pmap_collect: modified page not writable: va: %#lx, pte: %#lx",
va, tpte));
if (pmap_track_modified(va))
vm_page_dirty(m);
}
pmap_invalidate_page(pmap, va);
- TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
if (TAILQ_EMPTY(&m->md.pv_list))
vm_page_flag_clear(m, PG_WRITEABLE);
@@ -1522,20 +1543,130 @@
pmap_unuse_pt(pmap, va, ptepde);
if (pmap != locked_pmap)
PMAP_UNLOCK(pmap);
- if (allocated_pv == NULL)
- allocated_pv = pv;
- else
- free_pv_entry(pv);
+ free_pv_entry(locked_pmap, pv);
+ }
+ }
+}
+
+
+/*
+ * free the pv_entry back to the free list
+ */
+static void
+free_pv_entry(pmap_t pmap, pv_entry_t pv)
+{
+ vm_page_t m;
+ struct pv_chunk *pc;
+ int idx, field, bit;
+
+ PV_STAT(pv_entry_frees++);
+ PV_STAT(pv_entry_spare++);
+ PV_STAT(pv_entry_count--);
+ pc = pv_to_chunk(pv);
+ idx = pv - &pc->pc_pventry[0];
+ field = idx / 64;
+ bit = idx % 64;
+ pc->pc_map[field] |= 1ul << bit;
+ /* move to head of list */
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+ TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
+ if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 ||
+ pc->pc_map[2] != PC_FREE2)
+ return;
+ PV_STAT(pv_entry_spare -= _NPCPV);
+ PV_STAT(pc_chunk_count--);
+ PV_STAT(pc_chunk_frees++);
+ /* entire chunk is free, return it */
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+ m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
+ vm_page_lock_queues();
+ vm_page_free(m);
+ vm_page_unlock_queues();
+}
+
+/*
+ * get a new pv_entry, allocating a block from the system
+ * when needed.
+ */
+static pv_entry_t
+get_pv_entry(pmap_t pmap, int try)
+{
+ static const struct timeval printinterval = { 60, 0 };
+ static struct timeval lastprint;
+ static vm_pindex_t colour;
+ int bit, field;
+ pv_entry_t pv;
+ struct pv_chunk *pc;
+ vm_page_t m;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ PV_STAT(pv_entry_allocs++);
+ PV_STAT(pv_entry_count++);
+ if (pv_entry_count > pv_entry_high_water)
+ pagedaemon_wakeup();
+ pc = TAILQ_FIRST(&pmap->pm_pvchunk);
+ if (pc != NULL) {
+ for (field = 0; field < _NPCM; field++) {
+ if (pc->pc_map[field]) {
+ bit = bsfq(pc->pc_map[field]);
+ break;
+ }
+ }
+ if (field < _NPCM) {
+ pv = &pc->pc_pventry[field * 64 + bit];
+ pc->pc_map[field] &= ~(1ul << bit);
+ /* If this was the last item, move it to tail */
+ if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 &&
+ pc->pc_map[2] == 0) {
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+ TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
+ }
+ PV_STAT(pv_entry_spare--);
+ return (pv);
}
}
- if (allocated_pv == NULL) {
- if (vpq == &vm_page_queues[PQ_INACTIVE]) {
- vpq = &vm_page_queues[PQ_ACTIVE];
- goto retry;
+ /* No free items, allocate another chunk */
+ m = vm_page_alloc(NULL, colour, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ);
+ if (m == NULL) {
+ if (try) {
+ PV_STAT(pc_chunk_tryfail++);
+ return (NULL);
+ }
+ /*
+ * Reclaim pv entries: At first, destroy mappings to inactive
+ * pages. After that, if a pv chunk entry is still needed,
+ * destroy mappings to active pages.
+ */
+ if (ratecheck(&lastprint, &printinterval))
+ printf("Approaching the limit on PV entries, consider "
+ "increasing sysctl vm.pmap.shpgperproc or "
+ "vm.pmap.pv_entry_max\n");
+ PV_STAT(pmap_collect_inactive++);
+ pmap_collect(pmap, &vm_page_queues[PQ_INACTIVE]);
+ m = vm_page_alloc(NULL, colour,
+ VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ);
+ if (m == NULL) {
+ PV_STAT(pmap_collect_active++);
+ pmap_collect(pmap, &vm_page_queues[PQ_ACTIVE]);
+ m = vm_page_alloc(NULL, colour,
+ VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ);
+ if (m == NULL)
+ panic("get_pv_entry: increase vm.pmap.shpgperproc");
}
- panic("get_pv_entry: increase the vm.pmap.shpgperproc tunable");
}
- return (allocated_pv);
+ PV_STAT(pc_chunk_count++);
+ PV_STAT(pc_chunk_allocs++);
+ colour++;
+ pc = (void *)PHYS_TO_DMAP(m->phys_addr);
+ pc->pc_pmap = pmap;
+ pc->pc_map[0] = PC_FREE0 & ~1ul; /* preallocated bit 0 */
+ pc->pc_map[1] = PC_FREE1;
+ pc->pc_map[2] = PC_FREE2;
+ pv = &pc->pc_pventry[0];
+ TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
+ PV_STAT(pv_entry_spare += _NPCPV - 1);
+ return (pv);
}
static void
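
[Note: the free_pv_entry()/get_pv_entry() rewrite above replaces the per-entry UMA zone
with page-sized chunks that embed their own free bitmaps: three 64-bit words cover 168
entries, bsfq() locates a free slot, and freeing is pure index arithmetic. The payoff,
visible in pmap_remove_pages() later in this diff, is that a pmap can walk its own chunk
list instead of keeping a separate pm_pvlist. A stand-alone sketch of the allocation
pattern follows; the names are hypothetical and this is not the committed code.]

#include <stdint.h>
#include <stddef.h>

#define NWORDS	3	/* as _NPCM above */
#define NSLOTS	168	/* as _NPCPV above */

struct slot { uintptr_t va; };	/* stand-in for struct pv_entry */

/*
 * Illustrative chunk layout: free bitmaps followed by the slots. In the
 * committed code the chunk is sized to exactly one page and carries a
 * back-pointer to its pmap; a set bit means "slot free", matching the
 * PC_FREE* masks above.
 */
struct chunk {
	uint64_t	map[NWORDS];
	struct slot	slots[NSLOTS];
};

/* Allocate a slot; __builtin_ctzll stands in for the kernel's bsfq(). */
static struct slot *
chunk_alloc(struct chunk *pc)
{
	int field, bit;

	for (field = 0; field < NWORDS; field++) {
		if (pc->map[field] != 0) {
			bit = __builtin_ctzll(pc->map[field]);
			pc->map[field] &= ~(1ULL << bit);
			return (&pc->slots[field * 64 + bit]);
		}
	}
	return (NULL);		/* chunk exhausted */
}

/* Free by index arithmetic alone, as free_pv_entry() above does. */
static void
chunk_free(struct chunk *pc, struct slot *s)
{
	size_t idx = (size_t)(s - &pc->slots[0]);

	pc->map[idx / 64] |= 1ULL << (idx % 64);
}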
@@ -1545,24 +1676,16 @@
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
- if (m->md.pv_list_count < pmap->pm_stats.resident_count) {
- TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
- if (pmap == pv->pv_pmap && va == pv->pv_va)
- break;
- }
- } else {
- TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
- if (va == pv->pv_va)
- break;
- }
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+ if (pmap == PV_PMAP(pv) && va == pv->pv_va)
+ break;
}
KASSERT(pv != NULL, ("pmap_remove_entry: pv not found"));
TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
m->md.pv_list_count--;
if (TAILQ_EMPTY(&m->md.pv_list))
vm_page_flag_clear(m, PG_WRITEABLE);
- TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
- free_pv_entry(pv);
+ free_pv_entry(pmap, pv);
}
/*
@@ -1574,13 +1697,10 @@
{
pv_entry_t pv;
- pv = get_pv_entry(pmap);
- pv->pv_va = va;
- pv->pv_pmap = pmap;
-
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
- TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
+ pv = get_pv_entry(pmap, FALSE);
+ pv->pv_va = va;
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
m->md.pv_list_count++;
}
@@ -1596,11 +1716,8 @@
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if (pv_entry_count < pv_entry_high_water &&
- (pv = uma_zalloc(pvzone, M_NOWAIT)) != NULL) {
- pv_entry_count++;
+ (pv = get_pv_entry(pmap, TRUE)) != NULL) {
pv->pv_va = va;
- pv->pv_pmap = pmap;
- TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
m->md.pv_list_count++;
return (TRUE);
@@ -1791,6 +1908,7 @@
pmap_remove_all(vm_page_t m)
{
register pv_entry_t pv;
+ pmap_t pmap;
pt_entry_t *pte, tpte;
pd_entry_t ptepde;
@@ -1805,12 +1923,13 @@
#endif
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
- PMAP_LOCK(pv->pv_pmap);
- pv->pv_pmap->pm_stats.resident_count--;
- pte = pmap_pte_pde(pv->pv_pmap, pv->pv_va, &ptepde);
+ pmap = PV_PMAP(pv);
+ PMAP_LOCK(pmap);
+ pmap->pm_stats.resident_count--;
+ pte = pmap_pte_pde(pmap, pv->pv_va, &ptepde);
tpte = pte_load_clear(pte);
if (tpte & PG_W)
- pv->pv_pmap->pm_stats.wired_count--;
+ pmap->pm_stats.wired_count--;
if (tpte & PG_A)
vm_page_flag_set(m, PG_REFERENCED);
@@ -1824,13 +1943,12 @@
if (pmap_track_modified(pv->pv_va))
vm_page_dirty(m);
}
- pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
- TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
+ pmap_invalidate_page(pmap, pv->pv_va);
TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
m->md.pv_list_count--;
- pmap_unuse_pt(pv->pv_pmap, pv->pv_va, ptepde);
- PMAP_UNLOCK(pv->pv_pmap);
- free_pv_entry(pv);
+ pmap_unuse_pt(pmap, pv->pv_va, ptepde);
+ PMAP_UNLOCK(pmap);
+ free_pv_entry(pmap, pv);
}
vm_page_flag_clear(m, PG_WRITEABLE);
}
@@ -2584,7 +2702,7 @@
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
- if (pv->pv_pmap == pmap) {
+ if (PV_PMAP(pv) == pmap) {
return TRUE;
}
loops++;
@@ -2594,7 +2712,6 @@
return (FALSE);
}
-#define PMAP_REMOVE_PAGES_CURPROC_ONLY
/*
* Remove all pages from specified address space
* this aids process exit speeds. Also, this code
@@ -2604,79 +2721,95 @@
* in the case of running down an entire address space.
*/
void
-pmap_remove_pages(pmap, sva, eva)
- pmap_t pmap;
- vm_offset_t sva, eva;
+pmap_remove_pages(pmap_t pmap)
{
pt_entry_t *pte, tpte;
vm_page_t m;
- pv_entry_t pv, npv;
+ pv_entry_t pv;
+ struct pv_chunk *pc, *npc;
+ int field, idx;
+ int64_t bit;
+ uint64_t inuse, bitmask;
+ int allfree;
-#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
printf("warning: pmap_remove_pages called with non-current pmap\n");
return;
}
-#endif
vm_page_lock_queues();
PMAP_LOCK(pmap);
- for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
+ TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
+ allfree = 1;
+ for (field = 0; field < _NPCM; field++) {
+ inuse = (~(pc->pc_map[field])) & pc_freemask[field];
+ while (inuse != 0) {
+ bit = bsfq(inuse);
+ bitmask = 1UL << bit;
+ idx = field * 64 + bit;
+ pv = &pc->pc_pventry[idx];
+ inuse &= ~bitmask;
- if (pv->pv_va >= eva || pv->pv_va < sva) {
- npv = TAILQ_NEXT(pv, pv_plist);
- continue;
- }
+ pte = vtopte(pv->pv_va);
+ tpte = *pte;
-#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
- pte = vtopte(pv->pv_va);
-#else
- pte = pmap_pte(pmap, pv->pv_va);
-#endif
- tpte = *pte;
-
- if (tpte == 0) {
- printf("TPTE at %p IS ZERO @ VA %08lx\n",
- pte, pv->pv_va);
- panic("bad pte");
- }
+ if (tpte == 0) {
+ printf(
+ "TPTE at %p IS ZERO @ VA %08lx\n",
+ pte, pv->pv_va);
+ panic("bad pte");
+ }
/*
* We cannot remove wired pages from a process' mapping at this time
*/
- if (tpte & PG_W) {
- npv = TAILQ_NEXT(pv, pv_plist);
- continue;
- }
+ if (tpte & PG_W) {
+ allfree = 0;
+ continue;
+ }
+
+ m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
+ KASSERT(m->phys_addr == (tpte & PG_FRAME),
+ ("vm_page_t %p phys_addr mismatch %016jx %016jx",
+ m, (uintmax_t)m->phys_addr,
+ (uintmax_t)tpte));
- m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
- KASSERT(m->phys_addr == (tpte & PG_FRAME),
- ("vm_page_t %p phys_addr mismatch %016jx %016jx",
- m, (uintmax_t)m->phys_addr, (uintmax_t)tpte));
+ KASSERT(m < &vm_page_array[vm_page_array_size],
+ ("pmap_remove_pages: bad tpte %#jx",
+ (uintmax_t)tpte));
- KASSERT(m < &vm_page_array[vm_page_array_size],
- ("pmap_remove_pages: bad tpte %#jx", (uintmax_t)tpte));
+ pmap->pm_stats.resident_count--;
- pmap->pm_stats.resident_count--;
+ pte_clear(pte);
- pte_clear(pte);
+ /*
+ * Update the vm_page_t clean/reference bits.
+ */
+ if (tpte & PG_M)
+ vm_page_dirty(m);
- /*
- * Update the vm_page_t clean and reference bits.
- */
- if (tpte & PG_M) {
- vm_page_dirty(m);
+ /* Mark free */
+ PV_STAT(pv_entry_frees++);
+ PV_STAT(pv_entry_spare++);
+ PV_STAT(pv_entry_count--);
+ pc->pc_map[field] |= bitmask;
+ m->md.pv_list_count--;
+ TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
+ if (TAILQ_EMPTY(&m->md.pv_list))
+ vm_page_flag_clear(m, PG_WRITEABLE);
+ pmap_unuse_pt(pmap, pv->pv_va,
+ *vtopde(pv->pv_va));
+ }
+ }
+ if (allfree) {
+ PV_STAT(pv_entry_spare -= _NPCPV);
+ PV_STAT(pc_chunk_count--);
+ PV_STAT(pc_chunk_frees++);
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+ m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
+ vm_page_lock_queues();
+ vm_page_free(m);
+ vm_page_unlock_queues();
}
-
- npv = TAILQ_NEXT(pv, pv_plist);
- TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
-
- m->md.pv_list_count--;
- TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
- if (TAILQ_EMPTY(&m->md.pv_list))
- vm_page_flag_clear(m, PG_WRITEABLE);
-
- pmap_unuse_pt(pmap, pv->pv_va, *vtopde(pv->pv_va));
- free_pv_entry(pv);
}
pmap_invalidate_all(pmap);
PMAP_UNLOCK(pmap);
@@ -2694,6 +2827,7 @@
{
pv_entry_t pv;
pt_entry_t *pte;
+ pmap_t pmap;
boolean_t rv;
rv = FALSE;
@@ -2709,10 +2843,11 @@
*/
if (!pmap_track_modified(pv->pv_va))
continue;
- PMAP_LOCK(pv->pv_pmap);
- pte = pmap_pte(pv->pv_pmap, pv->pv_va);
+ pmap = PV_PMAP(pv);
+ PMAP_LOCK(pmap);
+ pte = pmap_pte(pmap, pv->pv_va);
rv = (*pte & PG_M) != 0;
- PMAP_UNLOCK(pv->pv_pmap);
+ PMAP_UNLOCK(pmap);
if (rv)
break;
}
@@ -2750,6 +2885,7 @@
pmap_clear_ptes(vm_page_t m, long bit)
{
register pv_entry_t pv;
+ pmap_t pmap;
pt_entry_t pbits, *pte;
if ((m->flags & PG_FICTITIOUS) ||
@@ -2770,8 +2906,9 @@
continue;
}
- PMAP_LOCK(pv->pv_pmap);
- pte = pmap_pte(pv->pv_pmap, pv->pv_va);
+ pmap = PV_PMAP(pv);
+ PMAP_LOCK(pmap);
+ pte = pmap_pte(pmap, pv->pv_va);
retry:
pbits = *pte;
if (pbits & bit) {
@@ -2785,9 +2922,9 @@
} else {
atomic_clear_long(pte, bit);
}
- pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
+ pmap_invalidate_page(pmap, pv->pv_va);
}
- PMAP_UNLOCK(pv->pv_pmap);
+ PMAP_UNLOCK(pmap);
}
if (bit == PG_RW)
vm_page_flag_clear(m, PG_WRITEABLE);
@@ -2826,6 +2963,7 @@
pmap_ts_referenced(vm_page_t m)
{
register pv_entry_t pv, pvf, pvn;
+ pmap_t pmap;
pt_entry_t *pte;
pt_entry_t v;
int rtval = 0;
@@ -2848,20 +2986,21 @@
if (!pmap_track_modified(pv->pv_va))
continue;
- PMAP_LOCK(pv->pv_pmap);
- pte = pmap_pte(pv->pv_pmap, pv->pv_va);
+ pmap = PV_PMAP(pv);
+ PMAP_LOCK(pmap);
+ pte = pmap_pte(pmap, pv->pv_va);
if (pte && ((v = pte_load(pte)) & PG_A) != 0) {
atomic_clear_long(pte, PG_A);
- pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
>>> TRUNCATED FOR MAIL (1000 lines) <<<