svn commit: r226319 - in stable/9/sys: ia64/ia64 powerpc/booke
sparc64/sparc64 vm
Konstantin Belousov
kib at FreeBSD.org
Wed Oct 12 20:08:26 UTC 2011
Author: kib
Date: Wed Oct 12 20:08:25 2011
New Revision: 226319
URL: http://svn.freebsd.org/changeset/base/226319
Log:
Handle page dirty mask with atomics.
MFC r225838:
Use the explicitly-sized types for the dirty and valid masks.
MFC r225840:
Use the trick of performing the atomic operation on the contained aligned
word to handle the dirty mask updates in vm_page_clear_dirty_mask().
MFC r225841:
Remove locking of the vm page queues from several pmaps.
MFC r225843:
Fix grammar.
MFC r225856:
Style nit.
Approved by: re (bz)
Modified:
stable/9/sys/ia64/ia64/pmap.c
stable/9/sys/powerpc/booke/pmap.c
stable/9/sys/sparc64/sparc64/pmap.c
stable/9/sys/vm/vm_fault.c
stable/9/sys/vm/vm_page.c
stable/9/sys/vm/vm_page.h
Directory Properties:
stable/9/sys/ (props changed)
stable/9/sys/amd64/include/xen/ (props changed)
stable/9/sys/boot/ (props changed)
stable/9/sys/boot/i386/efi/ (props changed)
stable/9/sys/boot/ia64/efi/ (props changed)
stable/9/sys/boot/ia64/ski/ (props changed)
stable/9/sys/boot/powerpc/boot1.chrp/ (props changed)
stable/9/sys/boot/powerpc/ofw/ (props changed)
stable/9/sys/cddl/contrib/opensolaris/ (props changed)
stable/9/sys/conf/ (props changed)
stable/9/sys/contrib/dev/acpica/ (props changed)
stable/9/sys/contrib/octeon-sdk/ (props changed)
stable/9/sys/contrib/pf/ (props changed)
stable/9/sys/contrib/x86emu/ (props changed)
Modified: stable/9/sys/ia64/ia64/pmap.c
==============================================================================
--- stable/9/sys/ia64/ia64/pmap.c Wed Oct 12 19:52:23 2011 (r226318)
+++ stable/9/sys/ia64/ia64/pmap.c Wed Oct 12 20:08:25 2011 (r226319)
@@ -1486,7 +1486,6 @@ pmap_protect(pmap_t pmap, vm_offset_t sv
if ((sva & PAGE_MASK) || (eva & PAGE_MASK))
panic("pmap_protect: unaligned addresses");
- vm_page_lock_queues();
PMAP_LOCK(pmap);
oldpmap = pmap_switch(pmap);
for ( ; sva < eva; sva += PAGE_SIZE) {
@@ -1514,7 +1513,6 @@ pmap_protect(pmap_t pmap, vm_offset_t sv
pmap_pte_prot(pmap, pte, prot);
pmap_invalidate_page(sva);
}
- vm_page_unlock_queues();
pmap_switch(oldpmap);
PMAP_UNLOCK(pmap);
}
Modified: stable/9/sys/powerpc/booke/pmap.c
==============================================================================
--- stable/9/sys/powerpc/booke/pmap.c Wed Oct 12 19:52:23 2011 (r226318)
+++ stable/9/sys/powerpc/booke/pmap.c Wed Oct 12 20:08:25 2011 (r226319)
@@ -1918,7 +1918,6 @@ mmu_booke_protect(mmu_t mmu, pmap_t pmap
if (prot & VM_PROT_WRITE)
return;
- vm_page_lock_queues();
PMAP_LOCK(pmap);
for (va = sva; va < eva; va += PAGE_SIZE) {
if ((pte = pte_find(mmu, pmap, va)) != NULL) {
@@ -1941,7 +1940,6 @@ mmu_booke_protect(mmu_t mmu, pmap_t pmap
}
}
PMAP_UNLOCK(pmap);
- vm_page_unlock_queues();
}
/*
Modified: stable/9/sys/sparc64/sparc64/pmap.c
==============================================================================
--- stable/9/sys/sparc64/sparc64/pmap.c Wed Oct 12 19:52:23 2011 (r226318)
+++ stable/9/sys/sparc64/sparc64/pmap.c Wed Oct 12 20:08:25 2011 (r226319)
@@ -1423,6 +1423,7 @@ pmap_protect_tte(struct pmap *pm, struct
u_long data;
vm_page_t m;
+ PMAP_LOCK_ASSERT(pm, MA_OWNED);
data = atomic_clear_long(&tp->tte_data, TD_SW | TD_W);
if ((data & (TD_PV | TD_W)) == (TD_PV | TD_W)) {
m = PHYS_TO_VM_PAGE(TD_PA(data));
@@ -1451,7 +1452,6 @@ pmap_protect(pmap_t pm, vm_offset_t sva,
if (prot & VM_PROT_WRITE)
return;
- vm_page_lock_queues();
PMAP_LOCK(pm);
if (eva - sva > PMAP_TSB_THRESH) {
tsb_foreach(pm, NULL, sva, eva, pmap_protect_tte);
@@ -1463,7 +1463,6 @@ pmap_protect(pmap_t pm, vm_offset_t sva,
tlb_range_demap(pm, sva, eva - 1);
}
PMAP_UNLOCK(pm);
- vm_page_unlock_queues();
}
/*
Modified: stable/9/sys/vm/vm_fault.c
==============================================================================
--- stable/9/sys/vm/vm_fault.c Wed Oct 12 19:52:23 2011 (r226318)
+++ stable/9/sys/vm/vm_fault.c Wed Oct 12 20:08:25 2011 (r226319)
@@ -1090,18 +1090,10 @@ vm_fault_quick_hold_pages(vm_map_t map,
* performed through an unmanaged mapping or by a DMA
* operation.
*
- * The object lock is not held here. Therefore, like
- * a pmap operation, the page queues lock may be
- * required in order to call vm_page_dirty(). See
- * vm_page_clear_dirty_mask().
+ * The object lock is not held here.
+ * See vm_page_clear_dirty_mask().
*/
-#if defined(__amd64__) || defined(__i386__) || defined(__ia64__)
vm_page_dirty(*mp);
-#else
- vm_page_lock_queues();
- vm_page_dirty(*mp);
- vm_page_unlock_queues();
-#endif
}
}
if (pmap_failed) {
Modified: stable/9/sys/vm/vm_page.c
==============================================================================
--- stable/9/sys/vm/vm_page.c Wed Oct 12 19:52:23 2011 (r226318)
+++ stable/9/sys/vm/vm_page.c Wed Oct 12 20:08:25 2011 (r226319)
@@ -745,9 +745,9 @@ vm_page_sleep(vm_page_t m, const char *m
*
* Set all bits in the page's dirty field.
*
- * The object containing the specified page must be locked if the call is
- * made from the machine-independent layer. If, however, the call is
- * made from the pmap layer, then the page queues lock may be required.
+ * The object containing the specified page must be locked if the
+ * call is made from the machine-independent layer.
+ *
* See vm_page_clear_dirty_mask().
*/
void
@@ -2339,44 +2339,52 @@ vm_page_set_valid(vm_page_t m, int base,
static __inline void
vm_page_clear_dirty_mask(vm_page_t m, int pagebits)
{
+ uintptr_t addr;
+#if PAGE_SIZE < 16384
+ int shift;
+#endif
/*
* If the object is locked and the page is neither VPO_BUSY nor
* PGA_WRITEABLE, then the page's dirty field cannot possibly be
- * set by a concurrent pmap operation.
+ * set by a concurrent pmap operation.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
if ((m->oflags & VPO_BUSY) == 0 && (m->aflags & PGA_WRITEABLE) == 0)
m->dirty &= ~pagebits;
else {
-#if defined(__amd64__) || defined(__i386__) || defined(__ia64__)
/*
- * On the aforementioned architectures, the page queues lock
- * is not required by the following read-modify-write
- * operation. The combination of the object's lock and an
- * atomic operation suffice. Moreover, the pmap layer on
- * these architectures can call vm_page_dirty() without
- * holding the page queues lock.
+ * The pmap layer can call vm_page_dirty() without
+ * holding a distinguished lock. The combination of
+ * the object's lock and an atomic operation suffice
+ * to guarantee consistency of the page dirty field.
+ *
+ * For PAGE_SIZE == 32768 case, compiler already
+ * properly aligns the dirty field, so no forcible
+ * alignment is needed. Only require existence of
+ * atomic_clear_64 when page size is 32768.
*/
-#if PAGE_SIZE == 4096
- atomic_clear_char(&m->dirty, pagebits);
-#elif PAGE_SIZE == 8192
- atomic_clear_short(&m->dirty, pagebits);
+ addr = (uintptr_t)&m->dirty;
+#if PAGE_SIZE == 32768
+#error pagebits too short
+ atomic_clear_64((uint64_t *)addr, pagebits);
#elif PAGE_SIZE == 16384
- atomic_clear_int(&m->dirty, pagebits);
-#else
-#error "PAGE_SIZE is not supported."
-#endif
-#else
+ atomic_clear_32((uint32_t *)addr, pagebits);
+#else /* PAGE_SIZE <= 8192 */
/*
- * Otherwise, the page queues lock is required to ensure that
- * a concurrent pmap operation does not set the page's dirty
- * field during the following read-modify-write operation.
+ * Use a trick to perform a 32-bit atomic on the
+ * containing aligned word, to not depend on the existence
+ * of atomic_clear_{8, 16}.
*/
- vm_page_lock_queues();
- m->dirty &= ~pagebits;
- vm_page_unlock_queues();
+ shift = addr & (sizeof(uint32_t) - 1);
+#if BYTE_ORDER == BIG_ENDIAN
+ shift = (sizeof(uint32_t) - sizeof(m->dirty) - shift) * NBBY;
+#else
+ shift *= NBBY;
#endif
+ addr &= ~(sizeof(uint32_t) - 1);
+ atomic_clear_32((uint32_t *)addr, pagebits << shift);
+#endif /* PAGE_SIZE */
}
}
Modified: stable/9/sys/vm/vm_page.h
==============================================================================
--- stable/9/sys/vm/vm_page.h Wed Oct 12 19:52:23 2011 (r226318)
+++ stable/9/sys/vm/vm_page.h Wed Oct 12 20:08:25 2011 (r226319)
@@ -94,21 +94,21 @@
* object that the page belongs to (O), the pool lock for the page (P),
* or the lock for either the free or paging queues (Q). If a field is
* annotated below with two of these locks, then holding either lock is
- * sufficient for read access, but both locks are required for write
+ * sufficient for read access, but both locks are required for write
* access.
*
- * In contrast, the synchronization of accesses to the page's dirty field
- * is machine dependent (M). In the machine-independent layer, the lock
- * on the object that the page belongs to must be held in order to
- * operate on the field. However, the pmap layer is permitted to set
- * all bits within the field without holding that lock. Therefore, if
- * the underlying architecture does not support atomic read-modify-write
- * operations on the field's type, then the machine-independent layer
- * must also hold the page queues lock when performing read-modify-write
- * operations and the pmap layer must hold the page queues lock when
- * setting the field. In the machine-independent layer, the
- * implementation of read-modify-write operations on the field is
- * encapsulated in vm_page_clear_dirty_mask().
+ * In contrast, the synchronization of accesses to the page's
+ * dirty field is machine dependent (M). In the
+ * machine-independent layer, the lock on the object that the
+ * page belongs to must be held in order to operate on the field.
+ * However, the pmap layer is permitted to set all bits within
+ * the field without holding that lock. If the underlying
+ * architecture does not support atomic read-modify-write
+ * operations on the field's type, then the machine-independent
+ * layer uses a 32-bit atomic on the aligned 32-bit word that
+ * contains the dirty field. In the machine-independent layer,
+ * the implementation of read-modify-write operations on the
+ * field is encapsulated in vm_page_clear_dirty_mask().
*/
TAILQ_HEAD(pglist, vm_page);
@@ -139,17 +139,17 @@ struct vm_page {
/* so, on normal X86 kernels, they must be at least 8 bits wide */
/* In reality, support for 32KB pages is not fully implemented. */
#if PAGE_SIZE == 4096
- u_char valid; /* map of valid DEV_BSIZE chunks (O) */
- u_char dirty; /* map of dirty DEV_BSIZE chunks (M) */
+ uint8_t valid; /* map of valid DEV_BSIZE chunks (O) */
+ uint8_t dirty; /* map of dirty DEV_BSIZE chunks (M) */
#elif PAGE_SIZE == 8192
- u_short valid; /* map of valid DEV_BSIZE chunks (O) */
- u_short dirty; /* map of dirty DEV_BSIZE chunks (M) */
+ uint16_t valid; /* map of valid DEV_BSIZE chunks (O) */
+ uint16_t dirty; /* map of dirty DEV_BSIZE chunks (M) */
#elif PAGE_SIZE == 16384
- u_int valid; /* map of valid DEV_BSIZE chunks (O) */
- u_int dirty; /* map of dirty DEV_BSIZE chunks (M) */
+ uint32_t valid; /* map of valid DEV_BSIZE chunks (O) */
+ uint32_t dirty; /* map of dirty DEV_BSIZE chunks (M) */
#elif PAGE_SIZE == 32768
- u_long valid; /* map of valid DEV_BSIZE chunks (O) */
- u_long dirty; /* map of dirty DEV_BSIZE chunks (M) */
+ uint64_t valid; /* map of valid DEV_BSIZE chunks (O) */
+ uint64_t dirty; /* map of dirty DEV_BSIZE chunks (M) */
#endif
};
More information about the svn-src-stable-9
mailing list