svn commit: r226843 - in head/sys: amd64/amd64 dev/xen/balloon i386/i386 i386/xen kern vm
Alan Cox  alc at FreeBSD.org
Thu Oct 27 16:39:17 UTC 2011
Author: alc
Date: Thu Oct 27 16:39:17 2011
New Revision: 226843
URL: http://svn.freebsd.org/changeset/base/226843
Log:
Eliminate vestiges of page coloring in VM_ALLOC_NOOBJ calls to
vm_page_alloc(). While I'm here, for the sake of consistency, always
specify the allocation class, such as VM_ALLOC_NORMAL, as the first of
the flags.
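
For context, an illustrative before/after sketch drawn from the pmap_pinit() hunk below (not part of the commit itself): with VM_ALLOC_NOOBJ the allocation has no backing VM object, so the pindex argument no longer influences placement now that page coloring is gone, and the static "color"/"colour" counters each call site kept incrementing were dead weight. The change passes a constant 0 instead and names the allocation class first among the flags:

	/* Before: a per-call-site static counter emulated page coloring. */
	static vm_pindex_t color;
	...
	while ((pml4pg = vm_page_alloc(NULL, color++, VM_ALLOC_NOOBJ |
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
		VM_WAIT;

	/* After: pass 0 for the unused pindex and list the allocation
	 * class (VM_ALLOC_NORMAL) first, for consistency across call sites. */
	while ((pml4pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
		VM_WAIT;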
Modified:
head/sys/amd64/amd64/pmap.c
head/sys/amd64/amd64/uma_machdep.c
head/sys/dev/xen/balloon/balloon.c
head/sys/i386/i386/pmap.c
head/sys/i386/xen/pmap.c
head/sys/kern/vfs_bio.c
head/sys/vm/vm_kern.c
Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c Thu Oct 27 16:22:17 2011 (r226842)
+++ head/sys/amd64/amd64/pmap.c Thu Oct 27 16:39:17 2011 (r226843)
@@ -1635,7 +1635,6 @@ int
pmap_pinit(pmap_t pmap)
{
vm_page_t pml4pg;
- static vm_pindex_t color;
int i;
PMAP_LOCK_INIT(pmap);
@@ -1643,8 +1642,8 @@ pmap_pinit(pmap_t pmap)
/*
* allocate the page directory page
*/
- while ((pml4pg = vm_page_alloc(NULL, color++, VM_ALLOC_NOOBJ |
- VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
+ while ((pml4pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
+ VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
VM_WAIT;
pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4pg));
@@ -2188,7 +2187,6 @@ get_pv_entry(pmap_t pmap, int try)
{
static const struct timeval printinterval = { 60, 0 };
static struct timeval lastprint;
- static vm_pindex_t colour;
struct vpgqueues *pq;
int bit, field;
pv_entry_t pv;
@@ -2228,7 +2226,7 @@ retry:
}
}
/* No free items, allocate another chunk */
- m = vm_page_alloc(NULL, colour, (pq == &vm_page_queues[PQ_ACTIVE] ?
+ m = vm_page_alloc(NULL, 0, (pq == &vm_page_queues[PQ_ACTIVE] ?
VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL) | VM_ALLOC_NOOBJ |
VM_ALLOC_WIRED);
if (m == NULL) {
@@ -2255,7 +2253,6 @@ retry:
}
PV_STAT(pc_chunk_count++);
PV_STAT(pc_chunk_allocs++);
- colour++;
dump_add_page(m->phys_addr);
pc = (void *)PHYS_TO_DMAP(m->phys_addr);
pc->pc_pmap = pmap;
Modified: head/sys/amd64/amd64/uma_machdep.c
==============================================================================
--- head/sys/amd64/amd64/uma_machdep.c Thu Oct 27 16:22:17 2011 (r226842)
+++ head/sys/amd64/amd64/uma_machdep.c Thu Oct 27 16:39:17 2011 (r226843)
@@ -42,7 +42,6 @@ __FBSDID("$FreeBSD$");
void *
uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
- static vm_pindex_t colour;
vm_page_t m;
vm_paddr_t pa;
void *va;
@@ -50,13 +49,13 @@ uma_small_alloc(uma_zone_t zone, int byt
*flags = UMA_SLAB_PRIV;
if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
- pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
+ pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
else
- pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;
+ pflags = VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
if (wait & M_ZERO)
pflags |= VM_ALLOC_ZERO;
for (;;) {
- m = vm_page_alloc(NULL, colour++, pflags | VM_ALLOC_NOOBJ);
+ m = vm_page_alloc(NULL, 0, pflags);
if (m == NULL) {
if (wait & M_NOWAIT)
return (NULL);
Modified: head/sys/dev/xen/balloon/balloon.c
==============================================================================
--- head/sys/dev/xen/balloon/balloon.c Thu Oct 27 16:22:17 2011 (r226842)
+++ head/sys/dev/xen/balloon/balloon.c Thu Oct 27 16:39:17 2011 (r226843)
@@ -298,8 +298,7 @@ decrease_reservation(unsigned long nr_pa
nr_pages = ARRAY_SIZE(frame_list);
for (i = 0; i < nr_pages; i++) {
- int color = 0;
- if ((page = vm_page_alloc(NULL, color++,
+ if ((page = vm_page_alloc(NULL, 0,
VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
nr_pages = i;
Modified: head/sys/i386/i386/pmap.c
==============================================================================
--- head/sys/i386/i386/pmap.c Thu Oct 27 16:22:17 2011 (r226842)
+++ head/sys/i386/i386/pmap.c Thu Oct 27 16:39:17 2011 (r226843)
@@ -1720,7 +1720,6 @@ pmap_pinit(pmap_t pmap)
{
vm_page_t m, ptdpg[NPGPTD];
vm_paddr_t pa;
- static int color;
int i;
PMAP_LOCK_INIT(pmap);
@@ -1754,9 +1753,8 @@ pmap_pinit(pmap_t pmap)
* allocate the page directory page(s)
*/
for (i = 0; i < NPGPTD;) {
- m = vm_page_alloc(NULL, color++,
- VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
- VM_ALLOC_ZERO);
+ m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
+ VM_ALLOC_WIRED | VM_ALLOC_ZERO);
if (m == NULL)
VM_WAIT;
else {
@@ -2274,7 +2272,6 @@ get_pv_entry(pmap_t pmap, int try)
{
static const struct timeval printinterval = { 60, 0 };
static struct timeval lastprint;
- static vm_pindex_t colour;
struct vpgqueues *pq;
int bit, field;
pv_entry_t pv;
@@ -2320,7 +2317,7 @@ retry:
* queues lock. If "pv_vafree" is currently non-empty, it will
* remain non-empty until pmap_ptelist_alloc() completes.
*/
- if (pv_vafree == 0 || (m = vm_page_alloc(NULL, colour, (pq ==
+ if (pv_vafree == 0 || (m = vm_page_alloc(NULL, 0, (pq ==
&vm_page_queues[PQ_ACTIVE] ? VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL) |
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
if (try) {
@@ -2346,7 +2343,6 @@ retry:
}
PV_STAT(pc_chunk_count++);
PV_STAT(pc_chunk_allocs++);
- colour++;
pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree);
pmap_qenter((vm_offset_t)pc, &m, 1);
pc->pc_pmap = pmap;
Modified: head/sys/i386/xen/pmap.c
==============================================================================
--- head/sys/i386/xen/pmap.c Thu Oct 27 16:22:17 2011 (r226842)
+++ head/sys/i386/xen/pmap.c Thu Oct 27 16:39:17 2011 (r226843)
@@ -1475,7 +1475,6 @@ pmap_pinit(pmap_t pmap)
{
vm_page_t m, ptdpg[NPGPTD + 1];
int npgptd = NPGPTD + 1;
- static int color;
int i;
#ifdef HAMFISTED_LOCKING
@@ -1507,9 +1506,8 @@ pmap_pinit(pmap_t pmap)
* allocate the page directory page(s)
*/
for (i = 0; i < npgptd;) {
- m = vm_page_alloc(NULL, color++,
- VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
- VM_ALLOC_ZERO);
+ m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
+ VM_ALLOC_WIRED | VM_ALLOC_ZERO);
if (m == NULL)
VM_WAIT;
else {
@@ -2103,7 +2101,6 @@ get_pv_entry(pmap_t pmap, int try)
{
static const struct timeval printinterval = { 60, 0 };
static struct timeval lastprint;
- static vm_pindex_t colour;
struct vpgqueues *pq;
int bit, field;
pv_entry_t pv;
@@ -2149,7 +2146,7 @@ retry:
* queues lock. If "pv_vafree" is currently non-empty, it will
* remain non-empty until pmap_ptelist_alloc() completes.
*/
- if (pv_vafree == 0 || (m = vm_page_alloc(NULL, colour, (pq ==
+ if (pv_vafree == 0 || (m = vm_page_alloc(NULL, 0, (pq ==
&vm_page_queues[PQ_ACTIVE] ? VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL) |
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
if (try) {
@@ -2175,7 +2172,6 @@ retry:
}
PV_STAT(pc_chunk_count++);
PV_STAT(pc_chunk_allocs++);
- colour++;
pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree);
pmap_qenter((vm_offset_t)pc, &m, 1);
if ((m->flags & PG_ZERO) == 0)
Modified: head/sys/kern/vfs_bio.c
==============================================================================
--- head/sys/kern/vfs_bio.c Thu Oct 27 16:22:17 2011 (r226842)
+++ head/sys/kern/vfs_bio.c Thu Oct 27 16:39:17 2011 (r226843)
@@ -3760,10 +3760,9 @@ tryagain:
* could interfere with paging I/O, no matter which
* process we are.
*/
- p = vm_page_alloc(NULL, pg >> PAGE_SHIFT, VM_ALLOC_NOOBJ |
- VM_ALLOC_SYSTEM | VM_ALLOC_WIRED |
- VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT));
- if (!p) {
+ p = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ |
+ VM_ALLOC_WIRED | VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT));
+ if (p == NULL) {
VM_WAIT;
goto tryagain;
}
Modified: head/sys/vm/vm_kern.c
==============================================================================
--- head/sys/vm/vm_kern.c Thu Oct 27 16:22:17 2011 (r226842)
+++ head/sys/vm/vm_kern.c Thu Oct 27 16:39:17 2011 (r226843)
@@ -543,7 +543,7 @@ kmem_init_zero_region(void)
* zeros, while not using much more physical resources.
*/
addr = kmem_alloc_nofault(kernel_map, ZERO_REGION_SIZE);
- m = vm_page_alloc(NULL, OFF_TO_IDX(addr - VM_MIN_KERNEL_ADDRESS),
+ m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
if ((m->flags & PG_ZERO) == 0)
pmap_zero_page(m);