svn commit: r323677 - in stable/11/sys: kern sparc64/sparc64 vm
Mark Johnston
markj at FreeBSD.org
Sun Sep 17 15:45:40 UTC 2017
Author: markj
Date: Sun Sep 17 15:45:39 2017
New Revision: 323677
URL: https://svnweb.freebsd.org/changeset/base/323677
Log:
MFC r322405, r322406:
Modify vm_page_grab_pages() to handle VM_ALLOC_NOWAIT, use it in
sendfile_swapin().
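  [Editor's note, not part of the commit: with VM_ALLOC_NOWAIT the function can
  now stop early and report how many pages of the requested range it actually
  grabbed.  A minimal caller-side sketch, mirroring the sendfile_swapin() change
  below; obj, off, pa and npages stand in for whatever state the caller keeps:

	VM_OBJECT_WLOCK(obj);
	/* Try to grab npages busied, wired pages without sleeping. */
	grabbed = vm_page_grab_pages(obj, OFF_TO_IDX(off),
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOWAIT, pa, npages);
	if (grabbed < npages) {
		/* Only a prefix of the range was grabbed; trim the request. */
		for (int i = grabbed; i < npages; i++)
			pa[i] = NULL;
		npages = grabbed;
	}
	VM_OBJECT_WUNLOCK(obj);
  ]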
Modified:
stable/11/sys/kern/kern_sendfile.c
stable/11/sys/kern/vfs_bio.c
stable/11/sys/sparc64/sparc64/pmap.c
stable/11/sys/vm/vm_glue.c
stable/11/sys/vm/vm_page.c
stable/11/sys/vm/vm_page.h
Directory Properties:
stable/11/ (props changed)
Modified: stable/11/sys/kern/kern_sendfile.c
==============================================================================
--- stable/11/sys/kern/kern_sendfile.c Sun Sep 17 15:40:12 2017 (r323676)
+++ stable/11/sys/kern/kern_sendfile.c Sun Sep 17 15:45:39 2017 (r323677)
@@ -308,7 +308,7 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, o
int npages, int rhpages, int flags)
{
vm_page_t *pa = sfio->pa;
- int nios;
+ int grabbed, nios;
nios = 0;
flags = (flags & SF_NODISKIO) ? VM_ALLOC_NOWAIT : 0;
@@ -318,14 +318,14 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, o
* only required pages. Readahead pages are dealt with later.
*/
VM_OBJECT_WLOCK(obj);
- for (int i = 0; i < npages; i++) {
- pa[i] = vm_page_grab(obj, OFF_TO_IDX(vmoff(i, off)),
- VM_ALLOC_WIRED | VM_ALLOC_NORMAL | flags);
- if (pa[i] == NULL) {
- npages = i;
- rhpages = 0;
- break;
- }
+
+ grabbed = vm_page_grab_pages(obj, OFF_TO_IDX(off),
+ VM_ALLOC_NORMAL | VM_ALLOC_WIRED | flags, pa, npages);
+ if (grabbed < npages) {
+ for (int i = grabbed; i < npages; i++)
+ pa[i] = NULL;
+ npages = grabbed;
+ rhpages = 0;
}
for (int i = 0; i < npages;) {
Modified: stable/11/sys/kern/vfs_bio.c
==============================================================================
--- stable/11/sys/kern/vfs_bio.c Sun Sep 17 15:40:12 2017 (r323676)
+++ stable/11/sys/kern/vfs_bio.c Sun Sep 17 15:45:39 2017 (r323677)
@@ -2756,7 +2756,7 @@ vfs_vmio_extend(struct buf *bp, int desiredpages, int
* deadlocks once allocbuf() is called after
* pages are vfs_busy_pages().
*/
- vm_page_grab_pages(obj,
+ (void)vm_page_grab_pages(obj,
OFF_TO_IDX(bp->b_offset) + bp->b_npages,
VM_ALLOC_SYSTEM | VM_ALLOC_IGN_SBUSY |
VM_ALLOC_NOBUSY | VM_ALLOC_WIRED,
Modified: stable/11/sys/sparc64/sparc64/pmap.c
==============================================================================
--- stable/11/sys/sparc64/sparc64/pmap.c Sun Sep 17 15:40:12 2017 (r323676)
+++ stable/11/sys/sparc64/sparc64/pmap.c Sun Sep 17 15:45:39 2017 (r323677)
@@ -1252,7 +1252,7 @@ pmap_pinit(pmap_t pm)
CPU_ZERO(&pm->pm_active);
VM_OBJECT_WLOCK(pm->pm_tsb_obj);
- vm_page_grab_pages(pm->pm_tsb_obj, 0, VM_ALLOC_NORMAL |
+ (void)vm_page_grab_pages(pm->pm_tsb_obj, 0, VM_ALLOC_NORMAL |
VM_ALLOC_NOBUSY | VM_ALLOC_WIRED | VM_ALLOC_ZERO, ma, TSB_PAGES);
VM_OBJECT_WUNLOCK(pm->pm_tsb_obj);
for (i = 0; i < TSB_PAGES; i++)
Modified: stable/11/sys/vm/vm_glue.c
==============================================================================
--- stable/11/sys/vm/vm_glue.c Sun Sep 17 15:40:12 2017 (r323676)
+++ stable/11/sys/vm/vm_glue.c Sun Sep 17 15:45:39 2017 (r323677)
@@ -391,7 +391,7 @@ vm_thread_new(struct thread *td, int pages)
* page of stack.
*/
VM_OBJECT_WLOCK(ksobj);
- vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
+ (void)vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
VM_ALLOC_WIRED, ma, pages);
for (i = 0; i < pages; i++)
ma[i]->valid = VM_PAGE_BITS_ALL;
@@ -568,7 +568,7 @@ vm_thread_swapin(struct thread *td)
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
VM_OBJECT_WLOCK(ksobj);
- vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_WIRED, ma,
+ (void)vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_WIRED, ma,
pages);
for (int i = 0; i < pages;) {
int j, a, count, rv;
Modified: stable/11/sys/vm/vm_page.c
==============================================================================
--- stable/11/sys/vm/vm_page.c Sun Sep 17 15:40:12 2017 (r323676)
+++ stable/11/sys/vm/vm_page.c Sun Sep 17 15:45:39 2017 (r323677)
@@ -3147,13 +3147,15 @@ retrylookup:
* optional allocation flags:
* VM_ALLOC_IGN_SBUSY do not sleep on soft busy pages
* VM_ALLOC_NOBUSY do not exclusive busy the page
+ * VM_ALLOC_NOWAIT do not sleep
* VM_ALLOC_SBUSY set page to sbusy state
* VM_ALLOC_WIRED wire the pages
* VM_ALLOC_ZERO zero and validate any invalid pages
*
- * This routine may sleep.
+ * If VM_ALLOC_NOWAIT is not specified, this routine may sleep. Otherwise, it
+ * may return a partial prefix of the requested range.
*/
-void
+int
vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
vm_page_t *ma, int count)
{
@@ -3171,7 +3173,7 @@ vm_page_grab_pages(vm_object_t object, vm_pindex_t pin
(allocflags & VM_ALLOC_IGN_SBUSY) != 0,
("vm_page_grab_pages: VM_ALLOC_SBUSY/IGN_SBUSY mismatch"));
if (count == 0)
- return;
+ return (0);
i = 0;
retrylookup:
m = vm_page_lookup(object, pindex + i);
@@ -3180,6 +3182,8 @@ retrylookup:
sleep = (allocflags & VM_ALLOC_IGN_SBUSY) != 0 ?
vm_page_xbusied(m) : vm_page_busied(m);
if (sleep) {
+ if ((allocflags & VM_ALLOC_NOWAIT) != 0)
+ break;
/*
* Reference the page before unlocking and
* sleeping so that the page daemon is less
@@ -3207,6 +3211,8 @@ retrylookup:
m = vm_page_alloc(object, pindex + i, (allocflags &
~VM_ALLOC_IGN_SBUSY) | VM_ALLOC_COUNT(count - i));
if (m == NULL) {
+ if ((allocflags & VM_ALLOC_NOWAIT) != 0)
+ break;
VM_OBJECT_WUNLOCK(object);
VM_WAIT;
VM_OBJECT_WLOCK(object);
@@ -3221,6 +3227,7 @@ retrylookup:
ma[i] = m;
m = vm_page_next(m);
}
+ return (i);
}
/*
Modified: stable/11/sys/vm/vm_page.h
==============================================================================
--- stable/11/sys/vm/vm_page.h Sun Sep 17 15:40:12 2017 (r323676)
+++ stable/11/sys/vm/vm_page.h Sun Sep 17 15:45:39 2017 (r323677)
@@ -414,7 +414,7 @@ vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
#define VM_ALLOC_IGN_SBUSY 0x1000 /* (gp) Ignore shared busy flag */
#define VM_ALLOC_NODUMP 0x2000 /* (ag) don't include in dump */
#define VM_ALLOC_SBUSY 0x4000 /* (acgp) Shared busy the page */
-#define VM_ALLOC_NOWAIT 0x8000 /* (g) Do not sleep, return NULL */
+#define VM_ALLOC_NOWAIT 0x8000 /* (gp) Do not sleep */
#define VM_ALLOC_COUNT_SHIFT 16
#define VM_ALLOC_COUNT(count) ((count) << VM_ALLOC_COUNT_SHIFT)
@@ -454,7 +454,7 @@ vm_page_t vm_page_alloc_contig(vm_object_t object, vm_
vm_page_t vm_page_alloc_freelist(int, int);
void vm_page_change_lock(vm_page_t m, struct mtx **mtx);
vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
-void vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
+int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
vm_page_t *ma, int count);
int vm_page_try_to_free (vm_page_t);
void vm_page_deactivate (vm_page_t);
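  [Editor's note on the other call sites touched above: when VM_ALLOC_NOWAIT is
  not passed, vm_page_grab_pages() still sleeps until the entire range has been
  grabbed, so its return value there always equals count; the vfs_bio.c, pmap.c
  and vm_glue.c callers make that explicit by discarding it, e.g.:

	(void)vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_WIRED, ma,
	    pages);
  ]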