svn commit: r234094 - in stable/8/sys: i386/conf vm
Konstantin Belousov
kib at FreeBSD.org
Tue Apr 10 10:44:42 UTC 2012
Author: kib
Date: Tue Apr 10 10:44:41 2012
New Revision: 234094
URL: http://svn.freebsd.org/changeset/base/234094
Log:
MFC r233100:
In vm_object_page_clean(), do not clean the OBJ_MIGHTBEDIRTY object flag
if the filesystem performed a short write and we are skipping the page
due to this.
Propagate the write error from the pager back to the callers of
vm_pageout_flush(). Report the failure to write a page from the
requested range as the FALSE return value from vm_object_page_clean(),
and propagate it back to msync(2) to return EIO to usermode.
While there, convert the clearobjflags variable in the
vm_object_page_clean() and arguments of the helper functions to
boolean.
PR: kern/165927
Tested by: David Wolfskill
Modified:
stable/8/sys/vm/vm_contig.c
stable/8/sys/vm/vm_map.c
stable/8/sys/vm/vm_mmap.c
stable/8/sys/vm/vm_object.c
stable/8/sys/vm/vm_object.h
stable/8/sys/vm/vm_pageout.c
stable/8/sys/vm/vm_pageout.h
Directory Properties:
stable/8/sys/ (props changed)
stable/8/sys/amd64/include/xen/ (props changed)
stable/8/sys/boot/ (props changed)
stable/8/sys/cddl/contrib/opensolaris/ (props changed)
stable/8/sys/contrib/dev/acpica/ (props changed)
stable/8/sys/contrib/pf/ (props changed)
stable/8/sys/dev/e1000/ (props changed)
stable/8/sys/i386/conf/XENHVM (props changed)
Modified: stable/8/sys/vm/vm_contig.c
==============================================================================
--- stable/8/sys/vm/vm_contig.c Tue Apr 10 09:27:41 2012 (r234093)
+++ stable/8/sys/vm/vm_contig.c Tue Apr 10 10:44:41 2012 (r234094)
@@ -135,7 +135,8 @@ vm_contig_launder_page(vm_page_t m, vm_p
} else if (object->type == OBJT_SWAP ||
object->type == OBJT_DEFAULT) {
m_tmp = m;
- vm_pageout_flush(&m_tmp, 1, VM_PAGER_PUT_SYNC, 0, NULL);
+ vm_pageout_flush(&m_tmp, 1, VM_PAGER_PUT_SYNC, 0,
+ NULL, NULL);
VM_OBJECT_UNLOCK(object);
return (0);
}
Modified: stable/8/sys/vm/vm_map.c
==============================================================================
--- stable/8/sys/vm/vm_map.c Tue Apr 10 09:27:41 2012 (r234093)
+++ stable/8/sys/vm/vm_map.c Tue Apr 10 10:44:41 2012 (r234094)
@@ -2573,6 +2573,7 @@ vm_map_sync(
vm_object_t object;
vm_ooffset_t offset;
unsigned int last_timestamp;
+ boolean_t failed;
vm_map_lock_read(map);
VM_MAP_RANGE_CHECK(map, start, end);
@@ -2602,6 +2603,7 @@ vm_map_sync(
if (invalidate)
pmap_remove(map->pmap, start, end);
+ failed = FALSE;
/*
* Make a second pass, cleaning/uncaching pages from the indicated
@@ -2630,7 +2632,8 @@ vm_map_sync(
vm_object_reference(object);
last_timestamp = map->timestamp;
vm_map_unlock_read(map);
- vm_object_sync(object, offset, size, syncio, invalidate);
+ if (!vm_object_sync(object, offset, size, syncio, invalidate))
+ failed = TRUE;
start += size;
vm_object_deallocate(object);
vm_map_lock_read(map);
@@ -2640,7 +2643,7 @@ vm_map_sync(
}
vm_map_unlock_read(map);
- return (KERN_SUCCESS);
+ return (failed ? KERN_FAILURE : KERN_SUCCESS);
}
/*
Modified: stable/8/sys/vm/vm_mmap.c
==============================================================================
--- stable/8/sys/vm/vm_mmap.c Tue Apr 10 09:27:41 2012 (r234093)
+++ stable/8/sys/vm/vm_mmap.c Tue Apr 10 10:44:41 2012 (r234094)
@@ -487,6 +487,8 @@ msync(td, uap)
return (EINVAL); /* Sun returns ENOMEM? */
case KERN_INVALID_ARGUMENT:
return (EBUSY);
+ case KERN_FAILURE:
+ return (EIO);
default:
return (EINVAL);
}
Modified: stable/8/sys/vm/vm_object.c
==============================================================================
--- stable/8/sys/vm/vm_object.c Tue Apr 10 09:27:41 2012 (r234093)
+++ stable/8/sys/vm/vm_object.c Tue Apr 10 10:44:41 2012 (r234094)
@@ -105,9 +105,10 @@ SYSCTL_INT(_vm, OID_AUTO, old_msync, CTL
"Use old (insecure) msync behavior");
static int vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
- int pagerflags, int flags, int *clearobjflags);
+ int pagerflags, int flags, boolean_t *clearobjflags,
+ boolean_t *eio);
static boolean_t vm_object_page_remove_write(vm_page_t p, int flags,
- int *clearobjflags);
+ boolean_t *clearobjflags);
static void vm_object_qcollapse(vm_object_t object);
static void vm_object_vndeallocate(vm_object_t object);
@@ -772,7 +773,7 @@ vm_object_terminate(vm_object_t object)
}
static boolean_t
-vm_object_page_remove_write(vm_page_t p, int flags, int *clearobjflags)
+vm_object_page_remove_write(vm_page_t p, int flags, boolean_t *clearobjflags)
{
/*
@@ -781,7 +782,7 @@ vm_object_page_remove_write(vm_page_t p,
* cleared in this case so we do not have to set them.
*/
if ((flags & OBJPC_NOSYNC) != 0 && (p->oflags & VPO_NOSYNC) != 0) {
- *clearobjflags = 0;
+ *clearobjflags = FALSE;
return (FALSE);
} else {
pmap_remove_write(p);
@@ -803,20 +804,24 @@ vm_object_page_remove_write(vm_page_t p,
* Odd semantics: if start == end, we clean everything.
*
* The object must be locked.
+ *
+ * Returns FALSE if some page from the range was not written, as
+ * reported by the pager, and TRUE otherwise.
*/
-void
+boolean_t
vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
int flags)
{
vm_page_t np, p;
vm_pindex_t pi, tend;
- int clearobjflags, curgeneration, n, pagerflags;
+ int curgeneration, n, pagerflags;
+ boolean_t clearobjflags, eio, res;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
KASSERT(object->type == OBJT_VNODE, ("Not a vnode object"));
if ((object->flags & OBJ_MIGHTBEDIRTY) == 0 ||
object->resident_page_count == 0)
- return;
+ return (TRUE);
pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) != 0 ?
VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
@@ -835,7 +840,8 @@ vm_object_page_clean(vm_object_t object,
* stay dirty so do not mess with the page and do not clear the
* object flags.
*/
- clearobjflags = 1;
+ clearobjflags = TRUE;
+ res = TRUE;
rescan:
curgeneration = object->generation;
@@ -858,7 +864,11 @@ rescan:
continue;
n = vm_object_page_collect_flush(object, p, pagerflags,
- flags, &clearobjflags);
+ flags, &clearobjflags, &eio);
+ if (eio) {
+ res = FALSE;
+ clearobjflags = FALSE;
+ }
if (object->generation != curgeneration)
goto rescan;
@@ -874,8 +884,10 @@ rescan:
* behind, but there is not much we can do there if
* filesystem refuses to write it.
*/
- if (n == 0)
+ if (n == 0) {
n = 1;
+ clearobjflags = FALSE;
+ }
np = vm_page_find_least(object, pi + n);
}
vm_page_unlock_queues();
@@ -886,11 +898,12 @@ rescan:
vm_object_clear_flag(object, OBJ_CLEANING);
if (clearobjflags && start == 0 && tend == object->size)
vm_object_clear_flag(object, OBJ_MIGHTBEDIRTY);
+ return (res);
}
static int
vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags,
- int flags, int *clearobjflags)
+ int flags, boolean_t *clearobjflags, boolean_t *eio)
{
vm_page_t ma[vm_pageout_page_count], p_first, tp;
int count, i, mreq, runlen;
@@ -921,7 +934,7 @@ vm_object_page_collect_flush(vm_object_t
for (tp = p_first, i = 0; i < count; tp = TAILQ_NEXT(tp, listq), i++)
ma[i] = tp;
- vm_pageout_flush(ma, count, pagerflags, mreq, &runlen);
+ vm_pageout_flush(ma, count, pagerflags, mreq, &runlen, eio);
return (runlen);
}
@@ -935,17 +948,20 @@ vm_object_page_collect_flush(vm_object_t
* Note: certain anonymous maps, such as MAP_NOSYNC maps,
* may start out with a NULL object.
*/
-void
+boolean_t
vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
boolean_t syncio, boolean_t invalidate)
{
vm_object_t backing_object;
struct vnode *vp;
struct mount *mp;
- int flags, fsync_after;
+ int error, flags, fsync_after;
+ boolean_t res;
if (object == NULL)
- return;
+ return (TRUE);
+ res = TRUE;
+ error = 0;
VM_OBJECT_LOCK(object);
while ((backing_object = object->backing_object) != NULL) {
VM_OBJECT_LOCK(backing_object);
@@ -991,16 +1007,18 @@ vm_object_sync(vm_object_t object, vm_oo
fsync_after = FALSE;
}
VM_OBJECT_LOCK(object);
- vm_object_page_clean(object,
+ res = vm_object_page_clean(object,
OFF_TO_IDX(offset),
OFF_TO_IDX(offset + size + PAGE_MASK),
flags);
VM_OBJECT_UNLOCK(object);
if (fsync_after)
- (void) VOP_FSYNC(vp, MNT_WAIT, curthread);
+ error = VOP_FSYNC(vp, MNT_WAIT, curthread);
VOP_UNLOCK(vp, 0);
VFS_UNLOCK_GIANT(vfslocked);
vn_finished_write(mp);
+ if (error != 0)
+ res = FALSE;
VM_OBJECT_LOCK(object);
}
if ((object->type == OBJT_VNODE ||
@@ -1013,6 +1031,7 @@ vm_object_sync(vm_object_t object, vm_oo
purge ? FALSE : TRUE);
}
VM_OBJECT_UNLOCK(object);
+ return (res);
}
/*
Modified: stable/8/sys/vm/vm_object.h
==============================================================================
--- stable/8/sys/vm/vm_object.h Tue Apr 10 09:27:41 2012 (r234093)
+++ stable/8/sys/vm/vm_object.h Tue Apr 10 10:44:41 2012 (r234094)
@@ -220,7 +220,7 @@ void vm_object_set_writeable_dirty (vm_o
void vm_object_init (void);
void vm_object_page_cache(vm_object_t object, vm_pindex_t start,
vm_pindex_t end);
-void vm_object_page_clean (vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t);
+boolean_t vm_object_page_clean(vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t);
void vm_object_page_remove (vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t);
boolean_t vm_object_populate(vm_object_t, vm_pindex_t, vm_pindex_t);
void vm_object_reference (vm_object_t);
@@ -228,7 +228,7 @@ void vm_object_reference_locked(vm_objec
int vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr);
void vm_object_shadow (vm_object_t *, vm_ooffset_t *, vm_size_t);
void vm_object_split(vm_map_entry_t);
-void vm_object_sync(vm_object_t, vm_ooffset_t, vm_size_t, boolean_t,
+boolean_t vm_object_sync(vm_object_t, vm_ooffset_t, vm_size_t, boolean_t,
boolean_t);
void vm_object_madvise (vm_object_t, vm_pindex_t, int, int);
#endif /* _KERNEL */
Modified: stable/8/sys/vm/vm_pageout.c
==============================================================================
--- stable/8/sys/vm/vm_pageout.c Tue Apr 10 09:27:41 2012 (r234093)
+++ stable/8/sys/vm/vm_pageout.c Tue Apr 10 10:44:41 2012 (r234094)
@@ -391,7 +391,8 @@ more:
/*
* we allow reads during pageouts...
*/
- return (vm_pageout_flush(&mc[page_base], pageout_count, 0, 0, NULL));
+ return (vm_pageout_flush(&mc[page_base], pageout_count, 0, 0, NULL,
+ NULL));
}
/*
@@ -405,9 +406,12 @@ more:
*
* Returned runlen is the count of pages between mreq and first
* page after mreq with status VM_PAGER_AGAIN.
+ * *eio is set to TRUE if pager returned VM_PAGER_ERROR or VM_PAGER_FAIL
+ * for any page in runlen set.
*/
int
-vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen)
+vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
+ boolean_t *eio)
{
vm_object_t object = mc[0]->object;
int pageout_status[count];
@@ -439,6 +443,8 @@ vm_pageout_flush(vm_page_t *mc, int coun
vm_pager_put_pages(object, mc, count, flags, pageout_status);
runlen = count - mreq;
+ if (eio != NULL)
+ *eio = FALSE;
vm_page_lock_queues();
for (i = 0; i < count; i++) {
vm_page_t mt = mc[i];
@@ -467,6 +473,8 @@ vm_pageout_flush(vm_page_t *mc, int coun
* will try paging out it again later).
*/
vm_page_activate(mt);
+ if (eio != NULL && i >= mreq && i - mreq < runlen)
+ *eio = TRUE;
break;
case VM_PAGER_AGAIN:
if (i >= mreq && i - mreq < runlen)
Modified: stable/8/sys/vm/vm_pageout.h
==============================================================================
--- stable/8/sys/vm/vm_pageout.h Tue Apr 10 09:27:41 2012 (r234093)
+++ stable/8/sys/vm/vm_pageout.h Tue Apr 10 10:44:41 2012 (r234094)
@@ -102,7 +102,7 @@ extern void vm_waitpfault(void);
#ifdef _KERNEL
boolean_t vm_pageout_fallback_object_lock(vm_page_t, vm_page_t *);
-int vm_pageout_flush(vm_page_t *, int, int, int, int *);
+int vm_pageout_flush(vm_page_t *, int, int, int, int *, boolean_t *);
void vm_pageout_oom(int shortage);
void vm_contig_grow_cache(int, vm_paddr_t, vm_paddr_t);
#endif
More information about the svn-src-stable-8
mailing list