svn commit: r351957 - in stable/12/sys: kern sys
Alan Somers
asomers at FreeBSD.org
Fri Sep 6 19:22:34 UTC 2019
Author: asomers
Date: Fri Sep 6 19:22:33 2019
New Revision: 351957
URL: https://svnweb.freebsd.org/changeset/base/351957
Log:
MFC r350386, r350390
r350386:
Add v_inval_buf_range, like vtruncbuf but for a range of a file
v_inval_buf_range invalidates all buffers within a certain LBA range of a
file. It will be used by fusefs(5). This commit is a partial merge of
r346162, r346606, and r346756 from projects/fuse2.
Reviewed by: kib
Sponsored by: The FreeBSD Foundation
Differential Revision: https://reviews.freebsd.org/D21032
r350390:
Better comments for vlrureclaim
Sponsored by: The FreeBSD Foundation
Modified:
stable/12/sys/kern/vfs_subr.c
stable/12/sys/sys/vnode.h
Directory Properties:
stable/12/ (props changed)
Modified: stable/12/sys/kern/vfs_subr.c
==============================================================================
--- stable/12/sys/kern/vfs_subr.c Fri Sep 6 19:22:24 2019 (r351956)
+++ stable/12/sys/kern/vfs_subr.c Fri Sep 6 19:22:33 2019 (r351957)
@@ -117,6 +117,8 @@ static void vfs_knl_assert_locked(void *arg);
static void vfs_knl_assert_unlocked(void *arg);
static void vnlru_return_batches(struct vfsops *mnt_op);
static void destroy_vpollinfo(struct vpollinfo *vi);
+static int v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
+ daddr_t startlbn, daddr_t endlbn);
/*
* These fences are intended for cases where some synchronization is
@@ -944,9 +946,16 @@ vattr_null(struct vattr *vap)
* desirable to reuse such vnodes. These conditions may cause the
* number of vnodes to reach some minimum value regardless of what
* you set kern.maxvnodes to. Do not set kern.maxvnodes too low.
+ *
+ * @param mp Try to reclaim vnodes from this mountpoint
+ * @param reclaim_nc_src Only reclaim directories with outgoing namecache
+ * entries if this argument is true
+ * @param trigger Only reclaim vnodes with fewer than this many resident
+ * pages.
+ * @return The number of vnodes that were reclaimed.
*/
static int
-vlrureclaim(struct mount *mp, int reclaim_nc_src, int trigger)
+vlrureclaim(struct mount *mp, bool reclaim_nc_src, int trigger)
{
struct vnode *vp;
int count, done, target;
@@ -1235,7 +1244,8 @@ vnlru_proc(void)
{
struct mount *mp, *nmp;
unsigned long onumvnodes;
- int done, force, reclaim_nc_src, trigger, usevnodes;
+ int done, force, trigger, usevnodes;
+ bool reclaim_nc_src;
EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc,
SHUTDOWN_PRI_FIRST);
@@ -1945,9 +1955,8 @@ int
vtruncbuf(struct vnode *vp, off_t length, int blksize)
{
struct buf *bp, *nbp;
- int anyfreed;
- daddr_t trunclbn;
struct bufobj *bo;
+ daddr_t startlbn;
CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__,
vp, blksize, (uintmax_t)length);
@@ -1955,91 +1964,134 @@ vtruncbuf(struct vnode *vp, off_t length, int blksize)
/*
* Round up to the *next* lbn.
*/
- trunclbn = howmany(length, blksize);
+ startlbn = howmany(length, blksize);
ASSERT_VOP_LOCKED(vp, "vtruncbuf");
-restart:
+
bo = &vp->v_bufobj;
+restart_unlocked:
BO_LOCK(bo);
- anyfreed = 1;
- for (;anyfreed;) {
- anyfreed = 0;
- TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) {
- if (bp->b_lblkno < trunclbn)
+
+ while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN)
+ ;
+
+ if (length > 0) {
+restartsync:
+ TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
+ if (bp->b_lblkno > 0)
continue;
+ /*
+ * Since we hold the vnode lock this should only
+ * fail if we're racing with the buf daemon.
+ */
if (BUF_LOCK(bp,
LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
BO_LOCKPTR(bo)) == ENOLCK)
- goto restart;
+ goto restart_unlocked;
- bremfree(bp);
- bp->b_flags |= (B_INVAL | B_RELBUF);
- bp->b_flags &= ~B_ASYNC;
- brelse(bp);
- anyfreed = 1;
+ VNASSERT((bp->b_flags & B_DELWRI), vp,
+ ("buf(%p) on dirty queue without DELWRI", bp));
+ bremfree(bp);
+ bawrite(bp);
BO_LOCK(bo);
- if (nbp != NULL &&
- (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
- (nbp->b_vp != vp) ||
- (nbp->b_flags & B_DELWRI))) {
- BO_UNLOCK(bo);
- goto restart;
- }
+ goto restartsync;
}
+ }
- TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
- if (bp->b_lblkno < trunclbn)
+ bufobj_wwait(bo, 0, 0);
+ BO_UNLOCK(bo);
+ vnode_pager_setsize(vp, length);
+
+ return (0);
+}
+
+/*
+ * Invalidate the cached pages of a file's buffer within the range of block
+ * numbers [startlbn, endlbn).
+ */
+void
+v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn,
+ int blksize)
+{
+ struct bufobj *bo;
+ off_t start, end;
+
+ ASSERT_VOP_LOCKED(vp, "v_inval_buf_range");
+
+ start = blksize * startlbn;
+ end = blksize * endlbn;
+
+ bo = &vp->v_bufobj;
+ BO_LOCK(bo);
+ MPASS(blksize == bo->bo_bsize);
+
+ while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN)
+ ;
+
+ BO_UNLOCK(bo);
+ vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1));
+}
+
+static int
+v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
+ daddr_t startlbn, daddr_t endlbn)
+{
+ struct buf *bp, *nbp;
+ bool anyfreed;
+
+ ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked");
+ ASSERT_BO_LOCKED(bo);
+
+ do {
+ anyfreed = false;
+ TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) {
+ if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn)
continue;
if (BUF_LOCK(bp,
LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
- BO_LOCKPTR(bo)) == ENOLCK)
- goto restart;
+ BO_LOCKPTR(bo)) == ENOLCK) {
+ BO_LOCK(bo);
+ return (EAGAIN);
+ }
+
bremfree(bp);
- bp->b_flags |= (B_INVAL | B_RELBUF);
+ bp->b_flags |= B_INVAL | B_RELBUF;
bp->b_flags &= ~B_ASYNC;
brelse(bp);
- anyfreed = 1;
+ anyfreed = true;
BO_LOCK(bo);
if (nbp != NULL &&
- (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
- (nbp->b_vp != vp) ||
- (nbp->b_flags & B_DELWRI) == 0)) {
- BO_UNLOCK(bo);
- goto restart;
- }
+ (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
+ nbp->b_vp != vp ||
+ (nbp->b_flags & B_DELWRI) != 0))
+ return (EAGAIN);
}
- }
- if (length > 0) {
-restartsync:
TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
- if (bp->b_lblkno > 0)
+ if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn)
continue;
- /*
- * Since we hold the vnode lock this should only
- * fail if we're racing with the buf daemon.
- */
if (BUF_LOCK(bp,
LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
BO_LOCKPTR(bo)) == ENOLCK) {
- goto restart;
+ BO_LOCK(bo);
+ return (EAGAIN);
}
- VNASSERT((bp->b_flags & B_DELWRI), vp,
- ("buf(%p) on dirty queue without DELWRI", bp));
-
bremfree(bp);
- bawrite(bp);
+ bp->b_flags |= B_INVAL | B_RELBUF;
+ bp->b_flags &= ~B_ASYNC;
+ brelse(bp);
+ anyfreed = true;
+
BO_LOCK(bo);
- goto restartsync;
+ if (nbp != NULL &&
+ (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
+ (nbp->b_vp != vp) ||
+ (nbp->b_flags & B_DELWRI) == 0))
+ return (EAGAIN);
}
- }
-
- bufobj_wwait(bo, 0, 0);
- BO_UNLOCK(bo);
- vnode_pager_setsize(vp, length);
-
+ } while (anyfreed);
return (0);
}
Modified: stable/12/sys/sys/vnode.h
==============================================================================
--- stable/12/sys/sys/vnode.h Fri Sep 6 19:22:24 2019 (r351956)
+++ stable/12/sys/sys/vnode.h Fri Sep 6 19:22:33 2019 (r351957)
@@ -659,6 +659,8 @@ void _vhold(struct vnode *, bool);
void vinactive(struct vnode *, struct thread *);
int vinvalbuf(struct vnode *vp, int save, int slpflag, int slptimeo);
int vtruncbuf(struct vnode *vp, off_t length, int blksize);
+void v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn,
+ int blksize);
void vunref(struct vnode *);
void vn_printf(struct vnode *vp, const char *fmt, ...) __printflike(2,3);
int vrecycle(struct vnode *vp);
More information about the svn-src-all
mailing list