PERFORCE change 194691 for review
Ilya Putsikau <ilya@FreeBSD.org>
Mon Jun 13 15:42:31 UTC 2011
http://p4web.freebsd.org/@@194691?ac=10
Change 194691 by ilya@ilya_triton2011 on 2011/06/13 15:41:32
Format fuse_io like other files
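
The diff below is whitespace-only: the function bodies are re-indented to match the layout used in the other fuse_module sources, presumably along the lines of FreeBSD style(9) (an assumption; the exact indentation is not visible in the archived diff). A minimal, hypothetical sketch of that layout, not taken from the change itself:

	static int
	example_check(int rdonly)
	{
		int err = 0;		/* declarations first, tab-indented */

		if (rdonly)		/* one tab per nesting level */
			err = 1;
		return (err);		/* return values parenthesized */
	}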
Affected files ...
.. //depot/projects/soc2011/ilya_fuse/fuse_module/fuse_io.c#8 edit
.. //depot/projects/soc2011/ilya_fuse/fuse_module/fuse_io.h#6 edit
Differences ...
==== //depot/projects/soc2011/ilya_fuse/fuse_module/fuse_io.c#8 (text+ko) ====
@@ -53,46 +53,46 @@
fuse_io_filehandle_get(struct vnode *vp, int rdonly,
struct ucred *cred, struct fuse_filehandle **fufhp)
{
- struct fuse_vnode_data *fvdat = VTOFUD(vp);
- struct fuse_filehandle *fufh;
- fufh_type_t fufh_type;
- int err = 0;
+ struct fuse_vnode_data *fvdat = VTOFUD(vp);
+ struct fuse_filehandle *fufh;
+ fufh_type_t fufh_type;
+ int err = 0;
- if (rdonly) {
- fufh_type = FUFH_RDONLY; // FUFH_RDWR will also do
- } else {
- fufh_type = FUFH_WRONLY; // FUFH_RDWR will also do
- }
+ if (rdonly) {
+ fufh_type = FUFH_RDONLY; // FUFH_RDWR will also do
+ } else {
+ fufh_type = FUFH_WRONLY; // FUFH_RDWR will also do
+ }
- fufh = &(fvdat->fufh[fufh_type]);
- if (!(fufh->fufh_flags & FUFH_VALID)) {
- fufh_type = FUFH_RDWR;
- fufh = &(fvdat->fufh[fufh_type]);
- if (!(fufh->fufh_flags & FUFH_VALID)) {
- fufh = NULL;
- } else {
- debug_printf("strategy falling back to FUFH_RDWR ... OK\n");
- }
- }
+ fufh = &(fvdat->fufh[fufh_type]);
+ if (!(fufh->fufh_flags & FUFH_VALID)) {
+ fufh_type = FUFH_RDWR;
+ fufh = &(fvdat->fufh[fufh_type]);
+ if (!(fufh->fufh_flags & FUFH_VALID)) {
+ fufh = NULL;
+ } else {
+ debug_printf("strategy falling back to FUFH_RDWR ... OK\n");
+ }
+ }
- if (fufh == NULL) {
- if (rdonly) {
- fufh_type = FUFH_RDONLY;
- } else {
- fufh_type = FUFH_RDWR;
- }
- err = fuse_filehandle_get(vp, NULL, cred, fufh_type);
- if (!err) {
- fufh = &(fvdat->fufh[fufh_type]);
- debug_printf("STRATEGY: created *new* fufh of type %d\n",
- fufh_type);
- }
- } else {
- debug_printf("STRATEGY: using existing fufh of type %d\n", fufh_type);
- }
+ if (fufh == NULL) {
+ if (rdonly) {
+ fufh_type = FUFH_RDONLY;
+ } else {
+ fufh_type = FUFH_RDWR;
+ }
+ err = fuse_filehandle_get(vp, NULL, cred, fufh_type);
+ if (!err) {
+ fufh = &(fvdat->fufh[fufh_type]);
+ debug_printf("STRATEGY: created *new* fufh of type %d\n",
+ fufh_type);
+ }
+ } else {
+ debug_printf("STRATEGY: using existing fufh of type %d\n", fufh_type);
+ }
- *fufhp = fufh;
- return (err);
+ *fufhp = fufh;
+ return (err);
}
/****************
@@ -106,421 +106,421 @@
fuse_io_dispatch(struct vnode *vp, struct uio *uio, int flag,
struct ucred *cred)
{
- struct fuse_filehandle *fufh;
- struct fuse_io_data fioda;
- int err, directio;
+ struct fuse_filehandle *fufh;
+ struct fuse_io_data fioda;
+ int err, directio;
- err = fuse_io_filehandle_get(vp, (uio->uio_rw == UIO_READ),
- cred, &fufh);
- if (!err)
- return (err);
+ err = fuse_io_filehandle_get(vp, (uio->uio_rw == UIO_READ),
+ cred, &fufh);
+ if (!err)
+ return (err);
- bzero(&fioda, sizeof(fioda));
- fioda.vp = vp;
- fioda.fufh = fufh;
- fioda.uio = uio;
- fioda.cred = cred;
- fioda.td = curthread;
+ bzero(&fioda, sizeof(fioda));
+ fioda.vp = vp;
+ fioda.fufh = fufh;
+ fioda.uio = uio;
+ fioda.cred = cred;
+ fioda.td = curthread;
- /*
- * Ideally, when the daemon asks for direct io at open time, the
- * standard file flag should be set according to this, so that would
- * just change the default mode, which later on could be changed via
- * fcntl(2).
- * But this doesn't work, the O_DIRECT flag gets cleared at some point
- * (don't know where). So to make any use of the Fuse direct_io option,
- * we hardwire it into the file's private data (similarly to Linux,
- * btw.).
- */
- directio = (flag & O_DIRECT);
+ /*
+ * Ideally, when the daemon asks for direct io at open time, the
+ * standard file flag should be set according to this, so that would
+ * just change the default mode, which later on could be changed via
+ * fcntl(2).
+ * But this doesn't work, the O_DIRECT flag gets cleared at some point
+ * (don't know where). So to make any use of the Fuse direct_io option,
+ * we hardwire it into the file's private data (similarly to Linux,
+ * btw.).
+ */
+ directio = (flag & O_DIRECT);
- switch (uio->uio_rw) {
- case UIO_READ:
- fioda.opcode = FUSE_READ;
- fioda.buffeater = fuse_std_buffeater;
+ switch (uio->uio_rw) {
+ case UIO_READ:
+ fioda.opcode = FUSE_READ;
+ fioda.buffeater = fuse_std_buffeater;
- if (directio) {
- DEBUG2G("direct read of vnode %llu via file handle %llu\n",
- VTOILLU(vp), (unsigned long long)fufh->fh_id);
- err = fuse_read_directbackend(&fioda);
- } else {
- DEBUG2G("buffered read of vnode %llu\n", VTOILLU(vp));
- err = fuse_read_biobackend(&fioda);
- }
- break;
- case UIO_WRITE:
- if (directio) {
- DEBUG2G("direct write of vnode %llu via file handle %llu\n",
- VTOILLU(vp), (unsigned long long)fufh->fh_id);
- err = fuse_write_directbackend(&fioda);
- } else {
- DEBUG2G("buffered write of vnode %llu\n", VTOILLU(vp));
- err = fuse_write_biobackend(&fioda);
- }
- break;
- default:
- panic("uninterpreted mode passed to fuse_io_dispatch");
- }
+ if (directio) {
+ DEBUG2G("direct read of vnode %llu via file handle %llu\n",
+ VTOILLU(vp), (unsigned long long)fufh->fh_id);
+ err = fuse_read_directbackend(&fioda);
+ } else {
+ DEBUG2G("buffered read of vnode %llu\n", VTOILLU(vp));
+ err = fuse_read_biobackend(&fioda);
+ }
+ break;
+ case UIO_WRITE:
+ if (directio) {
+ DEBUG2G("direct write of vnode %llu via file handle %llu\n",
+ VTOILLU(vp), (unsigned long long)fufh->fh_id);
+ err = fuse_write_directbackend(&fioda);
+ } else {
+ DEBUG2G("buffered write of vnode %llu\n", VTOILLU(vp));
+ err = fuse_write_biobackend(&fioda);
+ }
+ break;
+ default:
+ panic("uninterpreted mode passed to fuse_io_dispatch");
+ }
- fuse_invalidate_attr(vp);
+ fuse_invalidate_attr(vp);
- return (err);
+ return (err);
}
/* dispatch routine for vnode based I/O */
int
fuse_io_vnode(struct vnode *vp, struct uio *uio,
- int ioflag, struct ucred *cred)
+ int ioflag, struct ucred *cred)
{
- int fflag = (uio->uio_rw == UIO_READ) ? FREAD : FWRITE;
- int err;
+ int fflag = (uio->uio_rw == UIO_READ) ? FREAD : FWRITE;
+ int err;
- if (ioflag & IO_DIRECT)
- fflag |= O_DIRECT;
- if (ioflag & IO_NDELAY)
- fflag |= FNONBLOCK;
- if (ioflag & IO_APPEND)
- fflag |= O_APPEND;
- if (ioflag & IO_ASYNC)
- fflag |= O_ASYNC;
- if (ioflag & IO_SYNC)
- fflag |= O_SYNC;
+ if (ioflag & IO_DIRECT)
+ fflag |= O_DIRECT;
+ if (ioflag & IO_NDELAY)
+ fflag |= FNONBLOCK;
+ if (ioflag & IO_APPEND)
+ fflag |= O_APPEND;
+ if (ioflag & IO_ASYNC)
+ fflag |= O_ASYNC;
+ if (ioflag & IO_SYNC)
+ fflag |= O_SYNC;
- err = fuse_io_dispatch(vp, uio, fflag, cred);
+ err = fuse_io_dispatch(vp, uio, fflag, cred);
- DEBUG("return with %d\n", err);
- return (err);
+ DEBUG("return with %d\n", err);
+ return (err);
}
int
fuse_read_biobackend(struct fuse_io_data *fioda)
{
- struct vnode *vp = fioda->vp;
- struct fuse_filehandle *fufh = fioda->fufh;
- struct uio *uio = fioda->uio;
- enum fuse_opcode op = fioda->opcode;
- fuse_buffeater_t *buffe = fioda->buffeater;
- void *param = fioda->param;
+ struct vnode *vp = fioda->vp;
+ struct fuse_filehandle *fufh = fioda->fufh;
+ struct uio *uio = fioda->uio;
+ enum fuse_opcode op = fioda->opcode;
+ fuse_buffeater_t *buffe = fioda->buffeater;
+ void *param = fioda->param;
- int biosize;
- struct buf *bp;
- daddr_t lbn;
- int bcount;
- int bbcount;
- int err = 0, n = 0, on = 0;
+ int biosize;
+ struct buf *bp;
+ daddr_t lbn;
+ int bcount;
+ int bbcount;
+ int err = 0, n = 0, on = 0;
- if (uio->uio_resid == 0)
- return (0);
+ if (uio->uio_resid == 0)
+ return (0);
- biosize = vp->v_mount->mnt_stat.f_iosize;
- bcount = MIN(MAXBSIZE, biosize);
+ biosize = vp->v_mount->mnt_stat.f_iosize;
+ bcount = MIN(MAXBSIZE, biosize);
- DEBUG2G("entering loop\n");
- do {
- lbn = uio->uio_offset / biosize;
- on = uio->uio_offset & (biosize - 1);
+ DEBUG2G("entering loop\n");
+ do {
+ lbn = uio->uio_offset / biosize;
+ on = uio->uio_offset & (biosize - 1);
- DEBUG2G("biosize %d, lbn %d, on %d\n", biosize, (int)lbn, on);
+ DEBUG2G("biosize %d, lbn %d, on %d\n", biosize, (int)lbn, on);
- /*
- * Obtain the buffer cache block. Figure out the buffer size
- * when we are at EOF. If we are modifying the size of the
- * buffer based on an EOF condition we need to hold
- * nfs_rslock() through obtaining the buffer to prevent
- * a potential writer-appender from messing with n_size.
- * Otherwise we may accidently truncate the buffer and
- * lose dirty data.
- *
- * Note that bcount is *not* DEV_BSIZE aligned.
- */
+ /*
+ * Obtain the buffer cache block. Figure out the buffer size
+ * when we are at EOF. If we are modifying the size of the
+ * buffer based on an EOF condition we need to hold
+ * nfs_rslock() through obtaining the buffer to prevent
+ * a potential writer-appender from messing with n_size.
+ * Otherwise we may accidently truncate the buffer and
+ * lose dirty data.
+ *
+ * Note that bcount is *not* DEV_BSIZE aligned.
+ */
- bp = getblk(vp, lbn, bcount, PCATCH, 0, 0);
+ bp = getblk(vp, lbn, bcount, PCATCH, 0, 0);
- if (!bp)
- return (EINTR);
+ if (!bp)
+ return (EINTR);
- /*
- * If B_CACHE is not set, we must issue the read. If this
- * fails, we return an error.
- */
+ /*
+ * If B_CACHE is not set, we must issue the read. If this
+ * fails, we return an error.
+ */
- if ((bp->b_flags & B_CACHE) == 0) {
- bp->b_iocmd = BIO_READ;
- vfs_busy_pages(bp, 0);
- err = fuse_io_strategy(vp, bp, fufh, op);
+ if ((bp->b_flags & B_CACHE) == 0) {
+ bp->b_iocmd = BIO_READ;
+ vfs_busy_pages(bp, 0);
+ err = fuse_io_strategy(vp, bp, fufh, op);
#if _DEBUG
- prettyprint(bp->b_data, 48);
- printf("\n");
- prettyprint(bp->b_data + PAGE_SIZE, 48);
- printf("\n");
+ prettyprint(bp->b_data, 48);
+ printf("\n");
+ prettyprint(bp->b_data + PAGE_SIZE, 48);
+ printf("\n");
#endif
- if (err) {
- brelse(bp);
- return (err);
- }
- }
+ if (err) {
+ brelse(bp);
+ return (err);
+ }
+ }
- /*
- * on is the offset into the current bp. Figure out how many
- * bytes we can copy out of the bp. Note that bcount is
- * NOT DEV_BSIZE aligned.
- *
- * Then figure out how many bytes we can copy into the uio.
- */
+ /*
+ * on is the offset into the current bp. Figure out how many
+ * bytes we can copy out of the bp. Note that bcount is
+ * NOT DEV_BSIZE aligned.
+ *
+ * Then figure out how many bytes we can copy into the uio.
+ */
- n = 0;
- /*
- * If we zero pad the buf, bp->b_resid will be 0, so then
- * just ignore it
- */
- bbcount = bcount - bp->b_resid;
- if (on < bbcount)
- n = bbcount - on;
- if (n > 0) {
- DEBUG2G("feeding buffeater with %d bytes of buffer %p, saying %d was asked for\n",
- n, bp->b_data + on, n + (int)bp->b_resid);
+ n = 0;
+ /*
+ * If we zero pad the buf, bp->b_resid will be 0, so then
+ * just ignore it
+ */
+ bbcount = bcount - bp->b_resid;
+ if (on < bbcount)
+ n = bbcount - on;
+ if (n > 0) {
+ DEBUG2G("feeding buffeater with %d bytes of buffer %p, saying %d was asked for\n",
+ n, bp->b_data + on, n + (int)bp->b_resid);
#if 0 && _DEBUG
- prettyprint(bp->b_data + on, n);
- printf("\n");
+ prettyprint(bp->b_data + on, n);
+ printf("\n");
#endif
- err = buffe(uio, n + bp->b_resid, bp->b_data + on, n,
- param);
- }
- brelse(bp);
- DEBUG2G("end of turn, err %d, uio->uio_resid %d, n %d\n",
- err, uio->uio_resid, n);
- } while (err == 0 && uio->uio_resid > 0 && n > 0);
+ err = buffe(uio, n + bp->b_resid, bp->b_data + on, n,
+ param);
+ }
+ brelse(bp);
+ DEBUG2G("end of turn, err %d, uio->uio_resid %d, n %d\n",
+ err, uio->uio_resid, n);
+ } while (err == 0 && uio->uio_resid > 0 && n > 0);
- return ((err == -1) ? 0 : err);
+ return ((err == -1) ? 0 : err);
}
static int
fuse_read_directbackend(struct fuse_io_data *fioda)
{
- struct vnode *vp = fioda->vp;
- struct fuse_filehandle *fufh = fioda->fufh;
- struct uio *uio = fioda->uio;
- struct ucred *cred = fioda->cred;
- struct thread *td = fioda->td;
- enum fuse_opcode op = fioda->opcode;
- fuse_buffeater_t *buffe = fioda->buffeater;
- void *param = fioda->param;
+ struct vnode *vp = fioda->vp;
+ struct fuse_filehandle *fufh = fioda->fufh;
+ struct uio *uio = fioda->uio;
+ struct ucred *cred = fioda->cred;
+ struct thread *td = fioda->td;
+ enum fuse_opcode op = fioda->opcode;
+ fuse_buffeater_t *buffe = fioda->buffeater;
+ void *param = fioda->param;
+
+ struct fuse_dispatcher fdi;
+ struct fuse_read_in *fri;
+ int err = 0;
+
+ if (uio->uio_resid == 0)
+ return (0);
- struct fuse_dispatcher fdi;
- struct fuse_read_in *fri;
- int err = 0;
+ DEBUG("bug daemon for food\n");
- if (uio->uio_resid == 0)
- return (0);
+ fdisp_init(&fdi, 0);
- DEBUG("bug daemon for food\n");
+ /*
+ * XXX In "normal" case we use an intermediate kernel buffer for
+ * transmitting data from daemon's context to ours. Eventually, we should
+ * get rid of this. Anyway, if the target uio lives in sysspace (we are
+ * called from pageops), and the input data doesn't need kernel-side
+ * processing (we are not called from readdir) we can already invoke
+ * an optimized, "peer-to-peer" I/O routine.
+ */
+ if (buffe == fuse_std_buffeater && uio->uio_segflg == UIO_SYSSPACE) {
+ if ((err = fuse_io_p2p(fioda, &fdi)))
+ goto out;
+ else
+ goto done;
+ }
- fdisp_init(&fdi, 0);
+ while (uio->uio_resid > 0) {
+ fdi.iosize = sizeof(*fri);
+ fdisp_make_vp(&fdi, op, vp, td, cred);
+ fri = fdi.indata;
+ fri->fh = fufh->fh_id;
+ fri->offset = uio->uio_offset;
+ fri->size = MIN(uio->uio_resid,
+ fusefs_get_data(vp->v_mount)->max_read);
- /*
- * XXX In "normal" case we use an intermediate kernel buffer for
- * transmitting data from daemon's context to ours. Eventually, we should
- * get rid of this. Anyway, if the target uio lives in sysspace (we are
- * called from pageops), and the input data doesn't need kernel-side
- * processing (we are not called from readdir) we can already invoke
- * an optimized, "peer-to-peer" I/O routine.
- */
- if (buffe == fuse_std_buffeater && uio->uio_segflg == UIO_SYSSPACE) {
- if ((err = fuse_io_p2p(fioda, &fdi)))
- goto out;
- else
- goto done;
- }
+ DEBUG2G("fri->fh %llu, fri->offset %d, fri->size %d\n",
+ (unsigned long long)fri->fh, (int)fri->offset, fri->size);
+ if ((err = fdisp_wait_answ(&fdi)))
+ goto out;
- while (uio->uio_resid > 0) {
- fdi.iosize = sizeof(*fri);
- fdisp_make_vp(&fdi, op, vp, td, cred);
- fri = fdi.indata;
- fri->fh = fufh->fh_id;
- fri->offset = uio->uio_offset;
- fri->size = MIN(uio->uio_resid,
- fusefs_get_data(vp->v_mount)->max_read);
-
- DEBUG2G("fri->fh %llu, fri->offset %d, fri->size %d\n",
- (unsigned long long)fri->fh, (int)fri->offset, fri->size);
- if ((err = fdisp_wait_answ(&fdi)))
- goto out;
-
- DEBUG2G("%d bytes asked for from offset %d, passing on the %d we got\n",
- uio->uio_resid, (int)uio->uio_offset, (int)fdi.iosize);
+ DEBUG2G("%d bytes asked for from offset %d, passing on the %d we got\n",
+ uio->uio_resid, (int)uio->uio_offset, (int)fdi.iosize);
- if ((err = buffe(uio, fri->size, fdi.answ, fdi.iosize, param)))
- break;
- }
+ if ((err = buffe(uio, fri->size, fdi.answ, fdi.iosize, param)))
+ break;
+ }
done:
- fuse_ticket_drop(fdi.tick);
+ fuse_ticket_drop(fdi.tick);
out:
- return ((err == -1) ? 0 : err);
+ return ((err == -1) ? 0 : err);
}
/* direct I/O routine with no intermediate buffer */
static int
fuse_io_p2p(struct fuse_io_data *fioda, struct fuse_dispatcher *fdip)
{
- struct vnode *vp = fioda->vp;
- struct fuse_filehandle *fufh = fioda->fufh;
- struct uio *uio = fioda->uio;
- struct ucred *cred = fioda->cred;
- struct thread *td = fioda->td;
- enum fuse_opcode op;
+ struct vnode *vp = fioda->vp;
+ struct fuse_filehandle *fufh = fioda->fufh;
+ struct uio *uio = fioda->uio;
+ struct ucred *cred = fioda->cred;
+ struct thread *td = fioda->td;
+ enum fuse_opcode op;
- int err = 0;
- int chunksize = 0;
- struct iovec *iov;
- int nmax = (uio->uio_rw == UIO_READ) ?
- fusefs_get_data(vp->v_mount)->max_read :
- fusefs_get_data(vp->v_mount)->max_write;
+ int err = 0;
+ int chunksize = 0;
+ struct iovec *iov;
+ int nmax = (uio->uio_rw == UIO_READ) ?
+ fusefs_get_data(vp->v_mount)->max_read :
+ fusefs_get_data(vp->v_mount)->max_write;
- op = fioda->opcode ?:
- ((uio->uio_rw == UIO_READ) ? FUSE_READ : FUSE_WRITE);
+ op = fioda->opcode ?:
+ ((uio->uio_rw == UIO_READ) ? FUSE_READ : FUSE_WRITE);
- iov = uio->uio_iov;
- while (uio->uio_resid > 0) {
- int transfersize;
+ iov = uio->uio_iov;
+ while (uio->uio_resid > 0) {
+ int transfersize;
- chunksize = MIN(iov->iov_len, nmax);
+ chunksize = MIN(iov->iov_len, nmax);
- if (uio->uio_rw == UIO_READ) {
- struct fuse_read_in *fri;
+ if (uio->uio_rw == UIO_READ) {
+ struct fuse_read_in *fri;
- fdip->iosize = sizeof(struct fuse_read_in);
- fdisp_make_vp(fdip, op, vp, td, cred);
- fri = fdip->indata;
- fri->fh = fufh->fh_id;
- fri->offset = uio->uio_offset;
- fri->size = chunksize;
- fdip->tick->tk_aw_type = FT_A_BUF;
- fdip->tick->tk_aw_bufdata = iov->iov_base;
- } else {
- struct fuse_write_in *fwi;
+ fdip->iosize = sizeof(struct fuse_read_in);
+ fdisp_make_vp(fdip, op, vp, td, cred);
+ fri = fdip->indata;
+ fri->fh = fufh->fh_id;
+ fri->offset = uio->uio_offset;
+ fri->size = chunksize;
+ fdip->tick->tk_aw_type = FT_A_BUF;
+ fdip->tick->tk_aw_bufdata = iov->iov_base;
+ } else {
+ struct fuse_write_in *fwi;
- fdip->iosize = sizeof(struct fuse_write_in) + chunksize;
- fdisp_make_vp(fdip, op, vp, td, cred);
- fwi = fdip->indata;
- fwi->fh = fufh->fh_id;
- fwi->offset = uio->uio_offset;
- fwi->size = chunksize;
- fdip->tick->tk_ms_type = FT_M_BUF;
- fdip->tick->tk_ms_bufdata = iov->iov_base;
- fdip->tick->tk_ms_bufsize = chunksize;
- }
+ fdip->iosize = sizeof(struct fuse_write_in) + chunksize;
+ fdisp_make_vp(fdip, op, vp, td, cred);
+ fwi = fdip->indata;
+ fwi->fh = fufh->fh_id;
+ fwi->offset = uio->uio_offset;
+ fwi->size = chunksize;
+ fdip->tick->tk_ms_type = FT_M_BUF;
+ fdip->tick->tk_ms_bufdata = iov->iov_base;
+ fdip->tick->tk_ms_bufsize = chunksize;
+ }
- DEBUG2G("chunksize %d\n", chunksize);
- fdip->finh->len = (sizeof *fdip->finh) + chunksize;
- err = fdisp_wait_answ(fdip);
+ DEBUG2G("chunksize %d\n", chunksize);
+ fdip->finh->len = (sizeof *fdip->finh) + chunksize;
+ err = fdisp_wait_answ(fdip);
- if (err)
- return (err);
+ if (err)
+ return (err);
- transfersize = (uio->uio_rw == UIO_READ) ?
- fdip->tick->tk_aw_ohead.len - sizeof(struct fuse_out_header) :
- ((struct fuse_write_out *)fdip->answ)->size;
+ transfersize = (uio->uio_rw == UIO_READ) ?
+ fdip->tick->tk_aw_ohead.len - sizeof(struct fuse_out_header) :
+ ((struct fuse_write_out *)fdip->answ)->size;
- uio->uio_resid -= transfersize;
- uio->uio_offset += transfersize;
- iov->iov_len -= transfersize;
- iov->iov_base = (char *)iov->iov_base + transfersize;
+ uio->uio_resid -= transfersize;
+ uio->uio_offset += transfersize;
+ iov->iov_len -= transfersize;
+ iov->iov_base = (char *)iov->iov_base + transfersize;
- if (iov->iov_len == 0) {
- iov++;
- uio->uio_iovcnt--;
- }
- DEBUG2G("resid %d, offset %llu, iovcnt %d, iov_len %d, "
- "transfersize %d\n",
- uio->uio_resid, (long long unsigned)uio->uio_offset,
- uio->uio_iovcnt, iov->iov_len, transfersize);
+ if (iov->iov_len == 0) {
+ iov++;
+ uio->uio_iovcnt--;
+ }
+ DEBUG2G("resid %d, offset %llu, iovcnt %d, iov_len %d, "
+ "transfersize %d\n",
+ uio->uio_resid, (long long unsigned)uio->uio_offset,
+ uio->uio_iovcnt, iov->iov_len, transfersize);
- if (transfersize < chunksize)
- break;
- }
+ if (transfersize < chunksize)
+ break;
+ }
- return (0);
+ return (0);
}
/* Simple standard way for transmitting input */
static int
fuse_std_buffeater(struct uio *uio, size_t reqsize, void *buf, size_t bufsize, void *param)
{
- int err;
+ int err;
- if ((err = uiomove(buf, MIN(reqsize, bufsize), uio)))
- return (err);
+ if ((err = uiomove(buf, MIN(reqsize, bufsize), uio)))
+ return (err);
- if (bufsize < reqsize)
- return (-1);
+ if (bufsize < reqsize)
+ return (-1);
- return (0);
+ return (0);
}
static int
fuse_write_directbackend(struct fuse_io_data *fioda)
{
- struct vnode *vp = fioda->vp;
- uint64_t fh_id = fioda->fufh->fh_id;
- struct uio *uio = fioda->uio;
- struct ucred *cred = fioda->cred;
- struct thread *td = fioda->td;
+ struct vnode *vp = fioda->vp;
+ uint64_t fh_id = fioda->fufh->fh_id;
+ struct uio *uio = fioda->uio;
+ struct ucred *cred = fioda->cred;
+ struct thread *td = fioda->td;
+
+ size_t chunksize;
+ int diff;
+ struct fuse_write_in *fwi;
+ struct fuse_dispatcher fdi;
+ int err = 0;
+
+ if (! uio->uio_resid)
+ return (0);
- size_t chunksize;
- int diff;
- struct fuse_write_in *fwi;
- struct fuse_dispatcher fdi;
- int err = 0;
+ fdisp_init(&fdi, 0);
- if (! uio->uio_resid)
- return (0);
+ if (uio->uio_segflg == UIO_SYSSPACE) {
+ if ((err = fuse_io_p2p(fioda, &fdi)))
+ return (err);
+ else
+ goto out;
+ }
- fdisp_init(&fdi, 0);
+ while (uio->uio_resid > 0) {
+ chunksize = MIN(uio->uio_resid,
+ fusefs_get_data(vp->v_mount)->max_write);
- if (uio->uio_segflg == UIO_SYSSPACE) {
- if ((err = fuse_io_p2p(fioda, &fdi)))
- return (err);
- else
- goto out;
- }
+ fdi.iosize = sizeof(*fwi) + chunksize;
+ fdisp_make_vp(&fdi, FUSE_WRITE, vp, td, cred);
- while (uio->uio_resid > 0) {
- chunksize = MIN(uio->uio_resid,
- fusefs_get_data(vp->v_mount)->max_write);
+ fwi = fdi.indata;
+ fwi->fh = fh_id;
+ fwi->offset = uio->uio_offset;
+ fwi->size = chunksize;
- fdi.iosize = sizeof(*fwi) + chunksize;
- fdisp_make_vp(&fdi, FUSE_WRITE, vp, td, cred);
-
- fwi = fdi.indata;
- fwi->fh = fh_id;
- fwi->offset = uio->uio_offset;
- fwi->size = chunksize;
+ if ((err = uiomove((char *)fdi.indata + sizeof(*fwi),
+ chunksize, uio)))
+ break;
- if ((err = uiomove((char *)fdi.indata + sizeof(*fwi),
- chunksize, uio)))
- break;
-
- if ((err = fdisp_wait_answ(&fdi)))
- return(err);
+ if ((err = fdisp_wait_answ(&fdi)))
+ return(err);
- diff = chunksize - ((struct fuse_write_out *)fdi.answ)->size;
- if (diff < 0) {
- err = EINVAL;
- break;
- }
+ diff = chunksize - ((struct fuse_write_out *)fdi.answ)->size;
+ if (diff < 0) {
+ err = EINVAL;
+ break;
+ }
- uio->uio_resid += diff;
- uio->uio_offset -= diff;
- }
+ uio->uio_resid += diff;
+ uio->uio_offset -= diff;
+ }
out:
- fuse_ticket_drop(fdi.tick);
+ fuse_ticket_drop(fdi.tick);
- return (err);
+ return (err);
}
/*
@@ -529,416 +529,416 @@
static int
fuse_write_biobackend(struct fuse_io_data *fioda)
{
- struct vnode *vp = fioda->vp;
- struct uio *uio = fioda->uio;
- struct ucred *cred = fioda->cred;
+ struct vnode *vp = fioda->vp;
+ struct uio *uio = fioda->uio;
+ struct ucred *cred = fioda->cred;
+
+ int biosize;
- int biosize;
+ struct buf *bp;
+ daddr_t lbn;
+ int bcount;
+ int n, on, err = 0;
+ vm_ooffset_t fsize = vp->v_object->un_pager.vnp.vnp_size;
- struct buf *bp;
- daddr_t lbn;
- int bcount;
- int n, on, err = 0;
- vm_ooffset_t fsize = vp->v_object->un_pager.vnp.vnp_size;
+ DEBUG2G("fsize %lld\n", (long long int)fsize);
- DEBUG2G("fsize %lld\n", (long long int)fsize);
+ biosize = vp->v_mount->mnt_stat.f_iosize;
- biosize = vp->v_mount->mnt_stat.f_iosize;
+ /*
+ * Find all of this file's B_NEEDCOMMIT buffers. If our writes
+ * would exceed the local maximum per-file write commit size when
+ * combined with those, we must decide whether to flush,
+ * go synchronous, or return err. We don't bother checking
+ * IO_UNIT -- we just make all writes atomic anyway, as there's
+ * no point optimizing for something that really won't ever happen.
+ */
+ do {
+ lbn = uio->uio_offset / biosize;
+ on = uio->uio_offset & (biosize-1);
+ n = MIN((unsigned)(biosize - on), uio->uio_resid);
- /*
- * Find all of this file's B_NEEDCOMMIT buffers. If our writes
- * would exceed the local maximum per-file write commit size when
- * combined with those, we must decide whether to flush,
- * go synchronous, or return err. We don't bother checking
- * IO_UNIT -- we just make all writes atomic anyway, as there's
- * no point optimizing for something that really won't ever happen.
- */
- do {
- lbn = uio->uio_offset / biosize;
- on = uio->uio_offset & (biosize-1);
- n = MIN((unsigned)(biosize - on), uio->uio_resid);
-
- DEBUG2G("lbn %d, on %d, n %d, uio offset %d, uio resid %d\n",
- (int)lbn, on, n, (int)uio->uio_offset, uio->uio_resid);
+ DEBUG2G("lbn %d, on %d, n %d, uio offset %d, uio resid %d\n",
+ (int)lbn, on, n, (int)uio->uio_offset, uio->uio_resid);
again:
- /*
- * Handle direct append and file extension cases, calculate
- * unaligned buffer size.
- */
+ /*
+ * Handle direct append and file extension cases, calculate
+ * unaligned buffer size.
+ */
- if (uio->uio_offset == fsize && n) {
- /*
- * Get the buffer (in its pre-append state to maintain
- * B_CACHE if it was previously set). Resize the
- * nfsnode after we have locked the buffer to prevent
- * readers from reading garbage.
- */
- bcount = on;
- DEBUG("getting block from OS, bcount %d\n", bcount);
- bp = getblk(vp, lbn, bcount, PCATCH, 0, 0);
+ if (uio->uio_offset == fsize && n) {
+ /*
+ * Get the buffer (in its pre-append state to maintain
+ * B_CACHE if it was previously set). Resize the
+ * nfsnode after we have locked the buffer to prevent
+ * readers from reading garbage.
+ */
+ bcount = on;
+ DEBUG("getting block from OS, bcount %d\n", bcount);
+ bp = getblk(vp, lbn, bcount, PCATCH, 0, 0);
- if (bp != NULL) {
- long save;
+ if (bp != NULL) {
+ long save;
- fsize = uio->uio_offset + n;
- vnode_pager_setsize(vp, fsize);
+ fsize = uio->uio_offset + n;
+ vnode_pager_setsize(vp, fsize);
- save = bp->b_flags & B_CACHE;
- bcount += n;
- allocbuf(bp, bcount);
- bp->b_flags |= save;
- }
- } else {
- /*
- * Obtain the locked cache block first, and then
- * adjust the file's size as appropriate.
- */
- bcount = on + n;
- if ((off_t)lbn * biosize + bcount < fsize) {
- if ((off_t)(lbn + 1) * biosize < fsize)
- bcount = biosize;
- else
- bcount = fsize - (off_t)lbn * biosize;
- }
- DEBUG("getting block from OS, bcount %d\n", bcount);
- bp = getblk(vp, lbn, bcount, PCATCH, 0, 0);
- if (uio->uio_offset + n > fsize) {
- fsize = uio->uio_offset + n;
- vnode_pager_setsize(vp, fsize);
- }
- }
+ save = bp->b_flags & B_CACHE;
+ bcount += n;
+ allocbuf(bp, bcount);
+ bp->b_flags |= save;
+ }
+ } else {
+ /*
+ * Obtain the locked cache block first, and then
+ * adjust the file's size as appropriate.
+ */
+ bcount = on + n;
+ if ((off_t)lbn * biosize + bcount < fsize) {
+ if ((off_t)(lbn + 1) * biosize < fsize)
+ bcount = biosize;
+ else
+ bcount = fsize - (off_t)lbn * biosize;
+ }
+ DEBUG("getting block from OS, bcount %d\n", bcount);
+ bp = getblk(vp, lbn, bcount, PCATCH, 0, 0);
+ if (uio->uio_offset + n > fsize) {
+ fsize = uio->uio_offset + n;
+ vnode_pager_setsize(vp, fsize);
+ }
+ }
- if (!bp) {
- err = EINTR;
- break;
- }
+ if (!bp) {
+ err = EINTR;
+ break;
+ }
- /*
- * Issue a READ if B_CACHE is not set. In special-append
- * mode, B_CACHE is based on the buffer prior to the write
- * op and is typically set, avoiding the read. If a read
- * is required in special append mode, the server will
- * probably send us a short-read since we extended the file
- * on our end, resulting in b_resid == 0 and, thusly,
- * B_CACHE getting set.
>>> TRUNCATED FOR MAIL (1000 lines) <<<