diff --git a/fs/fuse/file.c b/fs/fuse/file.c index dd743aa8950787..a6892a6eac087a 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -20,6 +20,8 @@ #include #include +int sb_init_dio_done_wq(struct super_block *sb); + /* * Helper function to initialize fuse_args for OPEN/OPENDIR operations */ @@ -780,6 +782,19 @@ static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io) return io->bytes < 0 ? io->size : io->bytes; } +static void fuse_aio_invalidate_worker(struct work_struct *work) +{ + struct fuse_io_priv *io = container_of(work, struct fuse_io_priv, work); + struct address_space *mapping = io->iocb->ki_filp->f_mapping; + ssize_t res = fuse_get_res_by_io(io); + pgoff_t start = io->offset >> PAGE_SHIFT; + pgoff_t end = (io->offset + res - 1) >> PAGE_SHIFT; + + invalidate_inode_pages2_range(mapping, start, end); + io->iocb->ki_complete(io->iocb, res); + kref_put(&io->refcnt, fuse_io_release); +} + /* * In case of short read, the caller sets 'pos' to the position of * actual end of fuse request in IO request. Otherwise, if bytes_requested @@ -812,10 +827,11 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos) spin_unlock(&io->lock); if (!left && !io->blocking) { + struct inode *inode = file_inode(io->iocb->ki_filp); + struct address_space *mapping = io->iocb->ki_filp->f_mapping; ssize_t res = fuse_get_res_by_io(io); if (res >= 0) { - struct inode *inode = file_inode(io->iocb->ki_filp); struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_inode *fi = get_fuse_inode(inode); @@ -824,6 +840,17 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos) spin_unlock(&fi->lock); } + if (io->write && res > 0 && mapping->nrpages) { + /* + * As in generic_file_direct_write(), invalidate after the + * write, to invalidate read-ahead cache that may have competed + * with the write. 
+ */ + INIT_WORK(&io->work, fuse_aio_invalidate_worker); + queue_work(inode->i_sb->s_dio_done_wq, &io->work); + return; + } + io->iocb->ki_complete(io->iocb, res); } @@ -1766,15 +1793,6 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter, if (res > 0) *ppos = pos; - if (res > 0 && write && fopen_direct_io) { - /* - * As in generic_file_direct_write(), invalidate after the - * write, to invalidate read-ahead cache that may have competed - * with the write. - */ - invalidate_inode_pages2_range(mapping, idx_from, idx_to); - } - return res > 0 ? res : err; } EXPORT_SYMBOL_GPL(fuse_direct_io); @@ -1814,6 +1832,8 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct inode *inode = file_inode(iocb->ki_filp); struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb); + struct address_space *mapping = inode->i_mapping; + loff_t pos = iocb->ki_pos; ssize_t res; bool exclusive; @@ -1827,6 +1847,16 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from) FUSE_DIO_WRITE); fuse_write_update_attr(inode, iocb->ki_pos, res); } + if (res > 0 && mapping->nrpages) { + /* + * As in generic_file_direct_write(), invalidate after the + * write, to invalidate read-ahead cache that may have competed + * with the write. 
+ */ + invalidate_inode_pages2_range(mapping, + pos >> PAGE_SHIFT, + (pos + res - 1) >> PAGE_SHIFT); + } } fuse_dio_unlock(iocb, exclusive); @@ -3195,6 +3225,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter) size_t count = iov_iter_count(iter), shortened = 0; loff_t offset = iocb->ki_pos; struct fuse_io_priv *io; + bool async = ff->fm->fc->async_dio; pos = offset; inode = file->f_mapping->host; @@ -3203,6 +3234,12 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter) if ((iov_iter_rw(iter) == READ) && (offset >= i_size)) return 0; + if ((iov_iter_rw(iter) == WRITE) && async && !inode->i_sb->s_dio_done_wq) { + ret = sb_init_dio_done_wq(inode->i_sb); + if (ret < 0) + return ret; + } + io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL); if (!io) return -ENOMEM; @@ -3218,7 +3255,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter) * By default, we want to optimize all I/Os with async request * submission to the client filesystem if supported. */ - io->async = ff->fm->fc->async_dio; + io->async = async; io->iocb = iocb; io->blocking = is_sync_kiocb(iocb); diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index ee08e01a261008..c8af2daf6507f4 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -334,6 +334,7 @@ struct fuse_args_pages { /** The request IO state (for asynchronous processing) */ struct fuse_io_priv { struct kref refcnt; + struct work_struct work; int async; spinlock_t lock; unsigned reqs;