/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/falloc.h>
#include <linux/uio.h>

static const struct file_operations fuse_direct_io_file_operations;

static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
			  int opcode, struct fuse_open_out *outargp)
{
	struct fuse_open_in inarg;
	FUSE_ARGS(args);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;
	args.in.h.opcode = opcode;
	args.in.h.nodeid = nodeid;
	args.in.numargs = 1;
	args.in.args[0].size = sizeof(inarg);
	args.in.args[0].value = &inarg;
	args.out.numargs = 1;
	args.out.args[0].size = sizeof(*outargp);
	args.out.args[0].value = outargp;

	return fuse_simple_request(fc, &args);
}

struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
{
	struct fuse_file *ff;

	ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
	if (unlikely(!ff))
		return NULL;

	ff->fc = fc;
	ff->reserved_req = fuse_request_alloc(0);
	if (unlikely(!ff->reserved_req)) {
		kfree(ff);
		return NULL;
	}

	INIT_LIST_HEAD(&ff->write_entry);
	atomic_set(&ff->count, 0);
	RB_CLEAR_NODE(&ff->polled_node);
	init_waitqueue_head(&ff->poll_wait);

	spin_lock(&fc->lock);
	ff->kh = ++fc->khctr;
	spin_unlock(&fc->lock);

	return ff;
}

void fuse_file_free(struct fuse_file *ff)
{
	fuse_request_free(ff->reserved_req);
	kfree(ff);
}

struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	atomic_inc(&ff->count);
	return ff;
}

static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
	iput(req->misc.release.inode);
}

static void fuse_file_put(struct fuse_file *ff, bool sync)
{
	if (atomic_dec_and_test(&ff->count)) {
		struct fuse_req *req = ff->reserved_req;

		if (ff->fc->no_open) {
			/*
			 * Drop the release request when client does not
			 * implement 'open'
			 */
			req->background = 0;
			iput(req->misc.release.inode);
			fuse_put_request(ff->fc, req);
		} else if (sync) {
			req->background = 0;
			fuse_request_send(ff->fc, req);
			iput(req->misc.release.inode);
			fuse_put_request(ff->fc, req);
		} else {
			req->end = fuse_release_end;
			req->background = 1;
			fuse_request_send_background(ff->fc, req);
		}
		kfree(ff);
	}
}
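
/*
 * Send FUSE_OPEN/FUSE_OPENDIR and set up the fuse_file.  If the server
 * once replied -ENOSYS to FUSE_OPEN, the connection is marked no_open:
 * later opens of regular files succeed locally with FOPEN_KEEP_CACHE as
 * the default, and the matching releases are dropped without a round
 * trip to userspace (see fuse_file_put()).
 */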
int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
		 bool isdir)
{
	struct fuse_file *ff;
	int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;

	ff = fuse_file_alloc(fc);
	if (!ff)
		return -ENOMEM;

	ff->fh = 0;
	ff->open_flags = FOPEN_KEEP_CACHE; /* Default for no-open */
	if (!fc->no_open || isdir) {
		struct fuse_open_out outarg;
		int err;

		err = fuse_send_open(fc, nodeid, file, opcode, &outarg);
		if (!err) {
			ff->fh = outarg.fh;
			ff->open_flags = outarg.open_flags;
		} else if (err != -ENOSYS || isdir) {
			fuse_file_free(ff);
			return err;
		} else {
			fc->no_open = 1;
		}
	}

	if (isdir)
		ff->open_flags &= ~FOPEN_DIRECT_IO;

	ff->nodeid = nodeid;
	file->private_data = fuse_file_get(ff);

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_do_open);

static void fuse_link_write_file(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_file *ff = file->private_data;
	/*
	 * file may be written through mmap, so chain it onto the
	 * inode's write_files list
	 */
	spin_lock(&fc->lock);
	if (list_empty(&ff->write_entry))
		list_add(&ff->write_entry, &fi->write_files);
	spin_unlock(&fc->lock);
}

void fuse_finish_open(struct inode *inode, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (ff->open_flags & FOPEN_DIRECT_IO)
		file->f_op = &fuse_direct_io_file_operations;
	if (!(ff->open_flags & FOPEN_KEEP_CACHE))
		invalidate_inode_pages2(inode->i_mapping);
	if (ff->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);
	if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
		struct fuse_inode *fi = get_fuse_inode(inode);

		spin_lock(&fc->lock);
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, 0);
		spin_unlock(&fc->lock);
		fuse_invalidate_attr(inode);
		if (fc->writeback_cache)
			file_update_time(file);
	}
	if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache)
		fuse_link_write_file(file);
}

int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;
	bool lock_inode = (file->f_flags & O_TRUNC) &&
			  fc->atomic_o_trunc &&
			  fc->writeback_cache;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	if (lock_inode)
		mutex_lock(&inode->i_mutex);

	err = fuse_do_open(fc, get_node_id(inode), file, isdir);

	if (!err)
		fuse_finish_open(inode, file);

	if (lock_inode)
		mutex_unlock(&inode->i_mutex);

	return err;
}

static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
{
	struct fuse_conn *fc = ff->fc;
	struct fuse_req *req = ff->reserved_req;
	struct fuse_release_in *inarg = &req->misc.release.in;

	spin_lock(&fc->lock);
	list_del(&ff->write_entry);
	if (!RB_EMPTY_NODE(&ff->polled_node))
		rb_erase(&ff->polled_node, &fc->polled_files);
	spin_unlock(&fc->lock);

	wake_up_interruptible_all(&ff->poll_wait);

	inarg->fh = ff->fh;
	inarg->flags = flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_release_in);
	req->in.args[0].value = inarg;
}

void fuse_release_common(struct file *file, int opcode)
{
	struct fuse_file *ff;
	struct fuse_req *req;

	ff = file->private_data;
	if (unlikely(!ff))
		return;

	req = ff->reserved_req;
	fuse_prepare_release(ff, file->f_flags, opcode);

	if (ff->flock) {
		struct fuse_release_in *inarg = &req->misc.release.in;
		inarg->release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
		inarg->lock_owner = fuse_lock_owner_id(ff->fc,
						       (fl_owner_t) file);
	}
	/* Hold inode until release is finished */
	req->misc.release.inode = igrab(file_inode(file));

	/*
	 * Normally this will send the RELEASE request, however if
	 * some asynchronous READ or WRITE requests are outstanding,
	 * the sending will be delayed.
	 *
	 * Make the release synchronous if this is a fuseblk mount,
	 * synchronous RELEASE is allowed (and desirable) in this case
	 * because the server can be trusted not to screw up.
	 */
	fuse_file_put(ff, ff->fc->destroy_req != NULL);
}

static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, false);
}

static int fuse_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	/* see fuse_vma_close() for !writeback_cache case */
	if (fc->writeback_cache)
		write_inode_now(inode, 1);

	fuse_release_common(file, FUSE_RELEASE);

	/* return value is ignored by VFS */
	return 0;
}

void fuse_sync_release(struct fuse_file *ff, int flags)
{
	WARN_ON(atomic_read(&ff->count) > 1);
	fuse_prepare_release(ff, flags, FUSE_RELEASE);
	ff->reserved_req->force = 1;
	ff->reserved_req->background = 0;
	fuse_request_send(ff->fc, ff->reserved_req);
	fuse_put_request(ff->fc, ff->reserved_req);
	kfree(ff);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);

/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
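/*
 * This is a plain 32-round XTEA encryption of the 64-bit pointer value
 * keyed with the per-connection scramble_key; 0x9E3779B9 is the usual
 * XTEA key schedule constant.  The goal is only a stable, hard-to-invert
 * mapping, not cryptographic strength in any formal sense.
 */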
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}

/*
 * Check if any page in a range is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
				    pgoff_t idx_to)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	bool found = false;

	spin_lock(&fc->lock);
	list_for_each_entry(req, &fi->writepages, writepages_entry) {
		pgoff_t curr_index;

		BUG_ON(req->inode != inode);
		curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
		if (idx_from < curr_index + req->num_pages &&
		    curr_index <= idx_to) {
			found = true;
			break;
		}
	}
	spin_unlock(&fc->lock);

	return found;
}

static inline bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	return fuse_range_is_writeback(inode, index, index);
}

/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
	return 0;
}

/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}
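
/*
 * Called by the VFS for every close() of an open file.  Clean the page
 * cache and wait for in-flight writepages first, so that FUSE_FLUSH
 * reaches the server only after all data has been pushed.  A server
 * answering -ENOSYS disables FUSE_FLUSH for the rest of the connection.
 */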
static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_flush_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if (fc->no_flush)
		return 0;

	err = write_inode_now(inode, 1);
	if (err)
		return err;

	mutex_lock(&inode->i_mutex);
	fuse_sync_writes(inode);
	mutex_unlock(&inode->i_mutex);

	req = fuse_get_req_nofail_nopages(fc, file);
	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fc, id);
	req->in.h.opcode = FUSE_FLUSH;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->force = 1;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		fc->no_flush = 1;
		err = 0;
	}
	return err;
}

int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
		      int datasync, int isdir)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	FUSE_ARGS(args);
	struct fuse_fsync_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	mutex_lock(&inode->i_mutex);

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (err)
		goto out;

	fuse_sync_writes(inode);
	err = sync_inode_metadata(inode, 1);
	if (err)
		goto out;

	if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
		goto out;

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? 1 : 0;
	args.in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
	args.in.h.nodeid = get_node_id(inode);
	args.in.numargs = 1;
	args.in.args[0].size = sizeof(inarg);
	args.in.args[0].value = &inarg;
	err = fuse_simple_request(fc, &args);
	if (err == -ENOSYS) {
		if (isdir)
			fc->no_fsyncdir = 1;
		else
			fc->no_fsync = 1;
		err = 0;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return err;
}

static int fuse_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	return fuse_fsync_common(file, start, end, datasync, 0);
}

void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
		    size_t count, int opcode)
{
	struct fuse_read_in *inarg = &req->misc.read.in;
	struct fuse_file *ff = file->private_data;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	inarg->flags = file->f_flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_read_in);
	req->in.args[0].value = inarg;
	req->out.argvar = 1;
	req->out.numargs = 1;
	req->out.args[0].size = count;
}

static void fuse_release_user_pages(struct fuse_req *req, int write)
{
	unsigned i;

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (write)
			set_page_dirty_lock(page);
		put_page(page);
	}
}

static void fuse_io_release(struct kref *kref)
{
	kfree(container_of(kref, struct fuse_io_priv, refcnt));
}
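
/*
 * Final result of a (possibly multi-request) async IO: a recorded
 * error wins; a short WRITE is reported as -EIO; otherwise the length
 * of the longest transferred prefix (io->bytes, or the full io->size
 * if no request came up short) is returned.
 */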
static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
{
	if (io->err)
		return io->err;

	if (io->bytes >= 0 && io->write)
		return -EIO;

	return io->bytes < 0 ? io->size : io->bytes;
}

/**
 * In case of short read, the caller sets 'pos' to the position of
 * actual end of fuse request in IO request. Otherwise, if bytes_requested
 * == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1.
 *
 * An example:
 * User requested DIO read of 64K. It was split into two 32K fuse requests,
 * both submitted asynchronously. The first of them was ACKed by userspace as
 * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
 * second request was ACKed as short, e.g. only 1K was read, resulting in
 * pos == 33K.
 *
 * Thus, when all fuse requests are completed, the minimal non-negative 'pos'
 * will be equal to the length of the longest contiguous fragment of
 * transferred data starting from the beginning of IO request.
 */
static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
{
	bool is_sync = is_sync_kiocb(io->iocb);
	int left;

	spin_lock(&io->lock);
	if (err)
		io->err = io->err ? : err;
	else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
		io->bytes = pos;

	left = --io->reqs;
	if (!left && is_sync)
		complete(io->done);
	spin_unlock(&io->lock);

	if (!left && !is_sync) {
		ssize_t res = fuse_get_res_by_io(io);

		if (res >= 0) {
			struct inode *inode = file_inode(io->iocb->ki_filp);
			struct fuse_conn *fc = get_fuse_conn(inode);
			struct fuse_inode *fi = get_fuse_inode(inode);

			spin_lock(&fc->lock);
			fi->attr_version = ++fc->attr_version;
			spin_unlock(&fc->lock);
		}

		io->iocb->ki_complete(io->iocb, res, 0);
	}

	kref_put(&io->refcnt, fuse_io_release);
}

static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_io_priv *io = req->io;
	ssize_t pos = -1;

	fuse_release_user_pages(req, !io->write);

	if (io->write) {
		if (req->misc.write.in.size != req->misc.write.out.size)
			pos = req->misc.write.in.offset - io->offset +
				req->misc.write.out.size;
	} else {
		if (req->misc.read.in.size != req->out.args[0].size)
			pos = req->misc.read.in.offset - io->offset +
				req->out.args[0].size;
	}

	fuse_aio_complete(io, req->out.h.error, pos);
}

static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req,
				  size_t num_bytes, struct fuse_io_priv *io)
{
	spin_lock(&io->lock);
	kref_get(&io->refcnt);
	io->size += num_bytes;
	io->reqs++;
	spin_unlock(&io->lock);

	req->io = io;
	req->end = fuse_aio_complete_req;

	__fuse_get_request(req);
	fuse_request_send_background(fc, req);

	return num_bytes;
}

static size_t fuse_send_read(struct fuse_req *req, struct fuse_io_priv *io,
			     loff_t pos, size_t count, fl_owner_t owner)
{
	struct file *file = io->file;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;

	fuse_read_fill(req, file, pos, count, FUSE_READ);
	if (owner != NULL) {
		struct fuse_read_in *inarg = &req->misc.read.in;

		inarg->read_flags |= FUSE_READ_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}

	if (io->async)
		return fuse_async_req_send(fc, req, count, io);

	fuse_request_send(fc, req);
	return req->out.args[0].size;
}

static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	if (attr_ver == fi->attr_version && size < inode->i_size &&
	    !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, size);
	}
	spin_unlock(&fc->lock);
}
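
/*
 * Handle a short READ reply, which means the server hit EOF.  Without
 * writeback caching the local i_size can simply be shrunk.  With
 * writeback caching, dirty pages beyond the server's EOF may exist
 * locally, so instead the unfilled tail of the request is zeroed.
 */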
static void fuse_short_read(struct fuse_req *req, struct inode *inode,
			    u64 attr_ver)
{
	size_t num_read = req->out.args[0].size;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (fc->writeback_cache) {
		/*
		 * A hole in a file. Some data after the hole is in the page
		 * cache, but has not reached the client fs yet. So, the
		 * hole is not present there.
		 */
		int i;
		int start_idx = num_read >> PAGE_CACHE_SHIFT;
		size_t off = num_read & (PAGE_CACHE_SIZE - 1);

		for (i = start_idx; i < req->num_pages; i++) {
			zero_user_segment(req->pages[i], off, PAGE_CACHE_SIZE);
			off = 0;
		}
	} else {
		loff_t pos = page_offset(req->pages[0]) + num_read;
		fuse_read_update_size(inode, pos, attr_ver);
	}
}

static int fuse_do_readpage(struct file *file, struct page *page)
{
	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
	struct inode *inode = page->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	size_t num_read;
	loff_t pos = page_offset(page);
	size_t count = PAGE_CACHE_SIZE;
	u64 attr_ver;
	int err;

	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc, 1);
	if (IS_ERR(req))
		return PTR_ERR(req);

	attr_ver = fuse_get_attr_version(fc);

	req->out.page_zeroing = 1;
	req->out.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	req->page_descs[0].length = count;
	num_read = fuse_send_read(req, &io, pos, count, NULL);
	err = req->out.h.error;

	if (!err) {
		/*
		 * Short read means EOF. If file size is larger, truncate it
		 */
		if (num_read < count)
			fuse_short_read(req, inode, attr_ver);

		SetPageUptodate(page);
	}

	fuse_put_request(fc, req);

	return err;
}

static int fuse_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	err = fuse_do_readpage(file, page);
	fuse_invalidate_atime(inode);
 out:
	unlock_page(page);
	return err;
}
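
/*
 * Completion of an asynchronous FUSE_READ covering multiple pages:
 * apply the short-read handling once for the whole request, then mark
 * each page uptodate (or failed) and unlock it.
 */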
static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;
	size_t count = req->misc.read.in.size;
	size_t num_read = req->out.args[0].size;
	struct address_space *mapping = NULL;

	for (i = 0; mapping == NULL && i < req->num_pages; i++)
		mapping = req->pages[i]->mapping;

	if (mapping) {
		struct inode *inode = mapping->host;

		/*
		 * Short read means EOF. If file size is larger, truncate it
		 */
		if (!req->out.h.error && num_read < count)
			fuse_short_read(req, inode, req->misc.read.attr_ver);

		fuse_invalidate_atime(inode);
	}

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (!req->out.h.error)
			SetPageUptodate(page);
		else
			SetPageError(page);
		unlock_page(page);
		page_cache_release(page);
	}
	if (req->ff)
		fuse_file_put(req->ff, false);
}

static void fuse_send_readpages(struct fuse_req *req, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	loff_t pos = page_offset(req->pages[0]);
	size_t count = req->num_pages << PAGE_CACHE_SHIFT;

	req->out.argpages = 1;
	req->out.page_zeroing = 1;
	req->out.page_replace = 1;
	fuse_read_fill(req, file, pos, count, FUSE_READ);
	req->misc.read.attr_ver = fuse_get_attr_version(fc);
	if (fc->async_read) {
		req->ff = fuse_file_get(ff);
		req->end = fuse_readpages_end;
		fuse_request_send_background(fc, req);
	} else {
		fuse_request_send(fc, req);
		fuse_readpages_end(fc, req);
		fuse_put_request(fc, req);
	}
}

struct fuse_fill_data {
	struct fuse_req *req;
	struct file *file;
	struct inode *inode;
	unsigned nr_pages;
};

static int fuse_readpages_fill(void *_data, struct page *page)
{
	struct fuse_fill_data *data = _data;
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_wait_on_page_writeback(inode, page->index);

	if (req->num_pages &&
	    (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
	     (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
	     req->pages[req->num_pages - 1]->index + 1 != page->index)) {
		int nr_alloc = min_t(unsigned, data->nr_pages,
				     FUSE_MAX_PAGES_PER_REQ);
		fuse_send_readpages(req, data->file);
		if (fc->async_read)
			req = fuse_get_req_for_background(fc, nr_alloc);
		else
			req = fuse_get_req(fc, nr_alloc);

		data->req = req;
		if (IS_ERR(req)) {
			unlock_page(page);
			return PTR_ERR(req);
		}
	}

	if (WARN_ON(req->num_pages >= req->max_pages)) {
		fuse_put_request(fc, req);
		return -EIO;
	}

	page_cache_get(page);
	req->pages[req->num_pages] = page;
	req->page_descs[req->num_pages].length = PAGE_SIZE;
	req->num_pages++;
	data->nr_pages--;
	return 0;
}
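
/*
 * ->readpages() entry point.  fuse_readpages_fill() packs contiguous
 * pages into a request until it is full, the next page is
 * discontiguous, or fc->max_read would be exceeded; each finished
 * batch is sent, in the background if the server supports async reads.
 */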
static int fuse_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_data data;
	int err;
	int nr_alloc = min_t(unsigned, nr_pages, FUSE_MAX_PAGES_PER_REQ);

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	data.file = file;
	data.inode = inode;
	if (fc->async_read)
		data.req = fuse_get_req_for_background(fc, nr_alloc);
	else
		data.req = fuse_get_req(fc, nr_alloc);
	data.nr_pages = nr_pages;
	err = PTR_ERR(data.req);
	if (IS_ERR(data.req))
		goto out;

	err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
	if (!err) {
		if (data.req->num_pages)
			fuse_send_readpages(data.req, file);
		else
			fuse_put_request(fc, data.req);
	}
out:
	return err;
}

static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * In auto invalidate mode, always update attributes on read.
	 * Otherwise, only update if we attempt to read past EOF (to ensure
	 * i_size is up to date).
	 */
	if (fc->auto_inval_data ||
	    (iocb->ki_pos + iov_iter_count(to) > i_size_read(inode))) {
		int err;
		err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
		if (err)
			return err;
	}

	return generic_file_read_iter(iocb, to);
}

static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
			    loff_t pos, size_t count)
{
	struct fuse_write_in *inarg = &req->misc.write.in;
	struct fuse_write_out *outarg = &req->misc.write.out;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	req->in.h.opcode = FUSE_WRITE;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 2;
	if (ff->fc->minor < 9)
		req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		req->in.args[0].size = sizeof(struct fuse_write_in);
	req->in.args[0].value = inarg;
	req->in.args[1].size = count;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(struct fuse_write_out);
	req->out.args[0].value = outarg;
}

static size_t fuse_send_write(struct fuse_req *req, struct fuse_io_priv *io,
			      loff_t pos, size_t count, fl_owner_t owner)
{
	struct file *file = io->file;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_write_in *inarg = &req->misc.write.in;

	fuse_write_fill(req, ff, pos, count);
	inarg->flags = file->f_flags;
	if (owner != NULL) {
		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}

	if (io->async)
		return fuse_async_req_send(fc, req, count, io);

	fuse_request_send(fc, req);
	return req->misc.write.out.size;
}

bool fuse_write_update_size(struct inode *inode, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool ret = false;

	spin_lock(&fc->lock);
	fi->attr_version = ++fc->attr_version;
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		ret = true;
	}
	spin_unlock(&fc->lock);

	return ret;
}

static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
				    struct inode *inode, loff_t pos,
				    size_t count)
{
	size_t res;
	unsigned offset;
	unsigned i;
	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);

	for (i = 0; i < req->num_pages; i++)
		fuse_wait_on_page_writeback(inode, req->pages[i]->index);

	res = fuse_send_write(req, &io, pos, count, NULL);

	offset = req->page_descs[0].offset;
	count = res;
	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];

		if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
			SetPageUptodate(page);

		if (count > PAGE_CACHE_SIZE - offset)
			count -= PAGE_CACHE_SIZE - offset;
		else
			count = 0;
		offset = 0;

		unlock_page(page);
		page_cache_release(page);
	}

	return res;
}
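
/*
 * Fill a WRITE request with data copied from the iterator into freshly
 * grabbed page cache pages.  Stops at fc->max_write, at the request's
 * page limit, on a fault while copying from userspace, or after one
 * page if the server doesn't support big writes.
 */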
static ssize_t fuse_fill_write_pages(struct fuse_req *req,
				     struct address_space *mapping,
				     struct iov_iter *ii, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	size_t count = 0;
	int err;

	req->in.argpages = 1;
	req->page_descs[0].offset = offset;

	do {
		size_t tmp;
		struct page *page;
		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

 again:
		err = -EFAULT;
		if (iov_iter_fault_in_readable(ii, bytes))
			break;

		err = -ENOMEM;
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
		flush_dcache_page(page);

		iov_iter_advance(ii, tmp);
		if (!tmp) {
			unlock_page(page);
			page_cache_release(page);
			bytes = min(bytes, iov_iter_single_seg_count(ii));
			goto again;
		}

		err = 0;
		req->pages[req->num_pages] = page;
		req->page_descs[req->num_pages].length = tmp;
		req->num_pages++;

		count += tmp;
		pos += tmp;
		offset += tmp;
		if (offset == PAGE_CACHE_SIZE)
			offset = 0;

		if (!fc->big_writes)
			break;
	} while (iov_iter_count(ii) && count < fc->max_write &&
		 req->num_pages < req->max_pages && offset == 0);

	return count > 0 ? count : err;
}

static inline unsigned fuse_wr_pages(loff_t pos, size_t len)
{
	return min_t(unsigned,
		     ((pos + len - 1) >> PAGE_CACHE_SHIFT) -
		     (pos >> PAGE_CACHE_SHIFT) + 1,
		     FUSE_MAX_PAGES_PER_REQ);
}

static ssize_t fuse_perform_write(struct file *file,
				  struct address_space *mapping,
				  struct iov_iter *ii, loff_t pos)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	int err = 0;
	ssize_t res = 0;

	if (is_bad_inode(inode))
		return -EIO;

	if (inode->i_size < pos + iov_iter_count(ii))
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	do {
		struct fuse_req *req;
		ssize_t count;
		unsigned nr_pages = fuse_wr_pages(pos, iov_iter_count(ii));

		req = fuse_get_req(fc, nr_pages);
		if (IS_ERR(req)) {
			err = PTR_ERR(req);
			break;
		}

		count = fuse_fill_write_pages(req, mapping, ii, pos);
		if (count <= 0) {
			err = count;
		} else {
			size_t num_written;

			num_written = fuse_send_write_pages(req, file, inode,
							    pos, count);
			err = req->out.h.error;
			if (!err) {
				res += num_written;
				pos += num_written;

				/* break out of the loop on short write */
				if (num_written != count)
					err = -EIO;
			}
		}
		fuse_put_request(fc, req);
	} while (!err && iov_iter_count(ii));

	if (res > 0)
		fuse_write_update_size(inode, pos);

	clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
	fuse_invalidate_attr(inode);

	return res > 0 ? res : err;
}
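
/*
 * With writeback caching the generic path is used and only the
 * attributes needed for the EOF optimization and SUID clearing are
 * refreshed first.  Otherwise every write is pushed synchronously to
 * the server through fuse_perform_write(), with O_DIRECT falling back
 * to that path for whatever the direct write could not handle.
 */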
static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	ssize_t written = 0;
	ssize_t written_buffered = 0;
	struct inode *inode = mapping->host;
	ssize_t err;
	loff_t endbyte = 0;

	if (get_fuse_conn(inode)->writeback_cache) {
		/* Update size (EOF optimization) and mode (SUID clearing) */
		err = fuse_update_attributes(mapping->host, NULL, file, NULL);
		if (err)
			return err;

		return generic_file_write_iter(iocb, from);
	}

	mutex_lock(&inode->i_mutex);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (iocb->ki_flags & IOCB_DIRECT) {
		loff_t pos = iocb->ki_pos;
		written = generic_file_direct_write(iocb, from, pos);
		if (written < 0 || !iov_iter_count(from))
			goto out;

		pos += written;

		written_buffered = fuse_perform_write(file, mapping, from, pos);
		if (written_buffered < 0) {
			err = written_buffered;
			goto out;
		}
		endbyte = pos + written_buffered - 1;

		err = filemap_write_and_wait_range(file->f_mapping, pos,
						   endbyte);
		if (err)
			goto out;

		invalidate_mapping_pages(file->f_mapping,
					 pos >> PAGE_CACHE_SHIFT,
					 endbyte >> PAGE_CACHE_SHIFT);

		written += written_buffered;
		iocb->ki_pos = pos + written_buffered;
	} else {
		written = fuse_perform_write(file, mapping, from, iocb->ki_pos);
		if (written >= 0)
			iocb->ki_pos += written;
	}
out:
	current->backing_dev_info = NULL;
	mutex_unlock(&inode->i_mutex);

	return written ? written : err;
}
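
/*
 * Helpers for direct I/O: describe the user (or kernel) buffer behind
 * an iov_iter as a set of pinned pages in req->pages, so the data can
 * be passed to the server without an extra copy.
 */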
static inline void fuse_page_descs_length_init(struct fuse_req *req,
					       unsigned index,
					       unsigned nr_pages)
{
	int i;

	for (i = index; i < index + nr_pages; i++)
		req->page_descs[i].length = PAGE_SIZE -
			req->page_descs[i].offset;
}

static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
{
	return (unsigned long)ii->iov->iov_base + ii->iov_offset;
}

static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
					size_t max_size)
{
	return min(iov_iter_single_seg_count(ii), max_size);
}

static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
			       size_t *nbytesp, int write)
{
	size_t nbytes = 0;  /* # bytes already packed in req */

	/* Special case for kernel I/O: can copy directly into the buffer */
	if (ii->type & ITER_KVEC) {
		unsigned long user_addr = fuse_get_user_addr(ii);
		size_t frag_size = fuse_get_frag_size(ii, *nbytesp);

		if (write)
			req->in.args[1].value = (void *) user_addr;
		else
			req->out.args[0].value = (void *) user_addr;

		iov_iter_advance(ii, frag_size);
		*nbytesp = frag_size;
		return 0;
	}

	while (nbytes < *nbytesp && req->num_pages < req->max_pages) {
		unsigned npages;
		size_t start;
		ssize_t ret = iov_iter_get_pages(ii,
					&req->pages[req->num_pages],
					*nbytesp - nbytes,
					req->max_pages - req->num_pages,
					&start);
		if (ret < 0)
			return ret;

		iov_iter_advance(ii, ret);
		nbytes += ret;

		ret += start;
		npages = (ret + PAGE_SIZE - 1) / PAGE_SIZE;

		req->page_descs[req->num_pages].offset = start;
		fuse_page_descs_length_init(req, req->num_pages, npages);

		req->num_pages += npages;
		req->page_descs[req->num_pages - 1].length -=
			(PAGE_SIZE - ret) & (PAGE_SIZE - 1);
	}

	if (write)
		req->in.argpages = 1;
	else
		req->out.argpages = 1;

	*nbytesp = nbytes;

	return 0;
}

static inline int fuse_iter_npages(const struct iov_iter *ii_p)
{
	return iov_iter_npages(ii_p, FUSE_MAX_PAGES_PER_REQ);
}
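
/*
 * Core of direct I/O: split the transfer into requests of at most
 * fc->max_write/fc->max_read bytes.  If any page in the range is still
 * under writeback, those writes are synced first so that the server
 * sees the operations in a sane order.
 */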
ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
		       loff_t *ppos, int flags)
{
	int write = flags & FUSE_DIO_WRITE;
	int cuse = flags & FUSE_DIO_CUSE;
	struct file *file = io->file;
	struct inode *inode = file->f_mapping->host;
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	size_t count = iov_iter_count(iter);
	pgoff_t idx_from = pos >> PAGE_CACHE_SHIFT;
	pgoff_t idx_to = (pos + count - 1) >> PAGE_CACHE_SHIFT;
	ssize_t res = 0;
	struct fuse_req *req;

	if (io->async)
		req = fuse_get_req_for_background(fc, fuse_iter_npages(iter));
	else
		req = fuse_get_req(fc, fuse_iter_npages(iter));
	if (IS_ERR(req))
		return PTR_ERR(req);

	if (!cuse && fuse_range_is_writeback(inode, idx_from, idx_to)) {
		if (!write)
			mutex_lock(&inode->i_mutex);
		fuse_sync_writes(inode);
		if (!write)
			mutex_unlock(&inode->i_mutex);
	}

	while (count) {
		size_t nres;
		fl_owner_t owner = current->files;
		size_t nbytes = min(count, nmax);
		int err = fuse_get_user_pages(req, iter, &nbytes, write);
		if (err) {
			res = err;
			break;
		}

		if (write)
			nres = fuse_send_write(req, io, pos, nbytes, owner);
		else
			nres = fuse_send_read(req, io, pos, nbytes, owner);

		if (!io->async)
			fuse_release_user_pages(req, !write);
		if (req->out.h.error) {
			if (!res)
				res = req->out.h.error;
			break;
		} else if (nres > nbytes) {
			res = -EIO;
			break;
		}
		count -= nres;
		res += nres;
		pos += nres;
		if (nres != nbytes)
			break;
		if (count) {
			fuse_put_request(fc, req);
			if (io->async)
				req = fuse_get_req_for_background(fc,
					fuse_iter_npages(iter));
			else
				req = fuse_get_req(fc, fuse_iter_npages(iter));
			if (IS_ERR(req))
				break;
		}
	}
	if (!IS_ERR(req))
		fuse_put_request(fc, req);
	if (res > 0)
		*ppos = pos;

	return res;
}
EXPORT_SYMBOL_GPL(fuse_direct_io);

static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
				  struct iov_iter *iter,
				  loff_t *ppos)
{
	ssize_t res;
	struct file *file = io->file;
	struct inode *inode = file_inode(file);

	if (is_bad_inode(inode))
		return -EIO;

	res = fuse_direct_io(io, iter, ppos, 0);

	fuse_invalidate_attr(inode);

	return res;
}

static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb->ki_filp);
	return __fuse_direct_read(&io, to, &iocb->ki_pos);
}

static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
	ssize_t res;

	if (is_bad_inode(inode))
		return -EIO;

	/* Don't allow parallel writes to the same file */
	mutex_lock(&inode->i_mutex);
	res = generic_write_checks(iocb, from);
	if (res > 0)
		res = fuse_direct_io(&io, from, &iocb->ki_pos, FUSE_DIO_WRITE);
	fuse_invalidate_attr(inode);
	if (res > 0)
		fuse_write_update_size(inode, iocb->ki_pos);
	mutex_unlock(&inode->i_mutex);

	return res;
}

static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;

	for (i = 0; i < req->num_pages; i++)
		__free_page(req->pages[i]);

	if (req->ff)
		fuse_file_put(req->ff, false);
}
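
/*
 * Called when a writepage request is done or discarded: drop the
 * per-BDI and NR_WRITEBACK_TEMP accounting taken out for the temporary
 * pages and wake waiters in fuse_wait_on_page_writeback().
 */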
static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct backing_dev_info *bdi = inode_to_bdi(inode);
	int i;

	list_del(&req->writepages_entry);
	for (i = 0; i < req->num_pages; i++) {
		dec_bdi_stat(bdi, BDI_WRITEBACK);
		dec_zone_page_state(req->pages[i], NR_WRITEBACK_TEMP);
		bdi_writeout_inc(bdi);
	}
	wake_up(&fi->page_waitq);
}

/* Called under fc->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req,
				loff_t size)
__releases(fc->lock)
__acquires(fc->lock)
{
	struct fuse_inode *fi = get_fuse_inode(req->inode);
	struct fuse_write_in *inarg = &req->misc.write.in;
	__u64 data_size = req->num_pages * PAGE_CACHE_SIZE;

	if (!fc->connected)
		goto out_free;

	if (inarg->offset + data_size <= size) {
		inarg->size = data_size;
	} else if (inarg->offset < size) {
		inarg->size = size - inarg->offset;
	} else {
		/* Got truncated off completely */
		goto out_free;
	}

	req->in.args[1].size = inarg->size;
	fi->writectr++;
	fuse_request_send_background_locked(fc, req);
	return;

 out_free:
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
	fuse_put_request(fc, req);
	spin_lock(&fc->lock);
}

/*
 * If fi->writectr is positive (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fc->lock
 */
void fuse_flush_writepages(struct inode *inode)
__releases(fc->lock)
__acquires(fc->lock)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	size_t crop = i_size_read(inode);
	struct fuse_req *req;

	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
		req = list_entry(fi->queued_writes.next, struct fuse_req, list);
		list_del_init(&req->list);
		fuse_send_writepage(fc, req, crop);
	}
}
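
/*
 * Request end callback for writepage requests: first drain any
 * secondary requests chained on misc.write.next, then drop the
 * accounting and let queued writes proceed.
 */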
static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct inode *inode = req->inode;
	struct fuse_inode *fi = get_fuse_inode(inode);

	mapping_set_error(inode->i_mapping, req->out.h.error);
	spin_lock(&fc->lock);
	while (req->misc.write.next) {
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_write_in *inarg = &req->misc.write.in;
		struct fuse_req *next = req->misc.write.next;
		req->misc.write.next = next->misc.write.next;
		next->misc.write.next = NULL;
		next->ff = fuse_file_get(req->ff);
		list_add(&next->writepages_entry, &fi->writepages);

		/*
		 * Skip fuse_flush_writepages() to make it easy to crop requests
		 * based on primary request size.
		 *
		 * 1st case (trivial): there are no concurrent activities using
		 * fuse_set/release_nowrite.  Then we're on safe side because
		 * fuse_flush_writepages() would call fuse_send_writepage()
		 * anyway.
		 *
		 * 2nd case: someone called fuse_set_nowrite and it is waiting
		 * now for completion of all in-flight requests.  This happens
		 * rarely and no more than once per page, so this should be
		 * okay.
		 *
		 * 3rd case: someone (e.g. fuse_do_setattr()) is in the middle
		 * of fuse_set_nowrite..fuse_release_nowrite section.  The fact
		 * that fuse_set_nowrite returned implies that all in-flight
		 * requests were completed along with all of their secondary
		 * requests.  Further primary requests are blocked by negative
		 * writectr.  Hence there cannot be any in-flight requests and
		 * no invocations of fuse_writepage_end() while we're in
		 * fuse_set_nowrite..fuse_release_nowrite section.
		 */
		fuse_send_writepage(fc, next, inarg->offset + inarg->size);
	}
	fi->writectr--;
	fuse_writepage_finish(fc, req);
	spin_unlock(&fc->lock);
	fuse_writepage_free(fc, req);
}

static struct fuse_file *__fuse_write_file_get(struct fuse_conn *fc,
					       struct fuse_inode *fi)
{
	struct fuse_file *ff = NULL;

	spin_lock(&fc->lock);
	if (!list_empty(&fi->write_files)) {
		ff = list_entry(fi->write_files.next, struct fuse_file,
				write_entry);
		fuse_file_get(ff);
	}
	spin_unlock(&fc->lock);

	return ff;
}

static struct fuse_file *fuse_write_file_get(struct fuse_conn *fc,
					     struct fuse_inode *fi)
{
	struct fuse_file *ff = __fuse_write_file_get(fc, fi);
	WARN_ON(!ff);
	return ff;
}

int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_file *ff;
	int err;

	ff = __fuse_write_file_get(fc, fi);
	err = fuse_flush_times(inode, ff);
	if (ff)
		fuse_file_put(ff, 0);

	return err;
}
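
/*
 * Write back one page.  The contents are copied to a freshly allocated
 * temporary page, so writeback on the original page can be ended
 * immediately and the page may even be truncated while the request is
 * in flight; the server always sees a stable copy.
 */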
static int fuse_writepage_locked(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	struct page *tmp_page;
	int error = -ENOMEM;

	set_page_writeback(page);

	req = fuse_request_alloc_nofs(1);
	if (!req)
		goto err;

	req->background = 1; /* writeback always goes to bg_queue */
	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto err_free;

	error = -EIO;
	req->ff = fuse_write_file_get(fc, fi);
	if (!req->ff)
		goto err_nofile;

	fuse_write_fill(req, req->ff, page_offset(page), 0);

	copy_highpage(tmp_page, page);
	req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
	req->misc.write.next = NULL;
	req->in.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = tmp_page;
	req->page_descs[0].offset = 0;
	req->page_descs[0].length = PAGE_SIZE;
	req->end = fuse_writepage_end;
	req->inode = inode;

	inc_bdi_stat(inode_to_bdi(inode), BDI_WRITEBACK);
	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);

	spin_lock(&fc->lock);
	list_add(&req->writepages_entry, &fi->writepages);
	list_add_tail(&req->list, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fc->lock);

	end_page_writeback(page);

	return 0;

err_nofile:
	__free_page(tmp_page);
err_free:
	fuse_request_free(req);
err:
	end_page_writeback(page);
	return error;
}

static int fuse_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;

	if (fuse_page_is_writeback(page->mapping->host, page->index)) {
		/*
		 * ->writepages() should be called for sync() and friends.  We
		 * should only get here on direct reclaim and then we are
		 * allowed to skip a page which is already in flight
		 */
		WARN_ON(wbc->sync_mode == WB_SYNC_ALL);

		redirty_page_for_writepage(wbc, page);
		return 0;
	}

	err = fuse_writepage_locked(page);
	unlock_page(page);

	return err;
}

struct fuse_fill_wb_data {
	struct fuse_req *req;
	struct fuse_file *ff;
	struct inode *inode;
	struct page **orig_pages;
};

static void fuse_writepages_send(struct fuse_fill_wb_data *data)
{
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	int num_pages = req->num_pages;
	int i;

	req->ff = fuse_file_get(data->ff);
	spin_lock(&fc->lock);
	list_add_tail(&req->list, &fi->queued_writes);
	fuse_flush_writepages(inode);
	spin_unlock(&fc->lock);

	for (i = 0; i < num_pages; i++)
		end_page_writeback(data->orig_pages[i]);
}

static bool fuse_writepage_in_flight(struct fuse_req *new_req,
				     struct page *page)
{
	struct fuse_conn *fc = get_fuse_conn(new_req->inode);
	struct fuse_inode *fi = get_fuse_inode(new_req->inode);
	struct fuse_req *tmp;
	struct fuse_req *old_req;
	bool found = false;
	pgoff_t curr_index;

	BUG_ON(new_req->num_pages != 0);

	spin_lock(&fc->lock);
	list_del(&new_req->writepages_entry);
	list_for_each_entry(old_req, &fi->writepages, writepages_entry) {
		BUG_ON(old_req->inode != new_req->inode);
		curr_index = old_req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
		if (curr_index <= page->index &&
		    page->index < curr_index + old_req->num_pages) {
			found = true;
			break;
		}
	}
	if (!found) {
		list_add(&new_req->writepages_entry, &fi->writepages);
		goto out_unlock;
	}

	new_req->num_pages = 1;
	for (tmp = old_req; tmp != NULL; tmp = tmp->misc.write.next) {
		BUG_ON(tmp->inode != new_req->inode);
		curr_index = tmp->misc.write.in.offset >> PAGE_CACHE_SHIFT;
		if (tmp->num_pages == 1 &&
		    curr_index == page->index) {
			old_req = tmp;
		}
	}

	if (old_req->num_pages == 1 && (old_req->state == FUSE_REQ_INIT ||
					old_req->state == FUSE_REQ_PENDING)) {
		struct backing_dev_info *bdi = inode_to_bdi(page->mapping->host);

		copy_highpage(old_req->pages[0], page);
		spin_unlock(&fc->lock);

		dec_bdi_stat(bdi, BDI_WRITEBACK);
		dec_zone_page_state(page, NR_WRITEBACK_TEMP);
		bdi_writeout_inc(bdi);
		fuse_writepage_free(fc, new_req);
		fuse_request_free(new_req);
		goto out;
	} else {
		new_req->misc.write.next = old_req->misc.write.next;
		old_req->misc.write.next = new_req;
	}
out_unlock:
	spin_unlock(&fc->lock);
out:
	return found;
}
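
/*
 * ->writepages() fill callback.  Like fuse_writepage_locked() this
 * copies each page to a temporary page, but it batches contiguous
 * pages into a single request and, via fuse_writepage_in_flight(),
 * handles pages that are already being written back.
 */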
static int fuse_writepages_fill(struct page *page,
				struct writeback_control *wbc, void *_data)
{
	struct fuse_fill_wb_data *data = _data;
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct page *tmp_page;
	bool is_writeback;
	int err;

	if (!data->ff) {
		err = -EIO;
		data->ff = fuse_write_file_get(fc, get_fuse_inode(inode));
		if (!data->ff)
			goto out_unlock;
	}

	/*
	 * Being under writeback is unlikely but possible.  For example direct
	 * read to an mmaped fuse file will set the page dirty twice; once when
	 * the pages are faulted with get_user_pages(), and then after the read
	 * completed.
	 */
	is_writeback = fuse_page_is_writeback(inode, page->index);

	if (req && req->num_pages &&
	    (is_writeback || req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
	     (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_write ||
	     data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) {
		fuse_writepages_send(data);
		data->req = NULL;
	}
	err = -ENOMEM;
	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!tmp_page)
		goto out_unlock;

	/*
	 * The page must not be redirtied until the writeout is completed
	 * (i.e. userspace has sent a reply to the write request).  Otherwise
	 * there could be more than one temporary page instance for each real
	 * page.
	 *
	 * This is ensured by holding the page lock in page_mkwrite() while
	 * checking fuse_page_is_writeback().  We already hold the page lock
	 * since clear_page_dirty_for_io() and keep it held until we add the
	 * request to the fi->writepages list and increment req->num_pages.
	 * After this fuse_page_is_writeback() will indicate that the page is
	 * under writeback, so we can release the page lock.
	 */
	if (data->req == NULL) {
		struct fuse_inode *fi = get_fuse_inode(inode);

		err = -ENOMEM;
		req = fuse_request_alloc_nofs(FUSE_MAX_PAGES_PER_REQ);
		if (!req) {
			__free_page(tmp_page);
			goto out_unlock;
		}

		fuse_write_fill(req, data->ff, page_offset(page), 0);
		req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
		req->misc.write.next = NULL;
		req->in.argpages = 1;
		req->background = 1;
		req->num_pages = 0;
		req->end = fuse_writepage_end;
		req->inode = inode;

		spin_lock(&fc->lock);
		list_add(&req->writepages_entry, &fi->writepages);
		spin_unlock(&fc->lock);

		data->req = req;
	}
	set_page_writeback(page);

	copy_highpage(tmp_page, page);
	req->pages[req->num_pages] = tmp_page;
	req->page_descs[req->num_pages].offset = 0;
	req->page_descs[req->num_pages].length = PAGE_SIZE;

	inc_bdi_stat(inode_to_bdi(inode), BDI_WRITEBACK);
	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);

	err = 0;
	if (is_writeback && fuse_writepage_in_flight(req, page)) {
		end_page_writeback(page);
		data->req = NULL;
		goto out_unlock;
	}
	data->orig_pages[req->num_pages] = page;

	/*
	 * Protected by fc->lock against concurrent access by
	 * fuse_page_is_writeback().
	 */
	spin_lock(&fc->lock);
	req->num_pages++;
	spin_unlock(&fc->lock);

out_unlock:
	unlock_page(page);

	return err;
}

static int fuse_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct fuse_fill_wb_data data;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	data.inode = inode;
	data.req = NULL;
	data.ff = NULL;

	err = -ENOMEM;
	data.orig_pages = kcalloc(FUSE_MAX_PAGES_PER_REQ,
				  sizeof(struct page *),
				  GFP_NOFS);
	if (!data.orig_pages)
		goto out;

	err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
	if (data.req) {
		/* Ignore errors if we can write at least one page */
		BUG_ON(!data.req->num_pages);
		fuse_writepages_send(&data);
		err = 0;
	}
	if (data.ff)
		fuse_file_put(data.ff, false);

	kfree(data.orig_pages);
out:
	return err;
}

/*
 * It would be worthwhile to make sure that space is reserved on disk
 * for the write, but how to implement it without killing performance
 * needs more thought.
 */
static int fuse_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	struct fuse_conn *fc = get_fuse_conn(file_inode(file));
	struct page *page;
	loff_t fsize;
	int err = -ENOMEM;

	WARN_ON(!fc->writeback_cache);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		goto error;

	fuse_wait_on_page_writeback(mapping->host, page->index);

	if (PageUptodate(page) || len == PAGE_CACHE_SIZE)
		goto success;
	/*
	 * Check if the start of this page comes after the end of file,
	 * in which case the readpage can be optimized away.
	 */
	fsize = i_size_read(mapping->host);
	if (fsize <= (pos & PAGE_CACHE_MASK)) {
		size_t off = pos & ~PAGE_CACHE_MASK;
		if (off)
			zero_user_segment(page, 0, off);
		goto success;
	}
	err = fuse_do_readpage(file, page);
	if (err)
		goto cleanup;
success:
	*pagep = page;
	return 0;

cleanup:
	unlock_page(page);
	page_cache_release(page);
error:
	return err;
}

static int fuse_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	if (!PageUptodate(page)) {
		/* Zero any unwritten bytes at the end of the page */
		size_t endoff = (pos + copied) & ~PAGE_CACHE_MASK;
		if (endoff)
			zero_user_segment(page, endoff, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	}

	fuse_write_update_size(inode, pos + copied);
	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	return copied;
}
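
/*
 * launder_page() is called for a dirty page that is about to be
 * invalidated (e.g. by invalidate_inode_pages2()): write it out
 * synchronously and wait, so the caller gets back a clean page.
 */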
static int fuse_launder_page(struct page *page)
{
	int err = 0;
	if (clear_page_dirty_for_io(page)) {
		struct inode *inode = page->mapping->host;
		err = fuse_writepage_locked(page);
		if (!err)
			fuse_wait_on_page_writeback(inode, page->index);
	}
	return err;
}

/*
 * Write back dirty pages now, because there may not be any suitable
 * open files later
 */
static void fuse_vma_close(struct vm_area_struct *vma)
{
	filemap_write_and_wait(vma->vm_file->f_mapping);
}

/*
 * Wait for writeback against this page to complete before allowing it
 * to be marked dirty again, and hence written back again, possibly
 * before the previous writepage completed.
 *
 * Block here, instead of in ->writepage(), so that the userspace fs
 * can only block processes actually operating on the filesystem.
 *
 * Otherwise unprivileged userspace fs would be able to block
 * unrelated:
 *
 * - page migration
 * - sync(2)
 * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
 */
static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);

	file_update_time(vma->vm_file);
	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		unlock_page(page);
		return VM_FAULT_NOPAGE;
	}

	fuse_wait_on_page_writeback(inode, page->index);
	return VM_FAULT_LOCKED;
}

static const struct vm_operations_struct fuse_file_vm_ops = {
	.close		= fuse_vma_close,
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= fuse_page_mkwrite,
};

static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		fuse_link_write_file(file);

	file_accessed(file);
	vma->vm_ops = &fuse_file_vm_ops;
	return 0;
}

static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Can't provide the coherency needed for MAP_SHARED */
	if (vma->vm_flags & VM_MAYSHARE)
		return -ENODEV;

	invalidate_inode_pages2(file->f_mapping);

	return generic_file_mmap(file, vma);
}
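
/*
 * POSIX locks and flock: translate between the kernel's struct
 * file_lock and the fuse_file_lock wire format, rejecting out of
 * range values coming back from the server.
 */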
static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
				  struct file_lock *fl)
{
	switch (ffl->type) {
	case F_UNLCK:
		break;

	case F_RDLCK:
	case F_WRLCK:
		if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
		    ffl->end < ffl->start)
			return -EIO;

		fl->fl_start = ffl->start;
		fl->fl_end = ffl->end;
		fl->fl_pid = ffl->pid;
		break;

	default:
		return -EIO;
	}
	fl->fl_type = ffl->type;
	return 0;
}

static void fuse_lk_fill(struct fuse_args *args, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid,
			 int flock, struct fuse_lk_in *inarg)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;

	memset(inarg, 0, sizeof(*inarg));
	inarg->fh = ff->fh;
	inarg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
	inarg->lk.start = fl->fl_start;
	inarg->lk.end = fl->fl_end;
	inarg->lk.type = fl->fl_type;
	inarg->lk.pid = pid;
	if (flock)
		inarg->lk_flags |= FUSE_LK_FLOCK;
	args->in.h.opcode = opcode;
	args->in.h.nodeid = get_node_id(inode);
	args->in.numargs = 1;
	args->in.args[0].size = sizeof(*inarg);
	args->in.args[0].value = inarg;
}

static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	FUSE_ARGS(args);
	struct fuse_lk_in inarg;
	struct fuse_lk_out outarg;
	int err;

	fuse_lk_fill(&args, file, fl, FUSE_GETLK, 0, 0, &inarg);
	args.out.numargs = 1;
	args.out.args[0].size = sizeof(outarg);
	args.out.args[0].value = &outarg;
	err = fuse_simple_request(fc, &args);
	if (!err)
		err = convert_fuse_file_lock(&outarg.lk, fl);

	return err;
}
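
/*
 * Set a POSIX or flock lock.  FL_CLOSE is a no-op here because unlock
 * on close is already handled by the flush method, and -EINTR from an
 * interrupted FUSE_SETLKW is converted to -ERESTARTSYS so the syscall
 * is restarted.
 */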
static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	FUSE_ARGS(args);
	struct fuse_lk_in inarg;
	struct fuse_lk_out outarg;
	int err;

	fuse_lk_fill(&args, file, fl, FUSE_GETLK, 0, 0, &inarg);
	args.out.numargs = 1;
	args.out.args[0].size = sizeof(outarg);
	args.out.args[0].value = &outarg;
	err = fuse_simple_request(fc, &args);
	if (!err)
		err = convert_fuse_file_lock(&outarg.lk, fl);

	return err;
}

static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	FUSE_ARGS(args);
	struct fuse_lk_in inarg;
	int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
	pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
	int err;

	if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
		/* NLM needs asynchronous locks, which we don't support yet */
		return -ENOLCK;
	}

	/* Unlock on close is handled by the flush method */
	if (fl->fl_flags & FL_CLOSE)
		return 0;

	fuse_lk_fill(&args, file, fl, opcode, pid, flock, &inarg);
	err = fuse_simple_request(fc, &args);

	/* locking is restartable */
	if (err == -EINTR)
		err = -ERESTARTSYS;

	return err;
}

static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (cmd == F_CANCELLK) {
		err = 0;
	} else if (cmd == F_GETLK) {
		if (fc->no_lock) {
			posix_test_lock(file, fl);
			err = 0;
		} else
			err = fuse_getlk(file, fl);
	} else {
		if (fc->no_lock)
			err = posix_lock_file(file, fl, NULL);
		else
			err = fuse_setlk(file, fl, 0);
	}
	return err;
}

static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fc->no_flock) {
		err = flock_lock_file_wait(file, fl);
	} else {
		struct fuse_file *ff = file->private_data;

		/* emulate flock with POSIX locks */
		ff->flock = true;
		err = fuse_setlk(file, fl, 1);
	}

	return err;
}

static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	FUSE_ARGS(args);
	struct fuse_bmap_in inarg;
	struct fuse_bmap_out outarg;
	int err;

	if (!inode->i_sb->s_bdev || fc->no_bmap)
		return 0;

	memset(&inarg, 0, sizeof(inarg));
	inarg.block = block;
	inarg.blocksize = inode->i_sb->s_blocksize;
	args.in.h.opcode = FUSE_BMAP;
	args.in.h.nodeid = get_node_id(inode);
	args.in.numargs = 1;
	args.in.args[0].size = sizeof(inarg);
	args.in.args[0].value = &inarg;
	args.out.numargs = 1;
	args.out.args[0].size = sizeof(outarg);
	args.out.args[0].value = &outarg;
	err = fuse_simple_request(fc, &args);
	if (err == -ENOSYS)
		fc->no_bmap = 1;

	return err ? 0 : outarg.block;
}

static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence)
{
	loff_t retval;
	struct inode *inode = file_inode(file);

	/* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */
	if (whence == SEEK_CUR || whence == SEEK_SET)
		return generic_file_llseek(file, offset, whence);

	mutex_lock(&inode->i_mutex);
	retval = fuse_update_attributes(inode, NULL, file, NULL);
	if (!retval)
		retval = generic_file_llseek(file, offset, whence);
	mutex_unlock(&inode->i_mutex);

	return retval;
}

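/*
 * Copy @bytes between the user buffers described by @iov and the
 * request pages: into the pages when @to_user is false, back out to
 * userspace when it is true.
 */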
static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
				unsigned int nr_segs, size_t bytes, bool to_user)
{
	struct iov_iter ii;
	int page_idx = 0;

	if (!bytes)
		return 0;

	iov_iter_init(&ii, to_user ? READ : WRITE, iov, nr_segs, bytes);

	while (iov_iter_count(&ii)) {
		struct page *page = pages[page_idx++];
		size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii));
		void *kaddr;

		kaddr = kmap(page);

		while (todo) {
			char __user *uaddr = ii.iov->iov_base + ii.iov_offset;
			size_t iov_len = ii.iov->iov_len - ii.iov_offset;
			size_t copy = min(todo, iov_len);
			size_t left;

			if (!to_user)
				left = copy_from_user(kaddr, uaddr, copy);
			else
				left = copy_to_user(uaddr, kaddr, copy);

			if (unlikely(left)) {
				/* don't leak the kmap on the fault path */
				kunmap(page);
				return -EFAULT;
			}

			iov_iter_advance(&ii, copy);
			todo -= copy;
			kaddr += copy;
		}

		kunmap(page);
	}

	return 0;
}

/*
 * CUSE servers compiled on 32bit broke on 64bit kernels because the
 * ABI was defined to be 'struct iovec' which is different on 32bit
 * and 64bit.  Fortunately we can determine which structure the server
 * used from the size of the reply: e.g. for count == 2 on a 64bit
 * kernel, a 16 byte reply can only be two compat_iovecs, while a
 * 32 byte reply must be two native iovecs.
 */
static int fuse_copy_ioctl_iovec_old(struct iovec *dst, void *src,
				     size_t transferred, unsigned count,
				     bool is_compat)
{
#ifdef CONFIG_COMPAT
	if (count * sizeof(struct compat_iovec) == transferred) {
		struct compat_iovec *ciov = src;
		unsigned i;

		/*
		 * With this interface a 32bit server cannot support
		 * non-compat (i.e. ones coming from 64bit apps) ioctl
		 * requests
		 */
		if (!is_compat)
			return -EINVAL;

		for (i = 0; i < count; i++) {
			dst[i].iov_base = compat_ptr(ciov[i].iov_base);
			dst[i].iov_len = ciov[i].iov_len;
		}
		return 0;
	}
#endif

	if (count * sizeof(struct iovec) != transferred)
		return -EIO;

	memcpy(dst, src, transferred);
	return 0;
}

/* Make sure iov_length() won't overflow */
static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
{
	size_t n;
	u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;

	for (n = 0; n < count; n++, iov++) {
		if (iov->iov_len > (size_t) max)
			return -ENOMEM;
		max -= iov->iov_len;
	}
	return 0;
}

static int fuse_copy_ioctl_iovec(struct fuse_conn *fc, struct iovec *dst,
				 void *src, size_t transferred, unsigned count,
				 bool is_compat)
{
	unsigned i;
	struct fuse_ioctl_iovec *fiov = src;

	if (fc->minor < 16) {
		return fuse_copy_ioctl_iovec_old(dst, src, transferred,
						 count, is_compat);
	}

	if (count * sizeof(struct fuse_ioctl_iovec) != transferred)
		return -EIO;

	for (i = 0; i < count; i++) {
		/* Did the server supply an inappropriate value? */
		if (fiov[i].base != (unsigned long) fiov[i].base ||
		    fiov[i].len != (unsigned long) fiov[i].len)
			return -EIO;

		dst[i].iov_base = (void __user *) (unsigned long) fiov[i].base;
		dst[i].iov_len = (size_t) fiov[i].len;

#ifdef CONFIG_COMPAT
		if (is_compat &&
		    (ptr_to_compat(dst[i].iov_base) != fiov[i].base ||
		     (compat_size_t) dst[i].iov_len != fiov[i].len))
			return -EIO;
#endif
	}

	return 0;
}

/*
 * For ioctls, there is no generic way to determine how much memory
 * needs to be read and/or written.  Furthermore, ioctls are allowed
 * to dereference the passed pointer, so the parameter requires deep
 * copying but FUSE has no idea whatsoever about what to copy in or
 * out.
 *
 * This is solved by allowing FUSE server to retry ioctl with
 * necessary in/out iovecs.  Let's assume the ioctl implementation
 * needs to read in the following structure.
 *
 * struct a {
 *	char	*buf;
 *	size_t	buflen;
 * }
 *
 * On the first callout to FUSE server, inarg->in_size and
 * inarg->out_size will be zero; then, the server completes the ioctl
 * with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and
 * the actual iov array to
 *
 * { { .iov_base = inarg.arg, .iov_len = sizeof(struct a) } }
 *
 * which tells FUSE to copy in the requested area and retry the ioctl.
 * On the second round, the server has access to the structure and
 * from that it can tell what to look for next, so on the second
 * invocation, it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and iov
 * array to
 *
 * { { .iov_base = inarg.arg, .iov_len = sizeof(struct a) },
 *   { .iov_base = a.buf,     .iov_len = a.buflen } }
 *
 * FUSE will copy both struct a and the pointed buffer from the
 * process doing the ioctl and retry ioctl with both struct a and the
 * buffer.
 *
 * This time, FUSE server has everything it needs and completes ioctl
 * without FUSE_IOCTL_RETRY, which finishes the ioctl call.
 *
 * Copying data out works the same way.
 *
 * Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel
 * automatically initializes in and out iovs by decoding @cmd with
 * _IOC_* macros and the server is not allowed to request RETRY.  This
 * limits ioctl data transfers to well-formed ioctls and is the forced
 * behavior for all FUSE servers.
 */
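/*
 * Illustrative sketch (not taken from the fuse headers): assuming the
 * protocol 7.16+ encoding handled by fuse_copy_ioctl_iovec() above,
 * the server's first retry reply for the example could carry
 *
 *	struct fuse_ioctl_out out = {
 *		.flags	 = FUSE_IOCTL_RETRY,
 *		.in_iovs = 1,
 *	};
 *	struct fuse_ioctl_iovec iov = {
 *		.base = (uint64_t)(unsigned long)inarg.arg,
 *		.len  = sizeof(struct a),
 *	};
 *
 * with 'out' followed by the iovec array in the reply payload.
 */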
long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
		   unsigned int flags)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_ioctl_in inarg = {
		.fh = ff->fh,
		.cmd = cmd,
		.arg = arg,
		.flags = flags
	};
	struct fuse_ioctl_out outarg;
	struct fuse_req *req = NULL;
	struct page **pages = NULL;
	struct iovec *iov_page = NULL;
	struct iovec *in_iov = NULL, *out_iov = NULL;
	unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages;
	size_t in_size, out_size, transferred;
	int err;

#if BITS_PER_LONG == 32
	inarg.flags |= FUSE_IOCTL_32BIT;
#else
	if (flags & FUSE_IOCTL_COMPAT)
		inarg.flags |= FUSE_IOCTL_32BIT;
#endif

	/* assume all the iovs returned by client always fit in a page */
	BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);

	err = -ENOMEM;
	pages = kcalloc(FUSE_MAX_PAGES_PER_REQ, sizeof(pages[0]), GFP_KERNEL);
	iov_page = (struct iovec *) __get_free_page(GFP_KERNEL);
	if (!pages || !iov_page)
		goto out;

	/*
	 * If restricted, initialize IO parameters as encoded in @cmd.
	 * RETRY from server is not allowed.
	 */
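	/*
	 * For example, a command defined as _IOR('f', 1, long) decodes to
	 * _IOC_DIR(cmd) == _IOC_READ and _IOC_SIZE(cmd) == sizeof(long),
	 * so the single iovec set up below covers exactly the caller's
	 * buffer at @arg.
	 */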
	if (!(flags & FUSE_IOCTL_UNRESTRICTED)) {
		struct iovec *iov = iov_page;

		iov->iov_base = (void __user *)arg;
		iov->iov_len = _IOC_SIZE(cmd);

		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			in_iov = iov;
			in_iovs = 1;
		}

		if (_IOC_DIR(cmd) & _IOC_READ) {
			out_iov = iov;
			out_iovs = 1;
		}
	}

 retry:
	inarg.in_size = in_size = iov_length(in_iov, in_iovs);
	inarg.out_size = out_size = iov_length(out_iov, out_iovs);

	/*
	 * Out data can be used either for actual out data or for the
	 * retry iovecs; make sure there is always at least one page.
	 */
	out_size = max_t(size_t, out_size, PAGE_SIZE);
	max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE);

	/* make sure there are enough buffer pages and init request with them */
	err = -ENOMEM;
	if (max_pages > FUSE_MAX_PAGES_PER_REQ)
		goto out;
	while (num_pages < max_pages) {
		pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
		if (!pages[num_pages])
			goto out;
		num_pages++;
	}

	req = fuse_get_req(fc, num_pages);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		req = NULL;
		goto out;
	}
	memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages);
	req->num_pages = num_pages;
	fuse_page_descs_length_init(req, 0, req->num_pages);

	/* okay, let's send it to the client */
	req->in.h.opcode = FUSE_IOCTL;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	if (in_size) {
		req->in.numargs++;
		req->in.args[1].size = in_size;
		req->in.argpages = 1;

		err = fuse_ioctl_copy_user(pages, in_iov, in_iovs, in_size,
					   false);
		if (err)
			goto out;
	}

	req->out.numargs = 2;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	req->out.args[1].size = out_size;
	req->out.argpages = 1;
	req->out.argvar = 1;

	fuse_request_send(fc, req);
	err = req->out.h.error;
	transferred = req->out.args[1].size;
	fuse_put_request(fc, req);
	req = NULL;
	if (err)
		goto out;

	/* did it ask for retry? */
	if (outarg.flags & FUSE_IOCTL_RETRY) {
		void *vaddr;

		/* no retry if in restricted mode */
		err = -EIO;
		if (!(flags & FUSE_IOCTL_UNRESTRICTED))
			goto out;

		in_iovs = outarg.in_iovs;
		out_iovs = outarg.out_iovs;

		/*
		 * Make sure the counts are within bounds; the separate
		 * checks protect against overflow of the sum.
		 */
		err = -ENOMEM;
		if (in_iovs > FUSE_IOCTL_MAX_IOV ||
		    out_iovs > FUSE_IOCTL_MAX_IOV ||
		    in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
			goto out;

		vaddr = kmap_atomic(pages[0]);
		err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr,
					    transferred, in_iovs + out_iovs,
					    (flags & FUSE_IOCTL_COMPAT) != 0);
		kunmap_atomic(vaddr);
		if (err)
			goto out;

		in_iov = iov_page;
		out_iov = in_iov + in_iovs;

		err = fuse_verify_ioctl_iov(in_iov, in_iovs);
		if (err)
			goto out;

		err = fuse_verify_ioctl_iov(out_iov, out_iovs);
		if (err)
			goto out;

		goto retry;
	}

	err = -EIO;
	if (transferred > inarg.out_size)
		goto out;

	err = fuse_ioctl_copy_user(pages, out_iov, out_iovs, transferred, true);
 out:
	if (req)
		fuse_put_request(fc, req);
	free_page((unsigned long) iov_page);
	while (num_pages)
		__free_page(pages[--num_pages]);
	kfree(pages);

	return err ? err : outarg.result;
}
EXPORT_SYMBOL_GPL(fuse_do_ioctl);

long fuse_ioctl_common(struct file *file, unsigned int cmd,
		       unsigned long arg, unsigned int flags)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (!fuse_allow_current_process(fc))
		return -EACCES;

	if (is_bad_inode(inode))
		return -EIO;

	return fuse_do_ioctl(file, cmd, arg, flags);
}

static long fuse_file_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	return fuse_ioctl_common(file, cmd, arg, 0);
}

static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT);
}

/*
 * All files which have been polled are linked to RB tree
 * fuse_conn->polled_files which is indexed by kh.  Walk the tree and
 * find the matching one.
 */
static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
					      struct rb_node **parent_out)
{
	struct rb_node **link = &fc->polled_files.rb_node;
	struct rb_node *last = NULL;

	while (*link) {
		struct fuse_file *ff;

		last = *link;
		ff = rb_entry(last, struct fuse_file, polled_node);

		if (kh < ff->kh)
			link = &last->rb_left;
		else if (kh > ff->kh)
			link = &last->rb_right;
		else
			return link;
	}

	if (parent_out)
		*parent_out = last;
	return link;
}

/*
 * The file is about to be polled.  Make sure it's on the polled_files
 * RB tree.  Note that files once added to the polled_files tree are
 * not removed before the file is released.  This is because a file
 * polled once is likely to be polled again.
 */
static void fuse_register_polled_file(struct fuse_conn *fc,
				      struct fuse_file *ff)
{
	spin_lock(&fc->lock);
	if (RB_EMPTY_NODE(&ff->polled_node)) {
		struct rb_node **link, *uninitialized_var(parent);

		link = fuse_find_polled_node(fc, ff->kh, &parent);
		BUG_ON(*link);
		rb_link_node(&ff->polled_node, parent, link);
		rb_insert_color(&ff->polled_node, &fc->polled_files);
	}
	spin_unlock(&fc->lock);
}

unsigned fuse_file_poll(struct file *file, poll_table *wait)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
	struct fuse_poll_out outarg;
	FUSE_ARGS(args);
	int err;

	if (fc->no_poll)
		return DEFAULT_POLLMASK;

	poll_wait(file, &ff->poll_wait, wait);
	inarg.events = (__u32)poll_requested_events(wait);

	/*
	 * Ask for notification iff there's someone waiting for it.
	 * The client may ignore the flag and always notify.
	 */
	if (waitqueue_active(&ff->poll_wait)) {
		inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
		fuse_register_polled_file(fc, ff);
	}

	args.in.h.opcode = FUSE_POLL;
	args.in.h.nodeid = ff->nodeid;
	args.in.numargs = 1;
	args.in.args[0].size = sizeof(inarg);
	args.in.args[0].value = &inarg;
	args.out.numargs = 1;
	args.out.args[0].size = sizeof(outarg);
	args.out.args[0].value = &outarg;
	err = fuse_simple_request(fc, &args);

	if (!err)
		return outarg.revents;
	if (err == -ENOSYS) {
		fc->no_poll = 1;
		return DEFAULT_POLLMASK;
	}
	return POLLERR;
}
EXPORT_SYMBOL_GPL(fuse_file_poll);

/*
 * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
 * wakes up the poll waiters.
 */
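/*
 * Servers trigger this by sending a FUSE_NOTIFY_POLL notification
 * whose payload is a struct fuse_notify_poll_wakeup_out carrying the
 * kh they were handed in FUSE_POLL; with libfuse this is typically
 * done through fuse_lowlevel_notify_poll() on a stored poll handle.
 */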
int fuse_notify_poll_wakeup(struct fuse_conn *fc,
			    struct fuse_notify_poll_wakeup_out *outarg)
{
	u64 kh = outarg->kh;
	struct rb_node **link;

	spin_lock(&fc->lock);

	link = fuse_find_polled_node(fc, kh, NULL);
	if (*link) {
		struct fuse_file *ff;

		ff = rb_entry(*link, struct fuse_file, polled_node);
		wake_up_interruptible_sync(&ff->poll_wait);
	}

	spin_unlock(&fc->lock);
	return 0;
}

static void fuse_do_truncate(struct file *file)
{
	struct inode *inode = file->f_mapping->host;
	struct iattr attr;

	attr.ia_valid = ATTR_SIZE;
	attr.ia_size = i_size_read(inode);

	attr.ia_file = file;
	attr.ia_valid |= ATTR_FILE;

	fuse_do_setattr(inode, &attr, file);
}

static inline loff_t fuse_round_up(loff_t off)
{
	return round_up(off, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
}

static ssize_t
fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	ssize_t ret = 0;
	struct file *file = iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	bool async_dio = ff->fc->async_dio;
	loff_t pos = 0;
	struct inode *inode;
	loff_t i_size;
	size_t count = iov_iter_count(iter);
	struct fuse_io_priv *io;
	bool is_sync = is_sync_kiocb(iocb);

	pos = offset;
	inode = file->f_mapping->host;
	i_size = i_size_read(inode);

	if ((iov_iter_rw(iter) == READ) && (offset > i_size))
		return 0;

	/* optimization for short read */
	if (async_dio && iov_iter_rw(iter) != WRITE && offset + count > i_size) {
		if (offset >= i_size)
			return 0;
		iov_iter_truncate(iter, fuse_round_up(i_size - offset));
		count = iov_iter_count(iter);
	}

	io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
	if (!io)
		return -ENOMEM;
	spin_lock_init(&io->lock);
	kref_init(&io->refcnt);
	io->reqs = 1;
	io->bytes = -1;
	io->size = 0;
	io->offset = offset;
	io->write = (iov_iter_rw(iter) == WRITE);
	io->err = 0;
	io->file = file;
	/*
	 * By default, we want to optimize all I/Os with async request
	 * submission to the client filesystem if supported.
	 */
	io->async = async_dio;
	io->iocb = iocb;

	/*
	 * We cannot asynchronously extend the size of a file. We have no method
	 * to wait on real async I/O requests, so we must submit this request
	 * synchronously.
	 */
	if (!is_sync && (offset + count > i_size) &&
	    iov_iter_rw(iter) == WRITE)
		io->async = false;

	if (io->async && is_sync) {
		/*
		 * Additional reference to keep io around after
		 * calling fuse_aio_complete()
		 */
		kref_get(&io->refcnt);
		io->done = &wait;
	}

	if (iov_iter_rw(iter) == WRITE) {
		ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
		fuse_invalidate_attr(inode);
	} else {
		ret = __fuse_direct_read(io, iter, &pos);
	}

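	/*
	 * For a sync kiocb that was processed asynchronously, the extra
	 * reference taken above keeps io alive past fuse_aio_complete();
	 * block on the on-stack completion and collect the final result.
	 * A non-sync kiocb is simply reported back as queued.
	 */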
	if (io->async) {
		fuse_aio_complete(io, ret < 0 ? ret : 0, -1);

		/* we have a non-extending, async request, so return */
		if (!is_sync)
			return -EIOCBQUEUED;

		wait_for_completion(&wait);
		ret = fuse_get_res_by_io(io);
	}

	kref_put(&io->refcnt, fuse_io_release);

	if (iov_iter_rw(iter) == WRITE) {
		if (ret > 0)
			fuse_write_update_size(inode, pos);
		else if (ret < 0 && offset + count > i_size)
			fuse_do_truncate(file);
	}

	return ret;
}

static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
				loff_t length)
{
	struct fuse_file *ff = file->private_data;
	struct inode *inode = file_inode(file);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_conn *fc = ff->fc;
	FUSE_ARGS(args);
	struct fuse_fallocate_in inarg = {
		.fh = ff->fh,
		.offset = offset,
		.length = length,
		.mode = mode
	};
	int err;
	bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) ||
			  (mode & FALLOC_FL_PUNCH_HOLE);

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (fc->no_fallocate)
		return -EOPNOTSUPP;

	if (lock_inode) {
		mutex_lock(&inode->i_mutex);
		if (mode & FALLOC_FL_PUNCH_HOLE) {
			loff_t endbyte = offset + length - 1;
			err = filemap_write_and_wait_range(inode->i_mapping,
							   offset, endbyte);
			if (err)
				goto out;

			fuse_sync_writes(inode);
		}
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE))
		set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	args.in.h.opcode = FUSE_FALLOCATE;
	args.in.h.nodeid = ff->nodeid;
	args.in.numargs = 1;
	args.in.args[0].size = sizeof(inarg);
	args.in.args[0].value = &inarg;
	err = fuse_simple_request(fc, &args);
	if (err == -ENOSYS) {
		fc->no_fallocate = 1;
		err = -EOPNOTSUPP;
	}
	if (err)
		goto out;

	/* we could have extended the file */
	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
		bool changed = fuse_write_update_size(inode, offset + length);

		if (changed && fc->writeback_cache)
			file_update_time(file);
	}

	if (mode & FALLOC_FL_PUNCH_HOLE)
		truncate_pagecache_range(inode, offset, offset + length - 1);

	fuse_invalidate_attr(inode);

out:
	if (!(mode & FALLOC_FL_KEEP_SIZE))
		clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

	if (lock_inode)
		mutex_unlock(&inode->i_mutex);

	return err;
}

static const struct file_operations fuse_file_operations = {
	.llseek		= fuse_file_llseek,
	.read_iter	= fuse_file_read_iter,
	.write_iter	= fuse_file_write_iter,
	.mmap		= fuse_file_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.splice_read	= generic_file_splice_read,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
	.fallocate	= fuse_file_fallocate,
};

static const struct file_operations fuse_direct_io_file_operations = {
	.llseek		= fuse_file_llseek,
	.read_iter	= fuse_direct_read_iter,
	.write_iter	= fuse_direct_write_iter,
	.mmap		= fuse_direct_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.unlocked_ioctl	= fuse_file_ioctl,
	.compat_ioctl	= fuse_file_compat_ioctl,
	.poll		= fuse_file_poll,
	.fallocate	= fuse_file_fallocate,
	/* no splice_read */
};

static const struct address_space_operations fuse_file_aops = {
	.readpage	= fuse_readpage,
	.writepage	= fuse_writepage,
	.writepages	= fuse_writepages,
	.launder_page	= fuse_launder_page,
	.readpages	= fuse_readpages,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.bmap		= fuse_bmap,
	.direct_IO	= fuse_direct_IO,
	.write_begin	= fuse_write_begin,
	.write_end	= fuse_write_end,
};

void fuse_init_file_inode(struct inode *inode)
{
	inode->i_fop = &fuse_file_operations;
	inode->i_data.a_ops = &fuse_file_aops;
}