/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;

static struct kmem_cache *nfs_rdata_cachep;

static struct nfs_pgio_header *nfs_readhdr_alloc(void)
{
	return kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);
}

static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
	kmem_cache_free(nfs_rdata_cachep, rhdr);
}

static
int nfs_return_empty_page(struct page *page)
{
	zero_user(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}

void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
			  struct inode *inode, bool force_mds,
			  const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
			server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);

void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	/* read path should never have more than one mirror */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);

int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
		       struct page *page)
{
	struct nfs_page *new;
	unsigned int len;
	struct nfs_pageio_descriptor pgio;
	struct nfs_pgio_mirror *pgm;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(ctx, page, NULL, 0, len);
	if (IS_ERR(new)) {
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);
	nfs_pageio_add_request(&pgio, new);
	nfs_pageio_complete(&pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio.pg_mirror_count != 1);

	pgm = &pgio.pg_mirrors[0];
	NFS_I(inode)->read_io += pgm->pg_bytes_written;

	return 0;
}
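/*
 * Release a read request: once the last subrequest in the page group
 * gets here, push the page to fscache if it is uptodate and unlock it;
 * always drop this request's reference.
 */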
static void nfs_readpage_release(struct nfs_page *req)
{
	struct inode *inode = d_inode(req->wb_context->dentry);

	dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(inode), req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
		if (PageUptodate(req->wb_page))
			nfs_readpage_to_fscache(inode, req->wb_page, 0);

		unlock_page(req->wb_page);
	}
	nfs_release_request(req);
}

static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
	if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
		SetPageUptodate(req->wb_page);
}

static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;
		unsigned long start = req->wb_pgbase;
		unsigned long end = req->wb_pgbase + req->wb_bytes;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			/* note: regions of the page not covered by a
			 * request are zeroed in nfs_readpage_async /
			 * readpage_async_filler */
			if (bytes > hdr->good_bytes) {
				/* nothing in this request was good, so zero
				 * the full extent of the request */
				zero_user_segment(page, start, end);

			} else if (hdr->good_bytes - bytes < req->wb_bytes) {
				/* part of this request has good bytes, but
				 * not all. zero the bad bytes */
				start += hdr->good_bytes - bytes;
				WARN_ON(start < req->wb_pgbase);
				zero_user_segment(page, start, end);
			}
		}
		bytes += req->wb_bytes;
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
			if (bytes <= hdr->good_bytes)
				nfs_page_group_set_uptodate(req);
		} else
			nfs_page_group_set_uptodate(req);
		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
out:
	hdr->release(hdr);
}
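/*
 * Prepare the RPC READ call for an I/O header: let the NFS
 * version-specific code set up the message, and add the swapfile RPC
 * flags to the task when the target inode is a swapfile.
 */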
static void nfs_initiate_read(struct nfs_pgio_header *hdr,
			      struct rpc_message *msg,
			      const struct nfs_rpc_ops *rpc_ops,
			      struct rpc_task_setup *task_setup_data, int how)
{
	struct inode *inode = hdr->inode;
	int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;

	task_setup_data->flags |= swap_flags;
	rpc_ops->read_setup(hdr, msg);
}

static void
nfs_async_read_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
}

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
	.error_cleanup = nfs_async_read_error,
	.completion = nfs_read_completion,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static int nfs_readpage_done(struct rpc_task *task,
			     struct nfs_pgio_header *hdr,
			     struct inode *inode)
{
	int status = NFS_PROTO(inode)->read_done(task, hdr);
	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);

	if (task->tk_status == -ESTALE) {
		set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
		nfs_mark_for_revalidate(inode);
	}
	return 0;
}

static void nfs_readpage_retry(struct rpc_task *task,
			       struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res *resp = &hdr->res;

	/* This is a short read! */
	nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
	/* Has the server at least made some progress? */
	if (resp->count == 0) {
		nfs_set_pgio_error(hdr, -EIO, argp->offset);
		return;
	}

	/* For non rpc-based layout drivers, retry-through-MDS */
	if (!task->tk_ops) {
		hdr->pnfs_error = -EAGAIN;
		return;
	}

	/* Yes, so retry the read at the end of the hdr */
	hdr->mds_offset += resp->count;
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	rpc_restart_call_prepare(task);
}

static void nfs_readpage_result(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	if (hdr->res.eof) {
		loff_t bound;

		bound = hdr->args.offset + hdr->res.count;
		spin_lock(&hdr->lock);
		if (bound < hdr->io_start + hdr->good_bytes) {
			set_bit(NFS_IOHDR_EOF, &hdr->flags);
			clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
			hdr->good_bytes = bound - hdr->io_start;
		}
		spin_unlock(&hdr->lock);
	} else if (hdr->res.count < hdr->args.count)
		nfs_readpage_retry(task, hdr);
}

/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  - The error flag is set for this page. This happens only when a
 *    previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page_file_mapping(page)->host;
	int error;

	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
		page, PAGE_CACHE_SIZE, page_file_index(page));
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	nfs_add_stats(inode, NFSIOS_READPAGES, 1);

	/*
	 * Try to flush any pending writes to the file..
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	error = nfs_wb_page(inode, page);
	if (error)
		goto out_unlock;
	if (PageUptodate(page))
		goto out_unlock;

	error = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	if (file == NULL) {
		error = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out_unlock;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	if (!IS_SYNC(inode)) {
		error = nfs_readpage_from_fscache(ctx, inode, page);
		if (error == 0)
			goto out;
	}

	error = nfs_readpage_async(ctx, inode, page);

out:
	put_nfs_open_context(ctx);
	return error;
out_unlock:
	unlock_page(page);
	return error;
}
struct nfs_readdesc {
	struct nfs_pageio_descriptor *pgio;
	struct nfs_open_context *ctx;
};

static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
	struct nfs_page *new;
	unsigned int len;
	int error;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);

	new = nfs_create_request(desc->ctx, page, NULL, 0, len);
	if (IS_ERR(new))
		goto out_error;

	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);
	if (!nfs_pageio_add_request(desc->pgio, new)) {
		error = desc->pgio->pg_error;
		goto out_unlock;
	}
	return 0;
out_error:
	error = PTR_ERR(new);
out_unlock:
	unlock_page(page);
	return error;
}

int nfs_readpages(struct file *filp, struct address_space *mapping,
		  struct list_head *pages, unsigned nr_pages)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_pgio_mirror *pgm;
	struct nfs_readdesc desc = {
		.pgio = &pgio,
	};
	struct inode *inode = mapping->host;
	unsigned long npages;
	int ret = -ESTALE;

	dprintk("NFS: nfs_readpages (%s/%Lu %d)\n",
		inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(inode),
		nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

	if (NFS_STALE(inode))
		goto out;

	if (filp == NULL) {
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			return -EBADF;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));

	/* attempt to read as many of the pages as possible from the cache
	 * - this returns -ENOBUFS immediately if the cookie is negative
	 */
	ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
					 pages, &nr_pages);
	if (ret == 0)
		goto read_complete; /* all pages were read */

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
	nfs_pageio_complete(&pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio.pg_mirror_count != 1);

	pgm = &pgio.pg_mirrors[0];
	NFS_I(inode)->read_io += pgm->pg_bytes_written;
	npages = (pgm->pg_bytes_written + PAGE_CACHE_SIZE - 1) >>
		 PAGE_CACHE_SHIFT;
	nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
	put_nfs_open_context(desc.ctx);
out:
	return ret;
}
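/*
 * Create the slab cache backing struct nfs_pgio_header allocations for
 * the read path; nfs_destroy_readpagecache() tears it down on unload.
 */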
int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	kmem_cache_destroy(nfs_rdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_read_ops = {
	.rw_mode		= FMODE_READ,
	.rw_alloc_header	= nfs_readhdr_alloc,
	.rw_free_header		= nfs_readhdr_free,
	.rw_done		= nfs_readpage_done,
	.rw_result		= nfs_readpage_result,
	.rw_initiate		= nfs_initiate_read,
};