This source file includes the following definitions:
- nfs_commitdata_alloc
- nfs_commit_free
- nfs_writehdr_alloc
- nfs_writehdr_free
- nfs_io_completion_alloc
- nfs_io_completion_init
- nfs_io_completion_release
- nfs_io_completion_get
- nfs_io_completion_put
- nfs_page_private_request
- nfs_page_find_private_request
- nfs_page_find_swap_request
- nfs_page_find_head_request
- nfs_grow_file
- nfs_set_pageerror
- nfs_mapping_set_error
- nfs_page_group_search_locked
- nfs_page_group_covers_page
- nfs_mark_uptodate
- wb_priority
- nfs_set_page_writeback
- nfs_end_page_writeback
- nfs_unroll_locks
- nfs_destroy_unlinked_subrequests
- nfs_lock_and_join_requests
- nfs_write_error
- nfs_page_async_flush
- nfs_do_writepage
- nfs_writepage_locked
- nfs_writepage
- nfs_writepages_callback
- nfs_io_completion_commit
- nfs_writepages
- nfs_inode_add_request
- nfs_inode_remove_request
- nfs_mark_request_dirty
- nfs_page_search_commits_for_head_request_locked
- nfs_request_add_commit_list_locked
- nfs_request_add_commit_list
- nfs_request_remove_commit_list
- nfs_init_cinfo_from_inode
- nfs_init_cinfo
- nfs_mark_request_commit
- nfs_clear_page_commit
- nfs_clear_request_commit
- nfs_write_need_commit
- nfs_async_write_init
- nfs_write_completion
- nfs_reqs_to_commit
- nfs_scan_commit_list
- nfs_scan_commit
- nfs_try_to_update_request
- nfs_setup_write_request
- nfs_writepage_setup
- nfs_flush_incompatible
- nfs_key_timeout_notify
- nfs_ctx_key_to_expire
- nfs_write_pageuptodate
- is_whole_file_wrlock
- nfs_can_extend_write
- nfs_updatepage
- flush_task_priority
- nfs_initiate_write
- nfs_redirty_request
- nfs_async_write_error
- nfs_async_write_reschedule_io
- nfs_pageio_init_write
- nfs_pageio_reset_write_mds
- nfs_commit_prepare
- nfs_should_remove_suid
- nfs_writeback_check_extend
- nfs_writeback_update_inode
- nfs_writeback_done
- nfs_writeback_result
- wait_on_commit
- nfs_commit_begin
- nfs_commit_end
- nfs_commitdata_release
- nfs_initiate_commit
- nfs_get_lwb
- nfs_init_commit
- nfs_retry_commit
- nfs_commit_resched_write
- nfs_commit_list
- nfs_commit_done
- nfs_commit_release_pages
- nfs_commit_release
- nfs_generic_commit_list
- __nfs_commit_inode
- nfs_commit_inode
- nfs_write_inode
- nfs_filemap_write_and_wait_range
- nfs_wb_all
- nfs_wb_page_cancel
- nfs_wb_page
- nfs_migrate_page
- nfs_init_writepagecache
- nfs_destroy_writepagecache
1
2
3
4
5
6
7
8
9
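/*
 * fs/nfs/write.c
 *
 * Write file data over NFS: buffered write handling, page writeback,
 * and the COMMIT processing required by unstable NFS writes.
 */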
10 #include <linux/types.h>
11 #include <linux/slab.h>
12 #include <linux/mm.h>
13 #include <linux/pagemap.h>
14 #include <linux/file.h>
15 #include <linux/writeback.h>
16 #include <linux/swap.h>
17 #include <linux/migrate.h>
18
19 #include <linux/sunrpc/clnt.h>
20 #include <linux/nfs_fs.h>
21 #include <linux/nfs_mount.h>
22 #include <linux/nfs_page.h>
23 #include <linux/backing-dev.h>
24 #include <linux/export.h>
25 #include <linux/freezer.h>
26 #include <linux/wait.h>
27 #include <linux/iversion.h>
28
29 #include <linux/uaccess.h>
30 #include <linux/sched/mm.h>
31
32 #include "delegation.h"
33 #include "internal.h"
34 #include "iostat.h"
35 #include "nfs4_fs.h"
36 #include "fscache.h"
37 #include "pnfs.h"
38
39 #include "nfstrace.h"
40
41 #define NFSDBG_FACILITY NFSDBG_PAGECACHE
42
43 #define MIN_POOL_WRITE (32)
44 #define MIN_POOL_COMMIT (4)
45
46 struct nfs_io_completion {
47 void (*complete)(void *data);
48 void *data;
49 struct kref refcount;
50 };
51
52
53
54
55 static void nfs_redirty_request(struct nfs_page *req);
56 static const struct rpc_call_ops nfs_commit_ops;
57 static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
58 static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
59 static const struct nfs_rw_ops nfs_rw_write_ops;
60 static void nfs_inode_remove_request(struct nfs_page *req);
61 static void nfs_clear_request_commit(struct nfs_page *req);
62 static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
63 struct inode *inode);
64 static struct nfs_page *
65 nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
66 struct page *page);
67
68 static struct kmem_cache *nfs_wdata_cachep;
69 static mempool_t *nfs_wdata_mempool;
70 static struct kmem_cache *nfs_cdata_cachep;
71 static mempool_t *nfs_commit_mempool;
72
73 struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail)
74 {
75 struct nfs_commit_data *p;
76
77 if (never_fail)
78 p = mempool_alloc(nfs_commit_mempool, GFP_NOIO);
79 else {
80
81
82
83
84
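/*
 * Reclaim is allowed here, but we must not sleep waiting on the mempool:
 * try a non-waiting mempool allocation first, then fall back to a slab
 * allocation that will not retry.
 */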
85 p = mempool_alloc(nfs_commit_mempool, GFP_NOWAIT);
86 if (!p)
87 p = kmem_cache_alloc(nfs_cdata_cachep, GFP_NOIO |
88 __GFP_NOWARN | __GFP_NORETRY);
89 if (!p)
90 return NULL;
91 }
92
93 memset(p, 0, sizeof(*p));
94 INIT_LIST_HEAD(&p->pages);
95 return p;
96 }
97 EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);
98
99 void nfs_commit_free(struct nfs_commit_data *p)
100 {
101 mempool_free(p, nfs_commit_mempool);
102 }
103 EXPORT_SYMBOL_GPL(nfs_commit_free);
104
105 static struct nfs_pgio_header *nfs_writehdr_alloc(void)
106 {
107 struct nfs_pgio_header *p = mempool_alloc(nfs_wdata_mempool, GFP_KERNEL);
108
109 memset(p, 0, sizeof(*p));
110 p->rw_mode = FMODE_WRITE;
111 return p;
112 }
113
114 static void nfs_writehdr_free(struct nfs_pgio_header *hdr)
115 {
116 mempool_free(hdr, nfs_wdata_mempool);
117 }
118
119 static struct nfs_io_completion *nfs_io_completion_alloc(gfp_t gfp_flags)
120 {
121 return kmalloc(sizeof(struct nfs_io_completion), gfp_flags);
122 }
123
124 static void nfs_io_completion_init(struct nfs_io_completion *ioc,
125 void (*complete)(void *), void *data)
126 {
127 ioc->complete = complete;
128 ioc->data = data;
129 kref_init(&ioc->refcount);
130 }
131
132 static void nfs_io_completion_release(struct kref *kref)
133 {
134 struct nfs_io_completion *ioc = container_of(kref,
135 struct nfs_io_completion, refcount);
136 ioc->complete(ioc->data);
137 kfree(ioc);
138 }
139
140 static void nfs_io_completion_get(struct nfs_io_completion *ioc)
141 {
142 if (ioc != NULL)
143 kref_get(&ioc->refcount);
144 }
145
146 static void nfs_io_completion_put(struct nfs_io_completion *ioc)
147 {
148 if (ioc != NULL)
149 kref_put(&ioc->refcount, nfs_io_completion_release);
150 }
151
152 static struct nfs_page *
153 nfs_page_private_request(struct page *page)
154 {
155 if (!PagePrivate(page))
156 return NULL;
157 return (struct nfs_page *)page_private(page);
158 }
159
160
161
162
163
164
165
166
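/*
 * nfs_page_find_private_request - look up the write request stashed in
 * page->private and return it with an extra reference, taken under the
 * mapping's private_lock. Returns NULL if the page has no request.
 */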
167 static struct nfs_page *
168 nfs_page_find_private_request(struct page *page)
169 {
170 struct address_space *mapping = page_file_mapping(page);
171 struct nfs_page *req;
172
173 if (!PagePrivate(page))
174 return NULL;
175 spin_lock(&mapping->private_lock);
176 req = nfs_page_private_request(page);
177 if (req) {
178 WARN_ON_ONCE(req->wb_head != req);
179 kref_get(&req->wb_kref);
180 }
181 spin_unlock(&mapping->private_lock);
182 return req;
183 }
184
185 static struct nfs_page *
186 nfs_page_find_swap_request(struct page *page)
187 {
188 struct inode *inode = page_file_mapping(page)->host;
189 struct nfs_inode *nfsi = NFS_I(inode);
190 struct nfs_page *req = NULL;
191 if (!PageSwapCache(page))
192 return NULL;
193 mutex_lock(&nfsi->commit_mutex);
194 if (PageSwapCache(page)) {
195 req = nfs_page_search_commits_for_head_request_locked(nfsi,
196 page);
197 if (req) {
198 WARN_ON_ONCE(req->wb_head != req);
199 kref_get(&req->wb_kref);
200 }
201 }
202 mutex_unlock(&nfsi->commit_mutex);
203 return req;
204 }
205
206
207
208
209
210
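/*
 * nfs_page_find_head_request - find the head write request for @page,
 * whether it is attached via page->private or queued for a swapcache
 * page. Returns the request with a reference held, or NULL.
 */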
211 static struct nfs_page *nfs_page_find_head_request(struct page *page)
212 {
213 struct nfs_page *req;
214
215 req = nfs_page_find_private_request(page);
216 if (!req)
217 req = nfs_page_find_swap_request(page);
218 return req;
219 }
220
221
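/* Adjust the file length if we are writing beyond the current end of file. */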
222 static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
223 {
224 struct inode *inode = page_file_mapping(page)->host;
225 loff_t end, i_size;
226 pgoff_t end_index;
227
228 spin_lock(&inode->i_lock);
229 i_size = i_size_read(inode);
230 end_index = (i_size - 1) >> PAGE_SHIFT;
231 if (i_size > 0 && page_index(page) < end_index)
232 goto out;
233 end = page_file_offset(page) + ((loff_t)offset+count);
234 if (i_size >= end)
235 goto out;
236 i_size_write(inode, end);
237 NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE;
238 nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
239 out:
240 spin_unlock(&inode->i_lock);
241 }
242
243
244 static void nfs_set_pageerror(struct address_space *mapping)
245 {
246 struct inode *inode = mapping->host;
247
248 nfs_zap_mapping(mapping->host, mapping);
249
250 spin_lock(&inode->i_lock);
251 NFS_I(inode)->cache_validity |= NFS_INO_REVAL_FORCED |
252 NFS_INO_REVAL_PAGECACHE |
253 NFS_INO_INVALID_SIZE;
254 spin_unlock(&inode->i_lock);
255 }
256
257 static void nfs_mapping_set_error(struct page *page, int error)
258 {
259 SetPageError(page);
260 mapping_set_error(page_file_mapping(page), error);
261 }
262
263
264
265
266
267
268
269
270
271
272
273
274
275
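/*
 * nfs_page_group_search_locked - walk the page group headed by @head and
 * return the first request whose byte range contains @page_offset, or
 * NULL if none does. Caller must hold the page group lock.
 */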
276 static struct nfs_page *
277 nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset)
278 {
279 struct nfs_page *req;
280
281 req = head;
282 do {
283 if (page_offset >= req->wb_pgbase &&
284 page_offset < (req->wb_pgbase + req->wb_bytes))
285 return req;
286
287 req = req->wb_this_page;
288 } while (req != head);
289
290 return NULL;
291 }
292
293
294
295
296
297
298
299
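/*
 * nfs_page_group_covers_page - return true if the requests in this page
 * group together cover the whole page, false otherwise.
 */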
300 static bool nfs_page_group_covers_page(struct nfs_page *req)
301 {
302 struct nfs_page *tmp;
303 unsigned int pos = 0;
304 unsigned int len = nfs_page_length(req->wb_page);
305
306 nfs_page_group_lock(req);
307
308 for (;;) {
309 tmp = nfs_page_group_search_locked(req->wb_head, pos);
310 if (!tmp)
311 break;
312 pos = tmp->wb_pgbase + tmp->wb_bytes;
313 }
314
315 nfs_page_group_unlock(req);
316 return pos >= len;
317 }
318
319
320
321
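/* Mark the page uptodate once its page group covers the entire page. */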
322 static void nfs_mark_uptodate(struct nfs_page *req)
323 {
324 if (PageUptodate(req->wb_page))
325 return;
326 if (!nfs_page_group_covers_page(req))
327 return;
328 SetPageUptodate(req->wb_page);
329 }
330
331 static int wb_priority(struct writeback_control *wbc)
332 {
333 int ret = 0;
334
335 if (wbc->sync_mode == WB_SYNC_ALL)
336 ret = FLUSH_COND_STABLE;
337 return ret;
338 }
339
340
341
342
343
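/*
 * NFS writeback congestion control.
 */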
344 int nfs_congestion_kb;
345
346 #define NFS_CONGESTION_ON_THRESH (nfs_congestion_kb >> (PAGE_SHIFT-10))
347 #define NFS_CONGESTION_OFF_THRESH \
348 (NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
349
350 static void nfs_set_page_writeback(struct page *page)
351 {
352 struct inode *inode = page_file_mapping(page)->host;
353 struct nfs_server *nfss = NFS_SERVER(inode);
354 int ret = test_set_page_writeback(page);
355
356 WARN_ON_ONCE(ret != 0);
357
358 if (atomic_long_inc_return(&nfss->writeback) >
359 NFS_CONGESTION_ON_THRESH)
360 set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
361 }
362
363 static void nfs_end_page_writeback(struct nfs_page *req)
364 {
365 struct inode *inode = page_file_mapping(req->wb_page)->host;
366 struct nfs_server *nfss = NFS_SERVER(inode);
367 bool is_done;
368
369 is_done = nfs_page_group_sync_on_bit(req, PG_WB_END);
370 nfs_unlock_request(req);
371 if (!is_done)
372 return;
373
374 end_page_writeback(req->wb_page);
375 if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
376 clear_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
377 }
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
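/*
 * nfs_unroll_locks - back out of a failed join: unlock and release the
 * subrequests locked so far, from the head up to (but not including) @req.
 */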
393 static void
394 nfs_unroll_locks(struct inode *inode, struct nfs_page *head,
395 struct nfs_page *req)
396 {
397 struct nfs_page *tmp;
398
399
400 for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) {
401 if (!kref_read(&tmp->wb_kref))
402 continue;
403 nfs_unlock_and_release_request(tmp);
404 }
405 }
406
407
408
409
410
411
412
413
414
415
416
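/*
 * nfs_destroy_unlinked_subrequests - tear down subrequests that have been
 * unlinked from their page group: detach each from the old head, drop the
 * references it held, and free or release it.
 */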
417 static void
418 nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
419 struct nfs_page *old_head,
420 struct inode *inode)
421 {
422 while (destroy_list) {
423 struct nfs_page *subreq = destroy_list;
424
425 destroy_list = (subreq->wb_this_page == old_head) ?
426 NULL : subreq->wb_this_page;
427
428 WARN_ON_ONCE(old_head != subreq->wb_head);
429
430
431 subreq->wb_this_page = subreq;
432
433 clear_bit(PG_REMOVE, &subreq->wb_flags);
434
435
436 if (!kref_read(&subreq->wb_kref)) {
437
438 if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags))
439 nfs_free_request(subreq);
440 continue;
441 }
442
443 subreq->wb_head = subreq;
444 nfs_release_request(old_head);
445
446 if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) {
447 nfs_release_request(subreq);
448 atomic_long_dec(&NFS_I(inode)->nrequests);
449 }
450
451
452
453 nfs_unlock_and_release_request(subreq);
454 }
455 }
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
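/*
 * nfs_lock_and_join_requests - find the head request for @page, lock the
 * whole page group, merge all subrequest byte ranges into the head and
 * clear any pending commit state. Returns the locked head request with a
 * reference held, NULL if the page has no requests, or an ERR_PTR.
 */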
475 static struct nfs_page *
476 nfs_lock_and_join_requests(struct page *page)
477 {
478 struct inode *inode = page_file_mapping(page)->host;
479 struct nfs_page *head, *subreq;
480 struct nfs_page *destroy_list = NULL;
481 unsigned int total_bytes;
482 int ret;
483
484 try_again:
485
486
487
488
489
490 head = nfs_page_find_head_request(page);
491 if (!head)
492 return NULL;
493
494
495 if (!nfs_lock_request(head)) {
496 ret = nfs_wait_on_request(head);
497 nfs_release_request(head);
498 if (ret < 0)
499 return ERR_PTR(ret);
500 goto try_again;
501 }
502
503
504 if (head != nfs_page_private_request(page) && !PageSwapCache(page)) {
505 nfs_unlock_and_release_request(head);
506 goto try_again;
507 }
508
509 ret = nfs_page_group_lock(head);
510 if (ret < 0)
511 goto release_request;
512
513
514 total_bytes = head->wb_bytes;
515 for (subreq = head->wb_this_page; subreq != head;
516 subreq = subreq->wb_this_page) {
517
518 if (!kref_get_unless_zero(&subreq->wb_kref)) {
519 if (subreq->wb_offset == head->wb_offset + total_bytes)
520 total_bytes += subreq->wb_bytes;
521 continue;
522 }
523
524 while (!nfs_lock_request(subreq)) {
525
526
527
528
529 nfs_page_group_unlock(head);
530 ret = nfs_wait_on_request(subreq);
531 if (!ret)
532 ret = nfs_page_group_lock(head);
533 if (ret < 0) {
534 nfs_unroll_locks(inode, head, subreq);
535 nfs_release_request(subreq);
536 goto release_request;
537 }
538 }
539
540
541
542
543 if (subreq->wb_offset == (head->wb_offset + total_bytes)) {
544
545 total_bytes += subreq->wb_bytes;
546 } else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset ||
547 ((subreq->wb_offset + subreq->wb_bytes) >
548 (head->wb_offset + total_bytes)))) {
549 nfs_page_group_unlock(head);
550 nfs_unroll_locks(inode, head, subreq);
551 nfs_unlock_and_release_request(subreq);
552 ret = -EIO;
553 goto release_request;
554 }
555 }
556
557
558
559 subreq = head;
560 do {
561 nfs_clear_request_commit(subreq);
562 subreq = subreq->wb_this_page;
563 } while (subreq != head);
564
565
566 if (head->wb_this_page != head) {
567
568 destroy_list = head->wb_this_page;
569 head->wb_this_page = head;
570
571
572
573 head->wb_bytes = total_bytes;
574 }
575
576
577 if (test_and_clear_bit(PG_REMOVE, &head->wb_flags)) {
578 set_bit(PG_INODE_REF, &head->wb_flags);
579 kref_get(&head->wb_kref);
580 atomic_long_inc(&NFS_I(inode)->nrequests);
581 }
582
583 nfs_page_group_unlock(head);
584
585 nfs_destroy_unlinked_subrequests(destroy_list, head, inode);
586
587
588 if (!(PagePrivate(page) || PageSwapCache(page))) {
589 nfs_unlock_and_release_request(head);
590 return NULL;
591 }
592
593
594
595 return head;
596
597 release_request:
598 nfs_unlock_and_release_request(head);
599 return ERR_PTR(ret);
600 }
601
602 static void nfs_write_error(struct nfs_page *req, int error)
603 {
604 nfs_set_pageerror(page_file_mapping(req->wb_page));
605 nfs_mapping_set_error(req->wb_page, error);
606 nfs_inode_remove_request(req);
607 nfs_end_page_writeback(req);
608 nfs_release_request(req);
609 }
610
611
612
613
614
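/*
 * Find the write request associated with @page and prepare to flush it:
 * mark the page as under writeback and add the request to the pageio
 * descriptor, redirtying or failing the request on error.
 */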
615 static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
616 struct page *page)
617 {
618 struct nfs_page *req;
619 int ret = 0;
620
621 req = nfs_lock_and_join_requests(page);
622 if (!req)
623 goto out;
624 ret = PTR_ERR(req);
625 if (IS_ERR(req))
626 goto out;
627
628 nfs_set_page_writeback(page);
629 WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
630
631
632 ret = pgio->pg_error;
633 if (nfs_error_is_fatal_on_server(ret))
634 goto out_launder;
635
636 ret = 0;
637 if (!nfs_pageio_add_request(pgio, req)) {
638 ret = pgio->pg_error;
639
640
641
642 if (nfs_error_is_fatal(ret)) {
643 if (nfs_error_is_fatal_on_server(ret))
644 goto out_launder;
645 } else
646 ret = -EAGAIN;
647 nfs_redirty_request(req);
648 pgio->pg_error = 0;
649 } else
650 nfs_add_stats(page_file_mapping(page)->host,
651 NFSIOS_WRITEPAGES, 1);
652 out:
653 return ret;
654 out_launder:
655 nfs_write_error(req, ret);
656 return 0;
657 }
658
659 static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
660 struct nfs_pageio_descriptor *pgio)
661 {
662 int ret;
663
664 nfs_pageio_cond_complete(pgio, page_index(page));
665 ret = nfs_page_async_flush(pgio, page);
666 if (ret == -EAGAIN) {
667 redirty_page_for_writepage(wbc, page);
668 ret = AOP_WRITEPAGE_ACTIVATE;
669 }
670 return ret;
671 }
672
673
674
675
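/*
 * Flush out a single locked page to the server.
 */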
676 static int nfs_writepage_locked(struct page *page,
677 struct writeback_control *wbc)
678 {
679 struct nfs_pageio_descriptor pgio;
680 struct inode *inode = page_file_mapping(page)->host;
681 int err;
682
683 nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
684 nfs_pageio_init_write(&pgio, inode, 0,
685 false, &nfs_async_write_completion_ops);
686 err = nfs_do_writepage(page, wbc, &pgio);
687 pgio.pg_error = 0;
688 nfs_pageio_complete(&pgio);
689 if (err < 0)
690 return err;
691 if (nfs_error_is_fatal(pgio.pg_error))
692 return pgio.pg_error;
693 return 0;
694 }
695
696 int nfs_writepage(struct page *page, struct writeback_control *wbc)
697 {
698 int ret;
699
700 ret = nfs_writepage_locked(page, wbc);
701 if (ret != AOP_WRITEPAGE_ACTIVATE)
702 unlock_page(page);
703 return ret;
704 }
705
706 static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
707 {
708 int ret;
709
710 ret = nfs_do_writepage(page, wbc, data);
711 if (ret != AOP_WRITEPAGE_ACTIVATE)
712 unlock_page(page);
713 return ret;
714 }
715
716 static void nfs_io_completion_commit(void *inode)
717 {
718 nfs_commit_inode(inode, 0);
719 }
720
721 int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
722 {
723 struct inode *inode = mapping->host;
724 struct nfs_pageio_descriptor pgio;
725 struct nfs_io_completion *ioc;
726 int err;
727
728 nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
729
730 ioc = nfs_io_completion_alloc(GFP_KERNEL);
731 if (ioc)
732 nfs_io_completion_init(ioc, nfs_io_completion_commit, inode);
733
734 nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), false,
735 &nfs_async_write_completion_ops);
736 pgio.pg_io_completion = ioc;
737 err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
738 pgio.pg_error = 0;
739 nfs_pageio_complete(&pgio);
740 nfs_io_completion_put(ioc);
741
742 if (err < 0)
743 goto out_err;
744 err = pgio.pg_error;
745 if (nfs_error_is_fatal(err))
746 goto out_err;
747 return 0;
748 out_err:
749 return err;
750 }
751
752
753
754
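/*
 * Insert a write request into an inode: account it in the inode's request
 * count and, unless the page is in the swap cache, stash the request in
 * page->private.
 */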
755 static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
756 {
757 struct address_space *mapping = page_file_mapping(req->wb_page);
758 struct nfs_inode *nfsi = NFS_I(inode);
759
760 WARN_ON_ONCE(req->wb_this_page != req);
761
762
763 nfs_lock_request(req);
764
765
766
767
768
769 spin_lock(&mapping->private_lock);
770 if (!nfs_have_writebacks(inode) &&
771 NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
772 inode_inc_iversion_raw(inode);
773 if (likely(!PageSwapCache(req->wb_page))) {
774 set_bit(PG_MAPPED, &req->wb_flags);
775 SetPagePrivate(req->wb_page);
776 set_page_private(req->wb_page, (unsigned long)req);
777 }
778 spin_unlock(&mapping->private_lock);
779 atomic_long_inc(&nfsi->nrequests);
780
781
782
783
784 WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags));
785 kref_get(&req->wb_kref);
786 }
787
788
789
790
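/*
 * Remove a write request from an inode once the whole page group has been
 * flagged for removal.
 */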
791 static void nfs_inode_remove_request(struct nfs_page *req)
792 {
793 struct address_space *mapping = page_file_mapping(req->wb_page);
794 struct inode *inode = mapping->host;
795 struct nfs_inode *nfsi = NFS_I(inode);
796 struct nfs_page *head;
797
798 if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
799 head = req->wb_head;
800
801 spin_lock(&mapping->private_lock);
802 if (likely(head->wb_page && !PageSwapCache(head->wb_page))) {
803 set_page_private(head->wb_page, 0);
804 ClearPagePrivate(head->wb_page);
805 clear_bit(PG_MAPPED, &head->wb_flags);
806 }
807 spin_unlock(&mapping->private_lock);
808 }
809
810 if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
811 nfs_release_request(req);
812 atomic_long_dec(&nfsi->nrequests);
813 }
814 }
815
816 static void
817 nfs_mark_request_dirty(struct nfs_page *req)
818 {
819 if (req->wb_page)
820 __set_page_dirty_nobuffers(req->wb_page);
821 }
822
823
824
825
826
827
828
829
830
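/*
 * nfs_page_search_commits_for_head_request_locked
 *
 * Search the pNFS and MDS commit lists of @nfsi for a request covering
 * @page and return its head request, or NULL if none is found. The caller
 * must hold the inode's commit lock.
 */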
831 static struct nfs_page *
832 nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
833 struct page *page)
834 {
835 struct nfs_page *freq, *t;
836 struct nfs_commit_info cinfo;
837 struct inode *inode = &nfsi->vfs_inode;
838
839 nfs_init_cinfo_from_inode(&cinfo, inode);
840
841
842 freq = pnfs_search_commit_reqs(inode, &cinfo, page);
843 if (freq)
844 return freq->wb_head;
845
846
847 list_for_each_entry_safe(freq, t, &cinfo.mds->list, wb_list) {
848 if (freq->wb_page == page)
849 return freq->wb_head;
850 }
851
852 return NULL;
853 }
854
855
856
857
858
859
860
861
862
863
864
865
866
867
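/*
 * nfs_request_add_commit_list_locked - add a request to a commit list:
 * set PG_CLEAN and bump the count of requests awaiting commit. Caller
 * must hold the inode's commit lock and the nfs_page lock.
 */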
868 void
869 nfs_request_add_commit_list_locked(struct nfs_page *req, struct list_head *dst,
870 struct nfs_commit_info *cinfo)
871 {
872 set_bit(PG_CLEAN, &req->wb_flags);
873 nfs_list_add_request(req, dst);
874 atomic_long_inc(&cinfo->mds->ncommit);
875 }
876 EXPORT_SYMBOL_GPL(nfs_request_add_commit_list_locked);
877
878
879
880
881
882
883
884
885
886
887
888
889
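/*
 * nfs_request_add_commit_list - add a request to the inode's MDS commit
 * list under the commit lock and mark its page unstable for writeback
 * accounting.
 */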
890 void
891 nfs_request_add_commit_list(struct nfs_page *req, struct nfs_commit_info *cinfo)
892 {
893 mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
894 nfs_request_add_commit_list_locked(req, &cinfo->mds->list, cinfo);
895 mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
896 if (req->wb_page)
897 nfs_mark_page_unstable(req->wb_page, cinfo);
898 }
899 EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);
900
901
902
903
904
905
906
907
908
909
910
911
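/*
 * nfs_request_remove_commit_list - remove a request from its commit list
 * if PG_CLEAN is set and decrement the count of requests awaiting commit.
 * Caller must hold the commit lock and the nfs_page lock.
 */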
912 void
913 nfs_request_remove_commit_list(struct nfs_page *req,
914 struct nfs_commit_info *cinfo)
915 {
916 if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags))
917 return;
918 nfs_list_remove_request(req);
919 atomic_long_dec(&cinfo->mds->ncommit);
920 }
921 EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);
922
923 static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
924 struct inode *inode)
925 {
926 cinfo->inode = inode;
927 cinfo->mds = &NFS_I(inode)->commit_info;
928 cinfo->ds = pnfs_get_ds_info(inode);
929 cinfo->dreq = NULL;
930 cinfo->completion_ops = &nfs_commit_completion_ops;
931 }
932
933 void nfs_init_cinfo(struct nfs_commit_info *cinfo,
934 struct inode *inode,
935 struct nfs_direct_req *dreq)
936 {
937 if (dreq)
938 nfs_init_cinfo_from_dreq(cinfo, dreq);
939 else
940 nfs_init_cinfo_from_inode(cinfo, inode);
941 }
942 EXPORT_SYMBOL_GPL(nfs_init_cinfo);
943
944
945
946
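/*
 * nfs_mark_request_commit - queue a request for a COMMIT: hand it to the
 * pNFS layout driver if one claims it, otherwise add it to the MDS list.
 */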
947 void
948 nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
949 struct nfs_commit_info *cinfo, u32 ds_commit_idx)
950 {
951 if (pnfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx))
952 return;
953 nfs_request_add_commit_list(req, cinfo);
954 }
955
956 static void
957 nfs_clear_page_commit(struct page *page)
958 {
959 dec_node_page_state(page, NR_UNSTABLE_NFS);
960 dec_wb_stat(&inode_to_bdi(page_file_mapping(page)->host)->wb,
961 WB_RECLAIMABLE);
962 }
963
964
965 static void
966 nfs_clear_request_commit(struct nfs_page *req)
967 {
968 if (test_bit(PG_CLEAN, &req->wb_flags)) {
969 struct nfs_open_context *ctx = nfs_req_openctx(req);
970 struct inode *inode = d_inode(ctx->dentry);
971 struct nfs_commit_info cinfo;
972
973 nfs_init_cinfo_from_inode(&cinfo, inode);
974 mutex_lock(&NFS_I(inode)->commit_mutex);
975 if (!pnfs_clear_request_commit(req, &cinfo)) {
976 nfs_request_remove_commit_list(req, &cinfo);
977 }
978 mutex_unlock(&NFS_I(inode)->commit_mutex);
979 nfs_clear_page_commit(req->wb_page);
980 }
981 }
982
983 int nfs_write_need_commit(struct nfs_pgio_header *hdr)
984 {
985 if (hdr->verf.committed == NFS_DATA_SYNC)
986 return hdr->lseg == NULL;
987 return hdr->verf.committed != NFS_FILE_SYNC;
988 }
989
990 static void nfs_async_write_init(struct nfs_pgio_header *hdr)
991 {
992 nfs_io_completion_get(hdr->io_completion);
993 }
994
995 static void nfs_write_completion(struct nfs_pgio_header *hdr)
996 {
997 struct nfs_commit_info cinfo;
998 unsigned long bytes = 0;
999
1000 if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
1001 goto out;
1002 nfs_init_cinfo_from_inode(&cinfo, hdr->inode);
1003 while (!list_empty(&hdr->pages)) {
1004 struct nfs_page *req = nfs_list_entry(hdr->pages.next);
1005
1006 bytes += req->wb_bytes;
1007 nfs_list_remove_request(req);
1008 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
1009 (hdr->good_bytes < bytes)) {
1010 nfs_set_pageerror(page_file_mapping(req->wb_page));
1011 nfs_mapping_set_error(req->wb_page, hdr->error);
1012 goto remove_req;
1013 }
1014 if (nfs_write_need_commit(hdr)) {
1015
1016 req->wb_nio = 0;
1017 memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf));
1018 nfs_mark_request_commit(req, hdr->lseg, &cinfo,
1019 hdr->pgio_mirror_idx);
1020 goto next;
1021 }
1022 remove_req:
1023 nfs_inode_remove_request(req);
1024 next:
1025 nfs_end_page_writeback(req);
1026 nfs_release_request(req);
1027 }
1028 out:
1029 nfs_io_completion_put(hdr->io_completion);
1030 hdr->release(hdr);
1031 }
1032
1033 unsigned long
1034 nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
1035 {
1036 return atomic_long_read(&cinfo->mds->ncommit);
1037 }
1038
1039
1040 int
1041 nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
1042 struct nfs_commit_info *cinfo, int max)
1043 {
1044 struct nfs_page *req, *tmp;
1045 int ret = 0;
1046
1047 restart:
1048 list_for_each_entry_safe(req, tmp, src, wb_list) {
1049 kref_get(&req->wb_kref);
1050 if (!nfs_lock_request(req)) {
1051 int status;
1052
1053
1054 if (!list_empty(dst)) {
1055 nfs_release_request(req);
1056 continue;
1057 }
1058
1059 mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
1060 status = nfs_wait_on_request(req);
1061 nfs_release_request(req);
1062 mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
1063 if (status < 0)
1064 break;
1065 goto restart;
1066 }
1067 nfs_request_remove_commit_list(req, cinfo);
1068 clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
1069 nfs_list_add_request(req, dst);
1070 ret++;
1071 if ((ret == max) && !cinfo->dreq)
1072 break;
1073 cond_resched();
1074 }
1075 return ret;
1076 }
1077 EXPORT_SYMBOL_GPL(nfs_scan_commit_list);
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
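/*
 * nfs_scan_commit - move requests from the inode's commit lists onto @dst.
 * The moved requests are not guaranteed to form a contiguous range.
 */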
1088 int
1089 nfs_scan_commit(struct inode *inode, struct list_head *dst,
1090 struct nfs_commit_info *cinfo)
1091 {
1092 int ret = 0;
1093
1094 if (!atomic_long_read(&cinfo->mds->ncommit))
1095 return 0;
1096 mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
1097 if (atomic_long_read(&cinfo->mds->ncommit) > 0) {
1098 const int max = INT_MAX;
1099
1100 ret = nfs_scan_commit_list(&cinfo->mds->list, dst,
1101 cinfo, max);
1102 ret += pnfs_scan_commit_lists(inode, cinfo, max - ret);
1103 }
1104 mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
1105 return ret;
1106 }
1107
1108
1109
1110
1111
1112
1113
1114
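/*
 * Search for an existing write request and attempt to update it to cover
 * the new dirty region of the page. If the regions cannot be merged, the
 * existing request is flushed to the server instead.
 */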
1115 static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
1116 struct page *page,
1117 unsigned int offset,
1118 unsigned int bytes)
1119 {
1120 struct nfs_page *req;
1121 unsigned int rqend;
1122 unsigned int end;
1123 int error;
1124
1125 end = offset + bytes;
1126
1127 req = nfs_lock_and_join_requests(page);
1128 if (IS_ERR_OR_NULL(req))
1129 return req;
1130
1131 rqend = req->wb_offset + req->wb_bytes;
1132
1133
1134
1135
1136
1137
1138 if (offset > rqend || end < req->wb_offset)
1139 goto out_flushme;
1140
1141
1142 if (offset < req->wb_offset) {
1143 req->wb_offset = offset;
1144 req->wb_pgbase = offset;
1145 }
1146 if (end > rqend)
1147 req->wb_bytes = end - req->wb_offset;
1148 else
1149 req->wb_bytes = rqend - req->wb_offset;
1150 req->wb_nio = 0;
1151 return req;
1152 out_flushme:
1153
1154
1155
1156
1157
1158 nfs_mark_request_dirty(req);
1159 nfs_unlock_and_release_request(req);
1160 error = nfs_wb_page(inode, page);
1161 return (error < 0) ? ERR_PTR(error) : NULL;
1162 }
1163
1164
1165
1166
1167
1168
1169
1170
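/*
 * Try to update an existing write request for this page, or create a new
 * one and attach it to the inode. Expects the page to be locked by the
 * caller.
 */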
1171 static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
1172 struct page *page, unsigned int offset, unsigned int bytes)
1173 {
1174 struct inode *inode = page_file_mapping(page)->host;
1175 struct nfs_page *req;
1176
1177 req = nfs_try_to_update_request(inode, page, offset, bytes);
1178 if (req != NULL)
1179 goto out;
1180 req = nfs_create_request(ctx, page, offset, bytes);
1181 if (IS_ERR(req))
1182 goto out;
1183 nfs_inode_add_request(inode, req);
1184 out:
1185 return req;
1186 }
1187
1188 static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
1189 unsigned int offset, unsigned int count)
1190 {
1191 struct nfs_page *req;
1192
1193 req = nfs_setup_write_request(ctx, page, offset, count);
1194 if (IS_ERR(req))
1195 return PTR_ERR(req);
1196
1197 nfs_grow_file(page, offset, count);
1198 nfs_mark_uptodate(req);
1199 nfs_mark_request_dirty(req);
1200 nfs_unlock_and_release_request(req);
1201 return 0;
1202 }
1203
1204 int nfs_flush_incompatible(struct file *file, struct page *page)
1205 {
1206 struct nfs_open_context *ctx = nfs_file_open_context(file);
1207 struct nfs_lock_context *l_ctx;
1208 struct file_lock_context *flctx = file_inode(file)->i_flctx;
1209 struct nfs_page *req;
1210 int do_flush, status;
1211
1212
1213
1214
1215
1216
1217
1218
1219 do {
1220 req = nfs_page_find_head_request(page);
1221 if (req == NULL)
1222 return 0;
1223 l_ctx = req->wb_lock_context;
1224 do_flush = req->wb_page != page ||
1225 !nfs_match_open_context(nfs_req_openctx(req), ctx);
1226 if (l_ctx && flctx &&
1227 !(list_empty_careful(&flctx->flc_posix) &&
1228 list_empty_careful(&flctx->flc_flock))) {
1229 do_flush |= l_ctx->lockowner != current->files;
1230 }
1231 nfs_release_request(req);
1232 if (!do_flush)
1233 return 0;
1234 status = nfs_wb_page(page_file_mapping(page)->host, page);
1235 } while (status == 0);
1236 return status;
1237 }
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
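/*
 * Refuse buffered writes with -EACCES when the open context's credential
 * key is about to expire and no low-level credential has been cached yet.
 */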
1249 int
1250 nfs_key_timeout_notify(struct file *filp, struct inode *inode)
1251 {
1252 struct nfs_open_context *ctx = nfs_file_open_context(filp);
1253
1254 if (nfs_ctx_key_to_expire(ctx, inode) &&
1255 !ctx->ll_cred)
1256
1257 return -EACCES;
1258 return 0;
1259 }
1260
1261
1262
1263
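/*
 * Test whether the open context's credential key is due to expire soon,
 * caching the looked-up low-level credential in ctx->ll_cred.
 */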
1264 bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx, struct inode *inode)
1265 {
1266 struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth;
1267 struct rpc_cred *cred = ctx->ll_cred;
1268 struct auth_cred acred = {
1269 .cred = ctx->cred,
1270 };
1271
1272 if (cred && !cred->cr_ops->crmatch(&acred, cred, 0)) {
1273 put_rpccred(cred);
1274 ctx->ll_cred = NULL;
1275 cred = NULL;
1276 }
1277 if (!cred)
1278 cred = auth->au_ops->lookup_cred(auth, &acred, 0);
1279 if (!cred || IS_ERR(cred))
1280 return true;
1281 ctx->ll_cred = cred;
1282 return !!(cred->cr_ops->crkey_timeout &&
1283 cred->cr_ops->crkey_timeout(cred));
1284 }
1285
1286
1287
1288
1289
1290
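/*
 * Decide whether PageUptodate() can be trusted for write optimisations:
 * it cannot if the cached data is invalid or being invalidated, unless
 * delegated attributes are held.
 */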
1291 static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
1292 {
1293 struct nfs_inode *nfsi = NFS_I(inode);
1294
1295 if (nfs_have_delegated_attributes(inode))
1296 goto out;
1297 if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
1298 return false;
1299 smp_rmb();
1300 if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags))
1301 return false;
1302 out:
1303 if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
1304 return false;
1305 return PageUptodate(page) != 0;
1306 }
1307
1308 static bool
1309 is_whole_file_wrlock(struct file_lock *fl)
1310 {
1311 return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX &&
1312 fl->fl_type == F_WRLCK;
1313 }
1314
1315
1316
1317
1318
1319
1320
1321
1322
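/*
 * If the page is known to be up to date and no conflicting byte-range
 * locks apply (or a whole-file write lock is held), the write may be
 * extended to cover the entire page to avoid fragmenting requests.
 */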
1323 static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
1324 {
1325 int ret;
1326 struct file_lock_context *flctx = inode->i_flctx;
1327 struct file_lock *fl;
1328
1329 if (file->f_flags & O_DSYNC)
1330 return 0;
1331 if (!nfs_write_pageuptodate(page, inode))
1332 return 0;
1333 if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
1334 return 1;
1335 if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
1336 list_empty_careful(&flctx->flc_posix)))
1337 return 1;
1338
1339
1340 ret = 0;
1341 spin_lock(&flctx->flc_lock);
1342 if (!list_empty(&flctx->flc_posix)) {
1343 fl = list_first_entry(&flctx->flc_posix, struct file_lock,
1344 fl_list);
1345 if (is_whole_file_wrlock(fl))
1346 ret = 1;
1347 } else if (!list_empty(&flctx->flc_flock)) {
1348 fl = list_first_entry(&flctx->flc_flock, struct file_lock,
1349 fl_list);
1350 if (fl->fl_type == F_WRLCK)
1351 ret = 1;
1352 }
1353 spin_unlock(&flctx->flc_lock);
1354 return ret;
1355 }
1356
1357
1358
1359
1360
1361
1362
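/*
 * Update, and possibly schedule writeback of, a cached page after a
 * buffered write of @count bytes at @offset.
 */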
1363 int nfs_updatepage(struct file *file, struct page *page,
1364 unsigned int offset, unsigned int count)
1365 {
1366 struct nfs_open_context *ctx = nfs_file_open_context(file);
1367 struct address_space *mapping = page_file_mapping(page);
1368 struct inode *inode = mapping->host;
1369 int status = 0;
1370
1371 nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
1372
1373 dprintk("NFS: nfs_updatepage(%pD2 %d@%lld)\n",
1374 file, count, (long long)(page_file_offset(page) + offset));
1375
1376 if (!count)
1377 goto out;
1378
1379 if (nfs_can_extend_write(file, page, inode)) {
1380 count = max(count + offset, nfs_page_length(page));
1381 offset = 0;
1382 }
1383
1384 status = nfs_writepage_setup(ctx, page, offset, count);
1385 if (status < 0)
1386 nfs_set_pageerror(mapping);
1387 else
1388 __set_page_dirty_nobuffers(page);
1389 out:
1390 dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n",
1391 status, (long long)i_size_read(inode));
1392 return status;
1393 }
1394
1395 static int flush_task_priority(int how)
1396 {
1397 switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
1398 case FLUSH_HIGHPRI:
1399 return RPC_PRIORITY_HIGH;
1400 case FLUSH_LOWPRI:
1401 return RPC_PRIORITY_LOW;
1402 }
1403 return RPC_PRIORITY_NORMAL;
1404 }
1405
1406 static void nfs_initiate_write(struct nfs_pgio_header *hdr,
1407 struct rpc_message *msg,
1408 const struct nfs_rpc_ops *rpc_ops,
1409 struct rpc_task_setup *task_setup_data, int how)
1410 {
1411 int priority = flush_task_priority(how);
1412
1413 task_setup_data->priority = priority;
1414 rpc_ops->write_setup(hdr, msg, &task_setup_data->rpc_client);
1415 trace_nfs_initiate_write(hdr->inode, hdr->io_start, hdr->good_bytes,
1416 hdr->args.stable);
1417 }
1418
1419
1420
1421
1422
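/*
 * Called when flushing a request fails: mark it dirty again so that it is
 * retried on the next writeback pass.
 */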
1423 static void nfs_redirty_request(struct nfs_page *req)
1424 {
1425
1426 req->wb_nio++;
1427 nfs_mark_request_dirty(req);
1428 set_bit(NFS_CONTEXT_RESEND_WRITES, &nfs_req_openctx(req)->flags);
1429 nfs_end_page_writeback(req);
1430 nfs_release_request(req);
1431 }
1432
1433 static void nfs_async_write_error(struct list_head *head, int error)
1434 {
1435 struct nfs_page *req;
1436
1437 while (!list_empty(head)) {
1438 req = nfs_list_entry(head->next);
1439 nfs_list_remove_request(req);
1440 if (nfs_error_is_fatal(error))
1441 nfs_write_error(req, error);
1442 else
1443 nfs_redirty_request(req);
1444 }
1445 }
1446
1447 static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr)
1448 {
1449 nfs_async_write_error(&hdr->pages, 0);
1450 filemap_fdatawrite_range(hdr->inode->i_mapping, hdr->args.offset,
1451 hdr->args.offset + hdr->args.count - 1);
1452 }
1453
1454 static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
1455 .init_hdr = nfs_async_write_init,
1456 .error_cleanup = nfs_async_write_error,
1457 .completion = nfs_write_completion,
1458 .reschedule_io = nfs_async_write_reschedule_io,
1459 };
1460
1461 void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
1462 struct inode *inode, int ioflags, bool force_mds,
1463 const struct nfs_pgio_completion_ops *compl_ops)
1464 {
1465 struct nfs_server *server = NFS_SERVER(inode);
1466 const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;
1467
1468 #ifdef CONFIG_NFS_V4_1
1469 if (server->pnfs_curr_ld && !force_mds)
1470 pg_ops = server->pnfs_curr_ld->pg_write_ops;
1471 #endif
1472 nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_write_ops,
1473 server->wsize, ioflags);
1474 }
1475 EXPORT_SYMBOL_GPL(nfs_pageio_init_write);
1476
1477 void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
1478 {
1479 struct nfs_pgio_mirror *mirror;
1480
1481 if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
1482 pgio->pg_ops->pg_cleanup(pgio);
1483
1484 pgio->pg_ops = &nfs_pgio_rw_ops;
1485
1486 nfs_pageio_stop_mirroring(pgio);
1487
1488 mirror = &pgio->pg_mirrors[0];
1489 mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
1490 }
1491 EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);
1492
1493
1494 void nfs_commit_prepare(struct rpc_task *task, void *calldata)
1495 {
1496 struct nfs_commit_data *data = calldata;
1497
1498 NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
1499 }
1500
1501
1502
1503
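/*
 * Like should_remove_suid(), but ignoring capabilities: report which of
 * ATTR_KILL_SUID / ATTR_KILL_SGID should be applied after this write.
 */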
1504 static int nfs_should_remove_suid(const struct inode *inode)
1505 {
1506 umode_t mode = inode->i_mode;
1507 int kill = 0;
1508
1509
1510 if (unlikely(mode & S_ISUID))
1511 kill = ATTR_KILL_SUID;
1512
1513
1514
1515
1516
1517 if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
1518 kill |= ATTR_KILL_SGID;
1519
1520 if (unlikely(kill && S_ISREG(mode)))
1521 return kill;
1522
1523 return 0;
1524 }
1525
1526 static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
1527 struct nfs_fattr *fattr)
1528 {
1529 struct nfs_pgio_args *argp = &hdr->args;
1530 struct nfs_pgio_res *resp = &hdr->res;
1531 u64 size = argp->offset + resp->count;
1532
1533 if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
1534 fattr->size = size;
1535 if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) {
1536 fattr->valid &= ~NFS_ATTR_FATTR_SIZE;
1537 return;
1538 }
1539 if (size != fattr->size)
1540 return;
1541
1542 nfs_fattr_set_barrier(fattr);
1543
1544 fattr->valid |= NFS_ATTR_FATTR_SIZE;
1545 }
1546
1547 void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
1548 {
1549 struct nfs_fattr *fattr = &hdr->fattr;
1550 struct inode *inode = hdr->inode;
1551
1552 spin_lock(&inode->i_lock);
1553 nfs_writeback_check_extend(hdr, fattr);
1554 nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
1555 spin_unlock(&inode->i_lock);
1556 }
1557 EXPORT_SYMBOL_GPL(nfs_writeback_update_inode);
1558
1559
1560
1561
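/*
 * Called when the WRITE RPC completes: update statistics, warn about
 * servers that return a weaker stability level than was requested, and
 * invalidate the cached mode bits if suid/sgid needs clearing.
 */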
1562 static int nfs_writeback_done(struct rpc_task *task,
1563 struct nfs_pgio_header *hdr,
1564 struct inode *inode)
1565 {
1566 int status;
1567
1568
1569
1570
1571
1572
1573
1574
1575 status = NFS_PROTO(inode)->write_done(task, hdr);
1576 if (status != 0)
1577 return status;
1578
1579 nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, hdr->res.count);
1580 trace_nfs_writeback_done(inode, task->tk_status,
1581 hdr->args.offset, hdr->res.verf);
1582
1583 if (hdr->res.verf->committed < hdr->args.stable &&
1584 task->tk_status >= 0) {
1585
1586
1587
1588
1589
1590
1591
1592
1593 static unsigned long complain;
1594
1595
1596 if (time_before(complain, jiffies)) {
1597 dprintk("NFS: faulty NFS server %s:"
1598 " (committed = %d) != (stable = %d)\n",
1599 NFS_SERVER(inode)->nfs_client->cl_hostname,
1600 hdr->res.verf->committed, hdr->args.stable);
1601 complain = jiffies + 300 * HZ;
1602 }
1603 }
1604
1605
1606 if (nfs_should_remove_suid(inode)) {
1607 spin_lock(&inode->i_lock);
1608 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_OTHER;
1609 spin_unlock(&inode->i_lock);
1610 }
1611 return 0;
1612 }
1613
1614
1615
1616
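/*
 * Handle the result of a WRITE: on a short write, either fail the request
 * (zero bytes written) or adjust the arguments and restart the RPC for
 * the remaining bytes.
 */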
1617 static void nfs_writeback_result(struct rpc_task *task,
1618 struct nfs_pgio_header *hdr)
1619 {
1620 struct nfs_pgio_args *argp = &hdr->args;
1621 struct nfs_pgio_res *resp = &hdr->res;
1622
1623 if (resp->count < argp->count) {
1624 static unsigned long complain;
1625
1626
1627 nfs_inc_stats(hdr->inode, NFSIOS_SHORTWRITE);
1628
1629
1630 if (resp->count == 0) {
1631 if (time_before(complain, jiffies)) {
1632 printk(KERN_WARNING
1633 "NFS: Server wrote zero bytes, expected %u.\n",
1634 argp->count);
1635 complain = jiffies + 300 * HZ;
1636 }
1637 nfs_set_pgio_error(hdr, -EIO, argp->offset);
1638 task->tk_status = -EIO;
1639 return;
1640 }
1641
1642
1643 if (!task->tk_ops) {
1644 hdr->pnfs_error = -EAGAIN;
1645 return;
1646 }
1647
1648
1649 if (resp->verf->committed != NFS_UNSTABLE) {
1650
1651 hdr->mds_offset += resp->count;
1652 argp->offset += resp->count;
1653 argp->pgbase += resp->count;
1654 argp->count -= resp->count;
1655 } else {
1656
1657
1658
1659 argp->stable = NFS_FILE_SYNC;
1660 }
1661 rpc_restart_call_prepare(task);
1662 }
1663 }
1664
1665 static int wait_on_commit(struct nfs_mds_commit_info *cinfo)
1666 {
1667 return wait_var_event_killable(&cinfo->rpcs_out,
1668 !atomic_read(&cinfo->rpcs_out));
1669 }
1670
1671 static void nfs_commit_begin(struct nfs_mds_commit_info *cinfo)
1672 {
1673 atomic_inc(&cinfo->rpcs_out);
1674 }
1675
1676 static void nfs_commit_end(struct nfs_mds_commit_info *cinfo)
1677 {
1678 if (atomic_dec_and_test(&cinfo->rpcs_out))
1679 wake_up_var(&cinfo->rpcs_out);
1680 }
1681
1682 void nfs_commitdata_release(struct nfs_commit_data *data)
1683 {
1684 put_nfs_open_context(data->context);
1685 nfs_commit_free(data);
1686 }
1687 EXPORT_SYMBOL_GPL(nfs_commitdata_release);
1688
1689 int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
1690 const struct nfs_rpc_ops *nfs_ops,
1691 const struct rpc_call_ops *call_ops,
1692 int how, int flags)
1693 {
1694 struct rpc_task *task;
1695 int priority = flush_task_priority(how);
1696 struct rpc_message msg = {
1697 .rpc_argp = &data->args,
1698 .rpc_resp = &data->res,
1699 .rpc_cred = data->cred,
1700 };
1701 struct rpc_task_setup task_setup_data = {
1702 .task = &data->task,
1703 .rpc_client = clnt,
1704 .rpc_message = &msg,
1705 .callback_ops = call_ops,
1706 .callback_data = data,
1707 .workqueue = nfsiod_workqueue,
1708 .flags = RPC_TASK_ASYNC | flags,
1709 .priority = priority,
1710 };
1711
1712 nfs_ops->commit_setup(data, &msg, &task_setup_data.rpc_client);
1713 trace_nfs_initiate_commit(data);
1714
1715 dprintk("NFS: initiated commit call\n");
1716
1717 task = rpc_run_task(&task_setup_data);
1718 if (IS_ERR(task))
1719 return PTR_ERR(task);
1720 if (how & FLUSH_SYNC)
1721 rpc_wait_for_completion_task(task);
1722 rpc_put_task(task);
1723 return 0;
1724 }
1725 EXPORT_SYMBOL_GPL(nfs_initiate_commit);
1726
1727 static loff_t nfs_get_lwb(struct list_head *head)
1728 {
1729 loff_t lwb = 0;
1730 struct nfs_page *req;
1731
1732 list_for_each_entry(req, head, wb_list)
1733 if (lwb < (req_offset(req) + req->wb_bytes))
1734 lwb = req_offset(req) + req->wb_bytes;
1735
1736 return lwb;
1737 }
1738
1739
1740
1741
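/*
 * Set up the argument/reply structures for a COMMIT call covering the
 * requests on @head.
 */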
1742 void nfs_init_commit(struct nfs_commit_data *data,
1743 struct list_head *head,
1744 struct pnfs_layout_segment *lseg,
1745 struct nfs_commit_info *cinfo)
1746 {
1747 struct nfs_page *first = nfs_list_entry(head->next);
1748 struct nfs_open_context *ctx = nfs_req_openctx(first);
1749 struct inode *inode = d_inode(ctx->dentry);
1750
1751
1752
1753
1754 list_splice_init(head, &data->pages);
1755
1756 data->inode = inode;
1757 data->cred = ctx->cred;
1758 data->lseg = lseg;
1759
1760 if (lseg)
1761 data->lwb = nfs_get_lwb(&data->pages);
1762 data->mds_ops = &nfs_commit_ops;
1763 data->completion_ops = cinfo->completion_ops;
1764 data->dreq = cinfo->dreq;
1765
1766 data->args.fh = NFS_FH(data->inode);
1767
1768 data->args.offset = 0;
1769 data->args.count = 0;
1770 data->context = get_nfs_open_context(ctx);
1771 data->res.fattr = &data->fattr;
1772 data->res.verf = &data->verf;
1773 nfs_fattr_init(&data->fattr);
1774 }
1775 EXPORT_SYMBOL_GPL(nfs_init_commit);
1776
1777 void nfs_retry_commit(struct list_head *page_list,
1778 struct pnfs_layout_segment *lseg,
1779 struct nfs_commit_info *cinfo,
1780 u32 ds_commit_idx)
1781 {
1782 struct nfs_page *req;
1783
1784 while (!list_empty(page_list)) {
1785 req = nfs_list_entry(page_list->next);
1786 nfs_list_remove_request(req);
1787 nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx);
1788 if (!cinfo->dreq)
1789 nfs_clear_page_commit(req->wb_page);
1790 nfs_unlock_and_release_request(req);
1791 }
1792 }
1793 EXPORT_SYMBOL_GPL(nfs_retry_commit);
1794
1795 static void
1796 nfs_commit_resched_write(struct nfs_commit_info *cinfo,
1797 struct nfs_page *req)
1798 {
1799 __set_page_dirty_nobuffers(req->wb_page);
1800 }
1801
1802
1803
1804
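/*
 * Commit dirty pages: allocate commit data for the requests on @head and
 * send the COMMIT RPC to the server.
 */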
1805 static int
1806 nfs_commit_list(struct inode *inode, struct list_head *head, int how,
1807 struct nfs_commit_info *cinfo)
1808 {
1809 struct nfs_commit_data *data;
1810
1811
1812 if (list_empty(head))
1813 return 0;
1814
1815 data = nfs_commitdata_alloc(true);
1816
1817
1818 nfs_init_commit(data, head, NULL, cinfo);
1819 atomic_inc(&cinfo->mds->rpcs_out);
1820 return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode),
1821 data->mds_ops, how, 0);
1822 }
1823
1824
1825
1826
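/*
 * COMMIT call returned: let the protocol-specific commit_done handler
 * process the result.
 */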
1827 static void nfs_commit_done(struct rpc_task *task, void *calldata)
1828 {
1829 struct nfs_commit_data *data = calldata;
1830
1831 dprintk("NFS: %5u nfs_commit_done (status %d)\n",
1832 task->tk_pid, task->tk_status);
1833
1834
1835 NFS_PROTO(data->inode)->commit_done(task, data);
1836 trace_nfs_commit_done(data);
1837 }
1838
1839 static void nfs_commit_release_pages(struct nfs_commit_data *data)
1840 {
1841 const struct nfs_writeverf *verf = data->res.verf;
1842 struct nfs_page *req;
1843 int status = data->task.tk_status;
1844 struct nfs_commit_info cinfo;
1845 struct nfs_server *nfss;
1846
1847 while (!list_empty(&data->pages)) {
1848 req = nfs_list_entry(data->pages.next);
1849 nfs_list_remove_request(req);
1850 if (req->wb_page)
1851 nfs_clear_page_commit(req->wb_page);
1852
1853 dprintk("NFS: commit (%s/%llu %d@%lld)",
1854 nfs_req_openctx(req)->dentry->d_sb->s_id,
1855 (unsigned long long)NFS_FILEID(d_inode(nfs_req_openctx(req)->dentry)),
1856 req->wb_bytes,
1857 (long long)req_offset(req));
1858 if (status < 0) {
1859 if (req->wb_page) {
1860 nfs_mapping_set_error(req->wb_page, status);
1861 nfs_inode_remove_request(req);
1862 }
1863 dprintk_cont(", error = %d\n", status);
1864 goto next;
1865 }
1866
1867
1868
1869 if (verf->committed > NFS_UNSTABLE &&
1870 !nfs_write_verifier_cmp(&req->wb_verf, &verf->verifier)) {
1871
1872 if (req->wb_page)
1873 nfs_inode_remove_request(req);
1874 dprintk_cont(" OK\n");
1875 goto next;
1876 }
1877
1878 dprintk_cont(" mismatch\n");
1879 nfs_mark_request_dirty(req);
1880 set_bit(NFS_CONTEXT_RESEND_WRITES, &nfs_req_openctx(req)->flags);
1881 next:
1882 nfs_unlock_and_release_request(req);
1883
1884 cond_resched();
1885 }
1886 nfss = NFS_SERVER(data->inode);
1887 if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
1888 clear_bdi_congested(inode_to_bdi(data->inode), BLK_RW_ASYNC);
1889
1890 nfs_init_cinfo(&cinfo, data->inode, data->dreq);
1891 nfs_commit_end(cinfo.mds);
1892 }
1893
1894 static void nfs_commit_release(void *calldata)
1895 {
1896 struct nfs_commit_data *data = calldata;
1897
1898 data->completion_ops->completion(data);
1899 nfs_commitdata_release(calldata);
1900 }
1901
1902 static const struct rpc_call_ops nfs_commit_ops = {
1903 .rpc_call_prepare = nfs_commit_prepare,
1904 .rpc_call_done = nfs_commit_done,
1905 .rpc_release = nfs_commit_release,
1906 };
1907
1908 static const struct nfs_commit_completion_ops nfs_commit_completion_ops = {
1909 .completion = nfs_commit_release_pages,
1910 .resched_write = nfs_commit_resched_write,
1911 };
1912
1913 int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
1914 int how, struct nfs_commit_info *cinfo)
1915 {
1916 int status;
1917
1918 status = pnfs_commit_list(inode, head, how, cinfo);
1919 if (status == PNFS_NOT_ATTEMPTED)
1920 status = nfs_commit_list(inode, head, how, cinfo);
1921 return status;
1922 }
1923
1924 static int __nfs_commit_inode(struct inode *inode, int how,
1925 struct writeback_control *wbc)
1926 {
1927 LIST_HEAD(head);
1928 struct nfs_commit_info cinfo;
1929 int may_wait = how & FLUSH_SYNC;
1930 int ret, nscan;
1931
1932 nfs_init_cinfo_from_inode(&cinfo, inode);
1933 nfs_commit_begin(cinfo.mds);
1934 for (;;) {
1935 ret = nscan = nfs_scan_commit(inode, &head, &cinfo);
1936 if (ret <= 0)
1937 break;
1938 ret = nfs_generic_commit_list(inode, &head, how, &cinfo);
1939 if (ret < 0)
1940 break;
1941 ret = 0;
1942 if (wbc && wbc->sync_mode == WB_SYNC_NONE) {
1943 if (nscan < wbc->nr_to_write)
1944 wbc->nr_to_write -= nscan;
1945 else
1946 wbc->nr_to_write = 0;
1947 }
1948 if (nscan < INT_MAX)
1949 break;
1950 cond_resched();
1951 }
1952 nfs_commit_end(cinfo.mds);
1953 if (ret || !may_wait)
1954 return ret;
1955 return wait_on_commit(cinfo.mds);
1956 }
1957
1958 int nfs_commit_inode(struct inode *inode, int how)
1959 {
1960 return __nfs_commit_inode(inode, how, NULL);
1961 }
1962 EXPORT_SYMBOL_GPL(nfs_commit_inode);
1963
1964 int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
1965 {
1966 struct nfs_inode *nfsi = NFS_I(inode);
1967 int flags = FLUSH_SYNC;
1968 int ret = 0;
1969
1970 if (wbc->sync_mode == WB_SYNC_NONE) {
1971
1972 if (!atomic_long_read(&nfsi->commit_info.ncommit))
1973 goto check_requests_outstanding;
1974
1975
1976
1977
1978 if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))
1979 goto out_mark_dirty;
1980
1981
1982 flags = 0;
1983 }
1984
1985 ret = __nfs_commit_inode(inode, flags, wbc);
1986 if (!ret) {
1987 if (flags & FLUSH_SYNC)
1988 return 0;
1989 } else if (atomic_long_read(&nfsi->commit_info.ncommit))
1990 goto out_mark_dirty;
1991
1992 check_requests_outstanding:
1993 if (!atomic_read(&nfsi->commit_info.rpcs_out))
1994 return ret;
1995 out_mark_dirty:
1996 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1997 return ret;
1998 }
1999 EXPORT_SYMBOL_GPL(nfs_write_inode);
2000
2001
2002
2003
2004
2005
2006
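/*
 * Wrapper around filemap_write_and_wait_range() that additionally syncs
 * the inode's pNFS state so the data becomes visible on the server.
 */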
2007 int nfs_filemap_write_and_wait_range(struct address_space *mapping,
2008 loff_t lstart, loff_t lend)
2009 {
2010 int ret;
2011
2012 ret = filemap_write_and_wait_range(mapping, lstart, lend);
2013 if (ret == 0)
2014 ret = pnfs_sync_inode(mapping->host, true);
2015 return ret;
2016 }
2017 EXPORT_SYMBOL_GPL(nfs_filemap_write_and_wait_range);
2018
2019
2020
2021
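/*
 * Flush all dirty pages, and any outstanding commits, for @inode out to
 * the server.
 */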
2022 int nfs_wb_all(struct inode *inode)
2023 {
2024 int ret;
2025
2026 trace_nfs_writeback_inode_enter(inode);
2027
2028 ret = filemap_write_and_wait(inode->i_mapping);
2029 if (ret)
2030 goto out;
2031 ret = nfs_commit_inode(inode, FLUSH_SYNC);
2032 if (ret < 0)
2033 goto out;
2034 pnfs_sync_inode(inode, true);
2035 ret = 0;
2036
2037 out:
2038 trace_nfs_writeback_inode_exit(inode, ret);
2039 return ret;
2040 }
2041 EXPORT_SYMBOL_GPL(nfs_wb_all);
2042
2043 int nfs_wb_page_cancel(struct inode *inode, struct page *page)
2044 {
2045 struct nfs_page *req;
2046 int ret = 0;
2047
2048 wait_on_page_writeback(page);
2049
2050
2051
2052 req = nfs_lock_and_join_requests(page);
2053
2054 if (IS_ERR(req)) {
2055 ret = PTR_ERR(req);
2056 } else if (req) {
2057
2058
2059
2060
2061 nfs_inode_remove_request(req);
2062 nfs_unlock_and_release_request(req);
2063 }
2064
2065 return ret;
2066 }
2067
2068
2069
2070
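/*
 * Write back and commit a single page, looping until it is neither dirty
 * nor carrying an outstanding write request.
 */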
2071 int nfs_wb_page(struct inode *inode, struct page *page)
2072 {
2073 loff_t range_start = page_file_offset(page);
2074 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
2075 struct writeback_control wbc = {
2076 .sync_mode = WB_SYNC_ALL,
2077 .nr_to_write = 0,
2078 .range_start = range_start,
2079 .range_end = range_end,
2080 };
2081 int ret;
2082
2083 trace_nfs_writeback_page_enter(inode);
2084
2085 for (;;) {
2086 wait_on_page_writeback(page);
2087 if (clear_page_dirty_for_io(page)) {
2088 ret = nfs_writepage_locked(page, &wbc);
2089 if (ret < 0)
2090 goto out_error;
2091 continue;
2092 }
2093 ret = 0;
2094 if (!PagePrivate(page))
2095 break;
2096 ret = nfs_commit_inode(inode, FLUSH_SYNC);
2097 if (ret < 0)
2098 goto out_error;
2099 }
2100 out_error:
2101 trace_nfs_writeback_page_exit(inode, ret);
2102 return ret;
2103 }
2104
2105 #ifdef CONFIG_MIGRATION
2106 int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
2107 struct page *page, enum migrate_mode mode)
2108 {
2109
2110
2111
2112
2113
2114
2115
2116
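/*
 * A page with PagePrivate set is tied to an in-flight read or write
 * request, so refuse to migrate it.
 */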
2117 if (PagePrivate(page))
2118 return -EBUSY;
2119
2120 if (!nfs_fscache_release_page(page, GFP_KERNEL))
2121 return -EBUSY;
2122
2123 return migrate_page(mapping, newpage, page, mode);
2124 }
2125 #endif
2126
2127 int __init nfs_init_writepagecache(void)
2128 {
2129 nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
2130 sizeof(struct nfs_pgio_header),
2131 0, SLAB_HWCACHE_ALIGN,
2132 NULL);
2133 if (nfs_wdata_cachep == NULL)
2134 return -ENOMEM;
2135
2136 nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
2137 nfs_wdata_cachep);
2138 if (nfs_wdata_mempool == NULL)
2139 goto out_destroy_write_cache;
2140
2141 nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
2142 sizeof(struct nfs_commit_data),
2143 0, SLAB_HWCACHE_ALIGN,
2144 NULL);
2145 if (nfs_cdata_cachep == NULL)
2146 goto out_destroy_write_mempool;
2147
2148 nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
2149 nfs_cdata_cachep);
2150 if (nfs_commit_mempool == NULL)
2151 goto out_destroy_commit_cache;
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
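/*
 * Scale the default congestion threshold with available memory
 * (16 * sqrt(total RAM pages), in kilobytes) and cap it at 256MB, so that
 * larger machines can keep more NFS writeback in flight.
 */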
2169 nfs_congestion_kb = (16*int_sqrt(totalram_pages())) << (PAGE_SHIFT-10);
2170 if (nfs_congestion_kb > 256*1024)
2171 nfs_congestion_kb = 256*1024;
2172
2173 return 0;
2174
2175 out_destroy_commit_cache:
2176 kmem_cache_destroy(nfs_cdata_cachep);
2177 out_destroy_write_mempool:
2178 mempool_destroy(nfs_wdata_mempool);
2179 out_destroy_write_cache:
2180 kmem_cache_destroy(nfs_wdata_cachep);
2181 return -ENOMEM;
2182 }
2183
2184 void nfs_destroy_writepagecache(void)
2185 {
2186 mempool_destroy(nfs_commit_mempool);
2187 kmem_cache_destroy(nfs_cdata_cachep);
2188 mempool_destroy(nfs_wdata_mempool);
2189 kmem_cache_destroy(nfs_wdata_cachep);
2190 }
2191
2192 static const struct nfs_rw_ops nfs_rw_write_ops = {
2193 .rw_alloc_header = nfs_writehdr_alloc,
2194 .rw_free_header = nfs_writehdr_free,
2195 .rw_done = nfs_writeback_done,
2196 .rw_result = nfs_writeback_result,
2197 .rw_initiate = nfs_initiate_write,
2198 };