1 /*
2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18 #include "xfs.h"
19 #include <linux/stddef.h>
20 #include <linux/errno.h>
21 #include <linux/gfp.h>
22 #include <linux/pagemap.h>
23 #include <linux/init.h>
24 #include <linux/vmalloc.h>
25 #include <linux/bio.h>
26 #include <linux/sysctl.h>
27 #include <linux/proc_fs.h>
28 #include <linux/workqueue.h>
29 #include <linux/percpu.h>
30 #include <linux/blkdev.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/migrate.h>
34 #include <linux/backing-dev.h>
35 #include <linux/freezer.h>
36
37 #include "xfs_format.h"
38 #include "xfs_log_format.h"
39 #include "xfs_trans_resv.h"
40 #include "xfs_sb.h"
41 #include "xfs_mount.h"
42 #include "xfs_trace.h"
43 #include "xfs_log.h"
44
45 static kmem_zone_t *xfs_buf_zone;
46
47 #ifdef XFS_BUF_LOCK_TRACKING
48 # define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid)
49 # define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1)
50 # define XB_GET_OWNER(bp) ((bp)->b_last_holder)
51 #else
52 # define XB_SET_OWNER(bp) do { } while (0)
53 # define XB_CLEAR_OWNER(bp) do { } while (0)
54 # define XB_GET_OWNER(bp) do { } while (0)
55 #endif
56
57 #define xb_to_gfp(flags) \
58 ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
59
60
61 static inline int
62 xfs_buf_is_vmapped(
63 struct xfs_buf *bp)
64 {
65 /*
66 * Return true if the buffer is vmapped.
67 *
68 * b_addr is null if the buffer is not mapped, but a single-page buffer
69 * gets its b_addr set without being vmapped, so the check has to test
70 * both b_addr and bp->b_page_count > 1.
71 */
72 return bp->b_addr && bp->b_page_count > 1;
73 }
74
75 static inline int
76 xfs_buf_vmap_len(
77 struct xfs_buf *bp)
78 {
79 return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
80 }
81
82 /*
83 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
84 * b_lru_ref count so that the buffer is freed immediately when the buffer
85 * reference count falls to zero. If the buffer is already on the LRU, we need
86 * to remove the reference that the LRU holds on the buffer.
87 *
88 * This prevents build-up of stale buffers on the LRU.
89 */
90 void
91 xfs_buf_stale(
92 struct xfs_buf *bp)
93 {
94 ASSERT(xfs_buf_islocked(bp));
95
96 bp->b_flags |= XBF_STALE;
97
98 /*
99 * Clear the delwri status so that a delwri queue walker will not
100 * flush this buffer to disk now that it is stale. The delwri queue has
101 * a reference to the buffer, so this is safe to do.
102 */
103 bp->b_flags &= ~_XBF_DELWRI_Q;
104
105 spin_lock(&bp->b_lock);
106 atomic_set(&bp->b_lru_ref, 0);
107 if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
108 (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
109 atomic_dec(&bp->b_hold);
110
111 ASSERT(atomic_read(&bp->b_hold) >= 1);
112 spin_unlock(&bp->b_lock);
113 }
114
115 static int
116 xfs_buf_get_maps(
117 struct xfs_buf *bp,
118 int map_count)
119 {
120 ASSERT(bp->b_maps == NULL);
121 bp->b_map_count = map_count;
122
123 if (map_count == 1) {
124 bp->b_maps = &bp->__b_map;
125 return 0;
126 }
127
128 bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
129 KM_NOFS);
130 if (!bp->b_maps)
131 return -ENOMEM;
132 return 0;
133 }
134
135 /*
136 * Frees b_maps if it was allocated separately from the embedded map.
137 */
138 static void
139 xfs_buf_free_maps(
140 struct xfs_buf *bp)
141 {
142 if (bp->b_maps != &bp->__b_map) {
143 kmem_free(bp->b_maps);
144 bp->b_maps = NULL;
145 }
146 }
147
148 struct xfs_buf *
149 _xfs_buf_alloc(
150 struct xfs_buftarg *target,
151 struct xfs_buf_map *map,
152 int nmaps,
153 xfs_buf_flags_t flags)
154 {
155 struct xfs_buf *bp;
156 int error;
157 int i;
158
159 bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
160 if (unlikely(!bp))
161 return NULL;
162
163 /*
164 * We don't want certain flags to appear in b_flags unless they are
165 * specifically set by later operations on the buffer.
166 */
167 flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
168
169 atomic_set(&bp->b_hold, 1);
170 atomic_set(&bp->b_lru_ref, 1);
171 init_completion(&bp->b_iowait);
172 INIT_LIST_HEAD(&bp->b_lru);
173 INIT_LIST_HEAD(&bp->b_list);
174 RB_CLEAR_NODE(&bp->b_rbnode);
175 sema_init(&bp->b_sema, 0); /* held, no waiters */
176 spin_lock_init(&bp->b_lock);
177 XB_SET_OWNER(bp);
178 bp->b_target = target;
179 bp->b_flags = flags;
180
181 /*
182 * Set length and io_length to the same value initially.
183 * I/O routines should use io_length, which will be the same in
184 * most cases but may be reset (e.g. XFS recovery).
185 */
186 error = xfs_buf_get_maps(bp, nmaps);
187 if (error) {
188 kmem_zone_free(xfs_buf_zone, bp);
189 return NULL;
190 }
191
192 bp->b_bn = map[0].bm_bn;
193 bp->b_length = 0;
194 for (i = 0; i < nmaps; i++) {
195 bp->b_maps[i].bm_bn = map[i].bm_bn;
196 bp->b_maps[i].bm_len = map[i].bm_len;
197 bp->b_length += map[i].bm_len;
198 }
199 bp->b_io_length = bp->b_length;
200
201 atomic_set(&bp->b_pin_count, 0);
202 init_waitqueue_head(&bp->b_waiters);
203
204 XFS_STATS_INC(xb_create);
205 trace_xfs_buf_init(bp, _RET_IP_);
206
207 return bp;
208 }
209
210 /*
211 * Allocate a page array capable of holding a specified number
212 * of pages, and point the page buf at it.
213 */
214 STATIC int
215 _xfs_buf_get_pages(
216 xfs_buf_t *bp,
217 int page_count)
218 {
219 /* Make sure that we have a page list */
220 if (bp->b_pages == NULL) {
221 bp->b_page_count = page_count;
222 if (page_count <= XB_PAGES) {
223 bp->b_pages = bp->b_page_array;
224 } else {
225 bp->b_pages = kmem_alloc(sizeof(struct page *) *
226 page_count, KM_NOFS);
227 if (bp->b_pages == NULL)
228 return -ENOMEM;
229 }
230 memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
231 }
232 return 0;
233 }
234
235 /*
236 * Frees b_pages if it was allocated.
237 */
238 STATIC void
239 _xfs_buf_free_pages(
240 xfs_buf_t *bp)
241 {
242 if (bp->b_pages != bp->b_page_array) {
243 kmem_free(bp->b_pages);
244 bp->b_pages = NULL;
245 }
246 }
247
248 /*
249 * Releases the specified buffer.
250 *
251 * The modification state of any associated pages is left unchanged.
252 * The buffer must not be on any hash - use xfs_buf_rele instead for
253 * hashed and refcounted buffers.
254 */
255 void
256 xfs_buf_free(
257 xfs_buf_t *bp)
258 {
259 trace_xfs_buf_free(bp, _RET_IP_);
260
261 ASSERT(list_empty(&bp->b_lru));
262
263 if (bp->b_flags & _XBF_PAGES) {
264 uint i;
265
266 if (xfs_buf_is_vmapped(bp))
267 vm_unmap_ram(bp->b_addr - bp->b_offset,
268 bp->b_page_count);
269
270 for (i = 0; i < bp->b_page_count; i++) {
271 struct page *page = bp->b_pages[i];
272
273 __free_page(page);
274 }
275 } else if (bp->b_flags & _XBF_KMEM)
276 kmem_free(bp->b_addr);
277 _xfs_buf_free_pages(bp);
278 xfs_buf_free_maps(bp);
279 kmem_zone_free(xfs_buf_zone, bp);
280 }
281
282 /*
283 * Allocates all the pages for the buffer in question and builds its page list.
284 */
285 STATIC int
286 xfs_buf_allocate_memory(
287 xfs_buf_t *bp,
288 uint flags)
289 {
290 size_t size;
291 size_t nbytes, offset;
292 gfp_t gfp_mask = xb_to_gfp(flags);
293 unsigned short page_count, i;
294 xfs_off_t start, end;
295 int error;
296
297 /*
298 * for buffers that are contained within a single page, just allocate
299 * the memory from the heap - there's no need for the complexity of
300 * page arrays to keep allocation down to order 0.
301 */
302 size = BBTOB(bp->b_length);
303 if (size < PAGE_SIZE) {
304 bp->b_addr = kmem_alloc(size, KM_NOFS);
305 if (!bp->b_addr) {
306 /* low memory - use alloc_page loop instead */
307 goto use_alloc_page;
308 }
309
310 if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
311 ((unsigned long)bp->b_addr & PAGE_MASK)) {
312 /* b_addr spans two pages - use alloc_page instead */
313 kmem_free(bp->b_addr);
314 bp->b_addr = NULL;
315 goto use_alloc_page;
316 }
317 bp->b_offset = offset_in_page(bp->b_addr);
318 bp->b_pages = bp->b_page_array;
319 bp->b_pages[0] = virt_to_page(bp->b_addr);
320 bp->b_page_count = 1;
321 bp->b_flags |= _XBF_KMEM;
322 return 0;
323 }
324
325 use_alloc_page:
326 start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
327 end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
328 >> PAGE_SHIFT;
329 page_count = end - start;
330 error = _xfs_buf_get_pages(bp, page_count);
331 if (unlikely(error))
332 return error;
333
334 offset = bp->b_offset;
335 bp->b_flags |= _XBF_PAGES;
336
337 for (i = 0; i < bp->b_page_count; i++) {
338 struct page *page;
339 uint retries = 0;
340 retry:
341 page = alloc_page(gfp_mask);
342 if (unlikely(page == NULL)) {
343 if (flags & XBF_READ_AHEAD) {
344 bp->b_page_count = i;
345 error = -ENOMEM;
346 goto out_free_pages;
347 }
348
349 /*
350 * This could deadlock.
351 *
352 * But until all the XFS lowlevel code is revamped to
353 * handle buffer allocation failures we can't do much.
354 */
355 if (!(++retries % 100))
356 xfs_err(NULL,
357 "possible memory allocation deadlock in %s (mode:0x%x)",
358 __func__, gfp_mask);
359
360 XFS_STATS_INC(xb_page_retries);
361 congestion_wait(BLK_RW_ASYNC, HZ/50);
362 goto retry;
363 }
364
365 XFS_STATS_INC(xb_page_found);
366
367 nbytes = min_t(size_t, size, PAGE_SIZE - offset);
368 size -= nbytes;
369 bp->b_pages[i] = page;
370 offset = 0;
371 }
372 return 0;
373
374 out_free_pages:
375 for (i = 0; i < bp->b_page_count; i++)
376 __free_page(bp->b_pages[i]);
377 return error;
378 }
379
380 /*
381 * Map buffer into kernel address-space if necessary.
382 */
383 STATIC int
384 _xfs_buf_map_pages(
385 xfs_buf_t *bp,
386 uint flags)
387 {
388 ASSERT(bp->b_flags & _XBF_PAGES);
389 if (bp->b_page_count == 1) {
390 /* A single page buffer is always mappable */
391 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
392 } else if (flags & XBF_UNMAPPED) {
393 bp->b_addr = NULL;
394 } else {
395 int retried = 0;
396 unsigned noio_flag;
397
398 /*
399 * vm_map_ram() will allocate auxiliary structures (e.g.
400 * pagetables) with GFP_KERNEL, yet we are likely to be under
401 * GFP_NOFS context here. Hence we need to tell memory reclaim
402 * that we are in such a context via PF_MEMALLOC_NOIO to prevent
403 * memory reclaim re-entering the filesystem here and
404 * potentially deadlocking.
405 */
406 noio_flag = memalloc_noio_save();
407 do {
408 bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
409 -1, PAGE_KERNEL);
410 if (bp->b_addr)
411 break;
412 vm_unmap_aliases();
413 } while (retried++ <= 1);
414 memalloc_noio_restore(noio_flag);
415
416 if (!bp->b_addr)
417 return -ENOMEM;
418 bp->b_addr += bp->b_offset;
419 }
420
421 return 0;
422 }
423
424 /*
425 * Finding and Reading Buffers
426 */
427
428 /*
429 * Looks up, and creates if absent, a lockable buffer for
430 * a given range of an inode. The buffer is returned
431 * locked. No I/O is implied by this call.
432 */
433 xfs_buf_t *
434 _xfs_buf_find(
435 struct xfs_buftarg *btp,
436 struct xfs_buf_map *map,
437 int nmaps,
438 xfs_buf_flags_t flags,
439 xfs_buf_t *new_bp)
440 {
441 size_t numbytes;
442 struct xfs_perag *pag;
443 struct rb_node **rbp;
444 struct rb_node *parent;
445 xfs_buf_t *bp;
446 xfs_daddr_t blkno = map[0].bm_bn;
447 xfs_daddr_t eofs;
448 int numblks = 0;
449 int i;
450
451 for (i = 0; i < nmaps; i++)
452 numblks += map[i].bm_len;
453 numbytes = BBTOB(numblks);
454
455 /* Check for IOs smaller than the sector size / not sector aligned */
456 ASSERT(!(numbytes < btp->bt_meta_sectorsize));
457 ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_meta_sectormask));
458
459 /*
460 * Corrupted block numbers can get through to here, unfortunately, so we
461 * have to check that the buffer falls within the filesystem bounds.
462 */
463 eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
464 if (blkno < 0 || blkno >= eofs) {
465 /*
466 * XXX (dgc): we should really be returning -EFSCORRUPTED here,
467 * but none of the higher level infrastructure supports
468 * returning a specific error on buffer lookup failures.
469 */
470 xfs_alert(btp->bt_mount,
471 "%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
472 __func__, blkno, eofs);
473 WARN_ON(1);
474 return NULL;
475 }
476
477 /* get tree root */
478 pag = xfs_perag_get(btp->bt_mount,
479 xfs_daddr_to_agno(btp->bt_mount, blkno));
480
481 /* walk tree */
482 spin_lock(&pag->pag_buf_lock);
483 rbp = &pag->pag_buf_tree.rb_node;
484 parent = NULL;
485 bp = NULL;
486 while (*rbp) {
487 parent = *rbp;
488 bp = rb_entry(parent, struct xfs_buf, b_rbnode);
489
490 if (blkno < bp->b_bn)
491 rbp = &(*rbp)->rb_left;
492 else if (blkno > bp->b_bn)
493 rbp = &(*rbp)->rb_right;
494 else {
495 /*
496 * found a block number match. If the range doesn't
497 * match, the only way this is allowed is if the buffer
498 * in the cache is stale and the transaction that made
499 * it stale has not yet committed. i.e. we are
500 * reallocating a busy extent. Skip this buffer and
501 * continue searching to the right for an exact match.
502 */
503 if (bp->b_length != numblks) {
504 ASSERT(bp->b_flags & XBF_STALE);
505 rbp = &(*rbp)->rb_right;
506 continue;
507 }
508 atomic_inc(&bp->b_hold);
509 goto found;
510 }
511 }
512
513 /* No match found */
514 if (new_bp) {
515 rb_link_node(&new_bp->b_rbnode, parent, rbp);
516 rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
517 /* the buffer keeps the perag reference until it is freed */
518 new_bp->b_pag = pag;
519 spin_unlock(&pag->pag_buf_lock);
520 } else {
521 XFS_STATS_INC(xb_miss_locked);
522 spin_unlock(&pag->pag_buf_lock);
523 xfs_perag_put(pag);
524 }
525 return new_bp;
526
527 found:
528 spin_unlock(&pag->pag_buf_lock);
529 xfs_perag_put(pag);
530
531 if (!xfs_buf_trylock(bp)) {
532 if (flags & XBF_TRYLOCK) {
533 xfs_buf_rele(bp);
534 XFS_STATS_INC(xb_busy_locked);
535 return NULL;
536 }
537 xfs_buf_lock(bp);
538 XFS_STATS_INC(xb_get_locked_waited);
539 }
540
541 /*
542 * if the buffer is stale, clear all the external state associated with
543 * it. We need to keep flags such as how we allocated the buffer memory
544 * intact here.
545 */
546 if (bp->b_flags & XBF_STALE) {
547 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
548 ASSERT(bp->b_iodone == NULL);
549 bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
550 bp->b_ops = NULL;
551 }
552
553 trace_xfs_buf_find(bp, flags, _RET_IP_);
554 XFS_STATS_INC(xb_get_locked);
555 return bp;
556 }
557
558 /*
559 * Assembles a buffer covering the specified range. The code is optimised for
560 * cache hits, as metadata intensive workloads will see 3 orders of magnitude
561 * more hits than misses.
562 */
563 struct xfs_buf *
564 xfs_buf_get_map(
565 struct xfs_buftarg *target,
566 struct xfs_buf_map *map,
567 int nmaps,
568 xfs_buf_flags_t flags)
569 {
570 struct xfs_buf *bp;
571 struct xfs_buf *new_bp;
572 int error = 0;
573
574 bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
575 if (likely(bp))
576 goto found;
577
578 new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
579 if (unlikely(!new_bp))
580 return NULL;
581
582 error = xfs_buf_allocate_memory(new_bp, flags);
583 if (error) {
584 xfs_buf_free(new_bp);
585 return NULL;
586 }
587
588 bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
589 if (!bp) {
590 xfs_buf_free(new_bp);
591 return NULL;
592 }
593
594 if (bp != new_bp)
595 xfs_buf_free(new_bp);
596
597 found:
598 if (!bp->b_addr) {
599 error = _xfs_buf_map_pages(bp, flags);
600 if (unlikely(error)) {
601 xfs_warn(target->bt_mount,
602 "%s: failed to map pagesn", __func__);
603 xfs_buf_relse(bp);
604 return NULL;
605 }
606 }
607
608 /*
609 * Clear b_error if this is a lookup from a caller that doesn't expect
610 * valid data to be found in the buffer.
611 */
612 if (!(flags & XBF_READ))
613 xfs_buf_ioerror(bp, 0);
614
615 XFS_STATS_INC(xb_get);
616 trace_xfs_buf_get(bp, flags, _RET_IP_);
617 return bp;
618 }
619
620 STATIC int
621 _xfs_buf_read(
622 xfs_buf_t *bp,
623 xfs_buf_flags_t flags)
624 {
625 ASSERT(!(flags & XBF_WRITE));
626 ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
627
628 bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
629 bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
630
631 if (flags & XBF_ASYNC) {
632 xfs_buf_submit(bp);
633 return 0;
634 }
635 return xfs_buf_submit_wait(bp);
636 }
637
638 xfs_buf_t *
639 xfs_buf_read_map(
640 struct xfs_buftarg *target,
641 struct xfs_buf_map *map,
642 int nmaps,
643 xfs_buf_flags_t flags,
644 const struct xfs_buf_ops *ops)
645 {
646 struct xfs_buf *bp;
647
648 flags |= XBF_READ;
649
650 bp = xfs_buf_get_map(target, map, nmaps, flags);
651 if (bp) {
652 trace_xfs_buf_read(bp, flags, _RET_IP_);
653
654 if (!XFS_BUF_ISDONE(bp)) {
655 XFS_STATS_INC(xb_get_read);
656 bp->b_ops = ops;
657 _xfs_buf_read(bp, flags);
658 } else if (flags & XBF_ASYNC) {
659 /*
660 * Read ahead call which is already satisfied,
661 * drop the buffer
662 */
663 xfs_buf_relse(bp);
664 return NULL;
665 } else {
666 /* We do not want read in the flags */
667 bp->b_flags &= ~XBF_READ;
668 }
669 }
670
671 return bp;
672 }
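
/*
 * Illustrative sketch: a typical cached metadata read through the map
 * interface. The buftarg "btp", block number "blkno", length "numblks" and
 * verifier "xfs_foo_buf_ops" are placeholders, not names defined here.
 *
 *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *	struct xfs_buf	*bp;
 *
 *	bp = xfs_buf_read_map(btp, &map, 1, 0, &xfs_foo_buf_ops);
 *	if (!bp)
 *		return -ENOMEM;
 *	if (bp->b_error) {
 *		error = bp->b_error;
 *		xfs_buf_relse(bp);
 *		return error;
 *	}
 *	... use bp->b_addr ...
 *	xfs_buf_relse(bp);
 */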
673
674 /*
675 * If we are not low on memory then do the readahead in a
676 * deadlock-safe manner.
677 */
678 void
679 xfs_buf_readahead_map(
680 struct xfs_buftarg *target,
681 struct xfs_buf_map *map,
682 int nmaps,
683 const struct xfs_buf_ops *ops)
684 {
685 if (bdi_read_congested(target->bt_bdi))
686 return;
687
688 xfs_buf_read_map(target, map, nmaps,
689 XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
690 }
691
692 /*
693 * Read an uncached buffer from disk. Allocates and returns a locked
694 * buffer containing the disk contents or nothing.
695 */
696 int
697 xfs_buf_read_uncached(
698 struct xfs_buftarg *target,
699 xfs_daddr_t daddr,
700 size_t numblks,
701 int flags,
702 struct xfs_buf **bpp,
703 const struct xfs_buf_ops *ops)
704 {
705 struct xfs_buf *bp;
706
707 *bpp = NULL;
708
709 bp = xfs_buf_get_uncached(target, numblks, flags);
710 if (!bp)
711 return -ENOMEM;
712
713 /* set up the buffer for a read IO */
714 ASSERT(bp->b_map_count == 1);
715 bp->b_bn = XFS_BUF_DADDR_NULL; /* always null for uncached buffers */
716 bp->b_maps[0].bm_bn = daddr;
717 bp->b_flags |= XBF_READ;
718 bp->b_ops = ops;
719
720 xfs_buf_submit_wait(bp);
721 if (bp->b_error) {
722 int error = bp->b_error;
723 xfs_buf_relse(bp);
724 return error;
725 }
726
727 *bpp = bp;
728 return 0;
729 }
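
/*
 * Illustrative sketch: reading an uncached buffer. "btp", "daddr" and
 * "numblks" are placeholders; a NULL ops pointer skips read verification.
 * The returned buffer is locked and must be released with xfs_buf_relse().
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	error = xfs_buf_read_uncached(btp, daddr, numblks, 0, &bp, NULL);
 *	if (error)
 *		return error;
 *	... use bp->b_addr ...
 *	xfs_buf_relse(bp);
 */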
730
731 /*
732 * Return a buffer allocated as an empty buffer and associated to external
733 * memory via xfs_buf_associate_memory() back to its empty state.
734 */
735 void
736 xfs_buf_set_empty(
737 struct xfs_buf *bp,
738 size_t numblks)
739 {
740 if (bp->b_pages)
741 _xfs_buf_free_pages(bp);
742
743 bp->b_pages = NULL;
744 bp->b_page_count = 0;
745 bp->b_addr = NULL;
746 bp->b_length = numblks;
747 bp->b_io_length = numblks;
748
749 ASSERT(bp->b_map_count == 1);
750 bp->b_bn = XFS_BUF_DADDR_NULL;
751 bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
752 bp->b_maps[0].bm_len = bp->b_length;
753 }
754
755 static inline struct page *
756 mem_to_page(
757 void *addr)
758 {
759 if ((!is_vmalloc_addr(addr))) {
760 return virt_to_page(addr);
761 } else {
762 return vmalloc_to_page(addr);
763 }
764 }
765
766 int
767 xfs_buf_associate_memory(
768 xfs_buf_t *bp,
769 void *mem,
770 size_t len)
771 {
772 int rval;
773 int i = 0;
774 unsigned long pageaddr;
775 unsigned long offset;
776 size_t buflen;
777 int page_count;
778
779 pageaddr = (unsigned long)mem & PAGE_MASK;
780 offset = (unsigned long)mem - pageaddr;
781 buflen = PAGE_ALIGN(len + offset);
782 page_count = buflen >> PAGE_SHIFT;
783
784 /* Free any previous set of page pointers */
785 if (bp->b_pages)
786 _xfs_buf_free_pages(bp);
787
788 bp->b_pages = NULL;
789 bp->b_addr = mem;
790
791 rval = _xfs_buf_get_pages(bp, page_count);
792 if (rval)
793 return rval;
794
795 bp->b_offset = offset;
796
797 for (i = 0; i < bp->b_page_count; i++) {
798 bp->b_pages[i] = mem_to_page((void *)pageaddr);
799 pageaddr += PAGE_SIZE;
800 }
801
802 bp->b_io_length = BTOBB(len);
803 bp->b_length = BTOBB(buflen);
804
805 return 0;
806 }
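
/*
 * Illustrative sketch: attaching caller-owned memory to a buffer and later
 * returning the buffer to its empty state. "mem", "len" and "numblks" are
 * placeholders for memory the caller allocates and frees itself.
 *
 *	error = xfs_buf_associate_memory(bp, mem, len);
 *	if (error)
 *		return error;
 *	... issue I/O against bp ...
 *	xfs_buf_set_empty(bp, numblks);
 */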
807
808 xfs_buf_t *
809 xfs_buf_get_uncached(
810 struct xfs_buftarg *target,
811 size_t numblks,
812 int flags)
813 {
814 unsigned long page_count;
815 int error, i;
816 struct xfs_buf *bp;
817 DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
818
819 bp = _xfs_buf_alloc(target, &map, 1, 0);
820 if (unlikely(bp == NULL))
821 goto fail;
822
823 page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
824 error = _xfs_buf_get_pages(bp, page_count);
825 if (error)
826 goto fail_free_buf;
827
828 for (i = 0; i < page_count; i++) {
829 bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
830 if (!bp->b_pages[i])
831 goto fail_free_mem;
832 }
833 bp->b_flags |= _XBF_PAGES;
834
835 error = _xfs_buf_map_pages(bp, 0);
836 if (unlikely(error)) {
837 xfs_warn(target->bt_mount,
838 "%s: failed to map pages", __func__);
839 goto fail_free_mem;
840 }
841
842 trace_xfs_buf_get_uncached(bp, _RET_IP_);
843 return bp;
844
845 fail_free_mem:
846 while (--i >= 0)
847 __free_page(bp->b_pages[i]);
848 _xfs_buf_free_pages(bp);
849 fail_free_buf:
850 xfs_buf_free_maps(bp);
851 kmem_zone_free(xfs_buf_zone, bp);
852 fail:
853 return NULL;
854 }
855
856 /*
857 * Increment reference count on buffer, to hold the buffer concurrently
858 * with another thread which may release (free) the buffer asynchronously.
859 * Must hold the buffer already to call this function.
860 */
861 void
862 xfs_buf_hold(
863 xfs_buf_t *bp)
864 {
865 trace_xfs_buf_hold(bp, _RET_IP_);
866 atomic_inc(&bp->b_hold);
867 }
868
869 /*
870 * Releases a hold on the specified buffer. If the
871 * hold count is 1, calls xfs_buf_free.
872 */
873 void
874 xfs_buf_rele(
875 xfs_buf_t *bp)
876 {
877 struct xfs_perag *pag = bp->b_pag;
878
879 trace_xfs_buf_rele(bp, _RET_IP_);
880
881 if (!pag) {
882 ASSERT(list_empty(&bp->b_lru));
883 ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
884 if (atomic_dec_and_test(&bp->b_hold))
885 xfs_buf_free(bp);
886 return;
887 }
888
889 ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
890
891 ASSERT(atomic_read(&bp->b_hold) > 0);
892 if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
893 spin_lock(&bp->b_lock);
894 if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
895 /*
896 * If the buffer is added to the LRU take a new
897 * reference to the buffer for the LRU and clear the
898 * (now stale) dispose list state flag
899 */
900 if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
901 bp->b_state &= ~XFS_BSTATE_DISPOSE;
902 atomic_inc(&bp->b_hold);
903 }
904 spin_unlock(&bp->b_lock);
905 spin_unlock(&pag->pag_buf_lock);
906 } else {
907 /*
908 * most of the time buffers will already be removed from
909 * the LRU, so optimise that case by checking for the
910 * XFS_BSTATE_DISPOSE flag indicating the last list the
911 * buffer was on was the disposal list
912 */
913 if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
914 list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
915 } else {
916 ASSERT(list_empty(&bp->b_lru));
917 }
918 spin_unlock(&bp->b_lock);
919
920 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
921 rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
922 spin_unlock(&pag->pag_buf_lock);
923 xfs_perag_put(pag);
924 xfs_buf_free(bp);
925 }
926 }
927 }
928
929
930 /*
931 * Lock a buffer object, if it is not already locked.
932 *
933 * If we come across a stale, pinned, locked buffer, we know that we are
934 * being asked to lock a buffer that has been reallocated. Because it is
935 * pinned, we know that the log has not been pushed to disk and hence it
936 * will still be locked. Rather than continuing to have trylock attempts
937 * fail until someone else pushes the log, push it ourselves before
938 * returning. This means that the xfsaild will not get stuck trying
939 * to push on stale inode buffers.
940 */
941 int
942 xfs_buf_trylock(
943 struct xfs_buf *bp)
944 {
945 int locked;
946
947 locked = down_trylock(&bp->b_sema) == 0;
948 if (locked)
949 XB_SET_OWNER(bp);
950
951 trace_xfs_buf_trylock(bp, _RET_IP_);
952 return locked;
953 }
954
955 /*
956 * Lock a buffer object.
957 *
958 * If we come across a stale, pinned, locked buffer, we know that we
959 * are being asked to lock a buffer that has been reallocated. Because
960 * it is pinned, we know that the log has not been pushed to disk and
961 * hence it will still be locked. Rather than sleeping until someone
962 * else pushes the log, push it ourselves before trying to get the lock.
963 */
964 void
965 xfs_buf_lock(
966 struct xfs_buf *bp)
967 {
968 trace_xfs_buf_lock(bp, _RET_IP_);
969
970 if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
971 xfs_log_force(bp->b_target->bt_mount, 0);
972 down(&bp->b_sema);
973 XB_SET_OWNER(bp);
974
975 trace_xfs_buf_lock_done(bp, _RET_IP_);
976 }
977
978 void
979 xfs_buf_unlock(
980 struct xfs_buf *bp)
981 {
982 XB_CLEAR_OWNER(bp);
983 up(&bp->b_sema);
984
985 trace_xfs_buf_unlock(bp, _RET_IP_);
986 }
987
988 STATIC void
989 xfs_buf_wait_unpin(
990 xfs_buf_t *bp)
991 {
992 DECLARE_WAITQUEUE (wait, current);
993
994 if (atomic_read(&bp->b_pin_count) == 0)
995 return;
996
997 add_wait_queue(&bp->b_waiters, &wait);
998 for (;;) {
999 set_current_state(TASK_UNINTERRUPTIBLE);
1000 if (atomic_read(&bp->b_pin_count) == 0)
1001 break;
1002 io_schedule();
1003 }
1004 remove_wait_queue(&bp->b_waiters, &wait);
1005 set_current_state(TASK_RUNNING);
1006 }
1007
1008 /*
1009 * Buffer Utility Routines
1010 */
1011
1012 void
1013 xfs_buf_ioend(
1014 struct xfs_buf *bp)
1015 {
1016 bool read = bp->b_flags & XBF_READ;
1017
1018 trace_xfs_buf_iodone(bp, _RET_IP_);
1019
1020 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
1021
1022 /*
1023 * Pull in IO completion errors now. We are guaranteed to be running
1024 * single threaded, so we don't need the lock to read b_io_error.
1025 */
1026 if (!bp->b_error && bp->b_io_error)
1027 xfs_buf_ioerror(bp, bp->b_io_error);
1028
1029 /* Only validate buffers that were read without errors */
1030 if (read && !bp->b_error && bp->b_ops) {
1031 ASSERT(!bp->b_iodone);
1032 bp->b_ops->verify_read(bp);
1033 }
1034
1035 if (!bp->b_error)
1036 bp->b_flags |= XBF_DONE;
1037
1038 if (bp->b_iodone)
1039 (*(bp->b_iodone))(bp);
1040 else if (bp->b_flags & XBF_ASYNC)
1041 xfs_buf_relse(bp);
1042 else
1043 complete(&bp->b_iowait);
1044 }
1045
1046 static void
1047 xfs_buf_ioend_work(
1048 struct work_struct *work)
1049 {
1050 struct xfs_buf *bp =
1051 container_of(work, xfs_buf_t, b_ioend_work);
1052
1053 xfs_buf_ioend(bp);
1054 }
1055
1056 void
1057 xfs_buf_ioend_async(
1058 struct xfs_buf *bp)
1059 {
1060 INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
1061 queue_work(bp->b_ioend_wq, &bp->b_ioend_work);
1062 }
1063
1064 void
1065 xfs_buf_ioerror(
1066 xfs_buf_t *bp,
1067 int error)
1068 {
1069 ASSERT(error <= 0 && error >= -1000);
1070 bp->b_error = error;
1071 trace_xfs_buf_ioerror(bp, error, _RET_IP_);
1072 }
1073
1074 void
1075 xfs_buf_ioerror_alert(
1076 struct xfs_buf *bp,
1077 const char *func)
1078 {
1079 xfs_alert(bp->b_target->bt_mount,
1080 "metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
1081 (__uint64_t)XFS_BUF_ADDR(bp), func, -bp->b_error, bp->b_length);
1082 }
1083
1084 int
1085 xfs_bwrite(
1086 struct xfs_buf *bp)
1087 {
1088 int error;
1089
1090 ASSERT(xfs_buf_islocked(bp));
1091
1092 bp->b_flags |= XBF_WRITE;
1093 bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
1094 XBF_WRITE_FAIL | XBF_DONE);
1095
1096 error = xfs_buf_submit_wait(bp);
1097 if (error) {
1098 xfs_force_shutdown(bp->b_target->bt_mount,
1099 SHUTDOWN_META_IO_ERROR);
1100 }
1101 return error;
1102 }
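
/*
 * Illustrative sketch: a synchronous write of a locked buffer. xfs_bwrite()
 * leaves the buffer locked and held, so the caller releases it afterwards.
 *
 *	error = xfs_bwrite(bp);
 *	xfs_buf_relse(bp);
 */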
1103
1104 STATIC void
1105 xfs_buf_bio_end_io(
1106 struct bio *bio,
1107 int error)
1108 {
1109 xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
1110
1111 /*
1112 * don't overwrite existing errors - otherwise we can lose errors on
1113 * buffers that require multiple bios to complete.
1114 */
1115 if (error) {
1116 spin_lock(&bp->b_lock);
1117 if (!bp->b_io_error)
1118 bp->b_io_error = error;
1119 spin_unlock(&bp->b_lock);
1120 }
1121
1122 if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
1123 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
1124
1125 if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
1126 xfs_buf_ioend_async(bp);
1127 bio_put(bio);
1128 }
1129
1130 static void
1131 xfs_buf_ioapply_map(
1132 struct xfs_buf *bp,
1133 int map,
1134 int *buf_offset,
1135 int *count,
1136 int rw)
1137 {
1138 int page_index;
1139 int total_nr_pages = bp->b_page_count;
1140 int nr_pages;
1141 struct bio *bio;
1142 sector_t sector = bp->b_maps[map].bm_bn;
1143 int size;
1144 int offset;
1145
1146 total_nr_pages = bp->b_page_count;
1147
1148 /* skip the pages in the buffer before the start offset */
1149 page_index = 0;
1150 offset = *buf_offset;
1151 while (offset >= PAGE_SIZE) {
1152 page_index++;
1153 offset -= PAGE_SIZE;
1154 }
1155
1156 /*
1157 * Limit the IO size to the length of the current vector, and update the
1158 * remaining IO count for the next time around.
1159 */
1160 size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
1161 *count -= size;
1162 *buf_offset += size;
1163
1164 next_chunk:
1165 atomic_inc(&bp->b_io_remaining);
1166 nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1167 if (nr_pages > total_nr_pages)
1168 nr_pages = total_nr_pages;
1169
1170 bio = bio_alloc(GFP_NOIO, nr_pages);
1171 bio->bi_bdev = bp->b_target->bt_bdev;
1172 bio->bi_iter.bi_sector = sector;
1173 bio->bi_end_io = xfs_buf_bio_end_io;
1174 bio->bi_private = bp;
1175
1176
1177 for (; size && nr_pages; nr_pages--, page_index++) {
1178 int rbytes, nbytes = PAGE_SIZE - offset;
1179
1180 if (nbytes > size)
1181 nbytes = size;
1182
1183 rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
1184 offset);
1185 if (rbytes < nbytes)
1186 break;
1187
1188 offset = 0;
1189 sector += BTOBB(nbytes);
1190 size -= nbytes;
1191 total_nr_pages--;
1192 }
1193
1194 if (likely(bio->bi_iter.bi_size)) {
1195 if (xfs_buf_is_vmapped(bp)) {
1196 flush_kernel_vmap_range(bp->b_addr,
1197 xfs_buf_vmap_len(bp));
1198 }
1199 submit_bio(rw, bio);
1200 if (size)
1201 goto next_chunk;
1202 } else {
1203 /*
1204 * This is guaranteed not to be the last io reference count
1205 * because the caller (xfs_buf_submit) holds a count itself.
1206 */
1207 atomic_dec(&bp->b_io_remaining);
1208 xfs_buf_ioerror(bp, -EIO);
1209 bio_put(bio);
1210 }
1211
1212 }
1213
1214 STATIC void
1215 _xfs_buf_ioapply(
1216 struct xfs_buf *bp)
1217 {
1218 struct blk_plug plug;
1219 int rw;
1220 int offset;
1221 int size;
1222 int i;
1223
1224 /*
1225 * Make sure we capture only current IO errors rather than stale errors
1226 * left over from previous use of the buffer (e.g. failed readahead).
1227 */
1228 bp->b_error = 0;
1229
1230 /*
1231 * Initialize the I/O completion workqueue if we haven't yet or the
1232 * submitter has not opted to specify a custom one.
1233 */
1234 if (!bp->b_ioend_wq)
1235 bp->b_ioend_wq = bp->b_target->bt_mount->m_buf_workqueue;
1236
1237 if (bp->b_flags & XBF_WRITE) {
1238 if (bp->b_flags & XBF_SYNCIO)
1239 rw = WRITE_SYNC;
1240 else
1241 rw = WRITE;
1242 if (bp->b_flags & XBF_FUA)
1243 rw |= REQ_FUA;
1244 if (bp->b_flags & XBF_FLUSH)
1245 rw |= REQ_FLUSH;
1246
1247 /*
1248 * Run the write verifier callback function if it exists. If
1249 * this function fails it will mark the buffer with an error and
1250 * the IO should not be dispatched.
1251 */
1252 if (bp->b_ops) {
1253 bp->b_ops->verify_write(bp);
1254 if (bp->b_error) {
1255 xfs_force_shutdown(bp->b_target->bt_mount,
1256 SHUTDOWN_CORRUPT_INCORE);
1257 return;
1258 }
1259 } else if (bp->b_bn != XFS_BUF_DADDR_NULL) {
1260 struct xfs_mount *mp = bp->b_target->bt_mount;
1261
1262 /*
1263 * non-crc filesystems don't attach verifiers during
1264 * log recovery, so don't warn for such filesystems.
1265 */
1266 if (xfs_sb_version_hascrc(&mp->m_sb)) {
1267 xfs_warn(mp,
1268 "%s: no ops on block 0x%llx/0x%x",
1269 __func__, bp->b_bn, bp->b_length);
1270 xfs_hex_dump(bp->b_addr, 64);
1271 dump_stack();
1272 }
1273 }
1274 } else if (bp->b_flags & XBF_READ_AHEAD) {
1275 rw = READA;
1276 } else {
1277 rw = READ;
1278 }
1279
1280 /* we only use the buffer cache for meta-data */
1281 rw |= REQ_META;
1282
1283 /*
1284 * Walk all the vectors issuing IO on them. Set up the initial offset
1285 * into the buffer and the desired IO size before we start -
1286 * xfs_buf_ioapply_map() will modify them appropriately for each
1287 * subsequent call.
1288 */
1289 offset = bp->b_offset;
1290 size = BBTOB(bp->b_io_length);
1291 blk_start_plug(&plug);
1292 for (i = 0; i < bp->b_map_count; i++) {
1293 xfs_buf_ioapply_map(bp, i, &offset, &size, rw);
1294 if (bp->b_error)
1295 break;
1296 if (size <= 0)
1297 break; /* all done */
1298 }
1299 blk_finish_plug(&plug);
1300 }
1301
1302 /*
1303 * Asynchronous IO submission path. This transfers the buffer lock ownership and
1304 * the current reference to the IO. It is not safe to reference the buffer after
1305 * a call to this function unless the caller holds an additional reference
1306 * itself.
1307 */
1308 void
1309 xfs_buf_submit(
1310 struct xfs_buf *bp)
1311 {
1312 trace_xfs_buf_submit(bp, _RET_IP_);
1313
1314 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
1315 ASSERT(bp->b_flags & XBF_ASYNC);
1316
1317 /* on shutdown we stale and complete the buffer immediately */
1318 if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
1319 xfs_buf_ioerror(bp, -EIO);
1320 bp->b_flags &= ~XBF_DONE;
1321 xfs_buf_stale(bp);
1322 xfs_buf_ioend(bp);
1323 return;
1324 }
1325
1326 if (bp->b_flags & XBF_WRITE)
1327 xfs_buf_wait_unpin(bp);
1328
1329 /* clear the internal error state to avoid spurious errors */
1330 bp->b_io_error = 0;
1331
1332 /*
1333 * The caller's reference is released during I/O completion.
1334 * This occurs some time after the last b_io_remaining reference is
1335 * released, so after we drop our IO reference we have to have some
1336 * other reference to ensure the buffer doesn't go away from underneath
1337 * us. Take a direct reference to ensure we have safe access to the
1338 * buffer until we are finished with it.
1339 */
1340 xfs_buf_hold(bp);
1341
1342 /*
1343 * Set the count to 1 initially, this will stop an I/O completion
1344 * callout which happens before we have started all the I/O from calling
1345 * xfs_buf_ioend too early.
1346 */
1347 atomic_set(&bp->b_io_remaining, 1);
1348 _xfs_buf_ioapply(bp);
1349
1350 /*
1351 * If _xfs_buf_ioapply failed, we can get back here with only the IO
1352 * reference we took above. If we drop it to zero, run completion so
1353 * that we don't return to the caller with completion still pending.
1354 */
1355 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
1356 if (bp->b_error)
1357 xfs_buf_ioend(bp);
1358 else
1359 xfs_buf_ioend_async(bp);
1360 }
1361
1362 xfs_buf_rele(bp);
1363 /* Note: it is not safe to reference bp now we've dropped our ref */
1364 }
1365
1366 /*
1367 * Synchronous buffer IO submission path, read or write.
1368 */
1369 int
1370 xfs_buf_submit_wait(
1371 struct xfs_buf *bp)
1372 {
1373 int error;
1374
1375 trace_xfs_buf_submit_wait(bp, _RET_IP_);
1376
1377 ASSERT(!(bp->b_flags & (_XBF_DELWRI_Q | XBF_ASYNC)));
1378
1379 if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
1380 xfs_buf_ioerror(bp, -EIO);
1381 xfs_buf_stale(bp);
1382 bp->b_flags &= ~XBF_DONE;
1383 return -EIO;
1384 }
1385
1386 if (bp->b_flags & XBF_WRITE)
1387 xfs_buf_wait_unpin(bp);
1388
1389 /* clear the internal error state to avoid spurious errors */
1390 bp->b_io_error = 0;
1391
1392 /*
1393 * For synchronous IO, the IO does not inherit the submitter's reference
1394 * count, nor the buffer lock. Hence we cannot release the reference we
1395 * are about to take until we've waited for all IO completion to occur,
1396 * including any xfs_buf_ioend_async() work that may be pending.
1397 */
1398 xfs_buf_hold(bp);
1399
1400 /*
1401 * Set the count to 1 initially, this will stop an I/O completion
1402 * callout which happens before we have started all the I/O from calling
1403 * xfs_buf_ioend too early.
1404 */
1405 atomic_set(&bp->b_io_remaining, 1);
1406 _xfs_buf_ioapply(bp);
1407
1408 /*
1409 * make sure we run completion synchronously if it raced with us and is
1410 * already complete.
1411 */
1412 if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
1413 xfs_buf_ioend(bp);
1414
1415 /* wait for completion before gathering the error from the buffer */
1416 trace_xfs_buf_iowait(bp, _RET_IP_);
1417 wait_for_completion(&bp->b_iowait);
1418 trace_xfs_buf_iowait_done(bp, _RET_IP_);
1419 error = bp->b_error;
1420
1421 /*
1422 * all done now, we can release the hold that keeps the buffer
1423 * referenced for the entire IO.
1424 */
1425 xfs_buf_rele(bp);
1426 return error;
1427 }
1428
1429 xfs_caddr_t
1430 xfs_buf_offset(
1431 xfs_buf_t *bp,
1432 size_t offset)
1433 {
1434 struct page *page;
1435
1436 if (bp->b_addr)
1437 return bp->b_addr + offset;
1438
1439 offset += bp->b_offset;
1440 page = bp->b_pages[offset >> PAGE_SHIFT];
1441 return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
1442 }
1443
1444 /*
1445 * Move data into or out of a buffer.
1446 */
1447 void
1448 xfs_buf_iomove(
1449 xfs_buf_t *bp, /* buffer to process */
1450 size_t boff, /* starting buffer offset */
1451 size_t bsize, /* length to copy */
1452 void *data, /* data address */
1453 xfs_buf_rw_t mode) /* read/write/zero flag */
1454 {
1455 size_t bend;
1456
1457 bend = boff + bsize;
1458 while (boff < bend) {
1459 struct page *page;
1460 int page_index, page_offset, csize;
1461
1462 page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
1463 page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
1464 page = bp->b_pages[page_index];
1465 csize = min_t(size_t, PAGE_SIZE - page_offset,
1466 BBTOB(bp->b_io_length) - boff);
1467
1468 ASSERT((csize + page_offset) <= PAGE_SIZE);
1469
1470 switch (mode) {
1471 case XBRW_ZERO:
1472 memset(page_address(page) + page_offset, 0, csize);
1473 break;
1474 case XBRW_READ:
1475 memcpy(data, page_address(page) + page_offset, csize);
1476 break;
1477 case XBRW_WRITE:
1478 memcpy(page_address(page) + page_offset, data, csize);
1479 }
1480
1481 boff += csize;
1482 data += csize;
1483 }
1484 }
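
/*
 * Illustrative sketch: zeroing and then filling part of a buffer with
 * xfs_buf_iomove(). "boff", "len" and "src" are placeholders; XBRW_ZERO
 * ignores the data pointer.
 *
 *	xfs_buf_iomove(bp, boff, len, NULL, XBRW_ZERO);
 *	xfs_buf_iomove(bp, boff, len, src, XBRW_WRITE);
 */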
1485
1486 /*
1487 * Handling of buffer targets (buftargs).
1488 */
1489
1490 /*
1491 * Wait for any bufs with callbacks that have been submitted but have not yet
1492 * returned. These buffers will have an elevated hold count, so wait on those
1493 * while freeing all the buffers only held by the LRU.
1494 */
1495 static enum lru_status
1496 xfs_buftarg_wait_rele(
1497 struct list_head *item,
1498 struct list_lru_one *lru,
1499 spinlock_t *lru_lock,
1500 void *arg)
1501
1502 {
1503 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
1504 struct list_head *dispose = arg;
1505
1506 if (atomic_read(&bp->b_hold) > 1) {
1507 /* need to wait, so skip it this pass */
1508 trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
1509 return LRU_SKIP;
1510 }
1511 if (!spin_trylock(&bp->b_lock))
1512 return LRU_SKIP;
1513
1514 /*
1515 * clear the LRU reference count so the buffer doesn't get
1516 * ignored in xfs_buf_rele().
1517 */
1518 atomic_set(&bp->b_lru_ref, 0);
1519 bp->b_state |= XFS_BSTATE_DISPOSE;
1520 list_lru_isolate_move(lru, item, dispose);
1521 spin_unlock(&bp->b_lock);
1522 return LRU_REMOVED;
1523 }
1524
1525 void
1526 xfs_wait_buftarg(
1527 struct xfs_buftarg *btp)
1528 {
1529 LIST_HEAD(dispose);
1530 int loop = 0;
1531
1532 /*
1533 * We need to flush the buffer workqueue to ensure that all IO
1534 * completion processing is 100% done. Just waiting on buffer locks is
1535 * not sufficient for async IO as the reference count held over IO is
1536 * not released until after the buffer lock is dropped. Hence we need to
1537 * ensure here that all reference counts have been dropped before we
1538 * start walking the LRU list.
1539 */
1540 drain_workqueue(btp->bt_mount->m_buf_workqueue);
1541
1542 /* loop until there is nothing left on the lru list. */
1543 while (list_lru_count(&btp->bt_lru)) {
1544 list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
1545 &dispose, LONG_MAX);
1546
1547 while (!list_empty(&dispose)) {
1548 struct xfs_buf *bp;
1549 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1550 list_del_init(&bp->b_lru);
1551 if (bp->b_flags & XBF_WRITE_FAIL) {
1552 xfs_alert(btp->bt_mount,
1553 "Corruption Alert: Buffer at block 0x%llx had permanent write failures!\n"
1554 "Please run xfs_repair to determine the extent of the problem.",
1555 (long long)bp->b_bn);
1556 }
1557 xfs_buf_rele(bp);
1558 }
1559 if (loop++ != 0)
1560 delay(100);
1561 }
1562 }
1563
1564 static enum lru_status
1565 xfs_buftarg_isolate(
1566 struct list_head *item,
1567 struct list_lru_one *lru,
1568 spinlock_t *lru_lock,
1569 void *arg)
1570 {
1571 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
1572 struct list_head *dispose = arg;
1573
1574 /*
1575 * we are inverting the lru lock/bp->b_lock here, so use a trylock.
1576 * If we fail to get the lock, just skip it.
1577 */
1578 if (!spin_trylock(&bp->b_lock))
1579 return LRU_SKIP;
1580 /*
1581 * Decrement the b_lru_ref count unless the value is already
1582 * zero. If the value is already zero, we need to reclaim the
1583 * buffer, otherwise it gets another trip through the LRU.
1584 */
1585 if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
1586 spin_unlock(&bp->b_lock);
1587 return LRU_ROTATE;
1588 }
1589
1590 bp->b_state |= XFS_BSTATE_DISPOSE;
1591 list_lru_isolate_move(lru, item, dispose);
1592 spin_unlock(&bp->b_lock);
1593 return LRU_REMOVED;
1594 }
1595
1596 static unsigned long
1597 xfs_buftarg_shrink_scan(
1598 struct shrinker *shrink,
1599 struct shrink_control *sc)
1600 {
1601 struct xfs_buftarg *btp = container_of(shrink,
1602 struct xfs_buftarg, bt_shrinker);
1603 LIST_HEAD(dispose);
1604 unsigned long freed;
1605
1606 freed = list_lru_shrink_walk(&btp->bt_lru, sc,
1607 xfs_buftarg_isolate, &dispose);
1608
1609 while (!list_empty(&dispose)) {
1610 struct xfs_buf *bp;
1611 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1612 list_del_init(&bp->b_lru);
1613 xfs_buf_rele(bp);
1614 }
1615
1616 return freed;
1617 }
1618
1619 static unsigned long
1620 xfs_buftarg_shrink_count(
1621 struct shrinker *shrink,
1622 struct shrink_control *sc)
1623 {
1624 struct xfs_buftarg *btp = container_of(shrink,
1625 struct xfs_buftarg, bt_shrinker);
1626 return list_lru_shrink_count(&btp->bt_lru, sc);
1627 }
1628
1629 void
1630 xfs_free_buftarg(
1631 struct xfs_mount *mp,
1632 struct xfs_buftarg *btp)
1633 {
1634 unregister_shrinker(&btp->bt_shrinker);
1635 list_lru_destroy(&btp->bt_lru);
1636
1637 if (mp->m_flags & XFS_MOUNT_BARRIER)
1638 xfs_blkdev_issue_flush(btp);
1639
1640 kmem_free(btp);
1641 }
1642
1643 int
1644 xfs_setsize_buftarg(
1645 xfs_buftarg_t *btp,
1646 unsigned int sectorsize)
1647 {
1648 /* Set up metadata sector size info */
1649 btp->bt_meta_sectorsize = sectorsize;
1650 btp->bt_meta_sectormask = sectorsize - 1;
1651
1652 if (set_blocksize(btp->bt_bdev, sectorsize)) {
1653 char name[BDEVNAME_SIZE];
1654
1655 bdevname(btp->bt_bdev, name);
1656
1657 xfs_warn(btp->bt_mount,
1658 "Cannot set_blocksize to %u on device %s",
1659 sectorsize, name);
1660 return -EINVAL;
1661 }
1662
1663 /* Set up device logical sector size mask */
1664 btp->bt_logical_sectorsize = bdev_logical_block_size(btp->bt_bdev);
1665 btp->bt_logical_sectormask = bdev_logical_block_size(btp->bt_bdev) - 1;
1666
1667 return 0;
1668 }
1669
1670 /*
1671 * When allocating the initial buffer target we have not yet
1672 * read in the superblock, so don't know what sized sectors
1673 * are being used at this early stage. Play safe.
1674 */
1675 STATIC int
1676 xfs_setsize_buftarg_early(
1677 xfs_buftarg_t *btp,
1678 struct block_device *bdev)
1679 {
1680 return xfs_setsize_buftarg(btp, bdev_logical_block_size(bdev));
1681 }
1682
1683 xfs_buftarg_t *
1684 xfs_alloc_buftarg(
1685 struct xfs_mount *mp,
1686 struct block_device *bdev)
1687 {
1688 xfs_buftarg_t *btp;
1689
1690 btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);
1691
1692 btp->bt_mount = mp;
1693 btp->bt_dev = bdev->bd_dev;
1694 btp->bt_bdev = bdev;
1695 btp->bt_bdi = blk_get_backing_dev_info(bdev);
1696
1697 if (xfs_setsize_buftarg_early(btp, bdev))
1698 goto error;
1699
1700 if (list_lru_init(&btp->bt_lru))
1701 goto error;
1702
1703 btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
1704 btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
1705 btp->bt_shrinker.seeks = DEFAULT_SEEKS;
1706 btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
1707 register_shrinker(&btp->bt_shrinker);
1708 return btp;
1709
1710 error:
1711 kmem_free(btp);
1712 return NULL;
1713 }
1714
1715 /*
1716 * Add a buffer to the delayed write list.
1717 *
1718 * This queues a buffer for writeout if it hasn't already been. Note that
1719 * neither this routine nor the buffer list submission functions perform
1720 * any internal synchronization. It is expected that the lists are thread-local
1721 * to the callers.
1722 *
1723 * Returns true if we queued up the buffer, or false if it already had
1724 * been on the buffer list.
1725 */
1726 bool
1727 xfs_buf_delwri_queue(
1728 struct xfs_buf *bp,
1729 struct list_head *list)
1730 {
1731 ASSERT(xfs_buf_islocked(bp));
1732 ASSERT(!(bp->b_flags & XBF_READ));
1733
1734 /*
1735 * If the buffer is already marked delwri it already is queued up
1736 * by someone else for immediate writeout. Just ignore it in that
1737 * case.
1738 */
1739 if (bp->b_flags & _XBF_DELWRI_Q) {
1740 trace_xfs_buf_delwri_queued(bp, _RET_IP_);
1741 return false;
1742 }
1743
1744 trace_xfs_buf_delwri_queue(bp, _RET_IP_);
1745
1746 /*
1747 * If a buffer gets written out synchronously or marked stale while it
1748 * is on a delwri list we lazily remove it. To do this, the other party
1749 * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
1750 * It remains referenced and on the list. In a rare corner case it
1751 * might get re-added to a delwri list after the synchronous writeout, in
1752 * which case we just need to re-add the flag here.
1753 */
1754 bp->b_flags |= _XBF_DELWRI_Q;
1755 if (list_empty(&bp->b_list)) {
1756 atomic_inc(&bp->b_hold);
1757 list_add_tail(&bp->b_list, list);
1758 }
1759
1760 return true;
1761 }
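
/*
 * Illustrative sketch: the expected delwri pattern. The list is local to the
 * caller; buffers are queued while locked (the queue takes its own hold), and
 * the whole list is later pushed with one of the submission functions below.
 *
 *	LIST_HEAD(buffer_list);
 *
 *	xfs_buf_delwri_queue(bp, &buffer_list);
 *	xfs_buf_relse(bp);
 *	...
 *	error = xfs_buf_delwri_submit(&buffer_list);
 */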
1762
1763 /*
1764 * The compare function is more complex than it needs to be because the
1765 * return value is only 32 bits wide and we are comparing 64-bit block
1766 * numbers; returning the raw difference could truncate to the wrong sign.
1767 */
1768 static int
1769 xfs_buf_cmp(
1770 void *priv,
1771 struct list_head *a,
1772 struct list_head *b)
1773 {
1774 struct xfs_buf *ap = container_of(a, struct xfs_buf, b_list);
1775 struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list);
1776 xfs_daddr_t diff;
1777
1778 diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
1779 if (diff < 0)
1780 return -1;
1781 if (diff > 0)
1782 return 1;
1783 return 0;
1784 }
1785
1786 static int
1787 __xfs_buf_delwri_submit(
1788 struct list_head *buffer_list,
1789 struct list_head *io_list,
1790 bool wait)
1791 {
1792 struct blk_plug plug;
1793 struct xfs_buf *bp, *n;
1794 int pinned = 0;
1795
1796 list_for_each_entry_safe(bp, n, buffer_list, b_list) {
1797 if (!wait) {
1798 if (xfs_buf_ispinned(bp)) {
1799 pinned++;
1800 continue;
1801 }
1802 if (!xfs_buf_trylock(bp))
1803 continue;
1804 } else {
1805 xfs_buf_lock(bp);
1806 }
1807
1808 /*
1809 * Someone else might have written the buffer synchronously or
1810 * marked it stale in the meantime. In that case only the
1811 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
1812 * reference and remove it from the list here.
1813 */
1814 if (!(bp->b_flags & _XBF_DELWRI_Q)) {
1815 list_del_init(&bp->b_list);
1816 xfs_buf_relse(bp);
1817 continue;
1818 }
1819
1820 list_move_tail(&bp->b_list, io_list);
1821 trace_xfs_buf_delwri_split(bp, _RET_IP_);
1822 }
1823
1824 list_sort(NULL, io_list, xfs_buf_cmp);
1825
1826 blk_start_plug(&plug);
1827 list_for_each_entry_safe(bp, n, io_list, b_list) {
1828 bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL);
1829 bp->b_flags |= XBF_WRITE | XBF_ASYNC;
1830
1831 /*
1832 * we do all IO submission async. This means if we need to wait
1833 * for IO completion we need to take an extra reference so the
1834 * buffer is still valid on the other side.
1835 */
1836 if (wait)
1837 xfs_buf_hold(bp);
1838 else
1839 list_del_init(&bp->b_list);
1840
1841 xfs_buf_submit(bp);
1842 }
1843 blk_finish_plug(&plug);
1844
1845 return pinned;
1846 }
1847
1848 /*
1849 * Write out a buffer list asynchronously.
1850 *
1851 * This will take the @buffer_list, write all non-locked and non-pinned buffers
1852 * out and not wait for I/O completion on any of the buffers. This interface
1853 * is only safely usable for callers that can track I/O completion by higher
1854 * level means, e.g. AIL pushing as the @buffer_list is consumed in this
1855 * function.
1856 */
1857 int
1858 xfs_buf_delwri_submit_nowait(
1859 struct list_head *buffer_list)
1860 {
1861 LIST_HEAD (io_list);
1862 return __xfs_buf_delwri_submit(buffer_list, &io_list, false);
1863 }
1864
1865 /*
1866 * Write out a buffer list synchronously.
1867 *
1868 * This will take the @buffer_list, write all buffers out and wait for I/O
1869 * completion on all of the buffers. @buffer_list is consumed by the function,
1870 * so callers must have some other way of tracking buffers if they require such
1871 * functionality.
1872 */
1873 int
1874 xfs_buf_delwri_submit(
1875 struct list_head *buffer_list)
1876 {
1877 LIST_HEAD (io_list);
1878 int error = 0, error2;
1879 struct xfs_buf *bp;
1880
1881 __xfs_buf_delwri_submit(buffer_list, &io_list, true);
1882
1883 /* Wait for IO to complete. */
1884 while (!list_empty(&io_list)) {
1885 bp = list_first_entry(&io_list, struct xfs_buf, b_list);
1886
1887 list_del_init(&bp->b_list);
1888
1889 /* locking the buffer will wait for async IO completion. */
1890 xfs_buf_lock(bp);
1891 error2 = bp->b_error;
1892 xfs_buf_relse(bp);
1893 if (!error)
1894 error = error2;
1895 }
1896
1897 return error;
1898 }
1899
1900 int __init
1901 xfs_buf_init(void)
1902 {
1903 xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
1904 KM_ZONE_HWALIGN, NULL);
1905 if (!xfs_buf_zone)
1906 goto out;
1907
1908 return 0;
1909
1910 out:
1911 return -ENOMEM;
1912 }
1913
1914 void
1915 xfs_buf_terminate(void)
1916 {
1917 kmem_zone_destroy(xfs_buf_zone);
1918 }
1919