This source file includes the following definitions:
- xfs_extent_busy_insert
- xfs_extent_busy_search
- xfs_extent_busy_update_extent
- xfs_extent_busy_reuse
- xfs_extent_busy_trim
- xfs_extent_busy_clear_one
- xfs_extent_busy_put_pag
- xfs_extent_busy_clear
- xfs_extent_busy_flush
- xfs_extent_busy_wait_all
- xfs_extent_busy_ag_cmp
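
The functions above form the busy-extent tracking API used by the XFS allocator: freed extents are recorded as busy until the freeing transaction reaches stable storage, and allocations are trimmed or stalled so they never reuse such blocks prematurely. As a rough orientation, the sketch below shows one plausible calling pattern; the wrapper names (example_record_free, example_trim_candidate, example_log_commit_done) are hypothetical and only illustrate how the exported functions fit together, not the actual call sites in the XFS allocator and log code.

/*
 * Illustrative only: hypothetical wrappers showing a typical calling
 * pattern for the busy-extent API defined in this file.
 */

/* Called while freeing an extent inside transaction @tp. */
static void
example_record_free(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	/* Mark the freed range busy until the transaction hits the log. */
	xfs_extent_busy_insert(tp, agno, bno, len, 0);
}

/* Called while searching for free space to allocate. */
static bool
example_trim_candidate(
	struct xfs_alloc_arg	*args,
	xfs_agblock_t		*bno,
	xfs_extlen_t		*len)
{
	unsigned		busy_gen;

	/* Shrink the candidate extent so it avoids all busy extents. */
	if (xfs_extent_busy_trim(args, bno, len, &busy_gen) &&
	    *len < args->minlen) {
		/* Nothing usable left: wait for busy extents to clear. */
		xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
		return false;		/* caller restarts the search */
	}
	return true;
}

/* Called once the freeing transaction is committed to stable storage. */
static void
example_log_commit_done(
	struct xfs_mount	*mp,
	struct list_head	*busy_list)
{
	/* Drop the busy entries (or mark them for online discard). */
	xfs_extent_busy_clear(mp, busy_list, false);
}
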
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_log.h"

void
xfs_extent_busy_insert(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	unsigned int		flags)
{
	struct xfs_extent_busy	*new;
	struct xfs_extent_busy	*busyp;
	struct xfs_perag	*pag;
	struct rb_node		**rbp;
	struct rb_node		*parent = NULL;

	new = kmem_zalloc(sizeof(struct xfs_extent_busy), 0);
	new->agno = agno;
	new->bno = bno;
	new->length = len;
	INIT_LIST_HEAD(&new->list);
	new->flags = flags;

	/* trace before insert to be able to see failed inserts */
	trace_xfs_extent_busy(tp->t_mountp, agno, bno, len);

	pag = xfs_perag_get(tp->t_mountp, new->agno);
	spin_lock(&pag->pagb_lock);
	rbp = &pag->pagb_tree.rb_node;
	while (*rbp) {
		parent = *rbp;
		busyp = rb_entry(parent, struct xfs_extent_busy, rb_node);

		if (new->bno < busyp->bno) {
			rbp = &(*rbp)->rb_left;
			ASSERT(new->bno + new->length <= busyp->bno);
		} else if (new->bno > busyp->bno) {
			rbp = &(*rbp)->rb_right;
			ASSERT(bno >= busyp->bno + busyp->length);
		} else {
			ASSERT(0);
		}
	}

	rb_link_node(&new->rb_node, parent, rbp);
	rb_insert_color(&new->rb_node, &pag->pagb_tree);

	list_add(&new->list, &tp->t_busy);
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
}

/*
 * Search for a busy extent within the range of the extent we are about to
 * allocate.  Returns 0 if there is no overlapping busy extent, -1 if a busy
 * extent overlaps but does not match the range exactly, and 1 for an exact
 * match.  A non-zero return therefore indicates an overlap that will require
 * synchronizing with the log, while further checks are needed to decide
 * whether the extent can be reused.
 */
int
xfs_extent_busy_search(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	struct xfs_perag	*pag;
	struct rb_node		*rbp;
	struct xfs_extent_busy	*busyp;
	int			match = 0;

	pag = xfs_perag_get(mp, agno);
	spin_lock(&pag->pagb_lock);

	rbp = pag->pagb_tree.rb_node;

	/* find the closest busy extent by start block */
	while (rbp) {
		busyp = rb_entry(rbp, struct xfs_extent_busy, rb_node);
		if (bno < busyp->bno) {
			/* may overlap, but exact start block is lower */
			if (bno + len > busyp->bno)
				match = -1;
			rbp = rbp->rb_left;
		} else if (bno > busyp->bno) {
			/* may overlap, but exact start block is higher */
			if (bno < busyp->bno + busyp->length)
				match = -1;
			rbp = rbp->rb_right;
		} else {
			/* bno matches busyp, length determines exact match */
			match = (busyp->length == len) ? 1 : -1;
			break;
		}
	}
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
	return match;
}

/*
 * The found free extent [fbno, fbno + flen) overlaps part or all of the
 * given busy extent.  If the overlap covers the beginning, the end, or all
 * of the busy extent, the overlapping portion can be made unbusy and used
 * for the allocation.  We cannot split a busy extent because we cannot
 * modify a transaction/CIL context busy list, but we can update an entry's
 * block number or length.
 *
 * Returns true if the extent can safely be reused, or false if the search
 * needs to be restarted.
 */
STATIC bool
xfs_extent_busy_update_extent(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_extent_busy	*busyp,
	xfs_agblock_t		fbno,
	xfs_extlen_t		flen,
	bool			userdata) __releases(&pag->pagb_lock)
					  __acquires(&pag->pagb_lock)
{
	xfs_agblock_t		fend = fbno + flen;
	xfs_agblock_t		bbno = busyp->bno;
	xfs_agblock_t		bend = bbno + busyp->length;

	/*
	 * This extent is currently being discarded.  Give the thread
	 * performing the discard a chance to mark the extent unbusy
	 * and retry.
	 */
	if (busyp->flags & XFS_EXTENT_BUSY_DISCARDED) {
		spin_unlock(&pag->pagb_lock);
		delay(1);
		spin_lock(&pag->pagb_lock);
		return false;
	}

	/*
	 * If there is a busy extent overlapping a user data allocation, we
	 * have no choice but to force the log and retry the search.
	 */
	if (userdata)
		goto out_force_log;

	if (bbno < fbno && bend > fend) {
		/*
		 * The free range is strictly inside the busy extent.  Reusing
		 * it would require splitting the busy extent, which we cannot
		 * do because the list of busy extents attached to the
		 * transaction or CIL context is immutable.  Force out the log
		 * to clear the busy extent and retry the search.
		 */
		goto out_force_log;
	} else if (bbno >= fbno && bend <= fend) {
		/*
		 * The busy extent is fully covered by the extent we are
		 * allocating, and can simply be removed from the rbtree.
		 * However we cannot remove it from the immutable list
		 * tracking busy extents in the transaction or CIL context,
		 * so set the length to zero to mark it invalid.
		 *
		 * We also need to restart the busy extent search from the
		 * tree root, because erasing the node can rearrange the
		 * tree topology.
		 */
		rb_erase(&busyp->rb_node, &pag->pagb_tree);
		busyp->length = 0;
		return false;
	} else if (fend < bend) {
		/*
		 * The free range overlaps the start of the busy extent, so
		 * move the start of the busy extent up to the end of the
		 * free range.
		 */
		busyp->bno = fend;
	} else if (bbno < fbno) {
		/*
		 * The free range overlaps the end of the busy extent, so
		 * trim the busy extent to end where the free range starts.
		 */
		busyp->length = fbno - busyp->bno;
	} else {
		ASSERT(0);
	}

	trace_xfs_extent_busy_reuse(mp, pag->pag_agno, fbno, flen);
	return true;

out_force_log:
	spin_unlock(&pag->pagb_lock);
	xfs_log_force(mp, XFS_LOG_SYNC);
	trace_xfs_extent_busy_force(mp, pag->pag_agno, fbno, flen);
	spin_lock(&pag->pagb_lock);
	return false;
}

/*
 * For a given extent [fbno, flen], make sure we can reuse it safely.
 */
void
xfs_extent_busy_reuse(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		fbno,
	xfs_extlen_t		flen,
	bool			userdata)
{
	struct xfs_perag	*pag;
	struct rb_node		*rbp;

	ASSERT(flen > 0);

	pag = xfs_perag_get(mp, agno);
	spin_lock(&pag->pagb_lock);
restart:
	rbp = pag->pagb_tree.rb_node;
	while (rbp) {
		struct xfs_extent_busy *busyp =
			rb_entry(rbp, struct xfs_extent_busy, rb_node);
		xfs_agblock_t	bbno = busyp->bno;
		xfs_agblock_t	bend = bbno + busyp->length;

		if (fbno + flen <= bbno) {
			rbp = rbp->rb_left;
			continue;
		} else if (fbno >= bend) {
			rbp = rbp->rb_right;
			continue;
		}

		if (!xfs_extent_busy_update_extent(mp, pag, busyp, fbno, flen,
						   userdata))
			goto restart;
	}
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
}

/*
 * For a given extent [bno, len], search the busy extent tree and trim the
 * range so that it does not overlap any busy extents.  Returns true if the
 * range was modified, in which case the new bounds are written back to *bno
 * and *len and the current busy generation is recorded in *busy_gen so the
 * caller can wait for it to change.  A trimmed length of zero indicates
 * that no usable sub-range was found.
 */
bool
xfs_extent_busy_trim(
	struct xfs_alloc_arg	*args,
	xfs_agblock_t		*bno,
	xfs_extlen_t		*len,
	unsigned		*busy_gen)
{
	xfs_agblock_t		fbno;
	xfs_extlen_t		flen;
	struct rb_node		*rbp;
	bool			ret = false;

	ASSERT(*len > 0);

	spin_lock(&args->pag->pagb_lock);
restart:
	fbno = *bno;
	flen = *len;
	rbp = args->pag->pagb_tree.rb_node;
	while (rbp && flen >= args->minlen) {
		struct xfs_extent_busy *busyp =
			rb_entry(rbp, struct xfs_extent_busy, rb_node);
		xfs_agblock_t	fend = fbno + flen;
		xfs_agblock_t	bbno = busyp->bno;
		xfs_agblock_t	bend = bbno + busyp->length;

		if (fend <= bbno) {
			rbp = rbp->rb_left;
			continue;
		} else if (fbno >= bend) {
			rbp = rbp->rb_right;
			continue;
		}

		/*
		 * If this is a metadata allocation, try to reuse the busy
		 * extent instead of trimming the allocation.
		 */
		if (!xfs_alloc_is_userdata(args->datatype) &&
		    !(busyp->flags & XFS_EXTENT_BUSY_DISCARDED)) {
			if (!xfs_extent_busy_update_extent(args->mp, args->pag,
							   busyp, fbno, flen,
							   false))
				goto restart;
			continue;
		}

		if (bbno <= fbno) {
			/*
			 * The busy extent covers the start of the found free
			 * range.  If it also covers the end, nothing usable
			 * is left, so fail the trim.  Otherwise move the
			 * start of the free range up past the busy extent.
			 */
			if (fend <= bend)
				goto fail;

			fbno = bend;
		} else if (bend >= fend) {
			/*
			 * The busy extent covers the end of the found free
			 * range, so move the end of the free range down to
			 * where the busy extent starts.
			 */
			fend = bbno;
		} else {
			/*
			 * The busy extent sits in the middle of the found
			 * free range.  We cannot use both sides without
			 * splitting the busy extent, so pick the side most
			 * likely to satisfy the allocation: the front if it
			 * already reaches maxlen, then a generously sized
			 * tail, then a front that at least meets minlen,
			 * and fail otherwise.
			 */
			if (bbno - fbno >= args->maxlen) {
				/* left candidate fits perfectly */
				fend = bbno;
			} else if (fend - bend >= args->maxlen * 4) {
				/* right candidate has enough free space */
				fbno = bend;
			} else if (bbno - fbno >= args->minlen) {
				/* left candidate fits minimum requirement */
				fend = bbno;
			} else {
				goto fail;
			}
		}

		flen = fend - fbno;
	}
out:

	if (fbno != *bno || flen != *len) {
		trace_xfs_extent_busy_trim(args->mp, args->agno, *bno, *len,
					   fbno, flen);
		*bno = fbno;
		*len = flen;
		*busy_gen = args->pag->pagb_gen;
		ret = true;
	}
	spin_unlock(&args->pag->pagb_lock);
	return ret;
fail:
	/*
	 * Return a zero extent length as the failure indication.  All
	 * callers re-check whether the trimmed extent satisfies the minlen
	 * requirement.
	 */
	flen = 0;
	goto out;
}

STATIC void
xfs_extent_busy_clear_one(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_extent_busy	*busyp)
{
	if (busyp->length) {
		trace_xfs_extent_busy_clear(mp, busyp->agno, busyp->bno,
						busyp->length);
		rb_erase(&busyp->rb_node, &pag->pagb_tree);
	}

	list_del_init(&busyp->list);
	kmem_free(busyp);
}

static void
xfs_extent_busy_put_pag(
	struct xfs_perag	*pag,
	bool			wakeup)
		__releases(pag->pagb_lock)
{
	if (wakeup) {
		pag->pagb_gen++;
		wake_up_all(&pag->pagb_wait);
	}

	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
}

/*
 * Remove all extents on the passed in list from the busy extents tree.
 * If do_discard is set, skip extents that need to be discarded and mark
 * them as undergoing a discard operation instead.
 */
void
xfs_extent_busy_clear(
	struct xfs_mount	*mp,
	struct list_head	*list,
	bool			do_discard)
{
	struct xfs_extent_busy	*busyp, *n;
	struct xfs_perag	*pag = NULL;
	xfs_agnumber_t		agno = NULLAGNUMBER;
	bool			wakeup = false;

	list_for_each_entry_safe(busyp, n, list, list) {
		if (busyp->agno != agno) {
			if (pag)
				xfs_extent_busy_put_pag(pag, wakeup);
			agno = busyp->agno;
			pag = xfs_perag_get(mp, agno);
			spin_lock(&pag->pagb_lock);
			wakeup = false;
		}

		if (do_discard && busyp->length &&
		    !(busyp->flags & XFS_EXTENT_BUSY_SKIP_DISCARD)) {
			busyp->flags = XFS_EXTENT_BUSY_DISCARDED;
		} else {
			xfs_extent_busy_clear_one(mp, pag, busyp);
			wakeup = true;
		}
	}

	if (pag)
		xfs_extent_busy_put_pag(pag, wakeup);
}

/*
 * Flush out all busy extents for this AG.
 */
void
xfs_extent_busy_flush(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	unsigned		busy_gen)
{
	DEFINE_WAIT		(wait);
	int			error;

	error = xfs_log_force(mp, XFS_LOG_SYNC);
	if (error)
		return;

	do {
		prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE);
		if (busy_gen != READ_ONCE(pag->pagb_gen))
			break;
		schedule();
	} while (1);

	finish_wait(&pag->pagb_wait, &wait);
}

void
xfs_extent_busy_wait_all(
	struct xfs_mount	*mp)
{
	DEFINE_WAIT		(wait);
	xfs_agnumber_t		agno;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		struct xfs_perag *pag = xfs_perag_get(mp, agno);

		do {
			prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE);
			if (RB_EMPTY_ROOT(&pag->pagb_tree))
				break;
			schedule();
		} while (1);
		finish_wait(&pag->pagb_wait, &wait);

		xfs_perag_put(pag);
	}
}

/*
 * Callback for list_sort to sort busy extents by the AG they reside in.
 */
int
xfs_extent_busy_ag_cmp(
	void			*priv,
	struct list_head	*l1,
	struct list_head	*l2)
{
	struct xfs_extent_busy	*b1 =
		container_of(l1, struct xfs_extent_busy, list);
	struct xfs_extent_busy	*b2 =
		container_of(l2, struct xfs_extent_busy, list);
	s32 diff;

	diff = b1->agno - b2->agno;
	if (!diff)
		diff = b1->bno - b2->bno;
	return diff;
}
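
xfs_extent_busy_ag_cmp() orders busy extents by AG and then by start block so that a sorted busy list can be walked with per-AG locks taken in a consistent order. A minimal usage sketch with list_sort() follows; the wrapper name example_sort_busy_list is hypothetical (the mainline tree wraps this comparator in a small helper declared in xfs_extent_busy.h), and it assumes the pre-5.13 list_sort() prototype that matches the non-const comparator signature used above.

#include <linux/list_sort.h>

/*
 * Hypothetical wrapper: sort a busy extent list by AG and start block
 * before processing it, so AGs are visited in ascending order.
 */
static inline void
example_sort_busy_list(
	struct list_head	*list)
{
	list_sort(NULL, list, xfs_extent_busy_ag_cmp);
}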