/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */


/*
 * mballoc.c contains the multiblocks allocation routines
 */

#include "ext4_jbd2.h"
#include "mballoc.h"
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <trace/events/ext4.h>

#ifdef CONFIG_EXT4_DEBUG
ushort ext4_mballoc_debug __read_mostly;

module_param_named(mballoc_debug, ext4_mballoc_debug, ushort, 0644);
MODULE_PARM_DESC(mballoc_debug, "Debugging level for ext4's mballoc");
#endif

/*
 * MUSTDO:
 *  - test ext4_ext_search_left() and ext4_ext_search_right()
 *  - search for metadata in a few groups
 *
 * TODO v4:
 *  - normalization should take into account whether file is still open
 *  - discard preallocations if no free space left (policy?)
 *  - don't normalize tails
 *  - quota
 *  - reservation for superuser
 *
 * TODO v3:
 *  - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *  - track min/max extents in each group for better group selection
 *  - mb_mark_used() may allocate chunk right after splitting buddy
 *  - tree of groups sorted by number of free blocks
 *  - error handling
 */

/*
 * The allocation request involves a request for multiple blocks near to
 * the goal (block) value specified.
 *
 * During the initialization phase of the allocator we decide whether to use
 * group preallocation or inode preallocation depending on the size of
 * the file. The size of the file could be the resulting file size we
 * would have after allocation, or the current file size, whichever is
 * larger. If the size is less than sbi->s_mb_stream_request we select
 * group preallocation. The default value of s_mb_stream_request is
 * 16 blocks. This can also be tuned via
 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
 * terms of number of blocks.
 *
 * The main motivation for having small files use group preallocation is to
 * ensure that we have small files closer together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains a list of prealloc
 * spaces for this particular inode. The inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length for this prealloc space (in clusters)
 * pa_free   -> free space available in this prealloc space (in clusters)
 *
 * The inode preallocation space is used looking at the _logical_ start
 * block. Only if the logical file block falls within the range of the
 * prealloc space do we consume that prealloc space. This makes sure that
 * we have contiguous physical blocks representing the file blocks.
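 *
 * For illustration (the numbers here are invented, not taken from the
 * code): a PA with pa_lstart = 100, pa_pstart = 5000, pa_len = 16 and
 * pa_free = 16 covers logical blocks 100..115.  An allocation for logical
 * block 104 is served from this PA: the block maps to physical block 5004
 * and pa_free drops to 15.  An allocation for logical block 200 does not
 * fall in [100, 116) and must be satisfied elsewhere.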
 *
 * The important thing to be noted in case of inode prealloc space is that
 * we don't modify the values associated with the inode prealloc space
 * except pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and if we
 * have the group allocation flag set then we look at the locality group
 * prealloc space. These are per-CPU prealloc lists represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-cpu locality group is to reduce the contention
 * between CPUs. It is possible to get scheduled at this point.
 *
 * The locality group prealloc space is used looking at whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc and/or locality group
 * prealloc then we look at the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
 * mapped to the buddy and bitmap information regarding different
 * groups. The buddy information is attached to the buddy cache inode so
 * that we can access it through the page cache. The information regarding
 * each group is loaded via ext4_mb_load_buddy.  The information involves
 * block bitmap and buddy information. The information is stored in the
 * inode as:
 *
 *  {                        page                        }
 *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information.  So for each group we
 * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE /
 * blocksize) blocks.  So it can have information regarding groups_per_page
 * groups, which is blocks_per_page/2.
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for count number of blocks in the buddy cache. If we were able
 * to locate that many free blocks we return with additional information
 * regarding the rest of the contiguous physical blocks available.
 *
 * Before allocating blocks via the buddy cache we normalize the request
 * blocks. This ensures we ask for more blocks than we need. The extra
 * blocks that we get after allocation are added to the respective prealloc
 * list. In case of inode preallocation we follow a list of heuristics
 * based on file size. This can be found in ext4_mb_normalize_request. If
 * we are doing a group prealloc we try to normalize the request to
 * sbi->s_mb_group_prealloc.  The default value of s_mb_group_prealloc is
 * dependent on the cluster size; for non-bigalloc file systems, it is
 * 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
 * terms of number of blocks. If we have mounted the file system with the
 * -o stripe=<value> option the group prealloc request is normalized to
 * the smallest multiple of the stripe value (sbi->s_stripe) which is
 * greater than the default mb_group_prealloc.
 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 *
 * The regular allocator uses buddy scan only if the request len is a power
 * of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs.
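 *
 * For example (assuming a hypothetical s_mb_order2_reqs value of 2): a
 * request for 64 clusters is a power of two of order 6 >= 2, so the buddy
 * data can be scanned directly for a free chunk of that order; a request
 * for 33 clusters is not a power of two and falls through to the
 * bitmap-based scanning described below.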
 *
 * The value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req.  If the request len is equal to
 * the stripe size (sbi->s_stripe), we try to search for contiguous blocks
 * in stripe size. This should result in better allocation on RAID setups.
 * If not, we search in the specific group using the bitmap for best
 * extents. The tunables min_to_scan and max_to_scan control the behaviour
 * here.  min_to_scan indicates how long mballoc __must__ look for a best
 * extent and max_to_scan indicates how long mballoc __can__ look for a
 * best extent in the found extents. Searching for the blocks starts with
 * the group specified as the goal value in the allocation context via
 * ac_g_ex. Each group is first checked based on the criteria whether it
 * can be used for allocation. ext4_mb_good_group explains how the groups
 * are checked.
 *
 * Both prealloc spaces are populated as described above. So for the first
 * request we will hit the buddy cache, which will result in this prealloc
 * space getting filled. The prealloc space is then later used for
 * subsequent requests.
 */

/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to a specific inode and can be used for this inode only.
 *    it describes part of the inode's space preallocated to specific
 *    physical blocks. any block from that preallocated space can be used
 *    independently. the descriptor just tracks the number of blocks left
 *    unused. so, before taking some block from the descriptor, one must
 *    make sure the corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within the descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to a specific locality group which does not translate to
 *    a permanent set of inodes: an inode can join and leave a group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * the relation between them can be expressed as:
 *    in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means the blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in the mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count numbers
 * of blocks: how many blocks are marked used/free in the on-disk bitmap,
 * buddy and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:			buddy = on-disk + PAs
 *  - new PA:				buddy += N; PA = N
 *  - use inode PA:			on-disk += N; PA -= N
 *  - discard inode PA:			buddy -= on-disk - PA; PA = 0
 *  - use locality group PA:		on-disk += N; PA -= N
 *  - discard locality group PA:	buddy -= PA; PA = 0
 *  note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
 *        is used in the real operation because we can't know the actual
 *        used bits from the PA, only from the on-disk bitmap
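 *
 * a small worked example of the counting above (the numbers are invented
 * for illustration): suppose the on-disk bitmap has 100 clusters marked
 * used and existing PAs cover another 20; init buddy then records 120
 * used.  creating a new PA of 8 clusters gives buddy = 128 used, PA = 8.
 * using 3 of them moves 3 clusters to the on-disk bitmap (103 used) and
 * leaves PA = 5.  discarding that PA returns its 5 still-unused clusters
 * to the buddy, back to 123 used.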
 *
 * if we follow this strict logic, then all operations above should be
 * atomic.  given that some of them can block, we'd have to use something
 * like semaphores, killing performance on high-end SMP hardware. let's try
 * to relax it using the following knowledge:
 *  1) if the buddy is referenced, it's already initialized
 *  2) while a block is used in the buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
 *     a bit set and a PA claims the same block, it's OK. IOW, one can set
 *     a bit in the on-disk bitmap if the buddy has the same bit set and/or
 *     a PA covers the corresponding block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for the PA are allocated in the buddy, the buddy must be
 *      referenced until the PA is linked to the allocation group to avoid
 *      concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either the on-disk bitmap or the PA has
 *      uptodate data; given (3) we care that the PA-=N operation doesn't
 *      interfere with init
 *    - discard inode PA
 *      the simplest way would be to have the buddy initialized by the discard
 *    - use locality group PA
 *      again PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have the buddy initialized by the discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      the discard process must wait until the PA isn't used by another process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      the discard process must wait until the PA isn't used by another process
 *  - use inode PA
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      the discard process must wait until the PA isn't used by another process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      the discard process must wait until the PA isn't used by another process
 *
 * now we're ready to draw a few conclusions:
 *  - while a PA is referenced, no discard of it is possible
 *  - a PA is referenced until the block is marked in the on-disk bitmap
 *  - a PA changes only after the on-disk bitmap does
 *  - discard must not compete with init. either init is done before
 *    any discard or they're serialized somehow
 *  - buddy init as the sum of on-disk bitmap and PAs is done atomically
 *
 * a special case when we've used a PA to emptiness. no need to modify the
 * buddy in this case, but we should care about concurrent init
 *
 */

 /*
 * Logic in few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group	(group)
 *  - object (inode/locality)	(object)
 *  - per-pa lock		(pa)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *        pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *        pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *        pa
 *    group
 *        object
 *
 */
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_data_cachep;

/* We create slab caches for groupinfo data structures based on the
 * superblock block size.  There will be one per mounted filesystem for
 * each unique s_blocksize_bits */
#define NR_GRPINFO_CACHES 8
static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];

static const char *ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
};

static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group);
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
						ext4_group_t group);
static void ext4_free_data_callback(struct super_block *sb,
				struct ext4_journal_cb_entry *jce, int rc);

static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
	*bit += ((unsigned long) addr & 7UL) << 3;
	addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
	*bit += ((unsigned long) addr & 3UL) << 3;
	addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits you are?!"
383#endif 384 return addr; 385} 386 387static inline int mb_test_bit(int bit, void *addr) 388{ 389 /* 390 * ext4_test_bit on architecture like powerpc 391 * needs unsigned long aligned address 392 */ 393 addr = mb_correct_addr_and_bit(&bit, addr); 394 return ext4_test_bit(bit, addr); 395} 396 397static inline void mb_set_bit(int bit, void *addr) 398{ 399 addr = mb_correct_addr_and_bit(&bit, addr); 400 ext4_set_bit(bit, addr); 401} 402 403static inline void mb_clear_bit(int bit, void *addr) 404{ 405 addr = mb_correct_addr_and_bit(&bit, addr); 406 ext4_clear_bit(bit, addr); 407} 408 409static inline int mb_test_and_clear_bit(int bit, void *addr) 410{ 411 addr = mb_correct_addr_and_bit(&bit, addr); 412 return ext4_test_and_clear_bit(bit, addr); 413} 414 415static inline int mb_find_next_zero_bit(void *addr, int max, int start) 416{ 417 int fix = 0, ret, tmpmax; 418 addr = mb_correct_addr_and_bit(&fix, addr); 419 tmpmax = max + fix; 420 start += fix; 421 422 ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix; 423 if (ret > max) 424 return max; 425 return ret; 426} 427 428static inline int mb_find_next_bit(void *addr, int max, int start) 429{ 430 int fix = 0, ret, tmpmax; 431 addr = mb_correct_addr_and_bit(&fix, addr); 432 tmpmax = max + fix; 433 start += fix; 434 435 ret = ext4_find_next_bit(addr, tmpmax, start) - fix; 436 if (ret > max) 437 return max; 438 return ret; 439} 440 441static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max) 442{ 443 char *bb; 444 445 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); 446 BUG_ON(max == NULL); 447 448 if (order > e4b->bd_blkbits + 1) { 449 *max = 0; 450 return NULL; 451 } 452 453 /* at order 0 we see each particular block */ 454 if (order == 0) { 455 *max = 1 << (e4b->bd_blkbits + 3); 456 return e4b->bd_bitmap; 457 } 458 459 bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order]; 460 *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order]; 461 462 return bb; 463} 464 465#ifdef DOUBLE_CHECK 466static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b, 467 int first, int count) 468{ 469 int i; 470 struct super_block *sb = e4b->bd_sb; 471 472 if (unlikely(e4b->bd_info->bb_bitmap == NULL)) 473 return; 474 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); 475 for (i = 0; i < count; i++) { 476 if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) { 477 ext4_fsblk_t blocknr; 478 479 blocknr = ext4_group_first_block_no(sb, e4b->bd_group); 480 blocknr += EXT4_C2B(EXT4_SB(sb), first + i); 481 ext4_grp_locked_error(sb, e4b->bd_group, 482 inode ? 
inode->i_ino : 0, 483 blocknr, 484 "freeing block already freed " 485 "(bit %u)", 486 first + i); 487 } 488 mb_clear_bit(first + i, e4b->bd_info->bb_bitmap); 489 } 490} 491 492static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count) 493{ 494 int i; 495 496 if (unlikely(e4b->bd_info->bb_bitmap == NULL)) 497 return; 498 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 499 for (i = 0; i < count; i++) { 500 BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap)); 501 mb_set_bit(first + i, e4b->bd_info->bb_bitmap); 502 } 503} 504 505static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap) 506{ 507 if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) { 508 unsigned char *b1, *b2; 509 int i; 510 b1 = (unsigned char *) e4b->bd_info->bb_bitmap; 511 b2 = (unsigned char *) bitmap; 512 for (i = 0; i < e4b->bd_sb->s_blocksize; i++) { 513 if (b1[i] != b2[i]) { 514 ext4_msg(e4b->bd_sb, KERN_ERR, 515 "corruption in group %u " 516 "at byte %u(%u): %x in copy != %x " 517 "on disk/prealloc", 518 e4b->bd_group, i, i * 8, b1[i], b2[i]); 519 BUG(); 520 } 521 } 522 } 523} 524 525#else 526static inline void mb_free_blocks_double(struct inode *inode, 527 struct ext4_buddy *e4b, int first, int count) 528{ 529 return; 530} 531static inline void mb_mark_used_double(struct ext4_buddy *e4b, 532 int first, int count) 533{ 534 return; 535} 536static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap) 537{ 538 return; 539} 540#endif 541 542#ifdef AGGRESSIVE_CHECK 543 544#define MB_CHECK_ASSERT(assert) \ 545do { \ 546 if (!(assert)) { \ 547 printk(KERN_EMERG \ 548 "Assertion failure in %s() at %s:%d: \"%s\"\n", \ 549 function, file, line, # assert); \ 550 BUG(); \ 551 } \ 552} while (0) 553 554static int __mb_check_buddy(struct ext4_buddy *e4b, char *file, 555 const char *function, int line) 556{ 557 struct super_block *sb = e4b->bd_sb; 558 int order = e4b->bd_blkbits + 1; 559 int max; 560 int max2; 561 int i; 562 int j; 563 int k; 564 int count; 565 struct ext4_group_info *grp; 566 int fragments = 0; 567 int fstart; 568 struct list_head *cur; 569 void *buddy; 570 void *buddy2; 571 572 { 573 static int mb_check_counter; 574 if (mb_check_counter++ % 100 != 0) 575 return 0; 576 } 577 578 while (order > 1) { 579 buddy = mb_find_buddy(e4b, order, &max); 580 MB_CHECK_ASSERT(buddy); 581 buddy2 = mb_find_buddy(e4b, order - 1, &max2); 582 MB_CHECK_ASSERT(buddy2); 583 MB_CHECK_ASSERT(buddy != buddy2); 584 MB_CHECK_ASSERT(max * 2 == max2); 585 586 count = 0; 587 for (i = 0; i < max; i++) { 588 589 if (mb_test_bit(i, buddy)) { 590 /* only single bit in buddy2 may be 1 */ 591 if (!mb_test_bit(i << 1, buddy2)) { 592 MB_CHECK_ASSERT( 593 mb_test_bit((i<<1)+1, buddy2)); 594 } else if (!mb_test_bit((i << 1) + 1, buddy2)) { 595 MB_CHECK_ASSERT( 596 mb_test_bit(i << 1, buddy2)); 597 } 598 continue; 599 } 600 601 /* both bits in buddy2 must be 1 */ 602 MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2)); 603 MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2)); 604 605 for (j = 0; j < (1 << order); j++) { 606 k = (i * (1 << order)) + j; 607 MB_CHECK_ASSERT( 608 !mb_test_bit(k, e4b->bd_bitmap)); 609 } 610 count++; 611 } 612 MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count); 613 order--; 614 } 615 616 fstart = -1; 617 buddy = mb_find_buddy(e4b, 0, &max); 618 for (i = 0; i < max; i++) { 619 if (!mb_test_bit(i, buddy)) { 620 MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free); 621 if (fstart == -1) { 622 fragments++; 623 fstart = i; 624 } 625 continue; 626 } 627 fstart = 
-1; 628 /* check used bits only */ 629 for (j = 0; j < e4b->bd_blkbits + 1; j++) { 630 buddy2 = mb_find_buddy(e4b, j, &max2); 631 k = i >> j; 632 MB_CHECK_ASSERT(k < max2); 633 MB_CHECK_ASSERT(mb_test_bit(k, buddy2)); 634 } 635 } 636 MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info)); 637 MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments); 638 639 grp = ext4_get_group_info(sb, e4b->bd_group); 640 list_for_each(cur, &grp->bb_prealloc_list) { 641 ext4_group_t groupnr; 642 struct ext4_prealloc_space *pa; 643 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 644 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k); 645 MB_CHECK_ASSERT(groupnr == e4b->bd_group); 646 for (i = 0; i < pa->pa_len; i++) 647 MB_CHECK_ASSERT(mb_test_bit(k + i, buddy)); 648 } 649 return 0; 650} 651#undef MB_CHECK_ASSERT 652#define mb_check_buddy(e4b) __mb_check_buddy(e4b, \ 653 __FILE__, __func__, __LINE__) 654#else 655#define mb_check_buddy(e4b) 656#endif 657 658/* 659 * Divide blocks started from @first with length @len into 660 * smaller chunks with power of 2 blocks. 661 * Clear the bits in bitmap which the blocks of the chunk(s) covered, 662 * then increase bb_counters[] for corresponded chunk size. 663 */ 664static void ext4_mb_mark_free_simple(struct super_block *sb, 665 void *buddy, ext4_grpblk_t first, ext4_grpblk_t len, 666 struct ext4_group_info *grp) 667{ 668 struct ext4_sb_info *sbi = EXT4_SB(sb); 669 ext4_grpblk_t min; 670 ext4_grpblk_t max; 671 ext4_grpblk_t chunk; 672 unsigned short border; 673 674 BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb)); 675 676 border = 2 << sb->s_blocksize_bits; 677 678 while (len > 0) { 679 /* find how many blocks can be covered since this position */ 680 max = ffs(first | border) - 1; 681 682 /* find how many blocks of power 2 we need to mark */ 683 min = fls(len) - 1; 684 685 if (max < min) 686 min = max; 687 chunk = 1 << min; 688 689 /* mark multiblock chunks only */ 690 grp->bb_counters[min]++; 691 if (min > 0) 692 mb_clear_bit(first >> min, 693 buddy + sbi->s_mb_offsets[min]); 694 695 len -= chunk; 696 first += chunk; 697 } 698} 699 700/* 701 * Cache the order of the largest free extent we have available in this block 702 * group. 
703 */ 704static void 705mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp) 706{ 707 int i; 708 int bits; 709 710 grp->bb_largest_free_order = -1; /* uninit */ 711 712 bits = sb->s_blocksize_bits + 1; 713 for (i = bits; i >= 0; i--) { 714 if (grp->bb_counters[i] > 0) { 715 grp->bb_largest_free_order = i; 716 break; 717 } 718 } 719} 720 721static noinline_for_stack 722void ext4_mb_generate_buddy(struct super_block *sb, 723 void *buddy, void *bitmap, ext4_group_t group) 724{ 725 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 726 struct ext4_sb_info *sbi = EXT4_SB(sb); 727 ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb); 728 ext4_grpblk_t i = 0; 729 ext4_grpblk_t first; 730 ext4_grpblk_t len; 731 unsigned free = 0; 732 unsigned fragments = 0; 733 unsigned long long period = get_cycles(); 734 735 /* initialize buddy from bitmap which is aggregation 736 * of on-disk bitmap and preallocations */ 737 i = mb_find_next_zero_bit(bitmap, max, 0); 738 grp->bb_first_free = i; 739 while (i < max) { 740 fragments++; 741 first = i; 742 i = mb_find_next_bit(bitmap, max, i); 743 len = i - first; 744 free += len; 745 if (len > 1) 746 ext4_mb_mark_free_simple(sb, buddy, first, len, grp); 747 else 748 grp->bb_counters[0]++; 749 if (i < max) 750 i = mb_find_next_zero_bit(bitmap, max, i); 751 } 752 grp->bb_fragments = fragments; 753 754 if (free != grp->bb_free) { 755 ext4_grp_locked_error(sb, group, 0, 0, 756 "block bitmap and bg descriptor " 757 "inconsistent: %u vs %u free clusters", 758 free, grp->bb_free); 759 /* 760 * If we intend to continue, we consider group descriptor 761 * corrupt and update bb_free using bitmap value 762 */ 763 grp->bb_free = free; 764 if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) 765 percpu_counter_sub(&sbi->s_freeclusters_counter, 766 grp->bb_free); 767 set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state); 768 } 769 mb_set_largest_free_order(sb, grp); 770 771 clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state)); 772 773 period = get_cycles() - period; 774 spin_lock(&EXT4_SB(sb)->s_bal_lock); 775 EXT4_SB(sb)->s_mb_buddies_generated++; 776 EXT4_SB(sb)->s_mb_generation_time += period; 777 spin_unlock(&EXT4_SB(sb)->s_bal_lock); 778} 779 780static void mb_regenerate_buddy(struct ext4_buddy *e4b) 781{ 782 int count; 783 int order = 1; 784 void *buddy; 785 786 while ((buddy = mb_find_buddy(e4b, order++, &count))) { 787 ext4_set_bits(buddy, 0, count); 788 } 789 e4b->bd_info->bb_fragments = 0; 790 memset(e4b->bd_info->bb_counters, 0, 791 sizeof(*e4b->bd_info->bb_counters) * 792 (e4b->bd_sb->s_blocksize_bits + 2)); 793 794 ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy, 795 e4b->bd_bitmap, e4b->bd_group); 796} 797 798/* The buddy information is attached the buddy cache inode 799 * for convenience. The information regarding each group 800 * is loaded via ext4_mb_load_buddy. The information involve 801 * block bitmap and buddy information. The information are 802 * stored in the inode as 803 * 804 * { page } 805 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]... 806 * 807 * 808 * one block each for bitmap and buddy information. 809 * So for each group we take up 2 blocks. A page can 810 * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks. 811 * So it can have information regarding groups_per_page which 812 * is blocks_per_page/2 813 * 814 * Locking note: This routine takes the block group lock of all groups 815 * for this page; do not hold this lock when calling this routine! 
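 *
 * As an illustration of the layout arithmetic (the geometry is only an
 * example): with 4KiB pages and a 1KiB blocksize, blocks_per_page is 4, so
 * one page holds the bitmap and buddy blocks for groups_per_page = 2
 * groups.  With a 4KiB blocksize, blocks_per_page is 1 and the bitmap and
 * buddy blocks for a group end up on two consecutive pages.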
816 */ 817 818static int ext4_mb_init_cache(struct page *page, char *incore) 819{ 820 ext4_group_t ngroups; 821 int blocksize; 822 int blocks_per_page; 823 int groups_per_page; 824 int err = 0; 825 int i; 826 ext4_group_t first_group, group; 827 int first_block; 828 struct super_block *sb; 829 struct buffer_head *bhs; 830 struct buffer_head **bh = NULL; 831 struct inode *inode; 832 char *data; 833 char *bitmap; 834 struct ext4_group_info *grinfo; 835 836 mb_debug(1, "init page %lu\n", page->index); 837 838 inode = page->mapping->host; 839 sb = inode->i_sb; 840 ngroups = ext4_get_groups_count(sb); 841 blocksize = 1 << inode->i_blkbits; 842 blocks_per_page = PAGE_CACHE_SIZE / blocksize; 843 844 groups_per_page = blocks_per_page >> 1; 845 if (groups_per_page == 0) 846 groups_per_page = 1; 847 848 /* allocate buffer_heads to read bitmaps */ 849 if (groups_per_page > 1) { 850 i = sizeof(struct buffer_head *) * groups_per_page; 851 bh = kzalloc(i, GFP_NOFS); 852 if (bh == NULL) { 853 err = -ENOMEM; 854 goto out; 855 } 856 } else 857 bh = &bhs; 858 859 first_group = page->index * blocks_per_page / 2; 860 861 /* read all groups the page covers into the cache */ 862 for (i = 0, group = first_group; i < groups_per_page; i++, group++) { 863 if (group >= ngroups) 864 break; 865 866 grinfo = ext4_get_group_info(sb, group); 867 /* 868 * If page is uptodate then we came here after online resize 869 * which added some new uninitialized group info structs, so 870 * we must skip all initialized uptodate buddies on the page, 871 * which may be currently in use by an allocating task. 872 */ 873 if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) { 874 bh[i] = NULL; 875 continue; 876 } 877 bh[i] = ext4_read_block_bitmap_nowait(sb, group); 878 if (IS_ERR(bh[i])) { 879 err = PTR_ERR(bh[i]); 880 bh[i] = NULL; 881 goto out; 882 } 883 mb_debug(1, "read bitmap for group %u\n", group); 884 } 885 886 /* wait for I/O completion */ 887 for (i = 0, group = first_group; i < groups_per_page; i++, group++) { 888 int err2; 889 890 if (!bh[i]) 891 continue; 892 err2 = ext4_wait_block_bitmap(sb, group, bh[i]); 893 if (!err) 894 err = err2; 895 } 896 897 first_block = page->index * blocks_per_page; 898 for (i = 0; i < blocks_per_page; i++) { 899 group = (first_block + i) >> 1; 900 if (group >= ngroups) 901 break; 902 903 if (!bh[group - first_group]) 904 /* skip initialized uptodate buddy */ 905 continue; 906 907 if (!buffer_verified(bh[group - first_group])) 908 /* Skip faulty bitmaps */ 909 continue; 910 err = 0; 911 912 /* 913 * data carry information regarding this 914 * particular group in the format specified 915 * above 916 * 917 */ 918 data = page_address(page) + (i * blocksize); 919 bitmap = bh[group - first_group]->b_data; 920 921 /* 922 * We place the buddy block and bitmap block 923 * close together 924 */ 925 if ((first_block + i) & 1) { 926 /* this is block of buddy */ 927 BUG_ON(incore == NULL); 928 mb_debug(1, "put buddy for group %u in page %lu/%x\n", 929 group, page->index, i * blocksize); 930 trace_ext4_mb_buddy_bitmap_load(sb, group); 931 grinfo = ext4_get_group_info(sb, group); 932 grinfo->bb_fragments = 0; 933 memset(grinfo->bb_counters, 0, 934 sizeof(*grinfo->bb_counters) * 935 (sb->s_blocksize_bits+2)); 936 /* 937 * incore got set to the group block bitmap below 938 */ 939 ext4_lock_group(sb, group); 940 /* init the buddy */ 941 memset(data, 0xff, blocksize); 942 ext4_mb_generate_buddy(sb, data, incore, group); 943 ext4_unlock_group(sb, group); 944 incore = NULL; 945 } else { 946 /* this is block 
of bitmap */ 947 BUG_ON(incore != NULL); 948 mb_debug(1, "put bitmap for group %u in page %lu/%x\n", 949 group, page->index, i * blocksize); 950 trace_ext4_mb_bitmap_load(sb, group); 951 952 /* see comments in ext4_mb_put_pa() */ 953 ext4_lock_group(sb, group); 954 memcpy(data, bitmap, blocksize); 955 956 /* mark all preallocated blks used in in-core bitmap */ 957 ext4_mb_generate_from_pa(sb, data, group); 958 ext4_mb_generate_from_freelist(sb, data, group); 959 ext4_unlock_group(sb, group); 960 961 /* set incore so that the buddy information can be 962 * generated using this 963 */ 964 incore = data; 965 } 966 } 967 SetPageUptodate(page); 968 969out: 970 if (bh) { 971 for (i = 0; i < groups_per_page; i++) 972 brelse(bh[i]); 973 if (bh != &bhs) 974 kfree(bh); 975 } 976 return err; 977} 978 979/* 980 * Lock the buddy and bitmap pages. This make sure other parallel init_group 981 * on the same buddy page doesn't happen whild holding the buddy page lock. 982 * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap 983 * are on the same page e4b->bd_buddy_page is NULL and return value is 0. 984 */ 985static int ext4_mb_get_buddy_page_lock(struct super_block *sb, 986 ext4_group_t group, struct ext4_buddy *e4b) 987{ 988 struct inode *inode = EXT4_SB(sb)->s_buddy_cache; 989 int block, pnum, poff; 990 int blocks_per_page; 991 struct page *page; 992 993 e4b->bd_buddy_page = NULL; 994 e4b->bd_bitmap_page = NULL; 995 996 blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize; 997 /* 998 * the buddy cache inode stores the block bitmap 999 * and buddy information in consecutive blocks. 1000 * So for each group we need two blocks. 1001 */ 1002 block = group * 2; 1003 pnum = block / blocks_per_page; 1004 poff = block % blocks_per_page; 1005 page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); 1006 if (!page) 1007 return -ENOMEM; 1008 BUG_ON(page->mapping != inode->i_mapping); 1009 e4b->bd_bitmap_page = page; 1010 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); 1011 1012 if (blocks_per_page >= 2) { 1013 /* buddy and bitmap are on the same page */ 1014 return 0; 1015 } 1016 1017 block++; 1018 pnum = block / blocks_per_page; 1019 page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); 1020 if (!page) 1021 return -ENOMEM; 1022 BUG_ON(page->mapping != inode->i_mapping); 1023 e4b->bd_buddy_page = page; 1024 return 0; 1025} 1026 1027static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b) 1028{ 1029 if (e4b->bd_bitmap_page) { 1030 unlock_page(e4b->bd_bitmap_page); 1031 page_cache_release(e4b->bd_bitmap_page); 1032 } 1033 if (e4b->bd_buddy_page) { 1034 unlock_page(e4b->bd_buddy_page); 1035 page_cache_release(e4b->bd_buddy_page); 1036 } 1037} 1038 1039/* 1040 * Locking note: This routine calls ext4_mb_init_cache(), which takes the 1041 * block group lock of all groups for this page; do not hold the BG lock when 1042 * calling this routine! 1043 */ 1044static noinline_for_stack 1045int ext4_mb_init_group(struct super_block *sb, ext4_group_t group) 1046{ 1047 1048 struct ext4_group_info *this_grp; 1049 struct ext4_buddy e4b; 1050 struct page *page; 1051 int ret = 0; 1052 1053 might_sleep(); 1054 mb_debug(1, "init group %u\n", group); 1055 this_grp = ext4_get_group_info(sb, group); 1056 /* 1057 * This ensures that we don't reinit the buddy cache 1058 * page which map to the group from which we are already 1059 * allocating. 
If we are looking at the buddy cache we would 1060 * have taken a reference using ext4_mb_load_buddy and that 1061 * would have pinned buddy page to page cache. 1062 * The call to ext4_mb_get_buddy_page_lock will mark the 1063 * page accessed. 1064 */ 1065 ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b); 1066 if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) { 1067 /* 1068 * somebody initialized the group 1069 * return without doing anything 1070 */ 1071 goto err; 1072 } 1073 1074 page = e4b.bd_bitmap_page; 1075 ret = ext4_mb_init_cache(page, NULL); 1076 if (ret) 1077 goto err; 1078 if (!PageUptodate(page)) { 1079 ret = -EIO; 1080 goto err; 1081 } 1082 1083 if (e4b.bd_buddy_page == NULL) { 1084 /* 1085 * If both the bitmap and buddy are in 1086 * the same page we don't need to force 1087 * init the buddy 1088 */ 1089 ret = 0; 1090 goto err; 1091 } 1092 /* init buddy cache */ 1093 page = e4b.bd_buddy_page; 1094 ret = ext4_mb_init_cache(page, e4b.bd_bitmap); 1095 if (ret) 1096 goto err; 1097 if (!PageUptodate(page)) { 1098 ret = -EIO; 1099 goto err; 1100 } 1101err: 1102 ext4_mb_put_buddy_page_lock(&e4b); 1103 return ret; 1104} 1105 1106/* 1107 * Locking note: This routine calls ext4_mb_init_cache(), which takes the 1108 * block group lock of all groups for this page; do not hold the BG lock when 1109 * calling this routine! 1110 */ 1111static noinline_for_stack int 1112ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, 1113 struct ext4_buddy *e4b) 1114{ 1115 int blocks_per_page; 1116 int block; 1117 int pnum; 1118 int poff; 1119 struct page *page; 1120 int ret; 1121 struct ext4_group_info *grp; 1122 struct ext4_sb_info *sbi = EXT4_SB(sb); 1123 struct inode *inode = sbi->s_buddy_cache; 1124 1125 might_sleep(); 1126 mb_debug(1, "load group %u\n", group); 1127 1128 blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize; 1129 grp = ext4_get_group_info(sb, group); 1130 1131 e4b->bd_blkbits = sb->s_blocksize_bits; 1132 e4b->bd_info = grp; 1133 e4b->bd_sb = sb; 1134 e4b->bd_group = group; 1135 e4b->bd_buddy_page = NULL; 1136 e4b->bd_bitmap_page = NULL; 1137 1138 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 1139 /* 1140 * we need full data about the group 1141 * to make a good selection 1142 */ 1143 ret = ext4_mb_init_group(sb, group); 1144 if (ret) 1145 return ret; 1146 } 1147 1148 /* 1149 * the buddy cache inode stores the block bitmap 1150 * and buddy information in consecutive blocks. 1151 * So for each group we need two blocks. 1152 */ 1153 block = group * 2; 1154 pnum = block / blocks_per_page; 1155 poff = block % blocks_per_page; 1156 1157 /* we could use find_or_create_page(), but it locks page 1158 * what we'd like to avoid in fast path ... */ 1159 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); 1160 if (page == NULL || !PageUptodate(page)) { 1161 if (page) 1162 /* 1163 * drop the page reference and try 1164 * to get the page with lock. If we 1165 * are not uptodate that implies 1166 * somebody just created the page but 1167 * is yet to initialize the same. So 1168 * wait for it to initialize. 
1169 */ 1170 page_cache_release(page); 1171 page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); 1172 if (page) { 1173 BUG_ON(page->mapping != inode->i_mapping); 1174 if (!PageUptodate(page)) { 1175 ret = ext4_mb_init_cache(page, NULL); 1176 if (ret) { 1177 unlock_page(page); 1178 goto err; 1179 } 1180 mb_cmp_bitmaps(e4b, page_address(page) + 1181 (poff * sb->s_blocksize)); 1182 } 1183 unlock_page(page); 1184 } 1185 } 1186 if (page == NULL) { 1187 ret = -ENOMEM; 1188 goto err; 1189 } 1190 if (!PageUptodate(page)) { 1191 ret = -EIO; 1192 goto err; 1193 } 1194 1195 /* Pages marked accessed already */ 1196 e4b->bd_bitmap_page = page; 1197 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); 1198 1199 block++; 1200 pnum = block / blocks_per_page; 1201 poff = block % blocks_per_page; 1202 1203 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); 1204 if (page == NULL || !PageUptodate(page)) { 1205 if (page) 1206 page_cache_release(page); 1207 page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); 1208 if (page) { 1209 BUG_ON(page->mapping != inode->i_mapping); 1210 if (!PageUptodate(page)) { 1211 ret = ext4_mb_init_cache(page, e4b->bd_bitmap); 1212 if (ret) { 1213 unlock_page(page); 1214 goto err; 1215 } 1216 } 1217 unlock_page(page); 1218 } 1219 } 1220 if (page == NULL) { 1221 ret = -ENOMEM; 1222 goto err; 1223 } 1224 if (!PageUptodate(page)) { 1225 ret = -EIO; 1226 goto err; 1227 } 1228 1229 /* Pages marked accessed already */ 1230 e4b->bd_buddy_page = page; 1231 e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize); 1232 1233 BUG_ON(e4b->bd_bitmap_page == NULL); 1234 BUG_ON(e4b->bd_buddy_page == NULL); 1235 1236 return 0; 1237 1238err: 1239 if (page) 1240 page_cache_release(page); 1241 if (e4b->bd_bitmap_page) 1242 page_cache_release(e4b->bd_bitmap_page); 1243 if (e4b->bd_buddy_page) 1244 page_cache_release(e4b->bd_buddy_page); 1245 e4b->bd_buddy = NULL; 1246 e4b->bd_bitmap = NULL; 1247 return ret; 1248} 1249 1250static void ext4_mb_unload_buddy(struct ext4_buddy *e4b) 1251{ 1252 if (e4b->bd_bitmap_page) 1253 page_cache_release(e4b->bd_bitmap_page); 1254 if (e4b->bd_buddy_page) 1255 page_cache_release(e4b->bd_buddy_page); 1256} 1257 1258 1259static int mb_find_order_for_block(struct ext4_buddy *e4b, int block) 1260{ 1261 int order = 1; 1262 int bb_incr = 1 << (e4b->bd_blkbits - 1); 1263 void *bb; 1264 1265 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); 1266 BUG_ON(block >= (1 << (e4b->bd_blkbits + 3))); 1267 1268 bb = e4b->bd_buddy; 1269 while (order <= e4b->bd_blkbits + 1) { 1270 block = block >> 1; 1271 if (!mb_test_bit(block, bb)) { 1272 /* this block is part of buddy of order 'order' */ 1273 return order; 1274 } 1275 bb += bb_incr; 1276 bb_incr >>= 1; 1277 order++; 1278 } 1279 return 0; 1280} 1281 1282static void mb_clear_bits(void *bm, int cur, int len) 1283{ 1284 __u32 *addr; 1285 1286 len = cur + len; 1287 while (cur < len) { 1288 if ((cur & 31) == 0 && (len - cur) >= 32) { 1289 /* fast path: clear whole word at once */ 1290 addr = bm + (cur >> 3); 1291 *addr = 0; 1292 cur += 32; 1293 continue; 1294 } 1295 mb_clear_bit(cur, bm); 1296 cur++; 1297 } 1298} 1299 1300/* clear bits in given range 1301 * will return first found zero bit if any, -1 otherwise 1302 */ 1303static int mb_test_and_clear_bits(void *bm, int cur, int len) 1304{ 1305 __u32 *addr; 1306 int zero_bit = -1; 1307 1308 len = cur + len; 1309 while (cur < len) { 1310 if ((cur & 31) == 0 && (len - cur) >= 32) { 1311 /* fast path: clear whole word at once */ 1312 addr = bm + (cur >> 3); 
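			/* before wiping this whole word below, remember the
			 * first free (zero) bit in it, if one exists and
			 * none has been recorded yet */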
1313 if (*addr != (__u32)(-1) && zero_bit == -1) 1314 zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0); 1315 *addr = 0; 1316 cur += 32; 1317 continue; 1318 } 1319 if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1) 1320 zero_bit = cur; 1321 cur++; 1322 } 1323 1324 return zero_bit; 1325} 1326 1327void ext4_set_bits(void *bm, int cur, int len) 1328{ 1329 __u32 *addr; 1330 1331 len = cur + len; 1332 while (cur < len) { 1333 if ((cur & 31) == 0 && (len - cur) >= 32) { 1334 /* fast path: set whole word at once */ 1335 addr = bm + (cur >> 3); 1336 *addr = 0xffffffff; 1337 cur += 32; 1338 continue; 1339 } 1340 mb_set_bit(cur, bm); 1341 cur++; 1342 } 1343} 1344 1345/* 1346 * _________________________________________________________________ */ 1347 1348static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side) 1349{ 1350 if (mb_test_bit(*bit + side, bitmap)) { 1351 mb_clear_bit(*bit, bitmap); 1352 (*bit) -= side; 1353 return 1; 1354 } 1355 else { 1356 (*bit) += side; 1357 mb_set_bit(*bit, bitmap); 1358 return -1; 1359 } 1360} 1361 1362static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last) 1363{ 1364 int max; 1365 int order = 1; 1366 void *buddy = mb_find_buddy(e4b, order, &max); 1367 1368 while (buddy) { 1369 void *buddy2; 1370 1371 /* Bits in range [first; last] are known to be set since 1372 * corresponding blocks were allocated. Bits in range 1373 * (first; last) will stay set because they form buddies on 1374 * upper layer. We just deal with borders if they don't 1375 * align with upper layer and then go up. 1376 * Releasing entire group is all about clearing 1377 * single bit of highest order buddy. 1378 */ 1379 1380 /* Example: 1381 * --------------------------------- 1382 * | 1 | 1 | 1 | 1 | 1383 * --------------------------------- 1384 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1385 * --------------------------------- 1386 * 0 1 2 3 4 5 6 7 1387 * \_____________________/ 1388 * 1389 * Neither [1] nor [6] is aligned to above layer. 1390 * Left neighbour [0] is free, so mark it busy, 1391 * decrease bb_counters and extend range to 1392 * [0; 6] 1393 * Right neighbour [7] is busy. It can't be coaleasced with [6], so 1394 * mark [6] free, increase bb_counters and shrink range to 1395 * [0; 5]. 1396 * Then shift range to [0; 2], go up and do the same. 1397 */ 1398 1399 1400 if (first & 1) 1401 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1); 1402 if (!(last & 1)) 1403 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1); 1404 if (first > last) 1405 break; 1406 order++; 1407 1408 if (first == last || !(buddy2 = mb_find_buddy(e4b, order, &max))) { 1409 mb_clear_bits(buddy, first, last - first + 1); 1410 e4b->bd_info->bb_counters[order - 1] += last - first + 1; 1411 break; 1412 } 1413 first >>= 1; 1414 last >>= 1; 1415 buddy = buddy2; 1416 } 1417} 1418 1419static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b, 1420 int first, int count) 1421{ 1422 int left_is_free = 0; 1423 int right_is_free = 0; 1424 int block; 1425 int last = first + count - 1; 1426 struct super_block *sb = e4b->bd_sb; 1427 1428 if (WARN_ON(count == 0)) 1429 return; 1430 BUG_ON(last >= (sb->s_blocksize << 3)); 1431 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); 1432 /* Don't bother if the block group is corrupt. 
*/ 1433 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) 1434 return; 1435 1436 mb_check_buddy(e4b); 1437 mb_free_blocks_double(inode, e4b, first, count); 1438 1439 e4b->bd_info->bb_free += count; 1440 if (first < e4b->bd_info->bb_first_free) 1441 e4b->bd_info->bb_first_free = first; 1442 1443 /* access memory sequentially: check left neighbour, 1444 * clear range and then check right neighbour 1445 */ 1446 if (first != 0) 1447 left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap); 1448 block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count); 1449 if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0]) 1450 right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap); 1451 1452 if (unlikely(block != -1)) { 1453 struct ext4_sb_info *sbi = EXT4_SB(sb); 1454 ext4_fsblk_t blocknr; 1455 1456 blocknr = ext4_group_first_block_no(sb, e4b->bd_group); 1457 blocknr += EXT4_C2B(EXT4_SB(sb), block); 1458 ext4_grp_locked_error(sb, e4b->bd_group, 1459 inode ? inode->i_ino : 0, 1460 blocknr, 1461 "freeing already freed block " 1462 "(bit %u); block bitmap corrupt.", 1463 block); 1464 if (!EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)) 1465 percpu_counter_sub(&sbi->s_freeclusters_counter, 1466 e4b->bd_info->bb_free); 1467 /* Mark the block group as corrupt. */ 1468 set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, 1469 &e4b->bd_info->bb_state); 1470 mb_regenerate_buddy(e4b); 1471 goto done; 1472 } 1473 1474 /* let's maintain fragments counter */ 1475 if (left_is_free && right_is_free) 1476 e4b->bd_info->bb_fragments--; 1477 else if (!left_is_free && !right_is_free) 1478 e4b->bd_info->bb_fragments++; 1479 1480 /* buddy[0] == bd_bitmap is a special case, so handle 1481 * it right away and let mb_buddy_mark_free stay free of 1482 * zero order checks. 1483 * Check if neighbours are to be coaleasced, 1484 * adjust bitmap bb_counters and borders appropriately. 1485 */ 1486 if (first & 1) { 1487 first += !left_is_free; 1488 e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1; 1489 } 1490 if (!(last & 1)) { 1491 last -= !right_is_free; 1492 e4b->bd_info->bb_counters[0] += right_is_free ? 
-1 : 1; 1493 } 1494 1495 if (first <= last) 1496 mb_buddy_mark_free(e4b, first >> 1, last >> 1); 1497 1498done: 1499 mb_set_largest_free_order(sb, e4b->bd_info); 1500 mb_check_buddy(e4b); 1501} 1502 1503static int mb_find_extent(struct ext4_buddy *e4b, int block, 1504 int needed, struct ext4_free_extent *ex) 1505{ 1506 int next = block; 1507 int max, order; 1508 void *buddy; 1509 1510 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 1511 BUG_ON(ex == NULL); 1512 1513 buddy = mb_find_buddy(e4b, 0, &max); 1514 BUG_ON(buddy == NULL); 1515 BUG_ON(block >= max); 1516 if (mb_test_bit(block, buddy)) { 1517 ex->fe_len = 0; 1518 ex->fe_start = 0; 1519 ex->fe_group = 0; 1520 return 0; 1521 } 1522 1523 /* find actual order */ 1524 order = mb_find_order_for_block(e4b, block); 1525 block = block >> order; 1526 1527 ex->fe_len = 1 << order; 1528 ex->fe_start = block << order; 1529 ex->fe_group = e4b->bd_group; 1530 1531 /* calc difference from given start */ 1532 next = next - ex->fe_start; 1533 ex->fe_len -= next; 1534 ex->fe_start += next; 1535 1536 while (needed > ex->fe_len && 1537 mb_find_buddy(e4b, order, &max)) { 1538 1539 if (block + 1 >= max) 1540 break; 1541 1542 next = (block + 1) * (1 << order); 1543 if (mb_test_bit(next, e4b->bd_bitmap)) 1544 break; 1545 1546 order = mb_find_order_for_block(e4b, next); 1547 1548 block = next >> order; 1549 ex->fe_len += 1 << order; 1550 } 1551 1552 BUG_ON(ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3))); 1553 return ex->fe_len; 1554} 1555 1556static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex) 1557{ 1558 int ord; 1559 int mlen = 0; 1560 int max = 0; 1561 int cur; 1562 int start = ex->fe_start; 1563 int len = ex->fe_len; 1564 unsigned ret = 0; 1565 int len0 = len; 1566 void *buddy; 1567 1568 BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3)); 1569 BUG_ON(e4b->bd_group != ex->fe_group); 1570 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 1571 mb_check_buddy(e4b); 1572 mb_mark_used_double(e4b, start, len); 1573 1574 e4b->bd_info->bb_free -= len; 1575 if (e4b->bd_info->bb_first_free == start) 1576 e4b->bd_info->bb_first_free += len; 1577 1578 /* let's maintain fragments counter */ 1579 if (start != 0) 1580 mlen = !mb_test_bit(start - 1, e4b->bd_bitmap); 1581 if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0]) 1582 max = !mb_test_bit(start + len, e4b->bd_bitmap); 1583 if (mlen && max) 1584 e4b->bd_info->bb_fragments++; 1585 else if (!mlen && !max) 1586 e4b->bd_info->bb_fragments--; 1587 1588 /* let's maintain buddy itself */ 1589 while (len) { 1590 ord = mb_find_order_for_block(e4b, start); 1591 1592 if (((start >> ord) << ord) == start && len >= (1 << ord)) { 1593 /* the whole chunk may be allocated at once! 
*/ 1594 mlen = 1 << ord; 1595 buddy = mb_find_buddy(e4b, ord, &max); 1596 BUG_ON((start >> ord) >= max); 1597 mb_set_bit(start >> ord, buddy); 1598 e4b->bd_info->bb_counters[ord]--; 1599 start += mlen; 1600 len -= mlen; 1601 BUG_ON(len < 0); 1602 continue; 1603 } 1604 1605 /* store for history */ 1606 if (ret == 0) 1607 ret = len | (ord << 16); 1608 1609 /* we have to split large buddy */ 1610 BUG_ON(ord <= 0); 1611 buddy = mb_find_buddy(e4b, ord, &max); 1612 mb_set_bit(start >> ord, buddy); 1613 e4b->bd_info->bb_counters[ord]--; 1614 1615 ord--; 1616 cur = (start >> ord) & ~1U; 1617 buddy = mb_find_buddy(e4b, ord, &max); 1618 mb_clear_bit(cur, buddy); 1619 mb_clear_bit(cur + 1, buddy); 1620 e4b->bd_info->bb_counters[ord]++; 1621 e4b->bd_info->bb_counters[ord]++; 1622 } 1623 mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info); 1624 1625 ext4_set_bits(e4b->bd_bitmap, ex->fe_start, len0); 1626 mb_check_buddy(e4b); 1627 1628 return ret; 1629} 1630 1631/* 1632 * Must be called under group lock! 1633 */ 1634static void ext4_mb_use_best_found(struct ext4_allocation_context *ac, 1635 struct ext4_buddy *e4b) 1636{ 1637 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 1638 int ret; 1639 1640 BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group); 1641 BUG_ON(ac->ac_status == AC_STATUS_FOUND); 1642 1643 ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len); 1644 ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical; 1645 ret = mb_mark_used(e4b, &ac->ac_b_ex); 1646 1647 /* preallocation can change ac_b_ex, thus we store actually 1648 * allocated blocks for history */ 1649 ac->ac_f_ex = ac->ac_b_ex; 1650 1651 ac->ac_status = AC_STATUS_FOUND; 1652 ac->ac_tail = ret & 0xffff; 1653 ac->ac_buddy = ret >> 16; 1654 1655 /* 1656 * take the page reference. We want the page to be pinned 1657 * so that we don't get a ext4_mb_init_cache_call for this 1658 * group until we update the bitmap. That would mean we 1659 * double allocate blocks. 
The reference is dropped 1660 * in ext4_mb_release_context 1661 */ 1662 ac->ac_bitmap_page = e4b->bd_bitmap_page; 1663 get_page(ac->ac_bitmap_page); 1664 ac->ac_buddy_page = e4b->bd_buddy_page; 1665 get_page(ac->ac_buddy_page); 1666 /* store last allocated for subsequent stream allocation */ 1667 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { 1668 spin_lock(&sbi->s_md_lock); 1669 sbi->s_mb_last_group = ac->ac_f_ex.fe_group; 1670 sbi->s_mb_last_start = ac->ac_f_ex.fe_start; 1671 spin_unlock(&sbi->s_md_lock); 1672 } 1673} 1674 1675/* 1676 * regular allocator, for general purposes allocation 1677 */ 1678 1679static void ext4_mb_check_limits(struct ext4_allocation_context *ac, 1680 struct ext4_buddy *e4b, 1681 int finish_group) 1682{ 1683 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 1684 struct ext4_free_extent *bex = &ac->ac_b_ex; 1685 struct ext4_free_extent *gex = &ac->ac_g_ex; 1686 struct ext4_free_extent ex; 1687 int max; 1688 1689 if (ac->ac_status == AC_STATUS_FOUND) 1690 return; 1691 /* 1692 * We don't want to scan for a whole year 1693 */ 1694 if (ac->ac_found > sbi->s_mb_max_to_scan && 1695 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 1696 ac->ac_status = AC_STATUS_BREAK; 1697 return; 1698 } 1699 1700 /* 1701 * Haven't found good chunk so far, let's continue 1702 */ 1703 if (bex->fe_len < gex->fe_len) 1704 return; 1705 1706 if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan) 1707 && bex->fe_group == e4b->bd_group) { 1708 /* recheck chunk's availability - we don't know 1709 * when it was found (within this lock-unlock 1710 * period or not) */ 1711 max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex); 1712 if (max >= gex->fe_len) { 1713 ext4_mb_use_best_found(ac, e4b); 1714 return; 1715 } 1716 } 1717} 1718 1719/* 1720 * The routine checks whether found extent is good enough. If it is, 1721 * then the extent gets marked used and flag is set to the context 1722 * to stop scanning. Otherwise, the extent is compared with the 1723 * previous found extent and if new one is better, then it's stored 1724 * in the context. Later, the best found extent will be used, if 1725 * mballoc can't find good enough extent. 1726 * 1727 * FIXME: real allocation policy is to be designed yet! 
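 *
 * Informal illustration of the comparison below (numbers invented): with a
 * goal of 8 clusters, an extent of exactly 8 is taken immediately.  While
 * the best extent seen so far is still shorter than the goal, any found
 * extent longer than the current best replaces it; once the current best
 * already covers the goal, a smaller extent that still covers the goal is
 * preferred over a larger one.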
1728 */ 1729static void ext4_mb_measure_extent(struct ext4_allocation_context *ac, 1730 struct ext4_free_extent *ex, 1731 struct ext4_buddy *e4b) 1732{ 1733 struct ext4_free_extent *bex = &ac->ac_b_ex; 1734 struct ext4_free_extent *gex = &ac->ac_g_ex; 1735 1736 BUG_ON(ex->fe_len <= 0); 1737 BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); 1738 BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); 1739 BUG_ON(ac->ac_status != AC_STATUS_CONTINUE); 1740 1741 ac->ac_found++; 1742 1743 /* 1744 * The special case - take what you catch first 1745 */ 1746 if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 1747 *bex = *ex; 1748 ext4_mb_use_best_found(ac, e4b); 1749 return; 1750 } 1751 1752 /* 1753 * Let's check whether the chuck is good enough 1754 */ 1755 if (ex->fe_len == gex->fe_len) { 1756 *bex = *ex; 1757 ext4_mb_use_best_found(ac, e4b); 1758 return; 1759 } 1760 1761 /* 1762 * If this is first found extent, just store it in the context 1763 */ 1764 if (bex->fe_len == 0) { 1765 *bex = *ex; 1766 return; 1767 } 1768 1769 /* 1770 * If new found extent is better, store it in the context 1771 */ 1772 if (bex->fe_len < gex->fe_len) { 1773 /* if the request isn't satisfied, any found extent 1774 * larger than previous best one is better */ 1775 if (ex->fe_len > bex->fe_len) 1776 *bex = *ex; 1777 } else if (ex->fe_len > gex->fe_len) { 1778 /* if the request is satisfied, then we try to find 1779 * an extent that still satisfy the request, but is 1780 * smaller than previous one */ 1781 if (ex->fe_len < bex->fe_len) 1782 *bex = *ex; 1783 } 1784 1785 ext4_mb_check_limits(ac, e4b, 0); 1786} 1787 1788static noinline_for_stack 1789int ext4_mb_try_best_found(struct ext4_allocation_context *ac, 1790 struct ext4_buddy *e4b) 1791{ 1792 struct ext4_free_extent ex = ac->ac_b_ex; 1793 ext4_group_t group = ex.fe_group; 1794 int max; 1795 int err; 1796 1797 BUG_ON(ex.fe_len <= 0); 1798 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 1799 if (err) 1800 return err; 1801 1802 ext4_lock_group(ac->ac_sb, group); 1803 max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex); 1804 1805 if (max > 0) { 1806 ac->ac_b_ex = ex; 1807 ext4_mb_use_best_found(ac, e4b); 1808 } 1809 1810 ext4_unlock_group(ac->ac_sb, group); 1811 ext4_mb_unload_buddy(e4b); 1812 1813 return 0; 1814} 1815 1816static noinline_for_stack 1817int ext4_mb_find_by_goal(struct ext4_allocation_context *ac, 1818 struct ext4_buddy *e4b) 1819{ 1820 ext4_group_t group = ac->ac_g_ex.fe_group; 1821 int max; 1822 int err; 1823 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 1824 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 1825 struct ext4_free_extent ex; 1826 1827 if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL)) 1828 return 0; 1829 if (grp->bb_free == 0) 1830 return 0; 1831 1832 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 1833 if (err) 1834 return err; 1835 1836 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) { 1837 ext4_mb_unload_buddy(e4b); 1838 return 0; 1839 } 1840 1841 ext4_lock_group(ac->ac_sb, group); 1842 max = mb_find_extent(e4b, ac->ac_g_ex.fe_start, 1843 ac->ac_g_ex.fe_len, &ex); 1844 ex.fe_logical = 0xDEADFA11; /* debug value */ 1845 1846 if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) { 1847 ext4_fsblk_t start; 1848 1849 start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) + 1850 ex.fe_start; 1851 /* use do_div to get remainder (would be 64-bit modulo) */ 1852 if (do_div(start, sbi->s_stripe) == 0) { 1853 ac->ac_found++; 1854 ac->ac_b_ex = ex; 1855 ext4_mb_use_best_found(ac, e4b); 
1856 } 1857 } else if (max >= ac->ac_g_ex.fe_len) { 1858 BUG_ON(ex.fe_len <= 0); 1859 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 1860 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 1861 ac->ac_found++; 1862 ac->ac_b_ex = ex; 1863 ext4_mb_use_best_found(ac, e4b); 1864 } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) { 1865 /* Sometimes, caller may want to merge even small 1866 * number of blocks to an existing extent */ 1867 BUG_ON(ex.fe_len <= 0); 1868 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 1869 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 1870 ac->ac_found++; 1871 ac->ac_b_ex = ex; 1872 ext4_mb_use_best_found(ac, e4b); 1873 } 1874 ext4_unlock_group(ac->ac_sb, group); 1875 ext4_mb_unload_buddy(e4b); 1876 1877 return 0; 1878} 1879 1880/* 1881 * The routine scans buddy structures (not bitmap!) from given order 1882 * to max order and tries to find big enough chunk to satisfy the req 1883 */ 1884static noinline_for_stack 1885void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, 1886 struct ext4_buddy *e4b) 1887{ 1888 struct super_block *sb = ac->ac_sb; 1889 struct ext4_group_info *grp = e4b->bd_info; 1890 void *buddy; 1891 int i; 1892 int k; 1893 int max; 1894 1895 BUG_ON(ac->ac_2order <= 0); 1896 for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) { 1897 if (grp->bb_counters[i] == 0) 1898 continue; 1899 1900 buddy = mb_find_buddy(e4b, i, &max); 1901 BUG_ON(buddy == NULL); 1902 1903 k = mb_find_next_zero_bit(buddy, max, 0); 1904 BUG_ON(k >= max); 1905 1906 ac->ac_found++; 1907 1908 ac->ac_b_ex.fe_len = 1 << i; 1909 ac->ac_b_ex.fe_start = k << i; 1910 ac->ac_b_ex.fe_group = e4b->bd_group; 1911 1912 ext4_mb_use_best_found(ac, e4b); 1913 1914 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len); 1915 1916 if (EXT4_SB(sb)->s_mb_stats) 1917 atomic_inc(&EXT4_SB(sb)->s_bal_2orders); 1918 1919 break; 1920 } 1921} 1922 1923/* 1924 * The routine scans the group and measures all found extents. 1925 * In order to optimize scanning, caller must pass number of 1926 * free blocks in the group, so the routine can know upper limit. 1927 */ 1928static noinline_for_stack 1929void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, 1930 struct ext4_buddy *e4b) 1931{ 1932 struct super_block *sb = ac->ac_sb; 1933 void *bitmap = e4b->bd_bitmap; 1934 struct ext4_free_extent ex; 1935 int i; 1936 int free; 1937 1938 free = e4b->bd_info->bb_free; 1939 BUG_ON(free <= 0); 1940 1941 i = e4b->bd_info->bb_first_free; 1942 1943 while (free && ac->ac_status == AC_STATUS_CONTINUE) { 1944 i = mb_find_next_zero_bit(bitmap, 1945 EXT4_CLUSTERS_PER_GROUP(sb), i); 1946 if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) { 1947 /* 1948 * IF we have corrupt bitmap, we won't find any 1949 * free blocks even though group info says we 1950 * we have free blocks 1951 */ 1952 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, 1953 "%d free clusters as per " 1954 "group info. But bitmap says 0", 1955 free); 1956 break; 1957 } 1958 1959 mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex); 1960 BUG_ON(ex.fe_len <= 0); 1961 if (free < ex.fe_len) { 1962 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, 1963 "%d free clusters as per " 1964 "group info. But got %d blocks", 1965 free, ex.fe_len); 1966 /* 1967 * The number of free blocks differs. This mostly 1968 * indicate that the bitmap is corrupt. So exit 1969 * without claiming the space. 
1970 */ 1971 break; 1972 } 1973 ex.fe_logical = 0xDEADC0DE; /* debug value */ 1974 ext4_mb_measure_extent(ac, &ex, e4b); 1975 1976 i += ex.fe_len; 1977 free -= ex.fe_len; 1978 } 1979 1980 ext4_mb_check_limits(ac, e4b, 1); 1981} 1982 1983/* 1984 * This is a special case for storages like raid5 1985 * we try to find stripe-aligned chunks for stripe-size-multiple requests 1986 */ 1987static noinline_for_stack 1988void ext4_mb_scan_aligned(struct ext4_allocation_context *ac, 1989 struct ext4_buddy *e4b) 1990{ 1991 struct super_block *sb = ac->ac_sb; 1992 struct ext4_sb_info *sbi = EXT4_SB(sb); 1993 void *bitmap = e4b->bd_bitmap; 1994 struct ext4_free_extent ex; 1995 ext4_fsblk_t first_group_block; 1996 ext4_fsblk_t a; 1997 ext4_grpblk_t i; 1998 int max; 1999 2000 BUG_ON(sbi->s_stripe == 0); 2001 2002 /* find first stripe-aligned block in group */ 2003 first_group_block = ext4_group_first_block_no(sb, e4b->bd_group); 2004 2005 a = first_group_block + sbi->s_stripe - 1; 2006 do_div(a, sbi->s_stripe); 2007 i = (a * sbi->s_stripe) - first_group_block; 2008 2009 while (i < EXT4_CLUSTERS_PER_GROUP(sb)) { 2010 if (!mb_test_bit(i, bitmap)) { 2011 max = mb_find_extent(e4b, i, sbi->s_stripe, &ex); 2012 if (max >= sbi->s_stripe) { 2013 ac->ac_found++; 2014 ex.fe_logical = 0xDEADF00D; /* debug value */ 2015 ac->ac_b_ex = ex; 2016 ext4_mb_use_best_found(ac, e4b); 2017 break; 2018 } 2019 } 2020 i += sbi->s_stripe; 2021 } 2022} 2023 2024/* 2025 * This is now called BEFORE we load the buddy bitmap. 2026 * Returns either 1 or 0 indicating that the group is either suitable 2027 * for the allocation or not. In addition it can also return negative 2028 * error code when something goes wrong. 2029 */ 2030static int ext4_mb_good_group(struct ext4_allocation_context *ac, 2031 ext4_group_t group, int cr) 2032{ 2033 unsigned free, fragments; 2034 int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb)); 2035 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2036 2037 BUG_ON(cr < 0 || cr >= 4); 2038 2039 free = grp->bb_free; 2040 if (free == 0) 2041 return 0; 2042 if (cr <= 2 && free < ac->ac_g_ex.fe_len) 2043 return 0; 2044 2045 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 2046 return 0; 2047 2048 /* We only do this if the grp has never been initialized */ 2049 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 2050 int ret = ext4_mb_init_group(ac->ac_sb, group); 2051 if (ret) 2052 return ret; 2053 } 2054 2055 fragments = grp->bb_fragments; 2056 if (fragments == 0) 2057 return 0; 2058 2059 switch (cr) { 2060 case 0: 2061 BUG_ON(ac->ac_2order == 0); 2062 2063 /* Avoid using the first bg of a flexgroup for data files */ 2064 if ((ac->ac_flags & EXT4_MB_HINT_DATA) && 2065 (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) && 2066 ((group % flex_size) == 0)) 2067 return 0; 2068 2069 if ((ac->ac_2order > ac->ac_sb->s_blocksize_bits+1) || 2070 (free / fragments) >= ac->ac_g_ex.fe_len) 2071 return 1; 2072 2073 if (grp->bb_largest_free_order < ac->ac_2order) 2074 return 0; 2075 2076 return 1; 2077 case 1: 2078 if ((free / fragments) >= ac->ac_g_ex.fe_len) 2079 return 1; 2080 break; 2081 case 2: 2082 if (free >= ac->ac_g_ex.fe_len) 2083 return 1; 2084 break; 2085 case 3: 2086 return 1; 2087 default: 2088 BUG(); 2089 } 2090 2091 return 0; 2092} 2093 2094static noinline_for_stack int 2095ext4_mb_regular_allocator(struct ext4_allocation_context *ac) 2096{ 2097 ext4_group_t ngroups, group, i; 2098 int cr; 2099 int err = 0, first_err = 0; 2100 struct ext4_sb_info *sbi; 2101 struct super_block *sb; 2102 struct ext4_buddy 
e4b; 2103 2104 sb = ac->ac_sb; 2105 sbi = EXT4_SB(sb); 2106 ngroups = ext4_get_groups_count(sb); 2107 /* non-extent files are limited to low blocks/groups */ 2108 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))) 2109 ngroups = sbi->s_blockfile_groups; 2110 2111 BUG_ON(ac->ac_status == AC_STATUS_FOUND); 2112 2113 /* first, try the goal */ 2114 err = ext4_mb_find_by_goal(ac, &e4b); 2115 if (err || ac->ac_status == AC_STATUS_FOUND) 2116 goto out; 2117 2118 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 2119 goto out; 2120 2121 /* 2122 * ac->ac_2order is set only if the fe_len is a power of 2. 2123 * If ac_2order is set we also set criteria to 0 so that we 2124 * try exact allocation using buddy. 2125 */ 2126 i = fls(ac->ac_g_ex.fe_len); 2127 ac->ac_2order = 0; 2128 /* 2129 * We search using buddy data only if the order of the request 2130 * is greater than or equal to sbi->s_mb_order2_reqs. 2131 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req 2132 */ 2133 if (i >= sbi->s_mb_order2_reqs) { 2134 /* 2135 * This should tell if fe_len is exactly a power of 2 2136 */ 2137 if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0) 2138 ac->ac_2order = i - 1; 2139 } 2140 2141 /* if stream allocation is enabled, use global goal */ 2142 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { 2143 /* TBD: may be hot point */ 2144 spin_lock(&sbi->s_md_lock); 2145 ac->ac_g_ex.fe_group = sbi->s_mb_last_group; 2146 ac->ac_g_ex.fe_start = sbi->s_mb_last_start; 2147 spin_unlock(&sbi->s_md_lock); 2148 } 2149 2150 /* Let's just scan groups to find more or less suitable blocks */ 2151 cr = ac->ac_2order ? 0 : 1; 2152 /* 2153 * cr == 0 try to get exact allocation, 2154 * cr == 3 try to get anything 2155 */ 2156repeat: 2157 for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) { 2158 ac->ac_criteria = cr; 2159 /* 2160 * searching for the right group start 2161 * from the goal value specified 2162 */ 2163 group = ac->ac_g_ex.fe_group; 2164 2165 for (i = 0; i < ngroups; group++, i++) { 2166 int ret = 0; 2167 cond_resched(); 2168 /* 2169 * Artificially restricted ngroups for non-extent 2170 * files makes group > ngroups possible on first loop. 2171 */ 2172 if (group >= ngroups) 2173 group = 0; 2174 2175 /* This now checks without needing the buddy page */ 2176 ret = ext4_mb_good_group(ac, group, cr); 2177 if (ret <= 0) { 2178 if (!first_err) 2179 first_err = ret; 2180 continue; 2181 } 2182 2183 err = ext4_mb_load_buddy(sb, group, &e4b); 2184 if (err) 2185 goto out; 2186 2187 ext4_lock_group(sb, group); 2188 2189 /* 2190 * We need to check again after locking the 2191 * block group 2192 */ 2193 ret = ext4_mb_good_group(ac, group, cr); 2194 if (ret <= 0) { 2195 ext4_unlock_group(sb, group); 2196 ext4_mb_unload_buddy(&e4b); 2197 if (!first_err) 2198 first_err = ret; 2199 continue; 2200 } 2201 2202 ac->ac_groups_scanned++; 2203 if (cr == 0 && ac->ac_2order < sb->s_blocksize_bits+2) 2204 ext4_mb_simple_scan_group(ac, &e4b); 2205 else if (cr == 1 && sbi->s_stripe && 2206 !(ac->ac_g_ex.fe_len % sbi->s_stripe)) 2207 ext4_mb_scan_aligned(ac, &e4b); 2208 else 2209 ext4_mb_complex_scan_group(ac, &e4b); 2210 2211 ext4_unlock_group(sb, group); 2212 ext4_mb_unload_buddy(&e4b); 2213 2214 if (ac->ac_status != AC_STATUS_CONTINUE) 2215 break; 2216 } 2217 } 2218 2219 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND && 2220 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2221 /* 2222 * We've been searching too long.
Let's try to allocate 2223 * the best chunk we've found so far 2224 */ 2225 2226 ext4_mb_try_best_found(ac, &e4b); 2227 if (ac->ac_status != AC_STATUS_FOUND) { 2228 /* 2229 * Someone more lucky has already allocated it. 2230 * The only thing we can do is just take first 2231 * found block(s) 2232 printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n"); 2233 */ 2234 ac->ac_b_ex.fe_group = 0; 2235 ac->ac_b_ex.fe_start = 0; 2236 ac->ac_b_ex.fe_len = 0; 2237 ac->ac_status = AC_STATUS_CONTINUE; 2238 ac->ac_flags |= EXT4_MB_HINT_FIRST; 2239 cr = 3; 2240 atomic_inc(&sbi->s_mb_lost_chunks); 2241 goto repeat; 2242 } 2243 } 2244out: 2245 if (!err && ac->ac_status != AC_STATUS_FOUND && first_err) 2246 err = first_err; 2247 return err; 2248} 2249 2250static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos) 2251{ 2252 struct super_block *sb = seq->private; 2253 ext4_group_t group; 2254 2255 if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 2256 return NULL; 2257 group = *pos + 1; 2258 return (void *) ((unsigned long) group); 2259} 2260 2261static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos) 2262{ 2263 struct super_block *sb = seq->private; 2264 ext4_group_t group; 2265 2266 ++*pos; 2267 if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 2268 return NULL; 2269 group = *pos + 1; 2270 return (void *) ((unsigned long) group); 2271} 2272 2273static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) 2274{ 2275 struct super_block *sb = seq->private; 2276 ext4_group_t group = (ext4_group_t) ((unsigned long) v); 2277 int i; 2278 int err, buddy_loaded = 0; 2279 struct ext4_buddy e4b; 2280 struct ext4_group_info *grinfo; 2281 struct sg { 2282 struct ext4_group_info info; 2283 ext4_grpblk_t counters[16]; 2284 } sg; 2285 2286 group--; 2287 if (group == 0) 2288 seq_puts(seq, "#group: free frags first [" 2289 " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 " 2290 " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]"); 2291 2292 i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) + 2293 sizeof(struct ext4_group_info); 2294 grinfo = ext4_get_group_info(sb, group); 2295 /* Load the group info in memory only if not already loaded. */ 2296 if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) { 2297 err = ext4_mb_load_buddy(sb, group, &e4b); 2298 if (err) { 2299 seq_printf(seq, "#%-5u: I/O error\n", group); 2300 return 0; 2301 } 2302 buddy_loaded = 1; 2303 } 2304 2305 memcpy(&sg, ext4_get_group_info(sb, group), i); 2306 2307 if (buddy_loaded) 2308 ext4_mb_unload_buddy(&e4b); 2309 2310 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free, 2311 sg.info.bb_fragments, sg.info.bb_first_free); 2312 for (i = 0; i <= 13; i++) 2313 seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ? 
2314 sg.info.bb_counters[i] : 0); 2315 seq_printf(seq, " ]\n"); 2316 2317 return 0; 2318} 2319 2320static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v) 2321{ 2322} 2323 2324static const struct seq_operations ext4_mb_seq_groups_ops = { 2325 .start = ext4_mb_seq_groups_start, 2326 .next = ext4_mb_seq_groups_next, 2327 .stop = ext4_mb_seq_groups_stop, 2328 .show = ext4_mb_seq_groups_show, 2329}; 2330 2331static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file) 2332{ 2333 struct super_block *sb = PDE_DATA(inode); 2334 int rc; 2335 2336 rc = seq_open(file, &ext4_mb_seq_groups_ops); 2337 if (rc == 0) { 2338 struct seq_file *m = file->private_data; 2339 m->private = sb; 2340 } 2341 return rc; 2342 2343} 2344 2345const struct file_operations ext4_seq_mb_groups_fops = { 2346 .owner = THIS_MODULE, 2347 .open = ext4_mb_seq_groups_open, 2348 .read = seq_read, 2349 .llseek = seq_lseek, 2350 .release = seq_release, 2351}; 2352 2353static struct kmem_cache *get_groupinfo_cache(int blocksize_bits) 2354{ 2355 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 2356 struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index]; 2357 2358 BUG_ON(!cachep); 2359 return cachep; 2360} 2361 2362/* 2363 * Allocate the top-level s_group_info array for the specified number 2364 * of groups 2365 */ 2366int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups) 2367{ 2368 struct ext4_sb_info *sbi = EXT4_SB(sb); 2369 unsigned size; 2370 struct ext4_group_info ***new_groupinfo; 2371 2372 size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >> 2373 EXT4_DESC_PER_BLOCK_BITS(sb); 2374 if (size <= sbi->s_group_info_size) 2375 return 0; 2376 2377 size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size); 2378 new_groupinfo = ext4_kvzalloc(size, GFP_KERNEL); 2379 if (!new_groupinfo) { 2380 ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group"); 2381 return -ENOMEM; 2382 } 2383 if (sbi->s_group_info) { 2384 memcpy(new_groupinfo, sbi->s_group_info, 2385 sbi->s_group_info_size * sizeof(*sbi->s_group_info)); 2386 kvfree(sbi->s_group_info); 2387 } 2388 sbi->s_group_info = new_groupinfo; 2389 sbi->s_group_info_size = size / sizeof(*sbi->s_group_info); 2390 ext4_debug("allocated s_groupinfo array for %d meta_bg's\n", 2391 sbi->s_group_info_size); 2392 return 0; 2393} 2394 2395/* Create and initialize ext4_group_info data for the given group. */ 2396int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, 2397 struct ext4_group_desc *desc) 2398{ 2399 int i; 2400 int metalen = 0; 2401 struct ext4_sb_info *sbi = EXT4_SB(sb); 2402 struct ext4_group_info **meta_group_info; 2403 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); 2404 2405 /* 2406 * First check if this group is the first of a reserved block. 
2407 * If it's true, we have to allocate a new table of pointers 2408 * to ext4_group_info structures 2409 */ 2410 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 2411 metalen = sizeof(*meta_group_info) << 2412 EXT4_DESC_PER_BLOCK_BITS(sb); 2413 meta_group_info = kmalloc(metalen, GFP_NOFS); 2414 if (meta_group_info == NULL) { 2415 ext4_msg(sb, KERN_ERR, "can't allocate mem " 2416 "for a buddy group"); 2417 goto exit_meta_group_info; 2418 } 2419 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = 2420 meta_group_info; 2421 } 2422 2423 meta_group_info = 2424 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]; 2425 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1); 2426 2427 meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS); 2428 if (meta_group_info[i] == NULL) { 2429 ext4_msg(sb, KERN_ERR, "can't allocate buddy mem"); 2430 goto exit_group_info; 2431 } 2432 set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, 2433 &(meta_group_info[i]->bb_state)); 2434 2435 /* 2436 * initialize bb_free to be able to skip 2437 * empty groups without initialization 2438 */ 2439 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 2440 meta_group_info[i]->bb_free = 2441 ext4_free_clusters_after_init(sb, group, desc); 2442 } else { 2443 meta_group_info[i]->bb_free = 2444 ext4_free_group_clusters(sb, desc); 2445 } 2446 2447 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list); 2448 init_rwsem(&meta_group_info[i]->alloc_sem); 2449 meta_group_info[i]->bb_free_root = RB_ROOT; 2450 meta_group_info[i]->bb_largest_free_order = -1; /* uninit */ 2451 2452#ifdef DOUBLE_CHECK 2453 { 2454 struct buffer_head *bh; 2455 meta_group_info[i]->bb_bitmap = 2456 kmalloc(sb->s_blocksize, GFP_NOFS); 2457 BUG_ON(meta_group_info[i]->bb_bitmap == NULL); 2458 bh = ext4_read_block_bitmap(sb, group); 2459 BUG_ON(IS_ERR_OR_NULL(bh)); 2460 memcpy(meta_group_info[i]->bb_bitmap, bh->b_data, 2461 sb->s_blocksize); 2462 put_bh(bh); 2463 } 2464#endif 2465 2466 return 0; 2467 2468exit_group_info: 2469 /* If a meta_group_info table has been allocated, release it now */ 2470 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 2471 kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]); 2472 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL; 2473 } 2474exit_meta_group_info: 2475 return -ENOMEM; 2476} /* ext4_mb_add_groupinfo */ 2477 2478static int ext4_mb_init_backend(struct super_block *sb) 2479{ 2480 ext4_group_t ngroups = ext4_get_groups_count(sb); 2481 ext4_group_t i; 2482 struct ext4_sb_info *sbi = EXT4_SB(sb); 2483 int err; 2484 struct ext4_group_desc *desc; 2485 struct kmem_cache *cachep; 2486 2487 err = ext4_mb_alloc_groupinfo(sb, ngroups); 2488 if (err) 2489 return err; 2490 2491 sbi->s_buddy_cache = new_inode(sb); 2492 if (sbi->s_buddy_cache == NULL) { 2493 ext4_msg(sb, KERN_ERR, "can't get new inode"); 2494 goto err_freesgi; 2495 } 2496 /* To avoid potentially colliding with an valid on-disk inode number, 2497 * use EXT4_BAD_INO for the buddy cache inode number. This inode is 2498 * not in the inode hash, so it should never be found by iget(), but 2499 * this will avoid confusion if it ever shows up during debugging. 
*/ 2500 sbi->s_buddy_cache->i_ino = EXT4_BAD_INO; 2501 EXT4_I(sbi->s_buddy_cache)->i_disksize = 0; 2502 for (i = 0; i < ngroups; i++) { 2503 desc = ext4_get_group_desc(sb, i, NULL); 2504 if (desc == NULL) { 2505 ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i); 2506 goto err_freebuddy; 2507 } 2508 if (ext4_mb_add_groupinfo(sb, i, desc) != 0) 2509 goto err_freebuddy; 2510 } 2511 2512 return 0; 2513 2514err_freebuddy: 2515 cachep = get_groupinfo_cache(sb->s_blocksize_bits); 2516 while (i-- > 0) 2517 kmem_cache_free(cachep, ext4_get_group_info(sb, i)); 2518 i = sbi->s_group_info_size; 2519 while (i-- > 0) 2520 kfree(sbi->s_group_info[i]); 2521 iput(sbi->s_buddy_cache); 2522err_freesgi: 2523 kvfree(sbi->s_group_info); 2524 return -ENOMEM; 2525} 2526 2527static void ext4_groupinfo_destroy_slabs(void) 2528{ 2529 int i; 2530 2531 for (i = 0; i < NR_GRPINFO_CACHES; i++) { 2532 if (ext4_groupinfo_caches[i]) 2533 kmem_cache_destroy(ext4_groupinfo_caches[i]); 2534 ext4_groupinfo_caches[i] = NULL; 2535 } 2536} 2537 2538static int ext4_groupinfo_create_slab(size_t size) 2539{ 2540 static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex); 2541 int slab_size; 2542 int blocksize_bits = order_base_2(size); 2543 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 2544 struct kmem_cache *cachep; 2545 2546 if (cache_index >= NR_GRPINFO_CACHES) 2547 return -EINVAL; 2548 2549 if (unlikely(cache_index < 0)) 2550 cache_index = 0; 2551 2552 mutex_lock(&ext4_grpinfo_slab_create_mutex); 2553 if (ext4_groupinfo_caches[cache_index]) { 2554 mutex_unlock(&ext4_grpinfo_slab_create_mutex); 2555 return 0; /* Already created */ 2556 } 2557 2558 slab_size = offsetof(struct ext4_group_info, 2559 bb_counters[blocksize_bits + 2]); 2560 2561 cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index], 2562 slab_size, 0, SLAB_RECLAIM_ACCOUNT, 2563 NULL); 2564 2565 ext4_groupinfo_caches[cache_index] = cachep; 2566 2567 mutex_unlock(&ext4_grpinfo_slab_create_mutex); 2568 if (!cachep) { 2569 printk(KERN_EMERG 2570 "EXT4-fs: no memory for groupinfo slab cache\n"); 2571 return -ENOMEM; 2572 } 2573 2574 return 0; 2575} 2576 2577int ext4_mb_init(struct super_block *sb) 2578{ 2579 struct ext4_sb_info *sbi = EXT4_SB(sb); 2580 unsigned i, j; 2581 unsigned offset, offset_incr; 2582 unsigned max; 2583 int ret; 2584 2585 i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets); 2586 2587 sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL); 2588 if (sbi->s_mb_offsets == NULL) { 2589 ret = -ENOMEM; 2590 goto out; 2591 } 2592 2593 i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs); 2594 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL); 2595 if (sbi->s_mb_maxs == NULL) { 2596 ret = -ENOMEM; 2597 goto out; 2598 } 2599 2600 ret = ext4_groupinfo_create_slab(sb->s_blocksize); 2601 if (ret < 0) 2602 goto out; 2603 2604 /* order 0 is regular bitmap */ 2605 sbi->s_mb_maxs[0] = sb->s_blocksize << 3; 2606 sbi->s_mb_offsets[0] = 0; 2607 2608 i = 1; 2609 offset = 0; 2610 offset_incr = 1 << (sb->s_blocksize_bits - 1); 2611 max = sb->s_blocksize << 2; 2612 do { 2613 sbi->s_mb_offsets[i] = offset; 2614 sbi->s_mb_maxs[i] = max; 2615 offset += offset_incr; 2616 offset_incr = offset_incr >> 1; 2617 max = max >> 1; 2618 i++; 2619 } while (i <= sb->s_blocksize_bits + 1); 2620 2621 spin_lock_init(&sbi->s_md_lock); 2622 spin_lock_init(&sbi->s_bal_lock); 2623 2624 sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN; 2625 sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN; 2626 sbi->s_mb_stats = MB_DEFAULT_STATS; 2627 sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD; 
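	/*
	 * For illustration (assuming 4k blocks, i.e. s_blocksize_bits == 12):
	 * the do/while loop above records where each order's buddy bitmap
	 * lives and how many bits it has.  Order 0 is the regular bitmap
	 * itself (32768 bits); order 1 starts at byte offset 0 of the buddy
	 * block with 16384 bits, order 2 at byte 2048 with 8192 bits,
	 * order 3 at byte 3072 with 4096 bits, and so on up to order 13.
	 * mb_find_buddy() uses s_mb_offsets[]/s_mb_maxs[] to locate the
	 * bitmap for a requested order.
	 */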
2628 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS; 2629 /* 2630 * The default group preallocation is 512, which for 4k block 2631 * sizes translates to 2 megabytes. However for bigalloc file 2632 * systems, this is probably too big (i.e, if the cluster size 2633 * is 1 megabyte, then group preallocation size becomes half a 2634 * gigabyte!). As a default, we will keep a two megabyte 2635 * group pralloc size for cluster sizes up to 64k, and after 2636 * that, we will force a minimum group preallocation size of 2637 * 32 clusters. This translates to 8 megs when the cluster 2638 * size is 256k, and 32 megs when the cluster size is 1 meg, 2639 * which seems reasonable as a default. 2640 */ 2641 sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >> 2642 sbi->s_cluster_bits, 32); 2643 /* 2644 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc 2645 * to the lowest multiple of s_stripe which is bigger than 2646 * the s_mb_group_prealloc as determined above. We want 2647 * the preallocation size to be an exact multiple of the 2648 * RAID stripe size so that preallocations don't fragment 2649 * the stripes. 2650 */ 2651 if (sbi->s_stripe > 1) { 2652 sbi->s_mb_group_prealloc = roundup( 2653 sbi->s_mb_group_prealloc, sbi->s_stripe); 2654 } 2655 2656 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group); 2657 if (sbi->s_locality_groups == NULL) { 2658 ret = -ENOMEM; 2659 goto out; 2660 } 2661 for_each_possible_cpu(i) { 2662 struct ext4_locality_group *lg; 2663 lg = per_cpu_ptr(sbi->s_locality_groups, i); 2664 mutex_init(&lg->lg_mutex); 2665 for (j = 0; j < PREALLOC_TB_SIZE; j++) 2666 INIT_LIST_HEAD(&lg->lg_prealloc_list[j]); 2667 spin_lock_init(&lg->lg_prealloc_lock); 2668 } 2669 2670 /* init file for buddy data */ 2671 ret = ext4_mb_init_backend(sb); 2672 if (ret != 0) 2673 goto out_free_locality_groups; 2674 2675 return 0; 2676 2677out_free_locality_groups: 2678 free_percpu(sbi->s_locality_groups); 2679 sbi->s_locality_groups = NULL; 2680out: 2681 kfree(sbi->s_mb_offsets); 2682 sbi->s_mb_offsets = NULL; 2683 kfree(sbi->s_mb_maxs); 2684 sbi->s_mb_maxs = NULL; 2685 return ret; 2686} 2687 2688/* need to called with the ext4 group lock held */ 2689static void ext4_mb_cleanup_pa(struct ext4_group_info *grp) 2690{ 2691 struct ext4_prealloc_space *pa; 2692 struct list_head *cur, *tmp; 2693 int count = 0; 2694 2695 list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) { 2696 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 2697 list_del(&pa->pa_group_list); 2698 count++; 2699 kmem_cache_free(ext4_pspace_cachep, pa); 2700 } 2701 if (count) 2702 mb_debug(1, "mballoc: %u PAs left\n", count); 2703 2704} 2705 2706int ext4_mb_release(struct super_block *sb) 2707{ 2708 ext4_group_t ngroups = ext4_get_groups_count(sb); 2709 ext4_group_t i; 2710 int num_meta_group_infos; 2711 struct ext4_group_info *grinfo; 2712 struct ext4_sb_info *sbi = EXT4_SB(sb); 2713 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); 2714 2715 if (sbi->s_group_info) { 2716 for (i = 0; i < ngroups; i++) { 2717 grinfo = ext4_get_group_info(sb, i); 2718#ifdef DOUBLE_CHECK 2719 kfree(grinfo->bb_bitmap); 2720#endif 2721 ext4_lock_group(sb, i); 2722 ext4_mb_cleanup_pa(grinfo); 2723 ext4_unlock_group(sb, i); 2724 kmem_cache_free(cachep, grinfo); 2725 } 2726 num_meta_group_infos = (ngroups + 2727 EXT4_DESC_PER_BLOCK(sb) - 1) >> 2728 EXT4_DESC_PER_BLOCK_BITS(sb); 2729 for (i = 0; i < num_meta_group_infos; i++) 2730 kfree(sbi->s_group_info[i]); 2731 kvfree(sbi->s_group_info); 2732 } 
2733 kfree(sbi->s_mb_offsets); 2734 kfree(sbi->s_mb_maxs); 2735 iput(sbi->s_buddy_cache); 2736 if (sbi->s_mb_stats) { 2737 ext4_msg(sb, KERN_INFO, 2738 "mballoc: %u blocks %u reqs (%u success)", 2739 atomic_read(&sbi->s_bal_allocated), 2740 atomic_read(&sbi->s_bal_reqs), 2741 atomic_read(&sbi->s_bal_success)); 2742 ext4_msg(sb, KERN_INFO, 2743 "mballoc: %u extents scanned, %u goal hits, " 2744 "%u 2^N hits, %u breaks, %u lost", 2745 atomic_read(&sbi->s_bal_ex_scanned), 2746 atomic_read(&sbi->s_bal_goals), 2747 atomic_read(&sbi->s_bal_2orders), 2748 atomic_read(&sbi->s_bal_breaks), 2749 atomic_read(&sbi->s_mb_lost_chunks)); 2750 ext4_msg(sb, KERN_INFO, 2751 "mballoc: %lu generated and it took %Lu", 2752 sbi->s_mb_buddies_generated, 2753 sbi->s_mb_generation_time); 2754 ext4_msg(sb, KERN_INFO, 2755 "mballoc: %u preallocated, %u discarded", 2756 atomic_read(&sbi->s_mb_preallocated), 2757 atomic_read(&sbi->s_mb_discarded)); 2758 } 2759 2760 free_percpu(sbi->s_locality_groups); 2761 2762 return 0; 2763} 2764 2765static inline int ext4_issue_discard(struct super_block *sb, 2766 ext4_group_t block_group, ext4_grpblk_t cluster, int count) 2767{ 2768 ext4_fsblk_t discard_block; 2769 2770 discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) + 2771 ext4_group_first_block_no(sb, block_group)); 2772 count = EXT4_C2B(EXT4_SB(sb), count); 2773 trace_ext4_discard_blocks(sb, 2774 (unsigned long long) discard_block, count); 2775 return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0); 2776} 2777 2778/* 2779 * This function is called by the jbd2 layer once the commit has finished, 2780 * so we know we can free the blocks that were released with that commit. 2781 */ 2782static void ext4_free_data_callback(struct super_block *sb, 2783 struct ext4_journal_cb_entry *jce, 2784 int rc) 2785{ 2786 struct ext4_free_data *entry = (struct ext4_free_data *)jce; 2787 struct ext4_buddy e4b; 2788 struct ext4_group_info *db; 2789 int err, count = 0, count2 = 0; 2790 2791 mb_debug(1, "gonna free %u blocks in group %u (0x%p):", 2792 entry->efd_count, entry->efd_group, entry); 2793 2794 if (test_opt(sb, DISCARD)) { 2795 err = ext4_issue_discard(sb, entry->efd_group, 2796 entry->efd_start_cluster, 2797 entry->efd_count); 2798 if (err && err != -EOPNOTSUPP) 2799 ext4_msg(sb, KERN_WARNING, "discard request in" 2800 " group:%d block:%d count:%d failed" 2801 " with %d", entry->efd_group, 2802 entry->efd_start_cluster, 2803 entry->efd_count, err); 2804 } 2805 2806 err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b); 2807 /* we expect to find existing buddy because it's pinned */ 2808 BUG_ON(err != 0); 2809 2810 2811 db = e4b.bd_info; 2812 /* there are blocks to put in buddy to make them really free */ 2813 count += entry->efd_count; 2814 count2++; 2815 ext4_lock_group(sb, entry->efd_group); 2816 /* Take it out of per group rb tree */ 2817 rb_erase(&entry->efd_node, &(db->bb_free_root)); 2818 mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count); 2819 2820 /* 2821 * Clear the trimmed flag for the group so that the next 2822 * ext4_trim_fs can trim it. 2823 * If the volume is mounted with -o discard, online discard 2824 * is supported and the free blocks will be trimmed online. 
2825 */ 2826 if (!test_opt(sb, DISCARD)) 2827 EXT4_MB_GRP_CLEAR_TRIMMED(db); 2828 2829 if (!db->bb_free_root.rb_node) { 2830 /* No more items in the per group rb tree 2831 * balance refcounts from ext4_mb_free_metadata() 2832 */ 2833 page_cache_release(e4b.bd_buddy_page); 2834 page_cache_release(e4b.bd_bitmap_page); 2835 } 2836 ext4_unlock_group(sb, entry->efd_group); 2837 kmem_cache_free(ext4_free_data_cachep, entry); 2838 ext4_mb_unload_buddy(&e4b); 2839 2840 mb_debug(1, "freed %u blocks in %u structures\n", count, count2); 2841} 2842 2843int __init ext4_init_mballoc(void) 2844{ 2845 ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space, 2846 SLAB_RECLAIM_ACCOUNT); 2847 if (ext4_pspace_cachep == NULL) 2848 return -ENOMEM; 2849 2850 ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context, 2851 SLAB_RECLAIM_ACCOUNT); 2852 if (ext4_ac_cachep == NULL) { 2853 kmem_cache_destroy(ext4_pspace_cachep); 2854 return -ENOMEM; 2855 } 2856 2857 ext4_free_data_cachep = KMEM_CACHE(ext4_free_data, 2858 SLAB_RECLAIM_ACCOUNT); 2859 if (ext4_free_data_cachep == NULL) { 2860 kmem_cache_destroy(ext4_pspace_cachep); 2861 kmem_cache_destroy(ext4_ac_cachep); 2862 return -ENOMEM; 2863 } 2864 return 0; 2865} 2866 2867void ext4_exit_mballoc(void) 2868{ 2869 /* 2870 * Wait for completion of call_rcu()'s on ext4_pspace_cachep 2871 * before destroying the slab cache. 2872 */ 2873 rcu_barrier(); 2874 kmem_cache_destroy(ext4_pspace_cachep); 2875 kmem_cache_destroy(ext4_ac_cachep); 2876 kmem_cache_destroy(ext4_free_data_cachep); 2877 ext4_groupinfo_destroy_slabs(); 2878} 2879 2880 2881/* 2882 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps 2883 * Returns 0 if success or error code 2884 */ 2885static noinline_for_stack int 2886ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, 2887 handle_t *handle, unsigned int reserv_clstrs) 2888{ 2889 struct buffer_head *bitmap_bh = NULL; 2890 struct ext4_group_desc *gdp; 2891 struct buffer_head *gdp_bh; 2892 struct ext4_sb_info *sbi; 2893 struct super_block *sb; 2894 ext4_fsblk_t block; 2895 int err, len; 2896 2897 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 2898 BUG_ON(ac->ac_b_ex.fe_len <= 0); 2899 2900 sb = ac->ac_sb; 2901 sbi = EXT4_SB(sb); 2902 2903 bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group); 2904 if (IS_ERR(bitmap_bh)) { 2905 err = PTR_ERR(bitmap_bh); 2906 bitmap_bh = NULL; 2907 goto out_err; 2908 } 2909 2910 BUFFER_TRACE(bitmap_bh, "getting write access"); 2911 err = ext4_journal_get_write_access(handle, bitmap_bh); 2912 if (err) 2913 goto out_err; 2914 2915 err = -EIO; 2916 gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh); 2917 if (!gdp) 2918 goto out_err; 2919 2920 ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group, 2921 ext4_free_group_clusters(sb, gdp)); 2922 2923 BUFFER_TRACE(gdp_bh, "get_write_access"); 2924 err = ext4_journal_get_write_access(handle, gdp_bh); 2925 if (err) 2926 goto out_err; 2927 2928 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 2929 2930 len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 2931 if (!ext4_data_block_valid(sbi, block, len)) { 2932 ext4_error(sb, "Allocating blocks %llu-%llu which overlap " 2933 "fs metadata", block, block+len); 2934 /* File system mounted not to panic on error 2935 * Fix the bitmap and repeat the block allocation 2936 * We leak some of the blocks here. 
2937 */ 2938 ext4_lock_group(sb, ac->ac_b_ex.fe_group); 2939 ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, 2940 ac->ac_b_ex.fe_len); 2941 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 2942 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 2943 if (!err) 2944 err = -EAGAIN; 2945 goto out_err; 2946 } 2947 2948 ext4_lock_group(sb, ac->ac_b_ex.fe_group); 2949#ifdef AGGRESSIVE_CHECK 2950 { 2951 int i; 2952 for (i = 0; i < ac->ac_b_ex.fe_len; i++) { 2953 BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i, 2954 bitmap_bh->b_data)); 2955 } 2956 } 2957#endif 2958 ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, 2959 ac->ac_b_ex.fe_len); 2960 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 2961 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 2962 ext4_free_group_clusters_set(sb, gdp, 2963 ext4_free_clusters_after_init(sb, 2964 ac->ac_b_ex.fe_group, gdp)); 2965 } 2966 len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len; 2967 ext4_free_group_clusters_set(sb, gdp, len); 2968 ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh); 2969 ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp); 2970 2971 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 2972 percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len); 2973 /* 2974 * Now reduce the dirty cluster count as well; it should not go negative 2975 */ 2976 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED)) 2977 /* release all the reserved clusters if non-delalloc */ 2978 percpu_counter_sub(&sbi->s_dirtyclusters_counter, 2979 reserv_clstrs); 2980 2981 if (sbi->s_log_groups_per_flex) { 2982 ext4_group_t flex_group = ext4_flex_group(sbi, 2983 ac->ac_b_ex.fe_group); 2984 atomic64_sub(ac->ac_b_ex.fe_len, 2985 &sbi->s_flex_groups[flex_group].free_clusters); 2986 } 2987 2988 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 2989 if (err) 2990 goto out_err; 2991 err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh); 2992 2993out_err: 2994 brelse(bitmap_bh); 2995 return err; 2996} 2997 2998/* 2999 * here we normalize the request for a locality group 3000 * Group requests are normalized to s_mb_group_prealloc, which is rounded 3001 * up to a multiple of s_stripe when that is set via the mount option. 3002 * s_mb_group_prealloc can be configured via 3003 * /sys/fs/ext4/<partition>/mb_group_prealloc 3004 * 3005 * XXX: should we try to preallocate more than the group has now?
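 * For illustration: on a non-bigalloc file system with 4k blocks and the
 * default s_mb_group_prealloc of 512 clusters, every request routed here
 * is widened to a 512-cluster (2 MiB) goal below, however small the
 * original request was.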
3006 */ 3007static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac) 3008{ 3009 struct super_block *sb = ac->ac_sb; 3010 struct ext4_locality_group *lg = ac->ac_lg; 3011 3012 BUG_ON(lg == NULL); 3013 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc; 3014 mb_debug(1, "#%u: goal %u blocks for locality group\n", 3015 current->pid, ac->ac_g_ex.fe_len); 3016} 3017 3018/* 3019 * Normalization means making request better in terms of 3020 * size and alignment 3021 */ 3022static noinline_for_stack void 3023ext4_mb_normalize_request(struct ext4_allocation_context *ac, 3024 struct ext4_allocation_request *ar) 3025{ 3026 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 3027 int bsbits, max; 3028 ext4_lblk_t end; 3029 loff_t size, start_off; 3030 loff_t orig_size __maybe_unused; 3031 ext4_lblk_t start; 3032 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 3033 struct ext4_prealloc_space *pa; 3034 3035 /* do normalize only data requests, metadata requests 3036 do not need preallocation */ 3037 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 3038 return; 3039 3040 /* sometime caller may want exact blocks */ 3041 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 3042 return; 3043 3044 /* caller may indicate that preallocation isn't 3045 * required (it's a tail, for example) */ 3046 if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC) 3047 return; 3048 3049 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) { 3050 ext4_mb_normalize_group_request(ac); 3051 return ; 3052 } 3053 3054 bsbits = ac->ac_sb->s_blocksize_bits; 3055 3056 /* first, let's learn actual file size 3057 * given current request is allocated */ 3058 size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len); 3059 size = size << bsbits; 3060 if (size < i_size_read(ac->ac_inode)) 3061 size = i_size_read(ac->ac_inode); 3062 orig_size = size; 3063 3064 /* max size of free chunks */ 3065 max = 2 << bsbits; 3066 3067#define NRL_CHECK_SIZE(req, size, max, chunk_size) \ 3068 (req <= (size) || max <= (chunk_size)) 3069 3070 /* first, try to predict filesize */ 3071 /* XXX: should this table be tunable? 
*/ 3072 start_off = 0; 3073 if (size <= 16 * 1024) { 3074 size = 16 * 1024; 3075 } else if (size <= 32 * 1024) { 3076 size = 32 * 1024; 3077 } else if (size <= 64 * 1024) { 3078 size = 64 * 1024; 3079 } else if (size <= 128 * 1024) { 3080 size = 128 * 1024; 3081 } else if (size <= 256 * 1024) { 3082 size = 256 * 1024; 3083 } else if (size <= 512 * 1024) { 3084 size = 512 * 1024; 3085 } else if (size <= 1024 * 1024) { 3086 size = 1024 * 1024; 3087 } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) { 3088 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 3089 (21 - bsbits)) << 21; 3090 size = 2 * 1024 * 1024; 3091 } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) { 3092 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 3093 (22 - bsbits)) << 22; 3094 size = 4 * 1024 * 1024; 3095 } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len, 3096 (8<<20)>>bsbits, max, 8 * 1024)) { 3097 start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 3098 (23 - bsbits)) << 23; 3099 size = 8 * 1024 * 1024; 3100 } else { 3101 start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits; 3102 size = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb), 3103 ac->ac_o_ex.fe_len) << bsbits; 3104 } 3105 size = size >> bsbits; 3106 start = start_off >> bsbits; 3107 3108 /* don't cover already allocated blocks in selected range */ 3109 if (ar->pleft && start <= ar->lleft) { 3110 size -= ar->lleft + 1 - start; 3111 start = ar->lleft + 1; 3112 } 3113 if (ar->pright && start + size - 1 >= ar->lright) 3114 size -= start + size - ar->lright; 3115 3116 end = start + size; 3117 3118 /* check we don't cross already preallocated blocks */ 3119 rcu_read_lock(); 3120 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 3121 ext4_lblk_t pa_end; 3122 3123 if (pa->pa_deleted) 3124 continue; 3125 spin_lock(&pa->pa_lock); 3126 if (pa->pa_deleted) { 3127 spin_unlock(&pa->pa_lock); 3128 continue; 3129 } 3130 3131 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb), 3132 pa->pa_len); 3133 3134 /* PA must not overlap original request */ 3135 BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end || 3136 ac->ac_o_ex.fe_logical < pa->pa_lstart)); 3137 3138 /* skip PAs this normalized request doesn't overlap with */ 3139 if (pa->pa_lstart >= end || pa_end <= start) { 3140 spin_unlock(&pa->pa_lock); 3141 continue; 3142 } 3143 BUG_ON(pa->pa_lstart <= start && pa_end >= end); 3144 3145 /* adjust start or end to be adjacent to this pa */ 3146 if (pa_end <= ac->ac_o_ex.fe_logical) { 3147 BUG_ON(pa_end < start); 3148 start = pa_end; 3149 } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) { 3150 BUG_ON(pa->pa_lstart > end); 3151 end = pa->pa_lstart; 3152 } 3153 spin_unlock(&pa->pa_lock); 3154 } 3155 rcu_read_unlock(); 3156 size = end - start; 3157 3158 /* XXX: extra loop to check we really don't overlap preallocations */ 3159 rcu_read_lock(); 3160 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 3161 ext4_lblk_t pa_end; 3162 3163 spin_lock(&pa->pa_lock); 3164 if (pa->pa_deleted == 0) { 3165 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb), 3166 pa->pa_len); 3167 BUG_ON(!(start >= pa_end || end <= pa->pa_lstart)); 3168 } 3169 spin_unlock(&pa->pa_lock); 3170 } 3171 rcu_read_unlock(); 3172 3173 if (start + size <= ac->ac_o_ex.fe_logical && 3174 start > ac->ac_o_ex.fe_logical) { 3175 ext4_msg(ac->ac_sb, KERN_ERR, 3176 "start %lu, size %lu, fe_logical %lu", 3177 (unsigned long) start, (unsigned long) size, 3178 (unsigned long) ac->ac_o_ex.fe_logical); 3179 BUG(); 3180 } 3181 BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)); 3182 3183 
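	/*
	 * For illustration (assuming 4k blocks): a write that would leave
	 * the file at ~300k falls into the 512k bucket in the table above,
	 * so the goal starts out as logical range [0, 512k), i.e. 128
	 * blocks, which the code above then trims against neighbouring
	 * allocations and existing preallocations.  A write into a ~3M file
	 * matches the 2M case instead: the goal becomes a 2M chunk whose
	 * start is the original file offset rounded down to a 2M boundary.
	 */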
/* now prepare goal request */ 3184 3185 /* XXX: is it better to align blocks WRT to logical 3186 * placement or satisfy big request as is */ 3187 ac->ac_g_ex.fe_logical = start; 3188 ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size); 3189 3190 /* define goal start in order to merge */ 3191 if (ar->pright && (ar->lright == (start + size))) { 3192 /* merge to the right */ 3193 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size, 3194 &ac->ac_f_ex.fe_group, 3195 &ac->ac_f_ex.fe_start); 3196 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; 3197 } 3198 if (ar->pleft && (ar->lleft + 1 == start)) { 3199 /* merge to the left */ 3200 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1, 3201 &ac->ac_f_ex.fe_group, 3202 &ac->ac_f_ex.fe_start); 3203 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; 3204 } 3205 3206 mb_debug(1, "goal: %u(was %u) blocks at %u\n", (unsigned) size, 3207 (unsigned) orig_size, (unsigned) start); 3208} 3209 3210static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) 3211{ 3212 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 3213 3214 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) { 3215 atomic_inc(&sbi->s_bal_reqs); 3216 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); 3217 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len) 3218 atomic_inc(&sbi->s_bal_success); 3219 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned); 3220 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start && 3221 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group) 3222 atomic_inc(&sbi->s_bal_goals); 3223 if (ac->ac_found > sbi->s_mb_max_to_scan) 3224 atomic_inc(&sbi->s_bal_breaks); 3225 } 3226 3227 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) 3228 trace_ext4_mballoc_alloc(ac); 3229 else 3230 trace_ext4_mballoc_prealloc(ac); 3231} 3232 3233/* 3234 * Called on failure; free up any blocks from the inode PA for this 3235 * context. We don't need this for MB_GROUP_PA because we only change 3236 * pa_free in ext4_mb_release_context(), but on failure, we've already 3237 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed. 3238 */ 3239static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac) 3240{ 3241 struct ext4_prealloc_space *pa = ac->ac_pa; 3242 struct ext4_buddy e4b; 3243 int err; 3244 3245 if (pa == NULL) { 3246 if (ac->ac_f_ex.fe_len == 0) 3247 return; 3248 err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b); 3249 if (err) { 3250 /* 3251 * This should never happen since we pin the 3252 * pages in the ext4_allocation_context so 3253 * ext4_mb_load_buddy() should never fail. 
3254 */ 3255 WARN(1, "mb_load_buddy failed (%d)", err); 3256 return; 3257 } 3258 ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 3259 mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start, 3260 ac->ac_f_ex.fe_len); 3261 ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 3262 ext4_mb_unload_buddy(&e4b); 3263 return; 3264 } 3265 if (pa->pa_type == MB_INODE_PA) 3266 pa->pa_free += ac->ac_b_ex.fe_len; 3267} 3268 3269/* 3270 * use blocks preallocated to inode 3271 */ 3272static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac, 3273 struct ext4_prealloc_space *pa) 3274{ 3275 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 3276 ext4_fsblk_t start; 3277 ext4_fsblk_t end; 3278 int len; 3279 3280 /* found preallocated blocks, use them */ 3281 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart); 3282 end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len), 3283 start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len)); 3284 len = EXT4_NUM_B2C(sbi, end - start); 3285 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group, 3286 &ac->ac_b_ex.fe_start); 3287 ac->ac_b_ex.fe_len = len; 3288 ac->ac_status = AC_STATUS_FOUND; 3289 ac->ac_pa = pa; 3290 3291 BUG_ON(start < pa->pa_pstart); 3292 BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len)); 3293 BUG_ON(pa->pa_free < len); 3294 pa->pa_free -= len; 3295 3296 mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa); 3297} 3298 3299/* 3300 * use blocks preallocated to locality group 3301 */ 3302static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac, 3303 struct ext4_prealloc_space *pa) 3304{ 3305 unsigned int len = ac->ac_o_ex.fe_len; 3306 3307 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart, 3308 &ac->ac_b_ex.fe_group, 3309 &ac->ac_b_ex.fe_start); 3310 ac->ac_b_ex.fe_len = len; 3311 ac->ac_status = AC_STATUS_FOUND; 3312 ac->ac_pa = pa; 3313 3314 /* we don't correct pa_pstart or pa_plen here to avoid 3315 * possible race when the group is being loaded concurrently 3316 * instead we correct pa later, after blocks are marked 3317 * in on-disk bitmap -- see ext4_mb_release_context() 3318 * Other CPUs are prevented from allocating from this pa by lg_mutex 3319 */ 3320 mb_debug(1, "use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa); 3321} 3322 3323/* 3324 * Return the prealloc space that have minimal distance 3325 * from the goal block. @cpa is the prealloc 3326 * space that is having currently known minimal distance 3327 * from the goal block. 
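 * For example (hypothetical numbers): with a goal block of 1000, a current
 * best @cpa starting at block 900 (distance 100) is replaced by a @pa
 * starting at block 1040 (distance 40), and the pa_count reference is
 * moved from @cpa to @pa.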
3328 */ 3329static struct ext4_prealloc_space * 3330ext4_mb_check_group_pa(ext4_fsblk_t goal_block, 3331 struct ext4_prealloc_space *pa, 3332 struct ext4_prealloc_space *cpa) 3333{ 3334 ext4_fsblk_t cur_distance, new_distance; 3335 3336 if (cpa == NULL) { 3337 atomic_inc(&pa->pa_count); 3338 return pa; 3339 } 3340 cur_distance = abs(goal_block - cpa->pa_pstart); 3341 new_distance = abs(goal_block - pa->pa_pstart); 3342 3343 if (cur_distance <= new_distance) 3344 return cpa; 3345 3346 /* drop the previous reference */ 3347 atomic_dec(&cpa->pa_count); 3348 atomic_inc(&pa->pa_count); 3349 return pa; 3350} 3351 3352/* 3353 * search goal blocks in preallocated space 3354 */ 3355static noinline_for_stack int 3356ext4_mb_use_preallocated(struct ext4_allocation_context *ac) 3357{ 3358 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 3359 int order, i; 3360 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 3361 struct ext4_locality_group *lg; 3362 struct ext4_prealloc_space *pa, *cpa = NULL; 3363 ext4_fsblk_t goal_block; 3364 3365 /* only data can be preallocated */ 3366 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 3367 return 0; 3368 3369 /* first, try per-file preallocation */ 3370 rcu_read_lock(); 3371 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 3372 3373 /* all fields in this condition don't change, 3374 * so we can skip locking for them */ 3375 if (ac->ac_o_ex.fe_logical < pa->pa_lstart || 3376 ac->ac_o_ex.fe_logical >= (pa->pa_lstart + 3377 EXT4_C2B(sbi, pa->pa_len))) 3378 continue; 3379 3380 /* non-extent files can't have physical blocks past 2^32 */ 3381 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) && 3382 (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) > 3383 EXT4_MAX_BLOCK_FILE_PHYS)) 3384 continue; 3385 3386 /* found preallocated blocks, use them */ 3387 spin_lock(&pa->pa_lock); 3388 if (pa->pa_deleted == 0 && pa->pa_free) { 3389 atomic_inc(&pa->pa_count); 3390 ext4_mb_use_inode_pa(ac, pa); 3391 spin_unlock(&pa->pa_lock); 3392 ac->ac_criteria = 10; 3393 rcu_read_unlock(); 3394 return 1; 3395 } 3396 spin_unlock(&pa->pa_lock); 3397 } 3398 rcu_read_unlock(); 3399 3400 /* can we use group allocation? */ 3401 if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)) 3402 return 0; 3403 3404 /* inode may have no locality group for some reason */ 3405 lg = ac->ac_lg; 3406 if (lg == NULL) 3407 return 0; 3408 order = fls(ac->ac_o_ex.fe_len) - 1; 3409 if (order > PREALLOC_TB_SIZE - 1) 3410 /* The max size of hash table is PREALLOC_TB_SIZE */ 3411 order = PREALLOC_TB_SIZE - 1; 3412 3413 goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex); 3414 /* 3415 * search for the prealloc space that is having 3416 * minimal distance from the goal block. 3417 */ 3418 for (i = order; i < PREALLOC_TB_SIZE; i++) { 3419 rcu_read_lock(); 3420 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i], 3421 pa_inode_list) { 3422 spin_lock(&pa->pa_lock); 3423 if (pa->pa_deleted == 0 && 3424 pa->pa_free >= ac->ac_o_ex.fe_len) { 3425 3426 cpa = ext4_mb_check_group_pa(goal_block, 3427 pa, cpa); 3428 } 3429 spin_unlock(&pa->pa_lock); 3430 } 3431 rcu_read_unlock(); 3432 } 3433 if (cpa) { 3434 ext4_mb_use_group_pa(ac, cpa); 3435 ac->ac_criteria = 20; 3436 return 1; 3437 } 3438 return 0; 3439} 3440 3441/* 3442 * the function goes through all block freed in the group 3443 * but not yet committed and marks them used in in-core bitmap. 
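 * (for example, clusters freed earlier in the still-running transaction
 * sit in bb_free_root and are marked in-use here, so they cannot be
 * handed out again before ext4_free_data_callback() above releases them)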
3444 * buddy must be generated from this bitmap 3445 * Need to be called with the ext4 group lock held 3446 */ 3447static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, 3448 ext4_group_t group) 3449{ 3450 struct rb_node *n; 3451 struct ext4_group_info *grp; 3452 struct ext4_free_data *entry; 3453 3454 grp = ext4_get_group_info(sb, group); 3455 n = rb_first(&(grp->bb_free_root)); 3456 3457 while (n) { 3458 entry = rb_entry(n, struct ext4_free_data, efd_node); 3459 ext4_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count); 3460 n = rb_next(n); 3461 } 3462 return; 3463} 3464 3465/* 3466 * the function goes through all preallocation in this group and marks them 3467 * used in in-core bitmap. buddy must be generated from this bitmap 3468 * Need to be called with ext4 group lock held 3469 */ 3470static noinline_for_stack 3471void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, 3472 ext4_group_t group) 3473{ 3474 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 3475 struct ext4_prealloc_space *pa; 3476 struct list_head *cur; 3477 ext4_group_t groupnr; 3478 ext4_grpblk_t start; 3479 int preallocated = 0; 3480 int len; 3481 3482 /* all form of preallocation discards first load group, 3483 * so the only competing code is preallocation use. 3484 * we don't need any locking here 3485 * notice we do NOT ignore preallocations with pa_deleted 3486 * otherwise we could leave used blocks available for 3487 * allocation in buddy when concurrent ext4_mb_put_pa() 3488 * is dropping preallocation 3489 */ 3490 list_for_each(cur, &grp->bb_prealloc_list) { 3491 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 3492 spin_lock(&pa->pa_lock); 3493 ext4_get_group_no_and_offset(sb, pa->pa_pstart, 3494 &groupnr, &start); 3495 len = pa->pa_len; 3496 spin_unlock(&pa->pa_lock); 3497 if (unlikely(len == 0)) 3498 continue; 3499 BUG_ON(groupnr != group); 3500 ext4_set_bits(bitmap, start, len); 3501 preallocated += len; 3502 } 3503 mb_debug(1, "prellocated %u for group %u\n", preallocated, group); 3504} 3505 3506static void ext4_mb_pa_callback(struct rcu_head *head) 3507{ 3508 struct ext4_prealloc_space *pa; 3509 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); 3510 3511 BUG_ON(atomic_read(&pa->pa_count)); 3512 BUG_ON(pa->pa_deleted == 0); 3513 kmem_cache_free(ext4_pspace_cachep, pa); 3514} 3515 3516/* 3517 * drops a reference to preallocated space descriptor 3518 * if this was the last reference and the space is consumed 3519 */ 3520static void ext4_mb_put_pa(struct ext4_allocation_context *ac, 3521 struct super_block *sb, struct ext4_prealloc_space *pa) 3522{ 3523 ext4_group_t grp; 3524 ext4_fsblk_t grp_blk; 3525 3526 /* in this short window concurrent discard can set pa_deleted */ 3527 spin_lock(&pa->pa_lock); 3528 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) { 3529 spin_unlock(&pa->pa_lock); 3530 return; 3531 } 3532 3533 if (pa->pa_deleted == 1) { 3534 spin_unlock(&pa->pa_lock); 3535 return; 3536 } 3537 3538 pa->pa_deleted = 1; 3539 spin_unlock(&pa->pa_lock); 3540 3541 grp_blk = pa->pa_pstart; 3542 /* 3543 * If doing group-based preallocation, pa_pstart may be in the 3544 * next group when pa is used up 3545 */ 3546 if (pa->pa_type == MB_GROUP_PA) 3547 grp_blk--; 3548 3549 grp = ext4_get_group_number(sb, grp_blk); 3550 3551 /* 3552 * possible race: 3553 * 3554 * P1 (buddy init) P2 (regular allocation) 3555 * find block B in PA 3556 * copy on-disk bitmap to buddy 3557 * mark B in on-disk bitmap 3558 * drop PA from 
group 3559 * mark all PAs in buddy 3560 * 3561 * thus, P1 initializes buddy with B available. to prevent this 3562 * we make "copy" and "mark all PAs" atomic and serialize "drop PA" 3563 * against that pair 3564 */ 3565 ext4_lock_group(sb, grp); 3566 list_del(&pa->pa_group_list); 3567 ext4_unlock_group(sb, grp); 3568 3569 spin_lock(pa->pa_obj_lock); 3570 list_del_rcu(&pa->pa_inode_list); 3571 spin_unlock(pa->pa_obj_lock); 3572 3573 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 3574} 3575 3576/* 3577 * creates new preallocated space for given inode 3578 */ 3579static noinline_for_stack int 3580ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) 3581{ 3582 struct super_block *sb = ac->ac_sb; 3583 struct ext4_sb_info *sbi = EXT4_SB(sb); 3584 struct ext4_prealloc_space *pa; 3585 struct ext4_group_info *grp; 3586 struct ext4_inode_info *ei; 3587 3588 /* preallocate only when found space is larger then requested */ 3589 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); 3590 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 3591 BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); 3592 3593 pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS); 3594 if (pa == NULL) 3595 return -ENOMEM; 3596 3597 if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) { 3598 int winl; 3599 int wins; 3600 int win; 3601 int offs; 3602 3603 /* we can't allocate as much as normalizer wants. 3604 * so, found space must get proper lstart 3605 * to cover original request */ 3606 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical); 3607 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len); 3608 3609 /* we're limited by original request in that 3610 * logical block must be covered any way 3611 * winl is window we can move our chunk within */ 3612 winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical; 3613 3614 /* also, we should cover whole original request */ 3615 wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len); 3616 3617 /* the smallest one defines real window */ 3618 win = min(winl, wins); 3619 3620 offs = ac->ac_o_ex.fe_logical % 3621 EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 3622 if (offs && offs < win) 3623 win = offs; 3624 3625 ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical - 3626 EXT4_NUM_B2C(sbi, win); 3627 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical); 3628 BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len); 3629 } 3630 3631 /* preallocation can change ac_b_ex, thus we store actually 3632 * allocated blocks for history */ 3633 ac->ac_f_ex = ac->ac_b_ex; 3634 3635 pa->pa_lstart = ac->ac_b_ex.fe_logical; 3636 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 3637 pa->pa_len = ac->ac_b_ex.fe_len; 3638 pa->pa_free = pa->pa_len; 3639 atomic_set(&pa->pa_count, 1); 3640 spin_lock_init(&pa->pa_lock); 3641 INIT_LIST_HEAD(&pa->pa_inode_list); 3642 INIT_LIST_HEAD(&pa->pa_group_list); 3643 pa->pa_deleted = 0; 3644 pa->pa_type = MB_INODE_PA; 3645 3646 mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa, 3647 pa->pa_pstart, pa->pa_len, pa->pa_lstart); 3648 trace_ext4_mb_new_inode_pa(ac, pa); 3649 3650 ext4_mb_use_inode_pa(ac, pa); 3651 atomic_add(pa->pa_free, &sbi->s_mb_preallocated); 3652 3653 ei = EXT4_I(ac->ac_inode); 3654 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); 3655 3656 pa->pa_obj_lock = &ei->i_prealloc_lock; 3657 pa->pa_inode = ac->ac_inode; 3658 3659 ext4_lock_group(sb, ac->ac_b_ex.fe_group); 3660 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); 3661 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 3662 3663 spin_lock(pa->pa_obj_lock); 3664 list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list); 3665 
spin_unlock(pa->pa_obj_lock); 3666 3667 return 0; 3668} 3669 3670/* 3671 * creates new preallocated space for locality group inodes belongs to 3672 */ 3673static noinline_for_stack int 3674ext4_mb_new_group_pa(struct ext4_allocation_context *ac) 3675{ 3676 struct super_block *sb = ac->ac_sb; 3677 struct ext4_locality_group *lg; 3678 struct ext4_prealloc_space *pa; 3679 struct ext4_group_info *grp; 3680 3681 /* preallocate only when found space is larger then requested */ 3682 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); 3683 BUG_ON(ac->ac_status != AC_STATUS_FOUND); 3684 BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); 3685 3686 BUG_ON(ext4_pspace_cachep == NULL); 3687 pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS); 3688 if (pa == NULL) 3689 return -ENOMEM; 3690 3691 /* preallocation can change ac_b_ex, thus we store actually 3692 * allocated blocks for history */ 3693 ac->ac_f_ex = ac->ac_b_ex; 3694 3695 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 3696 pa->pa_lstart = pa->pa_pstart; 3697 pa->pa_len = ac->ac_b_ex.fe_len; 3698 pa->pa_free = pa->pa_len; 3699 atomic_set(&pa->pa_count, 1); 3700 spin_lock_init(&pa->pa_lock); 3701 INIT_LIST_HEAD(&pa->pa_inode_list); 3702 INIT_LIST_HEAD(&pa->pa_group_list); 3703 pa->pa_deleted = 0; 3704 pa->pa_type = MB_GROUP_PA; 3705 3706 mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa, 3707 pa->pa_pstart, pa->pa_len, pa->pa_lstart); 3708 trace_ext4_mb_new_group_pa(ac, pa); 3709 3710 ext4_mb_use_group_pa(ac, pa); 3711 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); 3712 3713 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); 3714 lg = ac->ac_lg; 3715 BUG_ON(lg == NULL); 3716 3717 pa->pa_obj_lock = &lg->lg_prealloc_lock; 3718 pa->pa_inode = NULL; 3719 3720 ext4_lock_group(sb, ac->ac_b_ex.fe_group); 3721 list_add(&pa->pa_group_list, &grp->bb_prealloc_list); 3722 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 3723 3724 /* 3725 * We will later add the new pa to the right bucket 3726 * after updating the pa_free in ext4_mb_release_context 3727 */ 3728 return 0; 3729} 3730 3731static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac) 3732{ 3733 int err; 3734 3735 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 3736 err = ext4_mb_new_group_pa(ac); 3737 else 3738 err = ext4_mb_new_inode_pa(ac); 3739 return err; 3740} 3741 3742/* 3743 * finds all unused blocks in on-disk bitmap, frees them in 3744 * in-core bitmap and buddy. 3745 * @pa must be unlinked from inode and group lists, so that 3746 * nobody else can find/use it. 3747 * the caller MUST hold group/inode locks. 
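 * For example (hypothetical numbers): if the PA covers bits 100-131 of the
 * group and bits 110-115 are set (in use) in the on-disk bitmap, the loop
 * below frees the runs 100-109 and 116-131 back to the buddy, 26 clusters
 * in total.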
3748 * TODO: optimize the case when there are no in-core structures yet 3749 */ 3750static noinline_for_stack int 3751ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, 3752 struct ext4_prealloc_space *pa) 3753{ 3754 struct super_block *sb = e4b->bd_sb; 3755 struct ext4_sb_info *sbi = EXT4_SB(sb); 3756 unsigned int end; 3757 unsigned int next; 3758 ext4_group_t group; 3759 ext4_grpblk_t bit; 3760 unsigned long long grp_blk_start; 3761 int err = 0; 3762 int free = 0; 3763 3764 BUG_ON(pa->pa_deleted == 0); 3765 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 3766 grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit); 3767 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 3768 end = bit + pa->pa_len; 3769 3770 while (bit < end) { 3771 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit); 3772 if (bit >= end) 3773 break; 3774 next = mb_find_next_bit(bitmap_bh->b_data, end, bit); 3775 mb_debug(1, " free preallocated %u/%u in group %u\n", 3776 (unsigned) ext4_group_first_block_no(sb, group) + bit, 3777 (unsigned) next - bit, (unsigned) group); 3778 free += next - bit; 3779 3780 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit); 3781 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start + 3782 EXT4_C2B(sbi, bit)), 3783 next - bit); 3784 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); 3785 bit = next + 1; 3786 } 3787 if (free != pa->pa_free) { 3788 ext4_msg(e4b->bd_sb, KERN_CRIT, 3789 "pa %p: logic %lu, phys. %lu, len %lu", 3790 pa, (unsigned long) pa->pa_lstart, 3791 (unsigned long) pa->pa_pstart, 3792 (unsigned long) pa->pa_len); 3793 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u", 3794 free, pa->pa_free); 3795 /* 3796 * pa is already deleted so we use the value obtained 3797 * from the bitmap and continue. 
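 * (it is this bitmap-derived "free" count, not pa_free, that gets
 * added to sbi->s_mb_discarded below)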
3798 */ 3799 } 3800 atomic_add(free, &sbi->s_mb_discarded); 3801 3802 return err; 3803} 3804 3805static noinline_for_stack int 3806ext4_mb_release_group_pa(struct ext4_buddy *e4b, 3807 struct ext4_prealloc_space *pa) 3808{ 3809 struct super_block *sb = e4b->bd_sb; 3810 ext4_group_t group; 3811 ext4_grpblk_t bit; 3812 3813 trace_ext4_mb_release_group_pa(sb, pa); 3814 BUG_ON(pa->pa_deleted == 0); 3815 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 3816 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 3817 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); 3818 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); 3819 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len); 3820 3821 return 0; 3822} 3823 3824/* 3825 * releases all preallocations in given group 3826 * 3827 * first, we need to decide discard policy: 3828 * - when do we discard 3829 * 1) ENOSPC 3830 * - how many do we discard 3831 * 1) how many requested 3832 */ 3833static noinline_for_stack int 3834ext4_mb_discard_group_preallocations(struct super_block *sb, 3835 ext4_group_t group, int needed) 3836{ 3837 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 3838 struct buffer_head *bitmap_bh = NULL; 3839 struct ext4_prealloc_space *pa, *tmp; 3840 struct list_head list; 3841 struct ext4_buddy e4b; 3842 int err; 3843 int busy = 0; 3844 int free = 0; 3845 3846 mb_debug(1, "discard preallocation for group %u\n", group); 3847 3848 if (list_empty(&grp->bb_prealloc_list)) 3849 return 0; 3850 3851 bitmap_bh = ext4_read_block_bitmap(sb, group); 3852 if (IS_ERR(bitmap_bh)) { 3853 err = PTR_ERR(bitmap_bh); 3854 ext4_error(sb, "Error %d reading block bitmap for %u", 3855 err, group); 3856 return 0; 3857 } 3858 3859 err = ext4_mb_load_buddy(sb, group, &e4b); 3860 if (err) { 3861 ext4_error(sb, "Error loading buddy information for %u", group); 3862 put_bh(bitmap_bh); 3863 return 0; 3864 } 3865 3866 if (needed == 0) 3867 needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1; 3868 3869 INIT_LIST_HEAD(&list); 3870repeat: 3871 ext4_lock_group(sb, group); 3872 list_for_each_entry_safe(pa, tmp, 3873 &grp->bb_prealloc_list, pa_group_list) { 3874 spin_lock(&pa->pa_lock); 3875 if (atomic_read(&pa->pa_count)) { 3876 spin_unlock(&pa->pa_lock); 3877 busy = 1; 3878 continue; 3879 } 3880 if (pa->pa_deleted) { 3881 spin_unlock(&pa->pa_lock); 3882 continue; 3883 } 3884 3885 /* seems this one can be freed ... */ 3886 pa->pa_deleted = 1; 3887 3888 /* we can trust pa_free ... */ 3889 free += pa->pa_free; 3890 3891 spin_unlock(&pa->pa_lock); 3892 3893 list_del(&pa->pa_group_list); 3894 list_add(&pa->u.pa_tmp_list, &list); 3895 } 3896 3897 /* if we still need more blocks and some PAs were used, try again */ 3898 if (free < needed && busy) { 3899 busy = 0; 3900 ext4_unlock_group(sb, group); 3901 cond_resched(); 3902 goto repeat; 3903 } 3904 3905 /* found anything to free? 
*/ 3906 if (list_empty(&list)) { 3907 BUG_ON(free != 0); 3908 goto out; 3909 } 3910 3911 /* now free all selected PAs */ 3912 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 3913 3914 /* remove from object (inode or locality group) */ 3915 spin_lock(pa->pa_obj_lock); 3916 list_del_rcu(&pa->pa_inode_list); 3917 spin_unlock(pa->pa_obj_lock); 3918 3919 if (pa->pa_type == MB_GROUP_PA) 3920 ext4_mb_release_group_pa(&e4b, pa); 3921 else 3922 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 3923 3924 list_del(&pa->u.pa_tmp_list); 3925 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 3926 } 3927 3928out: 3929 ext4_unlock_group(sb, group); 3930 ext4_mb_unload_buddy(&e4b); 3931 put_bh(bitmap_bh); 3932 return free; 3933} 3934 3935/* 3936 * releases all non-used preallocated blocks for given inode 3937 * 3938 * It's important to discard preallocations under i_data_sem 3939 * We don't want another block to be served from the prealloc 3940 * space when we are discarding the inode prealloc space. 3941 * 3942 * FIXME!! Make sure it is valid at all the call sites 3943 */ 3944void ext4_discard_preallocations(struct inode *inode) 3945{ 3946 struct ext4_inode_info *ei = EXT4_I(inode); 3947 struct super_block *sb = inode->i_sb; 3948 struct buffer_head *bitmap_bh = NULL; 3949 struct ext4_prealloc_space *pa, *tmp; 3950 ext4_group_t group = 0; 3951 struct list_head list; 3952 struct ext4_buddy e4b; 3953 int err; 3954 3955 if (!S_ISREG(inode->i_mode)) { 3956 /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/ 3957 return; 3958 } 3959 3960 mb_debug(1, "discard preallocation for inode %lu\n", inode->i_ino); 3961 trace_ext4_discard_preallocations(inode); 3962 3963 INIT_LIST_HEAD(&list); 3964 3965repeat: 3966 /* first, collect all pa's in the inode */ 3967 spin_lock(&ei->i_prealloc_lock); 3968 while (!list_empty(&ei->i_prealloc_list)) { 3969 pa = list_entry(ei->i_prealloc_list.next, 3970 struct ext4_prealloc_space, pa_inode_list); 3971 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock); 3972 spin_lock(&pa->pa_lock); 3973 if (atomic_read(&pa->pa_count)) { 3974 /* this shouldn't happen often - nobody should 3975 * use preallocation while we're discarding it */ 3976 spin_unlock(&pa->pa_lock); 3977 spin_unlock(&ei->i_prealloc_lock); 3978 ext4_msg(sb, KERN_ERR, 3979 "uh-oh! used pa while discarding"); 3980 WARN_ON(1); 3981 schedule_timeout_uninterruptible(HZ); 3982 goto repeat; 3983 3984 } 3985 if (pa->pa_deleted == 0) { 3986 pa->pa_deleted = 1; 3987 spin_unlock(&pa->pa_lock); 3988 list_del_rcu(&pa->pa_inode_list); 3989 list_add(&pa->u.pa_tmp_list, &list); 3990 continue; 3991 } 3992 3993 /* someone is deleting pa right now */ 3994 spin_unlock(&pa->pa_lock); 3995 spin_unlock(&ei->i_prealloc_lock); 3996 3997 /* we have to wait here because pa_deleted 3998 * doesn't mean pa is already unlinked from 3999 * the list. 
as we might be called from 4000 * ->clear_inode() the inode will get freed 4001 * and concurrent thread which is unlinking 4002 * pa from inode's list may access already 4003 * freed memory, bad-bad-bad */ 4004 4005 /* XXX: if this happens too often, we can 4006 * add a flag to force wait only in case 4007 * of ->clear_inode(), but not in case of 4008 * regular truncate */ 4009 schedule_timeout_uninterruptible(HZ); 4010 goto repeat; 4011 } 4012 spin_unlock(&ei->i_prealloc_lock); 4013 4014 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 4015 BUG_ON(pa->pa_type != MB_INODE_PA); 4016 group = ext4_get_group_number(sb, pa->pa_pstart); 4017 4018 err = ext4_mb_load_buddy(sb, group, &e4b); 4019 if (err) { 4020 ext4_error(sb, "Error loading buddy information for %u", 4021 group); 4022 continue; 4023 } 4024 4025 bitmap_bh = ext4_read_block_bitmap(sb, group); 4026 if (IS_ERR(bitmap_bh)) { 4027 err = PTR_ERR(bitmap_bh); 4028 ext4_error(sb, "Error %d reading block bitmap for %u", 4029 err, group); 4030 ext4_mb_unload_buddy(&e4b); 4031 continue; 4032 } 4033 4034 ext4_lock_group(sb, group); 4035 list_del(&pa->pa_group_list); 4036 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 4037 ext4_unlock_group(sb, group); 4038 4039 ext4_mb_unload_buddy(&e4b); 4040 put_bh(bitmap_bh); 4041 4042 list_del(&pa->u.pa_tmp_list); 4043 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 4044 } 4045} 4046 4047#ifdef CONFIG_EXT4_DEBUG 4048static void ext4_mb_show_ac(struct ext4_allocation_context *ac) 4049{ 4050 struct super_block *sb = ac->ac_sb; 4051 ext4_group_t ngroups, i; 4052 4053 if (!ext4_mballoc_debug || 4054 (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) 4055 return; 4056 4057 ext4_msg(ac->ac_sb, KERN_ERR, "Can't allocate:" 4058 " Allocation context details:"); 4059 ext4_msg(ac->ac_sb, KERN_ERR, "status %d flags %d", 4060 ac->ac_status, ac->ac_flags); 4061 ext4_msg(ac->ac_sb, KERN_ERR, "orig %lu/%lu/%lu@%lu, " 4062 "goal %lu/%lu/%lu@%lu, " 4063 "best %lu/%lu/%lu@%lu cr %d", 4064 (unsigned long)ac->ac_o_ex.fe_group, 4065 (unsigned long)ac->ac_o_ex.fe_start, 4066 (unsigned long)ac->ac_o_ex.fe_len, 4067 (unsigned long)ac->ac_o_ex.fe_logical, 4068 (unsigned long)ac->ac_g_ex.fe_group, 4069 (unsigned long)ac->ac_g_ex.fe_start, 4070 (unsigned long)ac->ac_g_ex.fe_len, 4071 (unsigned long)ac->ac_g_ex.fe_logical, 4072 (unsigned long)ac->ac_b_ex.fe_group, 4073 (unsigned long)ac->ac_b_ex.fe_start, 4074 (unsigned long)ac->ac_b_ex.fe_len, 4075 (unsigned long)ac->ac_b_ex.fe_logical, 4076 (int)ac->ac_criteria); 4077 ext4_msg(ac->ac_sb, KERN_ERR, "%d found", ac->ac_found); 4078 ext4_msg(ac->ac_sb, KERN_ERR, "groups: "); 4079 ngroups = ext4_get_groups_count(sb); 4080 for (i = 0; i < ngroups; i++) { 4081 struct ext4_group_info *grp = ext4_get_group_info(sb, i); 4082 struct ext4_prealloc_space *pa; 4083 ext4_grpblk_t start; 4084 struct list_head *cur; 4085 ext4_lock_group(sb, i); 4086 list_for_each(cur, &grp->bb_prealloc_list) { 4087 pa = list_entry(cur, struct ext4_prealloc_space, 4088 pa_group_list); 4089 spin_lock(&pa->pa_lock); 4090 ext4_get_group_no_and_offset(sb, pa->pa_pstart, 4091 NULL, &start); 4092 spin_unlock(&pa->pa_lock); 4093 printk(KERN_ERR "PA:%u:%d:%u \n", i, 4094 start, pa->pa_len); 4095 } 4096 ext4_unlock_group(sb, i); 4097 4098 if (grp->bb_free == 0) 4099 continue; 4100 printk(KERN_ERR "%u: %d/%d \n", 4101 i, grp->bb_free, grp->bb_fragments); 4102 } 4103 printk(KERN_ERR "\n"); 4104} 4105#else 4106static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac) 4107{ 4108 return; 4109} 4110#endif 4111 
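/*
 * The debug dump above prints one "PA:<group>:<start>:<len>" line for every
 * preallocation attached to a group, followed by a "<group>: <free>/<fragments>"
 * line for each group that still has free blocks.
 */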
4112/* 4113 * We use locality group preallocation for small size file. The size of the 4114 * file is determined by the current size or the resulting size after 4115 * allocation which ever is larger 4116 * 4117 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req 4118 */ 4119static void ext4_mb_group_or_file(struct ext4_allocation_context *ac) 4120{ 4121 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4122 int bsbits = ac->ac_sb->s_blocksize_bits; 4123 loff_t size, isize; 4124 4125 if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 4126 return; 4127 4128 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 4129 return; 4130 4131 size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len); 4132 isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1) 4133 >> bsbits; 4134 4135 if ((size == isize) && 4136 !ext4_fs_is_busy(sbi) && 4137 (atomic_read(&ac->ac_inode->i_writecount) == 0)) { 4138 ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC; 4139 return; 4140 } 4141 4142 if (sbi->s_mb_group_prealloc <= 0) { 4143 ac->ac_flags |= EXT4_MB_STREAM_ALLOC; 4144 return; 4145 } 4146 4147 /* don't use group allocation for large files */ 4148 size = max(size, isize); 4149 if (size > sbi->s_mb_stream_request) { 4150 ac->ac_flags |= EXT4_MB_STREAM_ALLOC; 4151 return; 4152 } 4153 4154 BUG_ON(ac->ac_lg != NULL); 4155 /* 4156 * locality group prealloc space are per cpu. The reason for having 4157 * per cpu locality group is to reduce the contention between block 4158 * request from multiple CPUs. 4159 */ 4160 ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups); 4161 4162 /* we're going to use group allocation */ 4163 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC; 4164 4165 /* serialize all allocations in the group */ 4166 mutex_lock(&ac->ac_lg->lg_mutex); 4167} 4168 4169static noinline_for_stack int 4170ext4_mb_initialize_context(struct ext4_allocation_context *ac, 4171 struct ext4_allocation_request *ar) 4172{ 4173 struct super_block *sb = ar->inode->i_sb; 4174 struct ext4_sb_info *sbi = EXT4_SB(sb); 4175 struct ext4_super_block *es = sbi->s_es; 4176 ext4_group_t group; 4177 unsigned int len; 4178 ext4_fsblk_t goal; 4179 ext4_grpblk_t block; 4180 4181 /* we can't allocate > group size */ 4182 len = ar->len; 4183 4184 /* just a dirty hack to filter too big requests */ 4185 if (len >= EXT4_CLUSTERS_PER_GROUP(sb)) 4186 len = EXT4_CLUSTERS_PER_GROUP(sb); 4187 4188 /* start searching from the goal */ 4189 goal = ar->goal; 4190 if (goal < le32_to_cpu(es->s_first_data_block) || 4191 goal >= ext4_blocks_count(es)) 4192 goal = le32_to_cpu(es->s_first_data_block); 4193 ext4_get_group_no_and_offset(sb, goal, &group, &block); 4194 4195 /* set up allocation goals */ 4196 ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical); 4197 ac->ac_status = AC_STATUS_CONTINUE; 4198 ac->ac_sb = sb; 4199 ac->ac_inode = ar->inode; 4200 ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical; 4201 ac->ac_o_ex.fe_group = group; 4202 ac->ac_o_ex.fe_start = block; 4203 ac->ac_o_ex.fe_len = len; 4204 ac->ac_g_ex = ac->ac_o_ex; 4205 ac->ac_flags = ar->flags; 4206 4207 /* we have to define context: we'll we work with a file or 4208 * locality group. 
this is a policy, actually */ 4209 ext4_mb_group_or_file(ac); 4210 4211 mb_debug(1, "init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, " 4212 "left: %u/%u, right %u/%u to %swritable\n", 4213 (unsigned) ar->len, (unsigned) ar->logical, 4214 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order, 4215 (unsigned) ar->lleft, (unsigned) ar->pleft, 4216 (unsigned) ar->lright, (unsigned) ar->pright, 4217 atomic_read(&ar->inode->i_writecount) ? "" : "non-"); 4218 return 0; 4219 4220} 4221 4222static noinline_for_stack void 4223ext4_mb_discard_lg_preallocations(struct super_block *sb, 4224 struct ext4_locality_group *lg, 4225 int order, int total_entries) 4226{ 4227 ext4_group_t group = 0; 4228 struct ext4_buddy e4b; 4229 struct list_head discard_list; 4230 struct ext4_prealloc_space *pa, *tmp; 4231 4232 mb_debug(1, "discard locality group preallocation\n"); 4233 4234 INIT_LIST_HEAD(&discard_list); 4235 4236 spin_lock(&lg->lg_prealloc_lock); 4237 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order], 4238 pa_inode_list) { 4239 spin_lock(&pa->pa_lock); 4240 if (atomic_read(&pa->pa_count)) { 4241 /* 4242 * This is the pa that we just used 4243 * for block allocation. So don't 4244 * free it. 4245 */ 4246 spin_unlock(&pa->pa_lock); 4247 continue; 4248 } 4249 if (pa->pa_deleted) { 4250 spin_unlock(&pa->pa_lock); 4251 continue; 4252 } 4253 /* only lg prealloc space */ 4254 BUG_ON(pa->pa_type != MB_GROUP_PA); 4255 4256 /* seems this one can be freed ... */ 4257 pa->pa_deleted = 1; 4258 spin_unlock(&pa->pa_lock); 4259 4260 list_del_rcu(&pa->pa_inode_list); 4261 list_add(&pa->u.pa_tmp_list, &discard_list); 4262 4263 total_entries--; 4264 if (total_entries <= 5) { 4265 /* 4266 * we want to keep only 5 entries 4267 * allowing it to grow to 8. This 4268 * makes sure we don't call discard 4269 * soon for this list. 4270 */ 4271 break; 4272 } 4273 } 4274 spin_unlock(&lg->lg_prealloc_lock); 4275 4276 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) { 4277 4278 group = ext4_get_group_number(sb, pa->pa_pstart); 4279 if (ext4_mb_load_buddy(sb, group, &e4b)) { 4280 ext4_error(sb, "Error loading buddy information for %u", 4281 group); 4282 continue; 4283 } 4284 ext4_lock_group(sb, group); 4285 list_del(&pa->pa_group_list); 4286 ext4_mb_release_group_pa(&e4b, pa); 4287 ext4_unlock_group(sb, group); 4288 4289 ext4_mb_unload_buddy(&e4b); 4290 list_del(&pa->u.pa_tmp_list); 4291 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 4292 } 4293} 4294 4295/* 4296 * We have incremented pa_count. So it cannot be freed at this 4297 * point. Also we hold lg_mutex. So no parallel allocation is 4298 * possible from this lg. That means pa_free cannot be updated. 4299 * 4300 * A parallel ext4_mb_discard_group_preallocations is possible, 4301 * which can cause the lg_prealloc_list to be updated. 
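 *
 * The bucket index used below is derived from the PA size:
 * order = fls(pa_free) - 1, capped at PREALLOC_TB_SIZE - 1. For example,
 * a group PA with pa_free = 300 clusters lands in bucket fls(300) - 1 = 8.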
4302 */ 4303 4304static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) 4305{ 4306 int order, added = 0, lg_prealloc_count = 1; 4307 struct super_block *sb = ac->ac_sb; 4308 struct ext4_locality_group *lg = ac->ac_lg; 4309 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa; 4310 4311 order = fls(pa->pa_free) - 1; 4312 if (order > PREALLOC_TB_SIZE - 1) 4313 /* The max size of hash table is PREALLOC_TB_SIZE */ 4314 order = PREALLOC_TB_SIZE - 1; 4315 /* Add the prealloc space to lg */ 4316 spin_lock(&lg->lg_prealloc_lock); 4317 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order], 4318 pa_inode_list) { 4319 spin_lock(&tmp_pa->pa_lock); 4320 if (tmp_pa->pa_deleted) { 4321 spin_unlock(&tmp_pa->pa_lock); 4322 continue; 4323 } 4324 if (!added && pa->pa_free < tmp_pa->pa_free) { 4325 /* Add to the tail of the previous entry */ 4326 list_add_tail_rcu(&pa->pa_inode_list, 4327 &tmp_pa->pa_inode_list); 4328 added = 1; 4329 /* 4330 * we want to count the total 4331 * number of entries in the list 4332 */ 4333 } 4334 spin_unlock(&tmp_pa->pa_lock); 4335 lg_prealloc_count++; 4336 } 4337 if (!added) 4338 list_add_tail_rcu(&pa->pa_inode_list, 4339 &lg->lg_prealloc_list[order]); 4340 spin_unlock(&lg->lg_prealloc_lock); 4341 4342 /* Now trim the list to be not more than 8 elements */ 4343 if (lg_prealloc_count > 8) { 4344 ext4_mb_discard_lg_preallocations(sb, lg, 4345 order, lg_prealloc_count); 4346 return; 4347 } 4348 return ; 4349} 4350 4351/* 4352 * release all resource we used in allocation 4353 */ 4354static int ext4_mb_release_context(struct ext4_allocation_context *ac) 4355{ 4356 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4357 struct ext4_prealloc_space *pa = ac->ac_pa; 4358 if (pa) { 4359 if (pa->pa_type == MB_GROUP_PA) { 4360 /* see comment in ext4_mb_use_group_pa() */ 4361 spin_lock(&pa->pa_lock); 4362 pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 4363 pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 4364 pa->pa_free -= ac->ac_b_ex.fe_len; 4365 pa->pa_len -= ac->ac_b_ex.fe_len; 4366 spin_unlock(&pa->pa_lock); 4367 } 4368 } 4369 if (pa) { 4370 /* 4371 * We want to add the pa to the right bucket. 4372 * Remove it from the list and while adding 4373 * make sure the list to which we are adding 4374 * doesn't grow big. 
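 * (pa_free was just decreased by the allocated fe_len, so the group PA
 * may now belong in a smaller-order bucket; ext4_mb_add_n_trim() re-adds
 * it and, once a bucket grows past 8 entries, discards old PAs from it)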
4375 */ 4376 if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) { 4377 spin_lock(pa->pa_obj_lock); 4378 list_del_rcu(&pa->pa_inode_list); 4379 spin_unlock(pa->pa_obj_lock); 4380 ext4_mb_add_n_trim(ac); 4381 } 4382 ext4_mb_put_pa(ac, ac->ac_sb, pa); 4383 } 4384 if (ac->ac_bitmap_page) 4385 page_cache_release(ac->ac_bitmap_page); 4386 if (ac->ac_buddy_page) 4387 page_cache_release(ac->ac_buddy_page); 4388 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 4389 mutex_unlock(&ac->ac_lg->lg_mutex); 4390 ext4_mb_collect_stats(ac); 4391 return 0; 4392} 4393 4394static int ext4_mb_discard_preallocations(struct super_block *sb, int needed) 4395{ 4396 ext4_group_t i, ngroups = ext4_get_groups_count(sb); 4397 int ret; 4398 int freed = 0; 4399 4400 trace_ext4_mb_discard_preallocations(sb, needed); 4401 for (i = 0; i < ngroups && needed > 0; i++) { 4402 ret = ext4_mb_discard_group_preallocations(sb, i, needed); 4403 freed += ret; 4404 needed -= ret; 4405 } 4406 4407 return freed; 4408} 4409 4410/* 4411 * Main entry point into mballoc to allocate blocks 4412 * it tries to use preallocation first, then falls back 4413 * to usual allocation 4414 */ 4415ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, 4416 struct ext4_allocation_request *ar, int *errp) 4417{ 4418 int freed; 4419 struct ext4_allocation_context *ac = NULL; 4420 struct ext4_sb_info *sbi; 4421 struct super_block *sb; 4422 ext4_fsblk_t block = 0; 4423 unsigned int inquota = 0; 4424 unsigned int reserv_clstrs = 0; 4425 4426 might_sleep(); 4427 sb = ar->inode->i_sb; 4428 sbi = EXT4_SB(sb); 4429 4430 trace_ext4_request_blocks(ar); 4431 4432 /* Allow to use superuser reservation for quota file */ 4433 if (IS_NOQUOTA(ar->inode)) 4434 ar->flags |= EXT4_MB_USE_ROOT_BLOCKS; 4435 4436 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) { 4437 /* Without delayed allocation we need to verify 4438 * there is enough free blocks to do block allocation 4439 * and verify allocation doesn't exceed the quota limits. 
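 *
 * The loop below keeps halving ar->len until ext4_claim_free_clusters()
 * succeeds (e.g. a 64-cluster request may be retried as 32, 16, ...);
 * if not even one cluster can be claimed, we return -ENOSPC.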
4440 */ 4441 while (ar->len && 4442 ext4_claim_free_clusters(sbi, ar->len, ar->flags)) { 4443 4444 /* let others to free the space */ 4445 cond_resched(); 4446 ar->len = ar->len >> 1; 4447 } 4448 if (!ar->len) { 4449 *errp = -ENOSPC; 4450 return 0; 4451 } 4452 reserv_clstrs = ar->len; 4453 if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) { 4454 dquot_alloc_block_nofail(ar->inode, 4455 EXT4_C2B(sbi, ar->len)); 4456 } else { 4457 while (ar->len && 4458 dquot_alloc_block(ar->inode, 4459 EXT4_C2B(sbi, ar->len))) { 4460 4461 ar->flags |= EXT4_MB_HINT_NOPREALLOC; 4462 ar->len--; 4463 } 4464 } 4465 inquota = ar->len; 4466 if (ar->len == 0) { 4467 *errp = -EDQUOT; 4468 goto out; 4469 } 4470 } 4471 4472 ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS); 4473 if (!ac) { 4474 ar->len = 0; 4475 *errp = -ENOMEM; 4476 goto out; 4477 } 4478 4479 *errp = ext4_mb_initialize_context(ac, ar); 4480 if (*errp) { 4481 ar->len = 0; 4482 goto out; 4483 } 4484 4485 ac->ac_op = EXT4_MB_HISTORY_PREALLOC; 4486 if (!ext4_mb_use_preallocated(ac)) { 4487 ac->ac_op = EXT4_MB_HISTORY_ALLOC; 4488 ext4_mb_normalize_request(ac, ar); 4489repeat: 4490 /* allocate space in core */ 4491 *errp = ext4_mb_regular_allocator(ac); 4492 if (*errp) 4493 goto discard_and_exit; 4494 4495 /* as we've just preallocated more space than 4496 * user requested originally, we store allocated 4497 * space in a special descriptor */ 4498 if (ac->ac_status == AC_STATUS_FOUND && 4499 ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) 4500 *errp = ext4_mb_new_preallocation(ac); 4501 if (*errp) { 4502 discard_and_exit: 4503 ext4_discard_allocated_blocks(ac); 4504 goto errout; 4505 } 4506 } 4507 if (likely(ac->ac_status == AC_STATUS_FOUND)) { 4508 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs); 4509 if (*errp == -EAGAIN) { 4510 /* 4511 * drop the reference that we took 4512 * in ext4_mb_use_best_found 4513 */ 4514 ext4_mb_release_context(ac); 4515 ac->ac_b_ex.fe_group = 0; 4516 ac->ac_b_ex.fe_start = 0; 4517 ac->ac_b_ex.fe_len = 0; 4518 ac->ac_status = AC_STATUS_CONTINUE; 4519 goto repeat; 4520 } else if (*errp) { 4521 ext4_discard_allocated_blocks(ac); 4522 goto errout; 4523 } else { 4524 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 4525 ar->len = ac->ac_b_ex.fe_len; 4526 } 4527 } else { 4528 freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len); 4529 if (freed) 4530 goto repeat; 4531 *errp = -ENOSPC; 4532 } 4533 4534errout: 4535 if (*errp) { 4536 ac->ac_b_ex.fe_len = 0; 4537 ar->len = 0; 4538 ext4_mb_show_ac(ac); 4539 } 4540 ext4_mb_release_context(ac); 4541out: 4542 if (ac) 4543 kmem_cache_free(ext4_ac_cachep, ac); 4544 if (inquota && ar->len < inquota) 4545 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len)); 4546 if (!ar->len) { 4547 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) 4548 /* release all the reserved blocks if non delalloc */ 4549 percpu_counter_sub(&sbi->s_dirtyclusters_counter, 4550 reserv_clstrs); 4551 } 4552 4553 trace_ext4_allocate_blocks(ar, (unsigned long long)block); 4554 4555 return block; 4556} 4557 4558/* 4559 * We can merge two free data extents only if the physical blocks 4560 * are contiguous, AND the extents were freed by the same transaction, 4561 * AND the blocks are associated with the same group. 
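 *
 * For example, an entry covering clusters 100..109 of group G freed in
 * transaction T can be merged with an entry covering clusters 110..119
 * of the same group and transaction, since 100 + 10 == 110.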
4562 */ 4563static int can_merge(struct ext4_free_data *entry1, 4564 struct ext4_free_data *entry2) 4565{ 4566 if ((entry1->efd_tid == entry2->efd_tid) && 4567 (entry1->efd_group == entry2->efd_group) && 4568 ((entry1->efd_start_cluster + entry1->efd_count) == entry2->efd_start_cluster)) 4569 return 1; 4570 return 0; 4571} 4572 4573static noinline_for_stack int 4574ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, 4575 struct ext4_free_data *new_entry) 4576{ 4577 ext4_group_t group = e4b->bd_group; 4578 ext4_grpblk_t cluster; 4579 struct ext4_free_data *entry; 4580 struct ext4_group_info *db = e4b->bd_info; 4581 struct super_block *sb = e4b->bd_sb; 4582 struct ext4_sb_info *sbi = EXT4_SB(sb); 4583 struct rb_node **n = &db->bb_free_root.rb_node, *node; 4584 struct rb_node *parent = NULL, *new_node; 4585 4586 BUG_ON(!ext4_handle_valid(handle)); 4587 BUG_ON(e4b->bd_bitmap_page == NULL); 4588 BUG_ON(e4b->bd_buddy_page == NULL); 4589 4590 new_node = &new_entry->efd_node; 4591 cluster = new_entry->efd_start_cluster; 4592 4593 if (!*n) { 4594 /* first free block exent. We need to 4595 protect buddy cache from being freed, 4596 * otherwise we'll refresh it from 4597 * on-disk bitmap and lose not-yet-available 4598 * blocks */ 4599 page_cache_get(e4b->bd_buddy_page); 4600 page_cache_get(e4b->bd_bitmap_page); 4601 } 4602 while (*n) { 4603 parent = *n; 4604 entry = rb_entry(parent, struct ext4_free_data, efd_node); 4605 if (cluster < entry->efd_start_cluster) 4606 n = &(*n)->rb_left; 4607 else if (cluster >= (entry->efd_start_cluster + entry->efd_count)) 4608 n = &(*n)->rb_right; 4609 else { 4610 ext4_grp_locked_error(sb, group, 0, 4611 ext4_group_first_block_no(sb, group) + 4612 EXT4_C2B(sbi, cluster), 4613 "Block already on to-be-freed list"); 4614 return 0; 4615 } 4616 } 4617 4618 rb_link_node(new_node, parent, n); 4619 rb_insert_color(new_node, &db->bb_free_root); 4620 4621 /* Now try to see the extent can be merged to left and right */ 4622 node = rb_prev(new_node); 4623 if (node) { 4624 entry = rb_entry(node, struct ext4_free_data, efd_node); 4625 if (can_merge(entry, new_entry) && 4626 ext4_journal_callback_try_del(handle, &entry->efd_jce)) { 4627 new_entry->efd_start_cluster = entry->efd_start_cluster; 4628 new_entry->efd_count += entry->efd_count; 4629 rb_erase(node, &(db->bb_free_root)); 4630 kmem_cache_free(ext4_free_data_cachep, entry); 4631 } 4632 } 4633 4634 node = rb_next(new_node); 4635 if (node) { 4636 entry = rb_entry(node, struct ext4_free_data, efd_node); 4637 if (can_merge(new_entry, entry) && 4638 ext4_journal_callback_try_del(handle, &entry->efd_jce)) { 4639 new_entry->efd_count += entry->efd_count; 4640 rb_erase(node, &(db->bb_free_root)); 4641 kmem_cache_free(ext4_free_data_cachep, entry); 4642 } 4643 } 4644 /* Add the extent to transaction's private list */ 4645 ext4_journal_callback_add(handle, ext4_free_data_callback, 4646 &new_entry->efd_jce); 4647 return 0; 4648} 4649 4650/** 4651 * ext4_free_blocks() -- Free given blocks and update quota 4652 * @handle: handle for this transaction 4653 * @inode: inode 4654 * @block: start physical block to free 4655 * @count: number of blocks to count 4656 * @flags: flags used by ext4_free_blocks 4657 */ 4658void ext4_free_blocks(handle_t *handle, struct inode *inode, 4659 struct buffer_head *bh, ext4_fsblk_t block, 4660 unsigned long count, int flags) 4661{ 4662 struct buffer_head *bitmap_bh = NULL; 4663 struct super_block *sb = inode->i_sb; 4664 struct ext4_group_desc *gdp; 4665 unsigned int overflow; 4666 ext4_grpblk_t 
bit; 4667 struct buffer_head *gd_bh; 4668 ext4_group_t block_group; 4669 struct ext4_sb_info *sbi; 4670 struct ext4_buddy e4b; 4671 unsigned int count_clusters; 4672 int err = 0; 4673 int ret; 4674 4675 might_sleep(); 4676 if (bh) { 4677 if (block) 4678 BUG_ON(block != bh->b_blocknr); 4679 else 4680 block = bh->b_blocknr; 4681 } 4682 4683 sbi = EXT4_SB(sb); 4684 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 4685 !ext4_data_block_valid(sbi, block, count)) { 4686 ext4_error(sb, "Freeing blocks not in datazone - " 4687 "block = %llu, count = %lu", block, count); 4688 goto error_return; 4689 } 4690 4691 ext4_debug("freeing block %llu\n", block); 4692 trace_ext4_free_blocks(inode, block, count, flags); 4693 4694 if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) { 4695 BUG_ON(count > 1); 4696 4697 ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA, 4698 inode, bh, block); 4699 } 4700 4701 /* 4702 * We need to make sure we don't reuse the freed block until 4703 * after the transaction is committed, which we can do by 4704 * treating the block as metadata, below. We make an 4705 * exception if the inode is to be written in writeback mode 4706 * since writeback mode has weak data consistency guarantees. 4707 */ 4708 if (!ext4_should_writeback_data(inode)) 4709 flags |= EXT4_FREE_BLOCKS_METADATA; 4710 4711 /* 4712 * If the extent to be freed does not begin on a cluster 4713 * boundary, we need to deal with partial clusters at the 4714 * beginning and end of the extent. Normally we will free 4715 * blocks at the beginning or the end unless we are explicitly 4716 * requested to avoid doing so. 4717 */ 4718 overflow = EXT4_PBLK_COFF(sbi, block); 4719 if (overflow) { 4720 if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { 4721 overflow = sbi->s_cluster_ratio - overflow; 4722 block += overflow; 4723 if (count > overflow) 4724 count -= overflow; 4725 else 4726 return; 4727 } else { 4728 block -= overflow; 4729 count += overflow; 4730 } 4731 } 4732 overflow = EXT4_LBLK_COFF(sbi, count); 4733 if (overflow) { 4734 if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { 4735 if (count > overflow) 4736 count -= overflow; 4737 else 4738 return; 4739 } else 4740 count += sbi->s_cluster_ratio - overflow; 4741 } 4742 4743 if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) { 4744 int i; 4745 4746 for (i = 0; i < count; i++) { 4747 cond_resched(); 4748 bh = sb_find_get_block(inode->i_sb, block + i); 4749 if (!bh) 4750 continue; 4751 ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA, 4752 inode, bh, block + i); 4753 } 4754 } 4755 4756do_more: 4757 overflow = 0; 4758 ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 4759 4760 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT( 4761 ext4_get_group_info(sb, block_group)))) 4762 return; 4763 4764 /* 4765 * Check to see if we are freeing blocks across a group 4766 * boundary. 
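 * If the range being freed runs past the end of this block group, only
 * the part inside the group is freed now; the remainder ("overflow") is
 * handled by looping back to the do_more label.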
4767 */ 4768 if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) { 4769 overflow = EXT4_C2B(sbi, bit) + count - 4770 EXT4_BLOCKS_PER_GROUP(sb); 4771 count -= overflow; 4772 } 4773 count_clusters = EXT4_NUM_B2C(sbi, count); 4774 bitmap_bh = ext4_read_block_bitmap(sb, block_group); 4775 if (IS_ERR(bitmap_bh)) { 4776 err = PTR_ERR(bitmap_bh); 4777 bitmap_bh = NULL; 4778 goto error_return; 4779 } 4780 gdp = ext4_get_group_desc(sb, block_group, &gd_bh); 4781 if (!gdp) { 4782 err = -EIO; 4783 goto error_return; 4784 } 4785 4786 if (in_range(ext4_block_bitmap(sb, gdp), block, count) || 4787 in_range(ext4_inode_bitmap(sb, gdp), block, count) || 4788 in_range(block, ext4_inode_table(sb, gdp), 4789 EXT4_SB(sb)->s_itb_per_group) || 4790 in_range(block + count - 1, ext4_inode_table(sb, gdp), 4791 EXT4_SB(sb)->s_itb_per_group)) { 4792 4793 ext4_error(sb, "Freeing blocks in system zone - " 4794 "Block = %llu, count = %lu", block, count); 4795 /* err = 0. ext4_std_error should be a no op */ 4796 goto error_return; 4797 } 4798 4799 BUFFER_TRACE(bitmap_bh, "getting write access"); 4800 err = ext4_journal_get_write_access(handle, bitmap_bh); 4801 if (err) 4802 goto error_return; 4803 4804 /* 4805 * We are about to modify some metadata. Call the journal APIs 4806 * to unshare ->b_data if a currently-committing transaction is 4807 * using it 4808 */ 4809 BUFFER_TRACE(gd_bh, "get_write_access"); 4810 err = ext4_journal_get_write_access(handle, gd_bh); 4811 if (err) 4812 goto error_return; 4813#ifdef AGGRESSIVE_CHECK 4814 { 4815 int i; 4816 for (i = 0; i < count_clusters; i++) 4817 BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data)); 4818 } 4819#endif 4820 trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters); 4821 4822 err = ext4_mb_load_buddy(sb, block_group, &e4b); 4823 if (err) 4824 goto error_return; 4825 4826 if ((flags & EXT4_FREE_BLOCKS_METADATA) && ext4_handle_valid(handle)) { 4827 struct ext4_free_data *new_entry; 4828 /* 4829 * blocks being freed are metadata. these blocks shouldn't 4830 * be used until this transaction is committed 4831 * 4832 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed 4833 * to fail. 4834 */ 4835 new_entry = kmem_cache_alloc(ext4_free_data_cachep, 4836 GFP_NOFS|__GFP_NOFAIL); 4837 new_entry->efd_start_cluster = bit; 4838 new_entry->efd_group = block_group; 4839 new_entry->efd_count = count_clusters; 4840 new_entry->efd_tid = handle->h_transaction->t_tid; 4841 4842 ext4_lock_group(sb, block_group); 4843 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); 4844 ext4_mb_free_metadata(handle, &e4b, new_entry); 4845 } else { 4846 /* need to update group_info->bb_free and bitmap 4847 * with group lock held. 
generate_buddy look at 4848 * them with group lock_held 4849 */ 4850 if (test_opt(sb, DISCARD)) { 4851 err = ext4_issue_discard(sb, block_group, bit, count); 4852 if (err && err != -EOPNOTSUPP) 4853 ext4_msg(sb, KERN_WARNING, "discard request in" 4854 " group:%d block:%d count:%lu failed" 4855 " with %d", block_group, bit, count, 4856 err); 4857 } else 4858 EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info); 4859 4860 ext4_lock_group(sb, block_group); 4861 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); 4862 mb_free_blocks(inode, &e4b, bit, count_clusters); 4863 } 4864 4865 ret = ext4_free_group_clusters(sb, gdp) + count_clusters; 4866 ext4_free_group_clusters_set(sb, gdp, ret); 4867 ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh); 4868 ext4_group_desc_csum_set(sb, block_group, gdp); 4869 ext4_unlock_group(sb, block_group); 4870 4871 if (sbi->s_log_groups_per_flex) { 4872 ext4_group_t flex_group = ext4_flex_group(sbi, block_group); 4873 atomic64_add(count_clusters, 4874 &sbi->s_flex_groups[flex_group].free_clusters); 4875 } 4876 4877 if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE)) 4878 dquot_free_block(inode, EXT4_C2B(sbi, count_clusters)); 4879 percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters); 4880 4881 ext4_mb_unload_buddy(&e4b); 4882 4883 /* We dirtied the bitmap block */ 4884 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); 4885 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 4886 4887 /* And the group descriptor block */ 4888 BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); 4889 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh); 4890 if (!err) 4891 err = ret; 4892 4893 if (overflow && !err) { 4894 block += count; 4895 count = overflow; 4896 put_bh(bitmap_bh); 4897 goto do_more; 4898 } 4899error_return: 4900 brelse(bitmap_bh); 4901 ext4_std_error(sb, err); 4902 return; 4903} 4904 4905/** 4906 * ext4_group_add_blocks() -- Add given blocks to an existing group 4907 * @handle: handle to this transaction 4908 * @sb: super block 4909 * @block: start physical block to add to the block group 4910 * @count: number of blocks to free 4911 * 4912 * This marks the blocks as free in the bitmap and buddy. 4913 */ 4914int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, 4915 ext4_fsblk_t block, unsigned long count) 4916{ 4917 struct buffer_head *bitmap_bh = NULL; 4918 struct buffer_head *gd_bh; 4919 ext4_group_t block_group; 4920 ext4_grpblk_t bit; 4921 unsigned int i; 4922 struct ext4_group_desc *desc; 4923 struct ext4_sb_info *sbi = EXT4_SB(sb); 4924 struct ext4_buddy e4b; 4925 int err = 0, ret, blk_free_count; 4926 ext4_grpblk_t blocks_freed; 4927 4928 ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1); 4929 4930 if (count == 0) 4931 return 0; 4932 4933 ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 4934 /* 4935 * Check to see if we are freeing blocks across a group 4936 * boundary. 
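 * Unlike ext4_free_blocks() above, this helper does not split such a
 * range: if it does not fit within a single group, we warn and
 * return -EINVAL.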
4937 */ 4938 if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) { 4939 ext4_warning(sb, "too much blocks added to group %u\n", 4940 block_group); 4941 err = -EINVAL; 4942 goto error_return; 4943 } 4944 4945 bitmap_bh = ext4_read_block_bitmap(sb, block_group); 4946 if (IS_ERR(bitmap_bh)) { 4947 err = PTR_ERR(bitmap_bh); 4948 bitmap_bh = NULL; 4949 goto error_return; 4950 } 4951 4952 desc = ext4_get_group_desc(sb, block_group, &gd_bh); 4953 if (!desc) { 4954 err = -EIO; 4955 goto error_return; 4956 } 4957 4958 if (in_range(ext4_block_bitmap(sb, desc), block, count) || 4959 in_range(ext4_inode_bitmap(sb, desc), block, count) || 4960 in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) || 4961 in_range(block + count - 1, ext4_inode_table(sb, desc), 4962 sbi->s_itb_per_group)) { 4963 ext4_error(sb, "Adding blocks in system zones - " 4964 "Block = %llu, count = %lu", 4965 block, count); 4966 err = -EINVAL; 4967 goto error_return; 4968 } 4969 4970 BUFFER_TRACE(bitmap_bh, "getting write access"); 4971 err = ext4_journal_get_write_access(handle, bitmap_bh); 4972 if (err) 4973 goto error_return; 4974 4975 /* 4976 * We are about to modify some metadata. Call the journal APIs 4977 * to unshare ->b_data if a currently-committing transaction is 4978 * using it 4979 */ 4980 BUFFER_TRACE(gd_bh, "get_write_access"); 4981 err = ext4_journal_get_write_access(handle, gd_bh); 4982 if (err) 4983 goto error_return; 4984 4985 for (i = 0, blocks_freed = 0; i < count; i++) { 4986 BUFFER_TRACE(bitmap_bh, "clear bit"); 4987 if (!mb_test_bit(bit + i, bitmap_bh->b_data)) { 4988 ext4_error(sb, "bit already cleared for block %llu", 4989 (ext4_fsblk_t)(block + i)); 4990 BUFFER_TRACE(bitmap_bh, "bit already cleared"); 4991 } else { 4992 blocks_freed++; 4993 } 4994 } 4995 4996 err = ext4_mb_load_buddy(sb, block_group, &e4b); 4997 if (err) 4998 goto error_return; 4999 5000 /* 5001 * need to update group_info->bb_free and bitmap 5002 * with group lock held. generate_buddy look at 5003 * them with group lock_held 5004 */ 5005 ext4_lock_group(sb, block_group); 5006 mb_clear_bits(bitmap_bh->b_data, bit, count); 5007 mb_free_blocks(NULL, &e4b, bit, count); 5008 blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc); 5009 ext4_free_group_clusters_set(sb, desc, blk_free_count); 5010 ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh); 5011 ext4_group_desc_csum_set(sb, block_group, desc); 5012 ext4_unlock_group(sb, block_group); 5013 percpu_counter_add(&sbi->s_freeclusters_counter, 5014 EXT4_NUM_B2C(sbi, blocks_freed)); 5015 5016 if (sbi->s_log_groups_per_flex) { 5017 ext4_group_t flex_group = ext4_flex_group(sbi, block_group); 5018 atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed), 5019 &sbi->s_flex_groups[flex_group].free_clusters); 5020 } 5021 5022 ext4_mb_unload_buddy(&e4b); 5023 5024 /* We dirtied the bitmap block */ 5025 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); 5026 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 5027 5028 /* And the group descriptor block */ 5029 BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); 5030 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh); 5031 if (!err) 5032 err = ret; 5033 5034error_return: 5035 brelse(bitmap_bh); 5036 ext4_std_error(sb, err); 5037 return err; 5038} 5039 5040/** 5041 * ext4_trim_extent -- function to TRIM one single free extent in the group 5042 * @sb: super block for the file system 5043 * @start: starting block of the free extent in the alloc. group 5044 * @count: number of blocks to TRIM 5045 * @group: alloc. 
group we are working with 5046 * @e4b: ext4 buddy for the group 5047 * 5048 * Trim "count" blocks starting at "start" in the "group". To assure that no 5049 * one will allocate those blocks, mark them as used in the buddy bitmap. This must 5050 * be called under the group lock. 5051 */ 5052static int ext4_trim_extent(struct super_block *sb, int start, int count, 5053 ext4_group_t group, struct ext4_buddy *e4b) 5054__releases(bitlock) 5055__acquires(bitlock) 5056{ 5057 struct ext4_free_extent ex; 5058 int ret = 0; 5059 5060 trace_ext4_trim_extent(sb, group, start, count); 5061 5062 assert_spin_locked(ext4_group_lock_ptr(sb, group)); 5063 5064 ex.fe_start = start; 5065 ex.fe_group = group; 5066 ex.fe_len = count; 5067 5068 /* 5069 * Mark blocks used, so no one can reuse them while 5070 * being trimmed. 5071 */ 5072 mb_mark_used(e4b, &ex); 5073 ext4_unlock_group(sb, group); 5074 ret = ext4_issue_discard(sb, group, start, count); 5075 ext4_lock_group(sb, group); 5076 mb_free_blocks(NULL, e4b, start, ex.fe_len); 5077 return ret; 5078} 5079 5080/** 5081 * ext4_trim_all_free -- function to trim all free space in alloc. group 5082 * @sb: super block for file system 5083 * @group: group to be trimmed 5084 * @start: first group block to examine 5085 * @max: last group block to examine 5086 * @minblocks: minimum extent block count 5087 * 5088 * ext4_trim_all_free walks through the group's block bitmap searching for free 5089 * extents. When a free extent at least minblocks long is found, ext4_trim_extent 5090 * is called to TRIM it: the extent is marked as used in the group buddy bitmap 5091 * so that nobody can allocate it, a discard is issued on it, and it is then 5092 * freed again in the buddy bitmap. This is repeated until the whole group has 5093 * been scanned. If the group was already trimmed with an equal or smaller 5094 * minimum extent length, the scan is skipped. The function returns the number 5095 * of blocks trimmed, or a negative error code. 5096 * 5097 */ 5098static ext4_grpblk_t 5099ext4_trim_all_free(struct super_block *sb, ext4_group_t group, 5100 ext4_grpblk_t start, ext4_grpblk_t max, 5101 ext4_grpblk_t minblocks) 5102{ 5103 void *bitmap; 5104 ext4_grpblk_t next, count = 0, free_count = 0; 5105 struct ext4_buddy e4b; 5106 int ret = 0; 5107 5108 trace_ext4_trim_all_free(sb, group, start, max); 5109 5110 ret = ext4_mb_load_buddy(sb, group, &e4b); 5111 if (ret) { 5112 ext4_error(sb, "Error in loading buddy " 5113 "information for %u", group); 5114 return ret; 5115 } 5116 bitmap = e4b.bd_bitmap; 5117 5118 ext4_lock_group(sb, group); 5119 if (EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) && 5120 minblocks >= atomic_read(&EXT4_SB(sb)->s_last_trim_minblks)) 5121 goto out; 5122 5123 start = (e4b.bd_info->bb_first_free > start) ? 
5124 e4b.bd_info->bb_first_free : start; 5125 5126 while (start <= max) { 5127 start = mb_find_next_zero_bit(bitmap, max + 1, start); 5128 if (start > max) 5129 break; 5130 next = mb_find_next_bit(bitmap, max + 1, start); 5131 5132 if ((next - start) >= minblocks) { 5133 ret = ext4_trim_extent(sb, start, 5134 next - start, group, &e4b); 5135 if (ret && ret != -EOPNOTSUPP) 5136 break; 5137 ret = 0; 5138 count += next - start; 5139 } 5140 free_count += next - start; 5141 start = next + 1; 5142 5143 if (fatal_signal_pending(current)) { 5144 count = -ERESTARTSYS; 5145 break; 5146 } 5147 5148 if (need_resched()) { 5149 ext4_unlock_group(sb, group); 5150 cond_resched(); 5151 ext4_lock_group(sb, group); 5152 } 5153 5154 if ((e4b.bd_info->bb_free - free_count) < minblocks) 5155 break; 5156 } 5157 5158 if (!ret) { 5159 ret = count; 5160 EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info); 5161 } 5162out: 5163 ext4_unlock_group(sb, group); 5164 ext4_mb_unload_buddy(&e4b); 5165 5166 ext4_debug("trimmed %d blocks in the group %d\n", 5167 count, group); 5168 5169 return ret; 5170} 5171 5172/** 5173 * ext4_trim_fs() -- trim ioctl handle function 5174 * @sb: superblock for filesystem 5175 * @range: fstrim_range structure 5176 * 5177 * start: First Byte to trim 5178 * len: number of Bytes to trim from start 5179 * minlen: minimum extent length in Bytes 5180 * ext4_trim_fs goes through all allocation groups containing Bytes from 5181 * start to start+len. For each such a group ext4_trim_all_free function 5182 * is invoked to trim all free space. 5183 */ 5184int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) 5185{ 5186 struct ext4_group_info *grp; 5187 ext4_group_t group, first_group, last_group; 5188 ext4_grpblk_t cnt = 0, first_cluster, last_cluster; 5189 uint64_t start, end, minlen, trimmed = 0; 5190 ext4_fsblk_t first_data_blk = 5191 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); 5192 ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es); 5193 int ret = 0; 5194 5195 start = range->start >> sb->s_blocksize_bits; 5196 end = start + (range->len >> sb->s_blocksize_bits) - 1; 5197 minlen = EXT4_NUM_B2C(EXT4_SB(sb), 5198 range->minlen >> sb->s_blocksize_bits); 5199 5200 if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) || 5201 start >= max_blks || 5202 range->len < sb->s_blocksize) 5203 return -EINVAL; 5204 if (end >= max_blks) 5205 end = max_blks - 1; 5206 if (end <= first_data_blk) 5207 goto out; 5208 if (start < first_data_blk) 5209 start = first_data_blk; 5210 5211 /* Determine first and last group to examine based on start and end */ 5212 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start, 5213 &first_group, &first_cluster); 5214 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end, 5215 &last_group, &last_cluster); 5216 5217 /* end now represents the last cluster to discard in this group */ 5218 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1; 5219 5220 for (group = first_group; group <= last_group; group++) { 5221 grp = ext4_get_group_info(sb, group); 5222 /* We only do this if the grp has never been initialized */ 5223 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 5224 ret = ext4_mb_init_group(sb, group); 5225 if (ret) 5226 break; 5227 } 5228 5229 /* 5230 * For all the groups except the last one, last cluster will 5231 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to 5232 * change it for the last group, note that last_cluster is 5233 * already computed earlier by ext4_get_group_no_and_offset() 5234 */ 5235 if (group == last_group) 5236 end = last_cluster; 5237 5238 if (grp->bb_free >= minlen) 
{ 5239 cnt = ext4_trim_all_free(sb, group, first_cluster, 5240 end, minlen); 5241 if (cnt < 0) { 5242 ret = cnt; 5243 break; 5244 } 5245 trimmed += cnt; 5246 } 5247 5248 /* 5249 * For every group except the first one, we are sure 5250 * that the first cluster to discard will be cluster #0. 5251 */ 5252 first_cluster = 0; 5253 } 5254 5255 if (!ret) 5256 atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen); 5257 5258out: 5259 range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits; 5260 return ret; 5261} 5262
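
/*
 * A worked example of the unit conversion in ext4_trim_fs() above, assuming
 * 4 KiB blocks: an fstrim_range with start = 0, len = 1 GiB and minlen = 1 MiB
 * becomes start block 0, end block (1 GiB >> 12) - 1 = 262143 and a minimum
 * extent length of EXT4_NUM_B2C(sbi, 256) clusters; the block groups covering
 * that range are then trimmed one after another by ext4_trim_all_free().
 */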