This source file includes the following definitions:
- f2fs_check_nid_range
- f2fs_available_free_memory
- clear_node_page_dirty
- get_current_nat_page
- get_next_nat_page
- __alloc_nat_entry
- __free_nat_entry
- __init_nat_entry
- __lookup_nat_cache
- __gang_lookup_nat_cache
- __del_from_nat_cache
- __grab_nat_entry_set
- __set_nat_cache_dirty
- __clear_nat_cache_dirty
- __gang_lookup_nat_set
- f2fs_in_warm_node_list
- f2fs_init_fsync_node_info
- f2fs_add_fsync_node_entry
- f2fs_del_fsync_node_entry
- f2fs_reset_fsync_node_info
- f2fs_need_dentry_mark
- f2fs_is_checkpointed_node
- f2fs_need_inode_block_update
- cache_nat_entry
- set_node_addr
- f2fs_try_to_free_nats
- f2fs_get_node_info
- f2fs_ra_node_pages
- f2fs_get_next_page_offset
- get_node_path
- f2fs_get_dnode_of_data
- truncate_node
- truncate_dnode
- truncate_nodes
- truncate_partial_nodes
- f2fs_truncate_inode_blocks
- f2fs_truncate_xattr_node
- f2fs_remove_inode_page
- f2fs_new_inode_page
- f2fs_new_node_page
- read_node_page
- f2fs_ra_node_page
- __get_node_page
- f2fs_get_node_page
- f2fs_get_node_page_ra
- flush_inline_data
- last_fsync_dnode
- __write_node_page
- f2fs_move_node_page
- f2fs_write_node_page
- f2fs_fsync_node_pages
- f2fs_match_ino
- flush_dirty_inode
- f2fs_sync_node_pages
- f2fs_wait_on_node_pages_writeback
- f2fs_write_node_pages
- f2fs_set_node_page_dirty
- __lookup_free_nid_list
- __insert_free_nid
- __remove_free_nid
- __move_free_nid
- update_free_nid_bitmap
- add_free_nid
- remove_free_nid
- scan_nat_page
- scan_curseg_cache
- scan_free_nid_bits
- __f2fs_build_free_nids
- f2fs_build_free_nids
- f2fs_alloc_nid
- f2fs_alloc_nid_done
- f2fs_alloc_nid_failed
- f2fs_try_to_free_nids
- f2fs_recover_inline_xattr
- f2fs_recover_xattr_data
- f2fs_recover_inode_page
- f2fs_restore_node_summary
- remove_nats_in_journal
- __adjust_nat_entry_set
- __update_nat_bits
- __flush_nat_entry_set
- f2fs_flush_nat_entries
- __get_nat_bitmaps
- load_free_nid_bitmap
- init_node_manager
- init_free_nid_cache
- f2fs_build_node_manager
- f2fs_destroy_node_manager
- f2fs_create_node_manager_caches
- f2fs_destroy_node_manager_caches
// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define on_f2fs_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
static struct kmem_cache *fsync_node_entry_slab;

/*
 * Check whether the given nid is within node id range.
 */
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
	if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
			  __func__, nid);
		return -EFSCORRUPTED;
	}
	return 0;
}

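/*
 * Check whether the in-memory footprint of the given cache type is still
 * below its share of available RAM; used to throttle cache growth.
 */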
bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct sysinfo val;
	unsigned long avail_ram;
	unsigned long mem_size = 0;
	bool res = false;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

	/*
	 * give 25%, 25%, 50%, 50%, 50% memory for each components respectively
	 */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->nid_cnt[FREE_NID] *
				sizeof(struct free_nid)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
				PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
		if (excess_cached_nats(sbi))
			res = false;
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->wb.dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INO_ENTRIES) {
		int i;

		for (i = 0; i < MAX_INO_ENTRY; i++)
			mem_size += sbi->im[i].ino_num *
						sizeof(struct ino_entry);
		mem_size >>= PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == EXTENT_CACHE) {
		mem_size = (atomic_read(&sbi->total_ext_tree) *
				sizeof(struct extent_tree) +
				atomic_read(&sbi->total_ext_node) *
				sizeof(struct extent_node)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INMEM_PAGES) {
		/* it allows 20% / total_ram for inmemory pages */
		mem_size = get_pages(sbi, F2FS_INMEM_PAGES);
		res = mem_size < (val.totalram / 5);
	} else {
		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
			return true;
	}
	return res;
}

static void clear_node_page_dirty(struct page *page)
{
	if (PageDirty(page)) {
		f2fs_clear_page_cache_dirty_tag(page);
		clear_page_dirty_for_io(page);
		dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	return f2fs_get_meta_page_nofail(sbi, current_nat_addr(sbi, nid));
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));

	/* get current nat block page with lock */
	src_page = get_current_nat_page(sbi, nid);
	if (IS_ERR(src_page))
		return src_page;
	dst_page = f2fs_grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

static struct nat_entry *__alloc_nat_entry(nid_t nid, bool no_fail)
{
	struct nat_entry *new;

	if (no_fail)
		new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
	else
		new = kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
	if (new) {
		nat_set_nid(new, nid);
		nat_reset_flag(new);
	}
	return new;
}

static void __free_nat_entry(struct nat_entry *e)
{
	kmem_cache_free(nat_entry_slab, e);
}

/* must be locked by nat_tree_lock */
static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
	struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
{
	if (no_fail)
		f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
	else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
		return NULL;

	if (raw_ne)
		node_info_from_raw_nat(&ne->ni, raw_ne);

	spin_lock(&nm_i->nat_list_lock);
	list_add_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	nm_i->nat_cnt++;
	return ne;
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	struct nat_entry *ne;

	ne = radix_tree_lookup(&nm_i->nat_root, n);

	/* for recent accessed nat entry, move it to tail of lru list */
	if (ne && !get_nat_flag(ne, IS_DIRTY)) {
		spin_lock(&nm_i->nat_list_lock);
		if (!list_empty(&ne->list))
			list_move_tail(&ne->list, &nm_i->nat_entries);
		spin_unlock(&nm_i->nat_list_lock);
	}

	return ne;
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	__free_nat_entry(e);
}

static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
							struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;
		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
	}
	return head;
}

static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	struct nat_entry_set *head;
	bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;

	if (!new_ne)
		head = __grab_nat_entry_set(nm_i, ne);

	/*
	 * update entry_cnt in below condition:
	 * 1. update NEW_ADDR to valid block address;
	 * 2. update old block address to new one;
	 */
	if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
				!get_nat_flag(ne, IS_DIRTY)))
		head->entry_cnt++;

	set_nat_flag(ne, IS_PREALLOC, new_ne);

	if (get_nat_flag(ne, IS_DIRTY))
		goto refresh_list;

	nm_i->dirty_nat_cnt++;
	set_nat_flag(ne, IS_DIRTY, true);
refresh_list:
	spin_lock(&nm_i->nat_list_lock);
	if (new_ne)
		list_del_init(&ne->list);
	else
		list_move_tail(&ne->list, &head->entry_list);
	spin_unlock(&nm_i->nat_list_lock);
}

static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
		struct nat_entry_set *set, struct nat_entry *ne)
{
	spin_lock(&nm_i->nat_list_lock);
	list_move_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	set_nat_flag(ne, IS_DIRTY, false);
	set->entry_cnt--;
	nm_i->dirty_nat_cnt--;
}

static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}

bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
{
	return NODE_MAPPING(sbi) == page->mapping &&
			IS_DNODE(page) && is_cold_node(page);
}

void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
{
	spin_lock_init(&sbi->fsync_node_lock);
	INIT_LIST_HEAD(&sbi->fsync_node_list);
	sbi->fsync_seg_id = 0;
	sbi->fsync_node_num = 0;
}

static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
							struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;
	unsigned int seq_id;

	fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab, GFP_NOFS);

	get_page(page);
	fn->page = page;
	INIT_LIST_HEAD(&fn->list);

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_add_tail(&fn->list, &sbi->fsync_node_list);
	fn->seq_id = sbi->fsync_seg_id++;
	seq_id = fn->seq_id;
	sbi->fsync_node_num++;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

	return seq_id;
}

void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_for_each_entry(fn, &sbi->fsync_node_list, list) {
		if (fn->page == page) {
			list_del(&fn->list);
			sbi->fsync_node_num--;
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			kmem_cache_free(fsync_node_entry_slab, fn);
			put_page(page);
			return;
		}
	}
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
	f2fs_bug_on(sbi, 1);
}

void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	sbi->fsync_seg_id = 0;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
}

int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need = false;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
				!get_nat_flag(e, HAS_FSYNCED_INODE))
			need = true;
	}
	up_read(&nm_i->nat_tree_lock);
	return need;
}

bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool is_cp = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	up_read(&nm_i->nat_tree_lock);
	return is_cp;
}

bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need_update = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	up_read(&nm_i->nat_tree_lock);
	return need_update;
}

/* cache the nat entry read from the journal or the NAT block */
static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *new, *e;

	new = __alloc_nat_entry(nid, false);
	if (!new)
		return;

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e)
		e = __init_nat_entry(nm_i, new, ne, false);
	else
		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
				nat_get_blkaddr(e) !=
					le32_to_cpu(ne->block_addr) ||
				nat_get_version(e) != ne->version);
	up_write(&nm_i->nat_tree_lock);
	if (e != new)
		__free_nat_entry(new);
}

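/*
 * Update the cached NAT entry of the node with its new block address,
 * mark it dirty for the next checkpoint, and refresh the fsync flags
 * of the owning inode's entry.
 */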
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	struct nat_entry *new = __alloc_nat_entry(ni->nid, true);

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = __init_nat_entry(nm_i, new, NULL, true);
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when nid is reallocated,
		 * previous nat entry can be remained in nat cache.
		 * So, reinitialize it with new information.
		 */
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}

	if (e != new)
		__free_nat_entry(new);

	/* sanity check */
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) &&
			new_blkaddr == NEW_ADDR);

	/* increment version no as node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (!__is_valid_data_blkaddr(new_blkaddr))
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	if (ni->nid != ni->ino)
		e = __lookup_nat_cache(nm_i, ni->ino);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	up_write(&nm_i->nat_tree_lock);
}

int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (!down_write_trylock(&nm_i->nat_tree_lock))
		return 0;

	spin_lock(&nm_i->nat_list_lock);
	while (nr_shrink) {
		struct nat_entry *ne;

		if (list_empty(&nm_i->nat_entries))
			break;

		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		list_del(&ne->list);
		spin_unlock(&nm_i->nat_list_lock);

		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;

		spin_lock(&nm_i->nat_list_lock);
	}
	spin_unlock(&nm_i->nat_list_lock);

	up_write(&nm_i->nat_tree_lock);
	return nr - nr_shrink;
}

/*
 * This function returns 0 on success, or a negative errno on failure.
 */
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
						struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	pgoff_t index;
	block_t blkaddr;
	int i;

	ni->nid = nid;

	/* Check nat cache */
	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
		up_read(&nm_i->nat_tree_lock);
		return 0;
	}

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));

	/* Check current segment summary */
	down_read(&curseg->journal_rwsem);
	i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(journal, i);
		node_info_from_raw_nat(ni, &ne);
	}
	up_read(&curseg->journal_rwsem);
	if (i >= 0) {
		up_read(&nm_i->nat_tree_lock);
		goto cache;
	}

	/* Fill node_info from nat page */
	index = current_nat_addr(sbi, nid);
	up_read(&nm_i->nat_tree_lock);

	page = f2fs_get_meta_page(sbi, index);
	if (IS_ERR(page))
		return PTR_ERR(page);

	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	blkaddr = le32_to_cpu(ne.block_addr);
	if (__is_valid_data_blkaddr(blkaddr) &&
		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
		return -EFAULT;

	/* cache nat entry */
	cache_nat_entry(sbi, nid, &ne);
	return 0;
}

/*
 * readahead MAX_RA_NODE number of node pages.
 */
static void f2fs_ra_node_pages(struct page *parent, int start, int n)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	struct blk_plug plug;
	int i, end;
	nid_t nid;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + n;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start; i < end; i++) {
		nid = get_nid(parent, i, false);
		f2fs_ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);
}

pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
{
	const long direct_index = ADDRS_PER_INODE(dn->inode);
	const long direct_blks = ADDRS_PER_BLOCK(dn->inode);
	const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK;
	unsigned int skipped_unit = ADDRS_PER_BLOCK(dn->inode);
	int cur_level = dn->cur_level;
	int max_level = dn->max_level;
	pgoff_t base = 0;

	if (!dn->max_level)
		return pgofs + 1;

	while (max_level-- > cur_level)
		skipped_unit *= NIDS_PER_BLOCK;

	switch (dn->max_level) {
	case 3:
		base += 2 * indirect_blks;
		/* fall through */
	case 2:
		base += 2 * direct_blks;
		/* fall through */
	case 1:
		base += direct_index;
		break;
	default:
		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
	}

	return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
}

/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(struct inode *inode, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(inode);
	const long direct_blks = ADDRS_PER_BLOCK(inode);
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		return -E2BIG;
	}
got:
	return level;
}

/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
 */
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *npage[4];
	struct page *parent = NULL;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i = 0;
	int err = 0;

	level = get_node_path(dn->inode, index, offset, noffset);
	if (level < 0)
		return level;

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = f2fs_get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}

	/* if inline_data is set, should not report any block indices */
	if (f2fs_has_inline_data(dn->inode) && index) {
		err = -ENOENT;
		f2fs_put_page(npage[0], 1);
		goto release_out;
	}

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = f2fs_new_node_page(dn, noffset[i]);
			if (IS_ERR(npage[i])) {
				f2fs_alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			f2fs_alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = f2fs_get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->inode,
				dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	if (err == -ENOENT) {
		dn->cur_level = i;
		dn->max_level = level;
		dn->ofs_in_node = offset[level];
	}
	return err;
}

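/*
 * Free the block backing a node page: invalidate the block address,
 * drop the node count, and remove the page from the node mapping.
 */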
static int truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;
	int err;
	pgoff_t index;

	err = f2fs_get_node_info(sbi, dn->nid, &ni);
	if (err)
		return err;

	/* Deallocate node address */
	f2fs_invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		f2fs_remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
		f2fs_inode_synced(dn->inode);
	}

	clear_node_page_dirty(dn->node_page);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	index = dn->node_page->index;
	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			index, index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);

	return 0;
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct page *page;
	int err;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	f2fs_truncate_data_blocks(dn);
	err = truncate_node(dn);
	if (err)
		return err;

	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			if (set_nid(page, i, 0, false))
				dn->node_changed = true;
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				if (set_nid(page, i, 0, false))
					dn->node_changed = true;
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		ret = truncate_node(dn);
		if (ret)
			goto out_err;
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* reference count'll be increased */
		pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		if (set_nid(pages[idx], i, 0, false))
			dn->node_changed = true;
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		err = truncate_node(dn);
		if (err)
			goto fail;
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(inode, from, offset, noffset);
	if (level < 0)
		return level;

	page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			BUG_ON(page->mapping != NODE_MAPPING(sbi));
			f2fs_wait_on_page_writeback(page, NODE, true, true);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

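/* free the node block that holds this inode's extended attributes */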
int f2fs_truncate_xattr_node(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;
	int err;

	if (!nid)
		return 0;

	npage = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	set_new_dnode(&dn, inode, NULL, npage, nid);
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_page(npage, 1);
		return err;
	}

	f2fs_i_xnid_write(inode, 0);

	return 0;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int f2fs_remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	err = f2fs_truncate_xattr_node(inode);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode))
		f2fs_truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		f2fs_put_dnode(&dn);
		return -EIO;
	}

	if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
		f2fs_warn(F2FS_I_SB(inode), "Inconsistent i_blocks, ino:%lu, iblocks:%llu",
			  inode->i_ino, (unsigned long long)inode->i_blocks);
		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
	}

	/* will put inode & node pages */
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}
	return 0;
}

struct page *f2fs_new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return f2fs_new_node_page(&dn, 0);
}

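/*
 * Allocate a new node page for dn->nid: the NAT entry is set to NEW_ADDR
 * and the page is made dirty, so an on-disk block is assigned at
 * writeback time.
 */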
struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
		goto fail;

#ifdef CONFIG_F2FS_CHECK_FS
	err = f2fs_get_node_info(sbi, dn->nid, &new_ni);
	if (err) {
		dec_valid_node_count(sbi, dn->inode, !ofs);
		goto fail;
	}
	f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR);
#endif
	new_ni.nid = dn->nid;
	new_ni.ino = dn->inode->i_ino;
	new_ni.blk_addr = NULL_ADDR;
	new_ni.flag = 0;
	new_ni.version = 0;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE, true, true);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(page, S_ISDIR(dn->inode->i_mode));
	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (set_page_dirty(page))
		dn->node_changed = true;

	if (f2fs_has_xattr_block(ofs))
		f2fs_i_xnid_write(dn->inode, dn->nid);

	if (ofs == 0)
		inc_valid_inode_count(sbi);
	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

/*
 * Caller should do after getting the following values.
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
 */
static int read_node_page(struct page *page, int op_flags)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.page = page,
		.encrypted_page = NULL,
	};
	int err;

	if (PageUptodate(page)) {
		if (!f2fs_inode_chksum_verify(sbi, page)) {
			ClearPageUptodate(page);
			return -EFSBADCRC;
		}
		return LOCKED_PAGE;
	}

	err = f2fs_get_node_info(sbi, page->index, &ni);
	if (err)
		return err;

	if (unlikely(ni.blk_addr == NULL_ADDR) ||
			is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) {
		ClearPageUptodate(page);
		return -ENOENT;
	}

	fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;
	return f2fs_submit_page_bio(&fio);
}

/*
 * Readahead a node page
 */
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	if (!nid)
		return;
	if (f2fs_check_nid_range(sbi, nid))
		return;

	apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
	if (apage)
		return;

	apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!apage)
		return;

	err = read_node_page(apage, REQ_RAHEAD);
	f2fs_put_page(apage, err ? 1 : 0);
}

static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
					struct page *parent, int start)
{
	struct page *page;
	int err;

	if (!nid)
		return ERR_PTR(-ENOENT);
	if (f2fs_check_nid_range(sbi, nid))
		return ERR_PTR(-EINVAL);
repeat:
	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, 0);
	if (err < 0) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	} else if (err == LOCKED_PAGE) {
		err = 0;
		goto page_hit;
	}

	if (parent)
		f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);

	lock_page(page);

	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	if (unlikely(!PageUptodate(page))) {
		err = -EIO;
		goto out_err;
	}

	if (!f2fs_inode_chksum_verify(sbi, page)) {
		err = -EFSBADCRC;
		goto out_err;
	}
page_hit:
	if (unlikely(nid != nid_of_node(page))) {
		f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
			  nid, nid_of_node(page), ino_of_node(page),
			  ofs_of_node(page), cpver_of_node(page),
			  next_blkaddr_of_node(page));
		err = -EINVAL;
out_err:
		ClearPageUptodate(page);
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	return page;
}

struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	return __get_node_page(sbi, nid, NULL, 0);
}

struct page *f2fs_get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	nid_t nid = get_nid(parent, start, false);

	return __get_node_page(sbi, nid, parent, start);
}

static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct page *page;
	int ret;

	/* should flush inline_data before evict_inode */
	inode = ilookup(sbi->sb, ino);
	if (!inode)
		return;

	page = f2fs_pagecache_get_page(inode->i_mapping, 0,
					FGP_LOCK|FGP_NOWAIT, 0);
	if (!page)
		goto iput_out;

	if (!PageUptodate(page))
		goto page_out;

	if (!PageDirty(page))
		goto page_out;

	if (!clear_page_dirty_for_io(page))
		goto page_out;

	ret = f2fs_write_inline_data(inode, page);
	inode_dec_dirty_pages(inode);
	f2fs_remove_dirty_inode(inode);
	if (ret)
		set_page_dirty(page);
page_out:
	f2fs_put_page(page, 1);
iput_out:
	iput(inode);
}

static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index;
	struct pagevec pvec;
	struct page *last_page = NULL;
	int nr_pages;

	pagevec_init(&pvec);
	index = 0;

	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				pagevec_release(&pvec);
				return ERR_PTR(-EIO);
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (last_page)
				f2fs_put_page(last_page, 0);

			get_page(page);
			last_page = page;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return last_page;
}

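/*
 * Write out one dirty node page. Returns 0 when the page was written or
 * found truncated, or AOP_WRITEPAGE_ACTIVATE after redirtying it.
 */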
static int __write_node_page(struct page *page, bool atomic, bool *submitted,
				struct writeback_control *wbc, bool do_balance,
				enum iostat_type io_type, unsigned int *seq_id)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	nid_t nid;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = ino_of_node(page),
		.type = NODE,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.page = page,
		.encrypted_page = NULL,
		.submitted = false,
		.io_type = io_type,
		.io_wbc = wbc,
	};
	unsigned int seq;

	trace_f2fs_writepage(page, NODE);

	if (unlikely(f2fs_cp_error(sbi)))
		goto redirty_out;

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
			wbc->sync_mode == WB_SYNC_NONE &&
			IS_DNODE(page) && is_cold_node(page))
		goto redirty_out;

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(sbi, page->index != nid);

	if (f2fs_get_node_info(sbi, nid, &ni))
		goto redirty_out;

	if (wbc->for_reclaim) {
		if (!down_read_trylock(&sbi->node_write))
			goto redirty_out;
	} else {
		down_read(&sbi->node_write);
	}

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		up_read(&sbi->node_write);
		unlock_page(page);
		return 0;
	}

	if (__is_valid_data_blkaddr(ni.blk_addr) &&
		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
					DATA_GENERIC_ENHANCE)) {
		up_read(&sbi->node_write);
		goto redirty_out;
	}

	if (atomic && !test_opt(sbi, NOBARRIER))
		fio.op_flags |= REQ_PREFLUSH | REQ_FUA;

	/* should add to global list before clearing PAGECACHE status */
	if (f2fs_in_warm_node_list(sbi, page)) {
		seq = f2fs_add_fsync_node_entry(sbi, page);
		if (seq_id)
			*seq_id = seq;
	}

	set_page_writeback(page);
	ClearPageError(page);

	fio.old_blkaddr = ni.blk_addr;
	f2fs_do_write_node_page(nid, &fio);
	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	up_read(&sbi->node_write);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
		submitted = NULL;
	}

	unlock_page(page);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, NODE);
		submitted = NULL;
	}
	if (submitted)
		*submitted = fio.submitted;

	if (do_balance)
		f2fs_balance_fs(sbi, false);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

int f2fs_move_node_page(struct page *node_page, int gc_type)
{
	int err = 0;

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = 1,
			.for_reclaim = 0,
		};

		f2fs_wait_on_page_writeback(node_page, NODE, true, true);

		set_page_dirty(node_page);

		if (!clear_page_dirty_for_io(node_page)) {
			err = -EAGAIN;
			goto out_page;
		}

		if (__write_node_page(node_page, false, NULL,
					&wbc, false, FS_GC_NODE_IO, NULL)) {
			err = -EAGAIN;
			unlock_page(node_page);
		}
		goto release_page;
	} else {
		/* set page dirty and write it */
		if (!PageWriteback(node_page))
			set_page_dirty(node_page);
	}
out_page:
	unlock_page(node_page);
release_page:
	f2fs_put_page(node_page, 0);
	return err;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	return __write_node_page(page, false, NULL, wbc, false,
						FS_NODE_IO, NULL);
}

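/*
 * Write back the dirty node pages that belong to @ino for fsync; when
 * @atomic is true, only the last dnode carries the fsync mark so that
 * recovery can tell whether the whole chain made it to disk.
 */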
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
			struct writeback_control *wbc, bool atomic,
			unsigned int *seq_id)
{
	pgoff_t index;
	struct pagevec pvec;
	int ret = 0;
	struct page *last_page = NULL;
	bool marked = false;
	nid_t ino = inode->i_ino;
	int nr_pages;
	int nwritten = 0;

	if (atomic) {
		last_page = last_fsync_dnode(sbi, ino);
		if (IS_ERR_OR_NULL(last_page))
			return PTR_ERR_OR_ZERO(last_page);
	}
retry:
	pagevec_init(&pvec);
	index = 0;

	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				pagevec_release(&pvec);
				ret = -EIO;
				goto out;
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page) && page != last_page) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			f2fs_wait_on_page_writeback(page, NODE, true, true);

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			if (!atomic || page == last_page) {
				set_fsync_mark(page, 1);
				if (IS_INODE(page)) {
					if (is_inode_flag_set(inode,
								FI_DIRTY_INODE))
						f2fs_update_inode(inode, page);
					set_dentry_mark(page,
						f2fs_need_dentry_mark(sbi, ino));
				}
				/* may be written by other thread */
				if (!PageDirty(page))
					set_page_dirty(page);
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __write_node_page(page, atomic &&
						page == last_page,
						&submitted, wbc, true,
						FS_NODE_IO, seq_id);
			if (ret) {
				unlock_page(page);
				f2fs_put_page(last_page, 0);
				break;
			} else if (submitted) {
				nwritten++;
			}

			if (page == last_page) {
				f2fs_put_page(page, 0);
				marked = true;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();

		if (ret || marked)
			break;
	}
	if (!ret && atomic && !marked) {
		f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
			   ino, last_page->index);
		lock_page(last_page);
		f2fs_wait_on_page_writeback(last_page, NODE, true, true);
		set_page_dirty(last_page);
		unlock_page(last_page);
		goto retry;
	}
out:
	if (nwritten)
		f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
	return ret ? -EIO : 0;
}

static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool clean;

	if (inode->i_ino != ino)
		return 0;

	if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	clean = list_empty(&F2FS_I(inode)->gdirty_list);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);

	if (clean)
		return 0;

	inode = igrab(inode);
	if (!inode)
		return 0;
	return 1;
}

static bool flush_dirty_inode(struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct inode *inode;
	nid_t ino = ino_of_node(page);

	inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL);
	if (!inode)
		return false;

	f2fs_update_inode(inode, page);
	unlock_page(page);

	iput(inode);
	return true;
}

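/*
 * Write back dirty node pages in three passes: indirect nodes first,
 * then dentry dnodes, then file dnodes, so that the data most critical
 * to recovery is flushed last.
 */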
int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
				struct writeback_control *wbc,
				bool do_balance, enum iostat_type io_type)
{
	pgoff_t index;
	struct pagevec pvec;
	int step = 0;
	int nwritten = 0;
	int ret = 0;
	int nr_pages, done = 0;

	pagevec_init(&pvec);

next_step:
	index = 0;

	while (!done && (nr_pages = pagevec_lookup_tag(&pvec,
			NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;
			bool may_dirty = true;

			/* give a priority to WB_SYNC threads */
			if (atomic_read(&sbi->wb_sync_req[NODE]) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;
lock_node:
			if (wbc->sync_mode == WB_SYNC_ALL)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* flush inline_data */
			if (is_inline_node(page)) {
				clear_inline_node(page);
				unlock_page(page);
				flush_inline_data(sbi, ino_of_node(page));
				goto lock_node;
			}

			/* flush dirty inode */
			if (IS_INODE(page) && may_dirty) {
				may_dirty = false;
				if (flush_dirty_inode(page))
					goto lock_node;
			}

			f2fs_wait_on_page_writeback(page, NODE, true, true);

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			ret = __write_node_page(page, false, &submitted,
						wbc, do_balance, io_type, NULL);
			if (ret)
				unlock_page(page);
			else if (submitted)
				nwritten++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
				wbc->sync_mode == WB_SYNC_NONE && step == 1)
			goto out;
		step++;
		goto next_step;
	}
out:
	if (nwritten)
		f2fs_submit_merged_write(sbi, NODE);

	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;
	return ret;
}

int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
						unsigned int seq_id)
{
	struct fsync_node_entry *fn;
	struct page *page;
	struct list_head *head = &sbi->fsync_node_list;
	unsigned long flags;
	unsigned int cur_seq_id = 0;
	int ret2, ret = 0;

	while (seq_id && cur_seq_id < seq_id) {
		spin_lock_irqsave(&sbi->fsync_node_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			break;
		}
		fn = list_first_entry(head, struct fsync_node_entry, list);
		if (fn->seq_id > seq_id) {
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			break;
		}
		cur_seq_id = fn->seq_id;
		page = fn->page;
		get_page(page);
		spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

		f2fs_wait_on_page_writeback(page, NODE, true, false);
		if (TestClearPageError(page))
			ret = -EIO;

		put_page(page);

		if (ret)
			break;
	}

	ret2 = filemap_check_errors(NODE_MAPPING(sbi));
	if (!ret)
		ret = ret2;

	return ret;
}

static int f2fs_write_node_pages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	struct blk_plug plug;
	long diff;

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi);

	/* collect a number of dirty node pages and write together */
	if (wbc->sync_mode != WB_SYNC_ALL &&
			get_pages(sbi, F2FS_DIRTY_NODES) <
					nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req[NODE]);
	else if (atomic_read(&sbi->wb_sync_req[NODE]))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	diff = nr_pages_to_write(sbi, NODE, wbc);
	blk_start_plug(&plug);
	f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
	blk_finish_plug(&plug);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	trace_f2fs_writepages(mapping->host, wbc, NODE);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	trace_f2fs_set_page_dirty(page, NODE);

	if (!PageUptodate(page))
		SetPageUptodate(page);
#ifdef CONFIG_F2FS_CHECK_FS
	if (IS_INODE(page))
		f2fs_inode_chksum_set(F2FS_P_SB(page), page);
#endif
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
		f2fs_set_page_private(page, 0);
		f2fs_trace_pid(page);
		return 1;
	}
	return 0;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
#ifdef CONFIG_MIGRATION
	.migratepage	= f2fs_migrate_page,
#endif
};

static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
						nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}

static int __insert_free_nid(struct f2fs_sb_info *sbi,
			struct free_nid *i, enum nid_state state)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
	if (err)
		return err;

	f2fs_bug_on(sbi, state != i->state);
	nm_i->nid_cnt[state]++;
	if (state == FREE_NID)
		list_add_tail(&i->list, &nm_i->free_nid_list);
	return 0;
}

static void __remove_free_nid(struct f2fs_sb_info *sbi,
			struct free_nid *i, enum nid_state state)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	f2fs_bug_on(sbi, state != i->state);
	nm_i->nid_cnt[state]--;
	if (state == FREE_NID)
		list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
}

static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
			enum nid_state org_state, enum nid_state dst_state)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	f2fs_bug_on(sbi, org_state != i->state);
	i->state = dst_state;
	nm_i->nid_cnt[org_state]--;
	nm_i->nid_cnt[dst_state]++;

	switch (dst_state) {
	case PREALLOC_NID:
		list_del(&i->list);
		break;
	case FREE_NID:
		list_add_tail(&i->list, &nm_i->free_nid_list);
		break;
	default:
		BUG_ON(1);
	}
}

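/*
 * Track the free-nid state of @nid in the per-NAT-block free nid bitmap;
 * @build is true while the bitmap is being (re)built from disk.
 */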
2104 static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
2105 bool set, bool build)
2106 {
2107 struct f2fs_nm_info *nm_i = NM_I(sbi);
2108 unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
2109 unsigned int nid_ofs = nid - START_NID(nid);
2110
2111 if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
2112 return;
2113
2114 if (set) {
2115 if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2116 return;
2117 __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2118 nm_i->free_nid_count[nat_ofs]++;
2119 } else {
2120 if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2121 return;
2122 __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2123 if (!build)
2124 nm_i->free_nid_count[nat_ofs]--;
2125 }
2126 }
2127
2128
2129 static bool add_free_nid(struct f2fs_sb_info *sbi,
2130 nid_t nid, bool build, bool update)
2131 {
2132 struct f2fs_nm_info *nm_i = NM_I(sbi);
2133 struct free_nid *i, *e;
2134 struct nat_entry *ne;
2135 int err = -EINVAL;
2136 bool ret = false;
2137
2138
2139 if (unlikely(nid == 0))
2140 return false;
2141
2142 if (unlikely(f2fs_check_nid_range(sbi, nid)))
2143 return false;
2144
2145 i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
2146 i->nid = nid;
2147 i->state = FREE_NID;
2148
2149 radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
2150
2151 spin_lock(&nm_i->nid_list_lock);
2152
2153 if (build) {
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175 ne = __lookup_nat_cache(nm_i, nid);
2176 if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
2177 nat_get_blkaddr(ne) != NULL_ADDR))
2178 goto err_out;
2179
2180 e = __lookup_free_nid_list(nm_i, nid);
2181 if (e) {
2182 if (e->state == FREE_NID)
2183 ret = true;
2184 goto err_out;
2185 }
2186 }
2187 ret = true;
2188 err = __insert_free_nid(sbi, i, FREE_NID);
2189 err_out:
2190 if (update) {
2191 update_free_nid_bitmap(sbi, nid, ret, build);
2192 if (!build)
2193 nm_i->available_nids++;
2194 }
2195 spin_unlock(&nm_i->nid_list_lock);
2196 radix_tree_preload_end();
2197
2198 if (err)
2199 kmem_cache_free(free_nid_slab, i);
2200 return ret;
2201 }
2202
2203 static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
2204 {
2205 struct f2fs_nm_info *nm_i = NM_I(sbi);
2206 struct free_nid *i;
2207 bool need_free = false;
2208
2209 spin_lock(&nm_i->nid_list_lock);
2210 i = __lookup_free_nid_list(nm_i, nid);
2211 if (i && i->state == FREE_NID) {
2212 __remove_free_nid(sbi, i, FREE_NID);
2213 need_free = true;
2214 }
2215 spin_unlock(&nm_i->nid_list_lock);
2216
2217 if (need_free)
2218 kmem_cache_free(free_nid_slab, i);
2219 }
2220
2221 static int scan_nat_page(struct f2fs_sb_info *sbi,
2222 struct page *nat_page, nid_t start_nid)
2223 {
2224 struct f2fs_nm_info *nm_i = NM_I(sbi);
2225 struct f2fs_nat_block *nat_blk = page_address(nat_page);
2226 block_t blk_addr;
2227 unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
2228 int i;
2229
2230 __set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
2231
2232 i = start_nid % NAT_ENTRY_PER_BLOCK;
2233
2234 for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
2235 if (unlikely(start_nid >= nm_i->max_nid))
2236 break;
2237
2238 blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
2239
2240 if (blk_addr == NEW_ADDR)
2241 return -EINVAL;
2242
2243 if (blk_addr == NULL_ADDR) {
2244 add_free_nid(sbi, start_nid, true, true);
2245 } else {
2246 spin_lock(&NM_I(sbi)->nid_list_lock);
2247 update_free_nid_bitmap(sbi, start_nid, false, true);
2248 spin_unlock(&NM_I(sbi)->nid_list_lock);
2249 }
2250 }
2251
2252 return 0;
2253 }
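/*
 * Note (annotation): an on-disk NAT entry should never hold NEW_ADDR;
 * that value exists only transiently in memory for freshly allocated,
 * not-yet-written nodes. Seeing it here means the NAT is corrupted,
 * hence the -EINVAL that __f2fs_build_free_nids() reports as a request
 * to run fsck.
 */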
2254
2255 static void scan_curseg_cache(struct f2fs_sb_info *sbi)
2256 {
2257 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2258 struct f2fs_journal *journal = curseg->journal;
2259 int i;
2260
2261 down_read(&curseg->journal_rwsem);
2262 for (i = 0; i < nats_in_cursum(journal); i++) {
2263 block_t addr;
2264 nid_t nid;
2265
2266 addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
2267 nid = le32_to_cpu(nid_in_journal(journal, i));
2268 if (addr == NULL_ADDR)
2269 add_free_nid(sbi, nid, true, false);
2270 else
2271 remove_free_nid(sbi, nid);
2272 }
2273 up_read(&curseg->journal_rwsem);
2274 }
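/*
 * Note (annotation): NAT entries staged in the hot-data curseg journal
 * are newer than the on-disk NAT blocks, so this pass both adds nids the
 * journal has freed and retracts nids the journal has reallocated.
 */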
2275
2276 static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
2277 {
2278 struct f2fs_nm_info *nm_i = NM_I(sbi);
2279 unsigned int i, idx;
2280 nid_t nid;
2281
2282 down_read(&nm_i->nat_tree_lock);
2283
2284 for (i = 0; i < nm_i->nat_blocks; i++) {
2285 if (!test_bit_le(i, nm_i->nat_block_bitmap))
2286 continue;
2287 if (!nm_i->free_nid_count[i])
2288 continue;
2289 for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
2290 idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
2291 NAT_ENTRY_PER_BLOCK, idx);
2292 if (idx >= NAT_ENTRY_PER_BLOCK)
2293 break;
2294
2295 nid = i * NAT_ENTRY_PER_BLOCK + idx;
2296 add_free_nid(sbi, nid, true, false);
2297
2298 if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
2299 goto out;
2300 }
2301 }
2302 out:
2303 scan_curseg_cache(sbi);
2304
2305 up_read(&nm_i->nat_tree_lock);
2306 }
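/*
 * Summary (annotation): free nids are gathered from three sources: the
 * cached free_nid_bitmap (this function, the cheap path), raw NAT pages
 * (scan_nat_page(), the expensive path), and the curseg NAT journal
 * (scan_curseg_cache()), which is always consulted last because it holds
 * the newest state.
 */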
2307
2308 static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
2309 bool sync, bool mount)
2310 {
2311 struct f2fs_nm_info *nm_i = NM_I(sbi);
2312 int i = 0, ret;
2313 nid_t nid = nm_i->next_scan_nid;
2314
2315 if (unlikely(nid >= nm_i->max_nid))
2316 nid = 0;
2317
2318 /* Enough entries */
2319 if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2320 return 0;
2321
2322 if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
2323 return 0;
2324
2325 if (!mount) {
2326 /* try to find free nids in free_nid_bitmap */
2327 scan_free_nid_bits(sbi);
2328
2329 if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2330 return 0;
2331 }
2332
2333 /* readahead nat pages to be scanned */
2334 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
2335 META_NAT, true);
2336
2337 down_read(&nm_i->nat_tree_lock);
2338
2339 while (1) {
2340 if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
2341 nm_i->nat_block_bitmap)) {
2342 struct page *page = get_current_nat_page(sbi, nid);
2343
2344 if (IS_ERR(page)) {
2345 ret = PTR_ERR(page);
2346 } else {
2347 ret = scan_nat_page(sbi, page, nid);
2348 f2fs_put_page(page, 1);
2349 }
2350
2351 if (ret) {
2352 up_read(&nm_i->nat_tree_lock);
2353 f2fs_bug_on(sbi, !mount);
2354 f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
2355 return ret;
2356 }
2357 }
2358
2359 nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
2360 if (unlikely(nid >= nm_i->max_nid))
2361 nid = 0;
2362
2363 if (++i >= FREE_NID_PAGES)
2364 break;
2365 }
2366
2367 /* go to the next free nat pages to find free nids abundantly */
2368 nm_i->next_scan_nid = nid;
2369
2370 /* find free nids from current sum_pages */
2371 scan_curseg_cache(sbi);
2372
2373 up_read(&nm_i->nat_tree_lock);
2374
2375 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
2376 nm_i->ra_nid_pages, META_NAT, false);
2377
2378 return 0;
2379 }
2380
2381 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
2382 {
2383 int ret;
2384
2385 mutex_lock(&NM_I(sbi)->build_lock);
2386 ret = __f2fs_build_free_nids(sbi, sync, mount);
2387 mutex_unlock(&NM_I(sbi)->build_lock);
2388
2389 return ret;
2390 }
2391
2392 /*
2393 * If this function returns success, the caller can obtain a new nid
2394 * from the second parameter of this function.
2395 * The returned nid can be used as an ino as well as a nid when the inode is created.
2396 */
2397 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
2398 {
2399 struct f2fs_nm_info *nm_i = NM_I(sbi);
2400 struct free_nid *i = NULL;
2401 retry:
2402 if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
2403 f2fs_show_injection_info(FAULT_ALLOC_NID);
2404 return false;
2405 }
2406
2407 spin_lock(&nm_i->nid_list_lock);
2408
2409 if (unlikely(nm_i->available_nids == 0)) {
2410 spin_unlock(&nm_i->nid_list_lock);
2411 return false;
2412 }
2413
2414 /* We should not use stale free nids created by f2fs_build_free_nids */
2415 if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
2416 f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
2417 i = list_first_entry(&nm_i->free_nid_list,
2418 struct free_nid, list);
2419 *nid = i->nid;
2420
2421 __move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
2422 nm_i->available_nids--;
2423
2424 update_free_nid_bitmap(sbi, *nid, false, false);
2425
2426 spin_unlock(&nm_i->nid_list_lock);
2427 return true;
2428 }
2429 spin_unlock(&nm_i->nid_list_lock);
2430
2431 /* Let's scan nat pages and its caches to get free nids */
2432 if (!f2fs_build_free_nids(sbi, true, false))
2433 goto retry;
2434 return false;
2435 }
2436
2437 /*
2438 * f2fs_alloc_nid() should be called prior to this function.
2439 */
2440 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
2441 {
2442 struct f2fs_nm_info *nm_i = NM_I(sbi);
2443 struct free_nid *i;
2444
2445 spin_lock(&nm_i->nid_list_lock);
2446 i = __lookup_free_nid_list(nm_i, nid);
2447 f2fs_bug_on(sbi, !i);
2448 __remove_free_nid(sbi, i, PREALLOC_NID);
2449 spin_unlock(&nm_i->nid_list_lock);
2450
2451 kmem_cache_free(free_nid_slab, i);
2452 }
2453
2454 /*
2455 * f2fs_alloc_nid() should be called prior to this function.
2456 */
2457 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
2458 {
2459 struct f2fs_nm_info *nm_i = NM_I(sbi);
2460 struct free_nid *i;
2461 bool need_free = false;
2462
2463 if (!nid)
2464 return;
2465
2466 spin_lock(&nm_i->nid_list_lock);
2467 i = __lookup_free_nid_list(nm_i, nid);
2468 f2fs_bug_on(sbi, !i);
2469
2470 if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
2471 __remove_free_nid(sbi, i, PREALLOC_NID);
2472 need_free = true;
2473 } else {
2474 __move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
2475 }
2476
2477 nm_i->available_nids++;
2478
2479 update_free_nid_bitmap(sbi, nid, true, false);
2480
2481 spin_unlock(&nm_i->nid_list_lock);
2482
2483 if (need_free)
2484 kmem_cache_free(free_nid_slab, i);
2485 }
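/*
 * Caller sketch (annotation; use_the_nid() is hypothetical, the three nid
 * helpers are the real ones above): every successful f2fs_alloc_nid()
 * must be paired with exactly one of f2fs_alloc_nid_done() or
 * f2fs_alloc_nid_failed(), as f2fs_recover_xattr_data() below
 * demonstrates:
 *
 *	nid_t nid;
 *	struct page *page;
 *
 *	if (!f2fs_alloc_nid(sbi, &nid))
 *		return -ENOSPC;			// no usable nids right now
 *	page = use_the_nid(sbi, nid);		// hypothetical consumer
 *	if (IS_ERR(page)) {
 *		f2fs_alloc_nid_failed(sbi, nid);	// back to FREE_NID
 *		return PTR_ERR(page);
 *	}
 *	f2fs_alloc_nid_done(sbi, nid);		// nid permanently in use
 */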
2486
2487 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
2488 {
2489 struct f2fs_nm_info *nm_i = NM_I(sbi);
2490 struct free_nid *i, *next;
2491 int nr = nr_shrink;
2492
2493 if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2494 return 0;
2495
2496 if (!mutex_trylock(&nm_i->build_lock))
2497 return 0;
2498
2499 spin_lock(&nm_i->nid_list_lock);
2500 list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
2501 if (nr_shrink <= 0 ||
2502 nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2503 break;
2504
2505 __remove_free_nid(sbi, i, FREE_NID);
2506 kmem_cache_free(free_nid_slab, i);
2507 nr_shrink--;
2508 }
2509 spin_unlock(&nm_i->nid_list_lock);
2510 mutex_unlock(&nm_i->build_lock);
2511
2512 return nr - nr_shrink;
2513 }
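/*
 * Note (annotation): this is the shrinker entry point for the free nid
 * cache; it trims FREE_NID entries down toward MAX_FREE_NIDS, returns the
 * number actually released, and backs off entirely when a concurrent
 * build holds build_lock.
 */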
2514
2515 void f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
2516 {
2517 void *src_addr, *dst_addr;
2518 size_t inline_size;
2519 struct page *ipage;
2520 struct f2fs_inode *ri;
2521
2522 ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
2523 f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));
2524
2525 ri = F2FS_INODE(page);
2526 if (ri->i_inline & F2FS_INLINE_XATTR) {
2527 set_inode_flag(inode, FI_INLINE_XATTR);
2528 } else {
2529 clear_inode_flag(inode, FI_INLINE_XATTR);
2530 goto update_inode;
2531 }
2532
2533 dst_addr = inline_xattr_addr(inode, ipage);
2534 src_addr = inline_xattr_addr(inode, page);
2535 inline_size = inline_xattr_size(inode);
2536
2537 f2fs_wait_on_page_writeback(ipage, NODE, true, true);
2538 memcpy(dst_addr, src_addr, inline_size);
2539 update_inode:
2540 f2fs_update_inode(inode, ipage);
2541 f2fs_put_page(ipage, 1);
2542 }
2543
2544 int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
2545 {
2546 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2547 nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
2548 nid_t new_xnid;
2549 struct dnode_of_data dn;
2550 struct node_info ni;
2551 struct page *xpage;
2552 int err;
2553
2554 if (!prev_xnid)
2555 goto recover_xnid;
2556
2557 /* 1: invalidate the previous xattr nid */
2558 err = f2fs_get_node_info(sbi, prev_xnid, &ni);
2559 if (err)
2560 return err;
2561
2562 f2fs_invalidate_blocks(sbi, ni.blk_addr);
2563 dec_valid_node_count(sbi, inode, false);
2564 set_node_addr(sbi, &ni, NULL_ADDR, false);
2565
2566 recover_xnid:
2567 /* 2: update xattr nid in inode */
2568 if (!f2fs_alloc_nid(sbi, &new_xnid))
2569 return -ENOSPC;
2570
2571 set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
2572 xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
2573 if (IS_ERR(xpage)) {
2574 f2fs_alloc_nid_failed(sbi, new_xnid);
2575 return PTR_ERR(xpage);
2576 }
2577
2578 f2fs_alloc_nid_done(sbi, new_xnid);
2579 f2fs_update_inode_page(inode);
2580
2581 /* 3: update and set xattr node page dirty */
2582 memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);
2583
2584 set_page_dirty(xpage);
2585 f2fs_put_page(xpage, 1);
2586
2587 return 0;
2588 }
2589
2590 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
2591 {
2592 struct f2fs_inode *src, *dst;
2593 nid_t ino = ino_of_node(page);
2594 struct node_info old_ni, new_ni;
2595 struct page *ipage;
2596 int err;
2597
2598 err = f2fs_get_node_info(sbi, ino, &old_ni);
2599 if (err)
2600 return err;
2601
2602 if (unlikely(old_ni.blk_addr != NULL_ADDR))
2603 return -EINVAL;
2604 retry:
2605 ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
2606 if (!ipage) {
2607 congestion_wait(BLK_RW_ASYNC, HZ/50);
2608 goto retry;
2609 }
2610
2611 /* Should not use this inode from free nid list */
2612 remove_free_nid(sbi, ino);
2613
2614 if (!PageUptodate(ipage))
2615 SetPageUptodate(ipage);
2616 fill_node_footer(ipage, ino, ino, 0, true);
2617 set_cold_node(ipage, false);
2618
2619 src = F2FS_INODE(page);
2620 dst = F2FS_INODE(ipage);
2621
2622 memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
2623 dst->i_size = 0;
2624 dst->i_blocks = cpu_to_le64(1);
2625 dst->i_links = cpu_to_le32(1);
2626 dst->i_xattr_nid = 0;
2627 dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
2628 if (dst->i_inline & F2FS_EXTRA_ATTR) {
2629 dst->i_extra_isize = src->i_extra_isize;
2630
2631 if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
2632 F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2633 i_inline_xattr_size))
2634 dst->i_inline_xattr_size = src->i_inline_xattr_size;
2635
2636 if (f2fs_sb_has_project_quota(sbi) &&
2637 F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2638 i_projid))
2639 dst->i_projid = src->i_projid;
2640
2641 if (f2fs_sb_has_inode_crtime(sbi) &&
2642 F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2643 i_crtime_nsec)) {
2644 dst->i_crtime = src->i_crtime;
2645 dst->i_crtime_nsec = src->i_crtime_nsec;
2646 }
2647 }
2648
2649 new_ni = old_ni;
2650 new_ni.ino = ino;
2651
2652 if (unlikely(inc_valid_node_count(sbi, NULL, true)))
2653 WARN_ON(1);
2654 set_node_addr(sbi, &new_ni, NEW_ADDR, false);
2655 inc_valid_inode_count(sbi);
2656 set_page_dirty(ipage);
2657 f2fs_put_page(ipage, 1);
2658 return 0;
2659 }
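/*
 * Note (annotation): the inode page rebuilt here is deliberately a
 * skeleton. i_size, i_blocks, i_links and the xattr nid are reset
 * because roll-forward recovery replays the fsynced dnodes afterwards,
 * while the extra-attr fields are copied up front since they determine
 * the inode's on-disk layout.
 */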
2660
2661 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
2662 unsigned int segno, struct f2fs_summary_block *sum)
2663 {
2664 struct f2fs_node *rn;
2665 struct f2fs_summary *sum_entry;
2666 block_t addr;
2667 int i, idx, last_offset, nrpages;
2668
2669 /* scan the node segment */
2670 last_offset = sbi->blocks_per_seg;
2671 addr = START_BLOCK(sbi, segno);
2672 sum_entry = &sum->entries[0];
2673
2674 for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
2675 nrpages = min(last_offset - i, BIO_MAX_PAGES);
2676
2677 /* readahead node pages */
2678 f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);
2679
2680 for (idx = addr; idx < addr + nrpages; idx++) {
2681 struct page *page = f2fs_get_tmp_page(sbi, idx);
2682
2683 if (IS_ERR(page))
2684 return PTR_ERR(page);
2685
2686 rn = F2FS_NODE(page);
2687 sum_entry->nid = rn->footer.nid;
2688 sum_entry->version = 0;
2689 sum_entry->ofs_in_node = 0;
2690 sum_entry++;
2691 f2fs_put_page(page, 1);
2692 }
2693
2694 invalidate_mapping_pages(META_MAPPING(sbi), addr,
2695 addr + nrpages);
2696 }
2697 return 0;
2698 }
2699
2700 static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
2701 {
2702 struct f2fs_nm_info *nm_i = NM_I(sbi);
2703 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2704 struct f2fs_journal *journal = curseg->journal;
2705 int i;
2706
2707 down_write(&curseg->journal_rwsem);
2708 for (i = 0; i < nats_in_cursum(journal); i++) {
2709 struct nat_entry *ne;
2710 struct f2fs_nat_entry raw_ne;
2711 nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
2712
2713 raw_ne = nat_in_journal(journal, i);
2714
2715 ne = __lookup_nat_cache(nm_i, nid);
2716 if (!ne) {
2717 ne = __alloc_nat_entry(nid, true);
2718 __init_nat_entry(nm_i, ne, &raw_ne, true);
2719 }
2720
2721 /*
2722 * If a free nat in the journal has not been reused since the last
2723 * checkpoint, remove it from the available nids here, since it will
2724 * be added back again when the entry is flushed.
2725 */
2726 if (!get_nat_flag(ne, IS_DIRTY) &&
2727 le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
2728 spin_lock(&nm_i->nid_list_lock);
2729 nm_i->available_nids--;
2730 spin_unlock(&nm_i->nid_list_lock);
2731 }
2732
2733 __set_nat_cache_dirty(nm_i, ne);
2734 }
2735 update_nats_in_cursum(journal, -i);
2736 up_write(&curseg->journal_rwsem);
2737 }
2738
2739 static void __adjust_nat_entry_set(struct nat_entry_set *nes,
2740 struct list_head *head, int max)
2741 {
2742 struct nat_entry_set *cur;
2743
2744 if (nes->entry_cnt >= max)
2745 goto add_out;
2746
2747 list_for_each_entry(cur, head, set_list) {
2748 if (cur->entry_cnt >= nes->entry_cnt) {
2749 list_add(&nes->set_list, cur->set_list.prev);
2750 return;
2751 }
2752 }
2753 add_out:
2754 list_add_tail(&nes->set_list, head);
2755 }
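/*
 * Note (annotation): the resulting list ends up sorted by ascending
 * entry_cnt, with oversized sets appended at the tail, so the smallest
 * sets are flushed first and as many complete sets as possible fit into
 * the curseg journal before the rest fall back to NAT page writes.
 */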
2756
2757 static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
2758 struct page *page)
2759 {
2760 struct f2fs_nm_info *nm_i = NM_I(sbi);
2761 unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
2762 struct f2fs_nat_block *nat_blk = page_address(page);
2763 int valid = 0;
2764 int i = 0;
2765
2766 if (!enabled_nat_bits(sbi, NULL))
2767 return;
2768
2769 if (nat_index == 0) {
2770 valid = 1;
2771 i = 1;
2772 }
2773 for (; i < NAT_ENTRY_PER_BLOCK; i++) {
2774 if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR)
2775 valid++;
2776 }
2777 if (valid == 0) {
2778 __set_bit_le(nat_index, nm_i->empty_nat_bits);
2779 __clear_bit_le(nat_index, nm_i->full_nat_bits);
2780 return;
2781 }
2782
2783 __clear_bit_le(nat_index, nm_i->empty_nat_bits);
2784 if (valid == NAT_ENTRY_PER_BLOCK)
2785 __set_bit_le(nat_index, nm_i->full_nat_bits);
2786 else
2787 __clear_bit_le(nat_index, nm_i->full_nat_bits);
2788 }
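/*
 * Example (annotation): with nat_bits enabled, each NAT block carries two
 * summary bits. A block whose entries are all NULL_ADDR sets its
 * empty_nat_bits bit; a block with no NULL_ADDR entry sets its
 * full_nat_bits bit; anything in between clears both. Block 0 counts
 * nid 0 as valid up front, so it can never be reported empty.
 */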
2789
2790 static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
2791 struct nat_entry_set *set, struct cp_control *cpc)
2792 {
2793 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2794 struct f2fs_journal *journal = curseg->journal;
2795 nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
2796 bool to_journal = true;
2797 struct f2fs_nat_block *nat_blk;
2798 struct nat_entry *ne, *cur;
2799 struct page *page = NULL;
2800
2801 /*
2802 * There are two steps to flush nat entries:
2803 * #1, flush nat entries to the journal in the current hot data summary block.
2804 * #2, flush nat entries to the nat page.
2805 */
2806 if (enabled_nat_bits(sbi, cpc) ||
2807 !__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
2808 to_journal = false;
2809
2810 if (to_journal) {
2811 down_write(&curseg->journal_rwsem);
2812 } else {
2813 page = get_next_nat_page(sbi, start_nid);
2814 if (IS_ERR(page))
2815 return PTR_ERR(page);
2816
2817 nat_blk = page_address(page);
2818 f2fs_bug_on(sbi, !nat_blk);
2819 }
2820
2821 /* flush dirty nats in nat entry set */
2822 list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
2823 struct f2fs_nat_entry *raw_ne;
2824 nid_t nid = nat_get_nid(ne);
2825 int offset;
2826
2827 f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);
2828
2829 if (to_journal) {
2830 offset = f2fs_lookup_journal_in_cursum(journal,
2831 NAT_JOURNAL, nid, 1);
2832 f2fs_bug_on(sbi, offset < 0);
2833 raw_ne = &nat_in_journal(journal, offset);
2834 nid_in_journal(journal, offset) = cpu_to_le32(nid);
2835 } else {
2836 raw_ne = &nat_blk->entries[nid - start_nid];
2837 }
2838 raw_nat_from_node_info(raw_ne, &ne->ni);
2839 nat_reset_flag(ne);
2840 __clear_nat_cache_dirty(NM_I(sbi), set, ne);
2841 if (nat_get_blkaddr(ne) == NULL_ADDR) {
2842 add_free_nid(sbi, nid, false, true);
2843 } else {
2844 spin_lock(&NM_I(sbi)->nid_list_lock);
2845 update_free_nid_bitmap(sbi, nid, false, false);
2846 spin_unlock(&NM_I(sbi)->nid_list_lock);
2847 }
2848 }
2849
2850 if (to_journal) {
2851 up_write(&curseg->journal_rwsem);
2852 } else {
2853 __update_nat_bits(sbi, start_nid, page);
2854 f2fs_put_page(page, 1);
2855 }
2856
2857 /* Allow dirty nats by node block allocation in write_begin */
2858 if (!set->entry_cnt) {
2859 radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
2860 kmem_cache_free(nat_entry_set_slab, set);
2861 }
2862 return 0;
2863 }
2864
2865 /*
2866 * This function is called during the checkpointing process.
2867 */
2868 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
2869 {
2870 struct f2fs_nm_info *nm_i = NM_I(sbi);
2871 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2872 struct f2fs_journal *journal = curseg->journal;
2873 struct nat_entry_set *setvec[SETVEC_SIZE];
2874 struct nat_entry_set *set, *tmp;
2875 unsigned int found;
2876 nid_t set_idx = 0;
2877 LIST_HEAD(sets);
2878 int err = 0;
2879
2880 /* during unmount, let's flush nat_bits before checking dirty_nat_cnt */
2881 if (enabled_nat_bits(sbi, cpc)) {
2882 down_write(&nm_i->nat_tree_lock);
2883 remove_nats_in_journal(sbi);
2884 up_write(&nm_i->nat_tree_lock);
2885 }
2886
2887 if (!nm_i->dirty_nat_cnt)
2888 return 0;
2889
2890 down_write(&nm_i->nat_tree_lock);
2891
2892 /*
2893 * If there is not enough space in the journal to store all dirty nat
2894 * entries, remove every entry from the journal and merge them
2895 * into the nat entry set.
2896 */
2897 if (enabled_nat_bits(sbi, cpc) ||
2898 !__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL))
2899 remove_nats_in_journal(sbi);
2900
2901 while ((found = __gang_lookup_nat_set(nm_i,
2902 set_idx, SETVEC_SIZE, setvec))) {
2903 unsigned idx;
2904 set_idx = setvec[found - 1]->set + 1;
2905 for (idx = 0; idx < found; idx++)
2906 __adjust_nat_entry_set(setvec[idx], &sets,
2907 MAX_NAT_JENTRIES(journal));
2908 }
2909
2910 /* flush dirty nats in nat entry set */
2911 list_for_each_entry_safe(set, tmp, &sets, set_list) {
2912 err = __flush_nat_entry_set(sbi, set, cpc);
2913 if (err)
2914 break;
2915 }
2916
2917 up_write(&nm_i->nat_tree_lock);
2918 /* Allow dirty nats by node block allocation in write_begin */
2919
2920 return err;
2921 }
2922
2923 static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
2924 {
2925 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2926 struct f2fs_nm_info *nm_i = NM_I(sbi);
2927 unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
2928 unsigned int i;
2929 __u64 cp_ver = cur_cp_version(ckpt);
2930 block_t nat_bits_addr;
2931
2932 if (!enabled_nat_bits(sbi, NULL))
2933 return 0;
2934
2935 nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
2936 nm_i->nat_bits = f2fs_kzalloc(sbi,
2937 nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
2938 if (!nm_i->nat_bits)
2939 return -ENOMEM;
2940
2941 nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
2942 nm_i->nat_bits_blocks;
2943 for (i = 0; i < nm_i->nat_bits_blocks; i++) {
2944 struct page *page;
2945
2946 page = f2fs_get_meta_page(sbi, nat_bits_addr++);
2947 if (IS_ERR(page))
2948 return PTR_ERR(page);
2949
2950 memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
2951 page_address(page), F2FS_BLKSIZE);
2952 f2fs_put_page(page, 1);
2953 }
2954
2955 cp_ver |= (cur_cp_crc(ckpt) << 32);
2956 if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
2957 disable_nat_bits(sbi, true);
2958 return 0;
2959 }
2960
2961 nm_i->full_nat_bits = nm_i->nat_bits + 8;
2962 nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
2963
2964 f2fs_notice(sbi, "Found nat_bits in checkpoint");
2965 return 0;
2966 }
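/*
 * Layout sketch (annotation, per the offsets used above): the nat_bits
 * blocks at the tail of the checkpoint segment hold, in order, an 8-byte
 * stamp of (cp_ver | crc << 32), then full_nat_bits and empty_nat_bits
 * of nat_blocks / BITS_PER_BYTE bytes each. If the stamp does not match
 * the live checkpoint, the bits are stale and are discarded.
 */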
2967
2968 static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
2969 {
2970 struct f2fs_nm_info *nm_i = NM_I(sbi);
2971 unsigned int i = 0;
2972 nid_t nid, last_nid;
2973
2974 if (!enabled_nat_bits(sbi, NULL))
2975 return;
2976
2977 for (i = 0; i < nm_i->nat_blocks; i++) {
2978 i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
2979 if (i >= nm_i->nat_blocks)
2980 break;
2981
2982 __set_bit_le(i, nm_i->nat_block_bitmap);
2983
2984 nid = i * NAT_ENTRY_PER_BLOCK;
2985 last_nid = nid + NAT_ENTRY_PER_BLOCK;
2986
2987 spin_lock(&NM_I(sbi)->nid_list_lock);
2988 for (; nid < last_nid; nid++)
2989 update_free_nid_bitmap(sbi, nid, true, true);
2990 spin_unlock(&NM_I(sbi)->nid_list_lock);
2991 }
2992
2993 for (i = 0; i < nm_i->nat_blocks; i++) {
2994 i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
2995 if (i >= nm_i->nat_blocks)
2996 break;
2997
2998 __set_bit_le(i, nm_i->nat_block_bitmap);
2999 }
3000 }
3001
3002 static int init_node_manager(struct f2fs_sb_info *sbi)
3003 {
3004 struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
3005 struct f2fs_nm_info *nm_i = NM_I(sbi);
3006 unsigned char *version_bitmap;
3007 unsigned int nat_segs;
3008 int err;
3009
3010 nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
3011
3012 /* segment_count_nat includes a pair of segments, so divide by 2 */
3013 nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
3014 nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
3015 nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;
3016
3017 /* not used nids: 0, node, meta, (and root counted as valid node) */
3018 nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
3019 F2FS_RESERVED_NODE_NUM;
3020 nm_i->nid_cnt[FREE_NID] = 0;
3021 nm_i->nid_cnt[PREALLOC_NID] = 0;
3022 nm_i->nat_cnt = 0;
3023 nm_i->ram_thresh = DEF_RAM_THRESHOLD;
3024 nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
3025 nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
3026
3027 INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
3028 INIT_LIST_HEAD(&nm_i->free_nid_list);
3029 INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
3030 INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
3031 INIT_LIST_HEAD(&nm_i->nat_entries);
3032 spin_lock_init(&nm_i->nat_list_lock);
3033
3034 mutex_init(&nm_i->build_lock);
3035 spin_lock_init(&nm_i->nid_list_lock);
3036 init_rwsem(&nm_i->nat_tree_lock);
3037
3038 nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
3039 nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
3040 version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
3041 if (!version_bitmap)
3042 return -EFAULT;
3043
3044 nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
3045 GFP_KERNEL);
3046 if (!nm_i->nat_bitmap)
3047 return -ENOMEM;
3048
3049 err = __get_nat_bitmaps(sbi);
3050 if (err)
3051 return err;
3052
3053 #ifdef CONFIG_F2FS_CHECK_FS
3054 nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
3055 GFP_KERNEL);
3056 if (!nm_i->nat_bitmap_mir)
3057 return -ENOMEM;
3058 #endif
3059
3060 return 0;
3061 }
3062
3063 static int init_free_nid_cache(struct f2fs_sb_info *sbi)
3064 {
3065 struct f2fs_nm_info *nm_i = NM_I(sbi);
3066 int i;
3067
3068 nm_i->free_nid_bitmap =
3069 f2fs_kzalloc(sbi, array_size(sizeof(unsigned char *),
3070 nm_i->nat_blocks),
3071 GFP_KERNEL);
3072 if (!nm_i->free_nid_bitmap)
3073 return -ENOMEM;
3074
3075 for (i = 0; i < nm_i->nat_blocks; i++) {
3076 nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
3077 f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
3078 if (!nm_i->free_nid_bitmap[i])
3079 return -ENOMEM;
3080 }
3081
3082 nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
3083 GFP_KERNEL);
3084 if (!nm_i->nat_block_bitmap)
3085 return -ENOMEM;
3086
3087 nm_i->free_nid_count =
3088 f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
3089 nm_i->nat_blocks),
3090 GFP_KERNEL);
3091 if (!nm_i->free_nid_count)
3092 return -ENOMEM;
3093 return 0;
3094 }
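/*
 * Sizing note (annotation): per NAT block this allocates one bitmap of
 * NAT_ENTRY_PER_BLOCK bits plus a 16-bit free counter, along with one
 * shared bit in nat_block_bitmap; on a 64-bit build with 4KiB blocks
 * that is about 64 bytes of bitmap and 2 bytes of counter per 455 nids.
 */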
3095
3096 int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
3097 {
3098 int err;
3099
3100 sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
3101 GFP_KERNEL);
3102 if (!sbi->nm_info)
3103 return -ENOMEM;
3104
3105 err = init_node_manager(sbi);
3106 if (err)
3107 return err;
3108
3109 err = init_free_nid_cache(sbi);
3110 if (err)
3111 return err;
3112
3113 /* load free nid status from nat_bits table */
3114 load_free_nid_bitmap(sbi);
3115
3116 return f2fs_build_free_nids(sbi, true, true);
3117 }
3118
3119 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
3120 {
3121 struct f2fs_nm_info *nm_i = NM_I(sbi);
3122 struct free_nid *i, *next_i;
3123 struct nat_entry *natvec[NATVEC_SIZE];
3124 struct nat_entry_set *setvec[SETVEC_SIZE];
3125 nid_t nid = 0;
3126 unsigned int found;
3127
3128 if (!nm_i)
3129 return;
3130
3131 /* destroy free nid list */
3132 spin_lock(&nm_i->nid_list_lock);
3133 list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
3134 __remove_free_nid(sbi, i, FREE_NID);
3135 spin_unlock(&nm_i->nid_list_lock);
3136 kmem_cache_free(free_nid_slab, i);
3137 spin_lock(&nm_i->nid_list_lock);
3138 }
3139 f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
3140 f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
3141 f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
3142 spin_unlock(&nm_i->nid_list_lock);
3143
3144 /* destroy nat cache */
3145 down_write(&nm_i->nat_tree_lock);
3146 while ((found = __gang_lookup_nat_cache(nm_i,
3147 nid, NATVEC_SIZE, natvec))) {
3148 unsigned idx;
3149
3150 nid = nat_get_nid(natvec[found - 1]) + 1;
3151 for (idx = 0; idx < found; idx++) {
3152 spin_lock(&nm_i->nat_list_lock);
3153 list_del(&natvec[idx]->list);
3154 spin_unlock(&nm_i->nat_list_lock);
3155
3156 __del_from_nat_cache(nm_i, natvec[idx]);
3157 }
3158 }
3159 f2fs_bug_on(sbi, nm_i->nat_cnt);
3160
3161 /* destroy nat set cache */
3162 nid = 0;
3163 while ((found = __gang_lookup_nat_set(nm_i,
3164 nid, SETVEC_SIZE, setvec))) {
3165 unsigned idx;
3166
3167 nid = setvec[found - 1]->set + 1;
3168 for (idx = 0; idx < found; idx++) {
3169 /* entry_cnt is nonzero only if a checkpoint error occurred */
3170 f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
3171 radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
3172 kmem_cache_free(nat_entry_set_slab, setvec[idx]);
3173 }
3174 }
3175 up_write(&nm_i->nat_tree_lock);
3176
3177 kvfree(nm_i->nat_block_bitmap);
3178 if (nm_i->free_nid_bitmap) {
3179 int i;
3180
3181 for (i = 0; i < nm_i->nat_blocks; i++)
3182 kvfree(nm_i->free_nid_bitmap[i]);
3183 kvfree(nm_i->free_nid_bitmap);
3184 }
3185 kvfree(nm_i->free_nid_count);
3186
3187 kvfree(nm_i->nat_bitmap);
3188 kvfree(nm_i->nat_bits);
3189 #ifdef CONFIG_F2FS_CHECK_FS
3190 kvfree(nm_i->nat_bitmap_mir);
3191 #endif
3192 sbi->nm_info = NULL;
3193 kvfree(nm_i);
3194 }
3195
3196 int __init f2fs_create_node_manager_caches(void)
3197 {
3198 nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
3199 sizeof(struct nat_entry));
3200 if (!nat_entry_slab)
3201 goto fail;
3202
3203 free_nid_slab = f2fs_kmem_cache_create("free_nid",
3204 sizeof(struct free_nid));
3205 if (!free_nid_slab)
3206 goto destroy_nat_entry;
3207
3208 nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set",
3209 sizeof(struct nat_entry_set));
3210 if (!nat_entry_set_slab)
3211 goto destroy_free_nid;
3212
3213 fsync_node_entry_slab = f2fs_kmem_cache_create("fsync_node_entry",
3214 sizeof(struct fsync_node_entry));
3215 if (!fsync_node_entry_slab)
3216 goto destroy_nat_entry_set;
3217 return 0;
3218
3219 destroy_nat_entry_set:
3220 kmem_cache_destroy(nat_entry_set_slab);
3221 destroy_free_nid:
3222 kmem_cache_destroy(free_nid_slab);
3223 destroy_nat_entry:
3224 kmem_cache_destroy(nat_entry_slab);
3225 fail:
3226 return -ENOMEM;
3227 }
3228
3229 void f2fs_destroy_node_manager_caches(void)
3230 {
3231 kmem_cache_destroy(fsync_node_entry_slab);
3232 kmem_cache_destroy(nat_entry_set_slab);
3233 kmem_cache_destroy(free_nid_slab);
3234 kmem_cache_destroy(nat_entry_slab);
3235 }