Lines matching references to nid in the F2FS node manager (fs/f2fs/node.c); each entry gives the source line number, the matching line, and the enclosing function, with "argument" or "local" marking where nid is declared.
97 static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid) in get_current_nat_page() argument
99 pgoff_t index = current_nat_addr(sbi, nid); in get_current_nat_page()
103 static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid) in get_next_nat_page() argument
113 src_off = current_nat_addr(sbi, nid); in get_next_nat_page()
127 set_to_next_nat(nm_i, nid); in get_next_nat_page()
154 nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid); in __set_nat_cache_dirty()
179 nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid); in __clear_nat_cache_dirty()
198 bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid) in is_checkpointed_node() argument
205 e = __lookup_nat_cache(nm_i, nid); in is_checkpointed_node()
242 static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid) in grab_nat_entry() argument
247 f2fs_radix_tree_insert(&nm_i->nat_root, nid, new); in grab_nat_entry()
249 nat_set_nid(new, nid); in grab_nat_entry()
256 static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid, in cache_nat_entry() argument
262 e = __lookup_nat_cache(nm_i, nid); in cache_nat_entry()
264 e = grab_nat_entry(nm_i, nid); in cache_nat_entry()
277 e = __lookup_nat_cache(nm_i, ni->nid); in set_node_addr()
279 e = grab_nat_entry(nm_i, ni->nid); in set_node_addr()
317 if (fsync_done && ni->nid == ni->ino) in set_node_addr()
346 void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni) in get_node_info() argument
351 nid_t start_nid = START_NID(nid); in get_node_info()
358 ni->nid = nid; in get_node_info()
362 e = __lookup_nat_cache(nm_i, nid); in get_node_info()
376 i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0); in get_node_info()
388 ne = nat_blk->entries[nid - start_nid]; in get_node_info()
393 cache_nat_entry(NM_I(sbi), nid, &ne); in get_node_info()
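The get_node_info() hits above trace the nid-to-node_info resolution path: try the in-memory NAT cache, then the NAT journal in the current summary, then the on-disk NAT block, and finally cache the result. Below is a minimal hedged sketch of a caller, assuming only the signature listed above; blkaddr_of_nid() is a hypothetical helper, not an f2fs function.

        /* Hedged sketch, not f2fs code: resolve a nid to its current block address. */
        static block_t blkaddr_of_nid(struct f2fs_sb_info *sbi, nid_t nid)
        {
                struct node_info ni;

                /* fills ni.nid, ni.ino and ni.blk_addr via cache/journal/NAT block */
                get_node_info(sbi, nid, &ni);
                return ni.blk_addr;
        }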
526 dn->nid = nids[i]; in get_dnode_of_data()
565 dn->nid = nids[level]; in get_dnode_of_data()
586 get_node_info(sbi, dn->nid, &ni); in truncate_node()
598 if (dn->nid == dn->inode->i_ino) { in truncate_node()
599 remove_orphan_inode(sbi, dn->nid); in truncate_node()
614 trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr); in truncate_node()
621 if (dn->nid == 0) in truncate_dnode()
625 page = get_node_page(F2FS_I_SB(dn->inode), dn->nid); in truncate_dnode()
650 if (dn->nid == 0) in truncate_nodes()
653 trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr); in truncate_nodes()
655 page = get_node_page(F2FS_I_SB(dn->inode), dn->nid); in truncate_nodes()
664 child_nid = le32_to_cpu(rn->in.nid[i]); in truncate_nodes()
667 rdn.nid = child_nid; in truncate_nodes()
676 child_nid = le32_to_cpu(rn->in.nid[i]); in truncate_nodes()
681 rdn.nid = child_nid; in truncate_nodes()
714 nid_t nid[3]; in truncate_partial_nodes() local
720 nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]); in truncate_partial_nodes()
721 if (!nid[0]) in truncate_partial_nodes()
727 pages[i] = get_node_page(F2FS_I_SB(dn->inode), nid[i]); in truncate_partial_nodes()
733 nid[i + 1] = get_nid(pages[i], offset[i + 1], false); in truncate_partial_nodes()
741 dn->nid = child_nid; in truncate_partial_nodes()
750 dn->nid = nid[idx]; in truncate_partial_nodes()
762 trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err); in truncate_partial_nodes()
822 dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]); in truncate_inode_blocks()
869 nid_t nid = F2FS_I(inode)->i_xattr_nid; in truncate_xattr_node() local
873 if (!nid) in truncate_xattr_node()
876 npage = get_node_page(sbi, nid); in truncate_xattr_node()
885 set_new_dnode(&dn, inode, page, npage, nid); in truncate_xattr_node()
945 page = grab_cache_page(NODE_MAPPING(sbi), dn->nid); in new_node_page()
954 get_node_info(sbi, dn->nid, &old_ni); in new_node_page()
963 fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true); in new_node_page()
969 F2FS_I(dn->inode)->i_xattr_nid = dn->nid; in new_node_page()
1020 void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid) in ra_node_page() argument
1025 apage = find_get_page(NODE_MAPPING(sbi), nid); in ra_node_page()
1032 apage = grab_cache_page(NODE_MAPPING(sbi), nid); in ra_node_page()
1043 struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid) in get_node_page() argument
1048 page = grab_cache_page(NODE_MAPPING(sbi), nid); in get_node_page()
1058 if (unlikely(!PageUptodate(page) || nid != nid_of_node(page))) { in get_node_page()
1080 nid_t nid; in get_node_page_ra() local
1083 nid = get_nid(parent, start, false); in get_node_page_ra()
1084 if (!nid) in get_node_page_ra()
1087 page = grab_cache_page(NODE_MAPPING(sbi), nid); in get_node_page_ra()
1103 nid = get_nid(parent, i, false); in get_node_page_ra()
1104 if (!nid) in get_node_page_ra()
1106 ra_node_page(sbi, nid); in get_node_page_ra()
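The ra_node_page() and get_node_page() hits above show the usual read pattern: issue best-effort readahead for nids that will be wanted soon (as get_node_page_ra() does for sibling nids), then read the target page synchronously. Below is a hedged sketch assuming only the listed signatures; read_node_checked() is a hypothetical wrapper, not an f2fs function.

        /* Hedged sketch, not f2fs code: readahead a neighbour, then read one node page. */
        static struct page *read_node_checked(struct f2fs_sb_info *sbi,
                                                nid_t nid, nid_t next_nid)
        {
                if (next_nid)
                        ra_node_page(sbi, next_nid);    /* asynchronous, best effort */

                /*
                 * get_node_page() waits for an uptodate page and, per the check
                 * listed above, fails if the page's footer nid does not match.
                 */
                return get_node_page(sbi, nid);
        }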
1293 nid_t nid; in f2fs_write_node_page() local
1310 nid = nid_of_node(page); in f2fs_write_node_page()
1311 f2fs_bug_on(sbi, page->index != nid); in f2fs_write_node_page()
1313 get_node_info(sbi, nid, &ni); in f2fs_write_node_page()
1332 write_node_page(sbi, page, nid, &fio); in f2fs_write_node_page()
1410 radix_tree_delete(&nm_i->free_nid_root, i->nid); in __del_from_free_nid_list()
1413 static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build) in add_free_nid() argument
1424 if (unlikely(nid == 0)) in add_free_nid()
1430 ne = __lookup_nat_cache(nm_i, nid); in add_free_nid()
1441 i->nid = nid; in add_free_nid()
1450 if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) { in add_free_nid()
1463 static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid) in remove_free_nid() argument
1469 i = __lookup_free_nid_list(nm_i, nid); in remove_free_nid()
1511 nid_t nid = nm_i->next_scan_nid; in build_free_nids() local
1518 ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, META_NAT); in build_free_nids()
1521 struct page *page = get_current_nat_page(sbi, nid); in build_free_nids()
1523 scan_nat_page(sbi, page, nid); in build_free_nids()
1526 nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK)); in build_free_nids()
1527 if (unlikely(nid >= nm_i->max_nid)) in build_free_nids()
1528 nid = 0; in build_free_nids()
1535 nm_i->next_scan_nid = nid; in build_free_nids()
1541 nid = le32_to_cpu(nid_in_journal(sum, i)); in build_free_nids()
1543 add_free_nid(sbi, nid, true); in build_free_nids()
1545 remove_free_nid(nm_i, nid); in build_free_nids()
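The last two build_free_nids() hits come from its pass over the NAT journal in the current summary block: a journalled entry whose block address is NULL_ADDR names a free nid, while any other address means the nid must not stay on the free list. Below is a hedged reconstruction of that loop, assuming the nats_in_cursum()/nat_in_journal()/nid_in_journal() accessors and omitting the surrounding locking; the wrapper name is hypothetical.

        /* Hedged reconstruction, locking omitted; wrapper name is hypothetical. */
        static void scan_nat_journal_for_free_nids(struct f2fs_sb_info *sbi,
                                        struct f2fs_summary_block *sum)
        {
                struct f2fs_nm_info *nm_i = NM_I(sbi);
                int i;

                for (i = 0; i < nats_in_cursum(sum); i++) {
                        block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
                        nid_t nid = le32_to_cpu(nid_in_journal(sum, i));

                        if (addr == NULL_ADDR)
                                add_free_nid(sbi, nid, true);   /* unused nid: remember it */
                        else
                                remove_free_nid(nm_i, nid);     /* in use: drop stale entry */
                }
        }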
1555 bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid) in alloc_nid() argument
1573 *nid = i->nid; in alloc_nid()
1591 void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid) in alloc_nid_done() argument
1597 i = __lookup_free_nid_list(nm_i, nid); in alloc_nid_done()
1608 void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid) in alloc_nid_failed() argument
1614 if (!nid) in alloc_nid_failed()
1618 i = __lookup_free_nid_list(nm_i, nid); in alloc_nid_failed()
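Taken together, alloc_nid(), alloc_nid_done() and alloc_nid_failed() form a reserve/commit/rollback protocol for free nids. Below is a hedged sketch of a caller, assuming only the signatures listed above; create_node_with_reserved_nid() and build_new_node() are hypothetical names used for illustration.

        /* Hedged sketch, not f2fs code: the free-nid allocation lifecycle. */
        static int create_node_with_reserved_nid(struct f2fs_sb_info *sbi)
        {
                nid_t nid;
                int err;

                if (!alloc_nid(sbi, &nid))              /* reserve a free nid */
                        return -ENOSPC;

                err = build_new_node(sbi, nid);         /* hypothetical construction step */
                if (err) {
                        alloc_nid_failed(sbi, nid);     /* roll back the reservation */
                        return err;
                }

                alloc_nid_done(sbi, nid);               /* commit: nid is now in use */
                return 0;
        }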
1764 sum_entry->nid = rn->footer.nid; in restore_node_summary()
1788 nid_t nid = le32_to_cpu(nid_in_journal(sum, i)); in remove_nats_in_journal() local
1793 ne = __lookup_nat_cache(nm_i, nid); in remove_nats_in_journal()
1795 ne = grab_nat_entry(nm_i, nid); in remove_nats_in_journal()
1854 nid_t nid = nat_get_nid(ne); in __flush_nat_entry_set() local
1862 NAT_JOURNAL, nid, 1); in __flush_nat_entry_set()
1865 nid_in_journal(sum, offset) = cpu_to_le32(nid); in __flush_nat_entry_set()
1867 raw_ne = &nat_blk->entries[nid - start_nid]; in __flush_nat_entry_set()
1877 add_free_nid(sbi, nid, false); in __flush_nat_entry_set()
2001 nid_t nid = 0; in destroy_node_manager() local
2023 nid, NATVEC_SIZE, natvec))) { in destroy_node_manager()
2026 nid = nat_get_nid(natvec[found - 1]) + 1; in destroy_node_manager()
2033 nid = 0; in destroy_node_manager()
2035 nid, SETVEC_SIZE, setvec))) { in destroy_node_manager()
2038 nid = setvec[found - 1]->set + 1; in destroy_node_manager()
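The destroy_node_manager() fragments above show only the nid bookkeeping of its teardown loops; the surrounding iteration is a gang lookup over the NAT radix tree that frees a batch of cached entries and resumes just past the last nid returned. Below is a hedged reconstruction, assuming this file's __gang_lookup_nat_cache() and __del_from_nat_cache() helpers; the wrapper name is hypothetical and locking is omitted.

        /* Hedged reconstruction; wrapper name hypothetical, locking omitted. */
        static void drop_cached_nat_entries(struct f2fs_nm_info *nm_i)
        {
                struct nat_entry *natvec[NATVEC_SIZE];
                nid_t nid = 0;
                unsigned int found;

                while ((found = __gang_lookup_nat_cache(nm_i,
                                        nid, NATVEC_SIZE, natvec))) {
                        unsigned int idx;

                        /* continue the scan after the last entry in this batch */
                        nid = nat_get_nid(natvec[found - 1]) + 1;
                        for (idx = 0; idx < found; idx++)
                                __del_from_nat_cache(nm_i, natvec[idx]);
                }
        }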