fs/f2fs/inline.c


DEFINITIONS

This source file includes the following definitions:
  1. f2fs_may_inline_data
  2. f2fs_may_inline_dentry
  3. f2fs_do_read_inline_data
  4. f2fs_truncate_inline_inode
  5. f2fs_read_inline_data
  6. f2fs_convert_inline_page
  7. f2fs_convert_inline_inode
  8. f2fs_write_inline_data
  9. f2fs_recover_inline_data
  10. f2fs_find_in_inline_dir
  11. f2fs_make_empty_inline_dir
  12. f2fs_move_inline_dirents
  13. f2fs_add_inline_entries
  14. f2fs_move_rehashed_dirents
  15. f2fs_convert_inline_dir
  16. f2fs_add_inline_entry
  17. f2fs_delete_inline_entry
  18. f2fs_empty_inline_dir
  19. f2fs_read_inline_dir
  20. f2fs_inline_data_fiemap

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * fs/f2fs/inline.c
   4  * Copyright (c) 2013, Intel Corporation
   5  * Authors: Huajun Li <huajun.li@intel.com>
   6  *          Haicheng Li <haicheng.li@intel.com>
   7  */
   8 
   9 #include <linux/fs.h>
  10 #include <linux/f2fs_fs.h>
  11 
  12 #include "f2fs.h"
  13 #include "node.h"
  14 
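      /*
       * Check whether @inode may keep its data inline in the inode block:
       * only small regular files and symlinks that are not atomic files and
       * need no post-read processing (e.g. decryption) qualify.
       */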
  15 bool f2fs_may_inline_data(struct inode *inode)
  16 {
  17         if (f2fs_is_atomic_file(inode))
  18                 return false;
  19 
  20         if (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))
  21                 return false;
  22 
  23         if (i_size_read(inode) > MAX_INLINE_DATA(inode))
  24                 return false;
  25 
  26         if (f2fs_post_read_required(inode))
  27                 return false;
  28 
  29         return true;
  30 }
  31 
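      /*
       * Check whether @inode is a directory that may store its dentries
       * inline (the inline_dentry mount option must be enabled).
       */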
  32 bool f2fs_may_inline_dentry(struct inode *inode)
  33 {
  34         if (!test_opt(F2FS_I_SB(inode), INLINE_DENTRY))
  35                 return false;
  36 
  37         if (!S_ISDIR(inode->i_mode))
  38                 return false;
  39 
  40         return true;
  41 }
  42 
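      /*
       * Copy the inline data from the inode page @ipage into the data page
       * @page, zero the tail of the page, and mark it uptodate.
       */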
  43 void f2fs_do_read_inline_data(struct page *page, struct page *ipage)
  44 {
  45         struct inode *inode = page->mapping->host;
  46         void *src_addr, *dst_addr;
  47 
  48         if (PageUptodate(page))
  49                 return;
  50 
  51         f2fs_bug_on(F2FS_P_SB(page), page->index);
  52 
  53         zero_user_segment(page, MAX_INLINE_DATA(inode), PAGE_SIZE);
  54 
  55         /* Copy the whole inline data block */
  56         src_addr = inline_data_addr(inode, ipage);
  57         dst_addr = kmap_atomic(page);
  58         memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode));
  59         flush_dcache_page(page);
  60         kunmap_atomic(dst_addr);
  61         if (!PageUptodate(page))
  62                 SetPageUptodate(page);
  63 }
  64 
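      /*
       * Zero the inline data area of @inode from offset @from onwards; a full
       * truncation (@from == 0) also clears the FI_DATA_EXIST flag.
       */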
  65 void f2fs_truncate_inline_inode(struct inode *inode,
  66                                         struct page *ipage, u64 from)
  67 {
  68         void *addr;
  69 
  70         if (from >= MAX_INLINE_DATA(inode))
  71                 return;
  72 
  73         addr = inline_data_addr(inode, ipage);
  74 
  75         f2fs_wait_on_page_writeback(ipage, NODE, true, true);
  76         memset(addr + from, 0, MAX_INLINE_DATA(inode) - from);
  77         set_page_dirty(ipage);
  78 
  79         if (from == 0)
  80                 clear_inode_flag(inode, FI_DATA_EXIST);
  81 }
  82 
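      /*
       * Read path for inline inodes: fill @page from the inline area, or
       * return -EAGAIN if the inode lost its inline data in the meantime.
       */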
  83 int f2fs_read_inline_data(struct inode *inode, struct page *page)
  84 {
  85         struct page *ipage;
  86 
  87         ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
  88         if (IS_ERR(ipage)) {
  89                 unlock_page(page);
  90                 return PTR_ERR(ipage);
  91         }
  92 
  93         if (!f2fs_has_inline_data(inode)) {
  94                 f2fs_put_page(ipage, 1);
  95                 return -EAGAIN;
  96         }
  97 
  98         if (page->index)
  99                 zero_user_segment(page, 0, PAGE_SIZE);
 100         else
 101                 f2fs_do_read_inline_data(page, ipage);
 102 
 103         if (!PageUptodate(page))
 104                 SetPageUptodate(page);
 105         f2fs_put_page(ipage, 1);
 106         unlock_page(page);
 107         return 0;
 108 }
 109 
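      /*
       * Convert inline data into a regular data block: reserve block 0, copy
       * the inline data into @page, write it out of place, then clear the
       * inline area and the FI_INLINE_DATA flag.
       */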
 110 int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
 111 {
 112         struct f2fs_io_info fio = {
 113                 .sbi = F2FS_I_SB(dn->inode),
 114                 .ino = dn->inode->i_ino,
 115                 .type = DATA,
 116                 .op = REQ_OP_WRITE,
 117                 .op_flags = REQ_SYNC | REQ_PRIO,
 118                 .page = page,
 119                 .encrypted_page = NULL,
 120                 .io_type = FS_DATA_IO,
 121         };
 122         struct node_info ni;
 123         int dirty, err;
 124 
 125         if (!f2fs_exist_data(dn->inode))
 126                 goto clear_out;
 127 
 128         err = f2fs_reserve_block(dn, 0);
 129         if (err)
 130                 return err;
 131 
 132         err = f2fs_get_node_info(fio.sbi, dn->nid, &ni);
 133         if (err) {
 134                 f2fs_truncate_data_blocks_range(dn, 1);
 135                 f2fs_put_dnode(dn);
 136                 return err;
 137         }
 138 
 139         fio.version = ni.version;
 140 
 141         if (unlikely(dn->data_blkaddr != NEW_ADDR)) {
 142                 f2fs_put_dnode(dn);
 143                 set_sbi_flag(fio.sbi, SBI_NEED_FSCK);
 144                 f2fs_warn(fio.sbi, "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, run fsck to fix.",
 145                           __func__, dn->inode->i_ino, dn->data_blkaddr);
 146                 return -EFSCORRUPTED;
 147         }
 148 
 149         f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));
 150 
 151         f2fs_do_read_inline_data(page, dn->inode_page);
 152         set_page_dirty(page);
 153 
 154         /* clear dirty state */
 155         dirty = clear_page_dirty_for_io(page);
 156 
 157         /* write data page to try to make data consistent */
 158         set_page_writeback(page);
 159         ClearPageError(page);
 160         fio.old_blkaddr = dn->data_blkaddr;
 161         set_inode_flag(dn->inode, FI_HOT_DATA);
 162         f2fs_outplace_write_data(dn, &fio);
 163         f2fs_wait_on_page_writeback(page, DATA, true, true);
 164         if (dirty) {
 165                 inode_dec_dirty_pages(dn->inode);
 166                 f2fs_remove_dirty_inode(dn->inode);
 167         }
 168 
 169         /* this converted inline_data should be recovered. */
 170         set_inode_flag(dn->inode, FI_APPEND_WRITE);
 171 
 172         /* clear inline data and flag after data writeback */
 173         f2fs_truncate_inline_inode(dn->inode, dn->inode_page, 0);
 174         clear_inline_node(dn->inode_page);
 175 clear_out:
 176         stat_dec_inline_inode(dn->inode);
 177         clear_inode_flag(dn->inode, FI_INLINE_DATA);
 178         f2fs_put_dnode(dn);
 179         return 0;
 180 }
 181 
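      /*
       * Convert an inline-data inode to regular form: grab data page 0 and
       * the inode page under f2fs_lock_op() and hand them to
       * f2fs_convert_inline_page().
       */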
 182 int f2fs_convert_inline_inode(struct inode *inode)
 183 {
 184         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 185         struct dnode_of_data dn;
 186         struct page *ipage, *page;
 187         int err = 0;
 188 
 189         if (!f2fs_has_inline_data(inode))
 190                 return 0;
 191 
 192         page = f2fs_grab_cache_page(inode->i_mapping, 0, false);
 193         if (!page)
 194                 return -ENOMEM;
 195 
 196         f2fs_lock_op(sbi);
 197 
 198         ipage = f2fs_get_node_page(sbi, inode->i_ino);
 199         if (IS_ERR(ipage)) {
 200                 err = PTR_ERR(ipage);
 201                 goto out;
 202         }
 203 
 204         set_new_dnode(&dn, inode, ipage, ipage, 0);
 205 
 206         if (f2fs_has_inline_data(inode))
 207                 err = f2fs_convert_inline_page(&dn, page);
 208 
 209         f2fs_put_dnode(&dn);
 210 out:
 211         f2fs_unlock_op(sbi);
 212 
 213         f2fs_put_page(page, 1);
 214 
 215         f2fs_balance_fs(sbi, dn.node_changed);
 216 
 217         return err;
 218 }
 219 
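      /*
       * Write @page back into the inline data area of @inode, or return
       * -EAGAIN if the inode no longer holds inline data.
       */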
 220 int f2fs_write_inline_data(struct inode *inode, struct page *page)
 221 {
 222         void *src_addr, *dst_addr;
 223         struct dnode_of_data dn;
 224         int err;
 225 
 226         set_new_dnode(&dn, inode, NULL, NULL, 0);
 227         err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
 228         if (err)
 229                 return err;
 230 
 231         if (!f2fs_has_inline_data(inode)) {
 232                 f2fs_put_dnode(&dn);
 233                 return -EAGAIN;
 234         }
 235 
 236         f2fs_bug_on(F2FS_I_SB(inode), page->index);
 237 
 238         f2fs_wait_on_page_writeback(dn.inode_page, NODE, true, true);
 239         src_addr = kmap_atomic(page);
 240         dst_addr = inline_data_addr(inode, dn.inode_page);
 241         memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode));
 242         kunmap_atomic(src_addr);
 243         set_page_dirty(dn.inode_page);
 244 
 245         f2fs_clear_page_cache_dirty_tag(page);
 246 
 247         set_inode_flag(inode, FI_APPEND_WRITE);
 248         set_inode_flag(inode, FI_DATA_EXIST);
 249 
 250         clear_inline_node(dn.inode_page);
 251         f2fs_put_dnode(&dn);
 252         return 0;
 253 }
 254 
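      /*
       * Roll-forward recovery of inline data from the recovered node page
       * @npage; returns true if inline data was recovered here, false if the
       * caller should recover regular data blocks instead.
       */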
 255 bool f2fs_recover_inline_data(struct inode *inode, struct page *npage)
 256 {
 257         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 258         struct f2fs_inode *ri = NULL;
 259         void *src_addr, *dst_addr;
 260         struct page *ipage;
 261 
 262         /*
 263          * The inline_data recovery policy is as follows.
 264          * [prev.] [next] of inline_data flag
 265          *    o       o  -> recover inline_data
 266          *    o       x  -> remove inline_data, and then recover data blocks
 267          *    x       o  -> remove inline_data, and then recover inline_data
 268          *    x       x  -> recover data blocks
 269          */
 270         if (IS_INODE(npage))
 271                 ri = F2FS_INODE(npage);
 272 
 273         if (f2fs_has_inline_data(inode) &&
 274                         ri && (ri->i_inline & F2FS_INLINE_DATA)) {
 275 process_inline:
 276                 ipage = f2fs_get_node_page(sbi, inode->i_ino);
 277                 f2fs_bug_on(sbi, IS_ERR(ipage));
 278 
 279                 f2fs_wait_on_page_writeback(ipage, NODE, true, true);
 280 
 281                 src_addr = inline_data_addr(inode, npage);
 282                 dst_addr = inline_data_addr(inode, ipage);
 283                 memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode));
 284 
 285                 set_inode_flag(inode, FI_INLINE_DATA);
 286                 set_inode_flag(inode, FI_DATA_EXIST);
 287 
 288                 set_page_dirty(ipage);
 289                 f2fs_put_page(ipage, 1);
 290                 return true;
 291         }
 292 
 293         if (f2fs_has_inline_data(inode)) {
 294                 ipage = f2fs_get_node_page(sbi, inode->i_ino);
 295                 f2fs_bug_on(sbi, IS_ERR(ipage));
 296                 f2fs_truncate_inline_inode(inode, ipage, 0);
 297                 clear_inode_flag(inode, FI_INLINE_DATA);
 298                 f2fs_put_page(ipage, 1);
 299         } else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
 300                 if (f2fs_truncate_blocks(inode, 0, false))
 301                         return false;
 302                 goto process_inline;
 303         }
 304         return false;
 305 }
 306 
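      /*
       * Look up @fname in an inline directory. On a hit the matching entry is
       * returned and *res_page is set to the inode page that contains it.
       */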
 307 struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
 308                         struct fscrypt_name *fname, struct page **res_page)
 309 {
 310         struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
 311         struct qstr name = FSTR_TO_QSTR(&fname->disk_name);
 312         struct f2fs_dir_entry *de;
 313         struct f2fs_dentry_ptr d;
 314         struct page *ipage;
 315         void *inline_dentry;
 316         f2fs_hash_t namehash;
 317 
 318         ipage = f2fs_get_node_page(sbi, dir->i_ino);
 319         if (IS_ERR(ipage)) {
 320                 *res_page = ipage;
 321                 return NULL;
 322         }
 323 
 324         namehash = f2fs_dentry_hash(dir, &name, fname);
 325 
 326         inline_dentry = inline_data_addr(dir, ipage);
 327 
 328         make_dentry_ptr_inline(dir, &d, inline_dentry);
 329         de = f2fs_find_target_dentry(fname, namehash, NULL, &d);
 330         unlock_page(ipage);
 331         if (de)
 332                 *res_page = ipage;
 333         else
 334                 f2fs_put_page(ipage, 0);
 335 
 336         return de;
 337 }
 338 
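      /* Create the "." and ".." entries in a new inline directory. */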
 339 int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
 340                                                         struct page *ipage)
 341 {
 342         struct f2fs_dentry_ptr d;
 343         void *inline_dentry;
 344 
 345         inline_dentry = inline_data_addr(inode, ipage);
 346 
 347         make_dentry_ptr_inline(inode, &d, inline_dentry);
 348         f2fs_do_make_empty_dir(inode, parent, &d);
 349 
 350         set_page_dirty(ipage);
 351 
 352         /* update i_size to MAX_INLINE_DATA */
 353         if (i_size_read(inode) < MAX_INLINE_DATA(inode))
 354                 f2fs_i_size_write(inode, MAX_INLINE_DATA(inode));
 355         return 0;
 356 }
 357 
 358 /*
 359  * NOTE: ipage is grabbed by caller, but if any error occurs, we should
 360  * release ipage in this function.
 361  */
 362 static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
 363                                                         void *inline_dentry)
 364 {
 365         struct page *page;
 366         struct dnode_of_data dn;
 367         struct f2fs_dentry_block *dentry_blk;
 368         struct f2fs_dentry_ptr src, dst;
 369         int err;
 370 
 371         page = f2fs_grab_cache_page(dir->i_mapping, 0, false);
 372         if (!page) {
 373                 f2fs_put_page(ipage, 1);
 374                 return -ENOMEM;
 375         }
 376 
 377         set_new_dnode(&dn, dir, ipage, NULL, 0);
 378         err = f2fs_reserve_block(&dn, 0);
 379         if (err)
 380                 goto out;
 381 
 382         if (unlikely(dn.data_blkaddr != NEW_ADDR)) {
 383                 f2fs_put_dnode(&dn);
 384                 set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
 385                 f2fs_warn(F2FS_P_SB(page), "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, run fsck to fix.",
 386                           __func__, dir->i_ino, dn.data_blkaddr);
 387                 err = -EFSCORRUPTED;
 388                 goto out;
 389         }
 390 
 391         f2fs_wait_on_page_writeback(page, DATA, true, true);
 392 
 393         dentry_blk = page_address(page);
 394 
 395         make_dentry_ptr_inline(dir, &src, inline_dentry);
 396         make_dentry_ptr_block(dir, &dst, dentry_blk);
 397 
 398         /* copy data from inline dentry block to new dentry block */
 399         memcpy(dst.bitmap, src.bitmap, src.nr_bitmap);
 400         memset(dst.bitmap + src.nr_bitmap, 0, dst.nr_bitmap - src.nr_bitmap);
  401         /*
  402          * We do not need to zero out the remainder of the dentry and
  403          * filename fields, since the bitmap marks their usage status.
  404          * Likewise, we can skip copying/zeroing the reserved space of the
  405          * dentry block, because it has not been used so far.
  406          */
 407         memcpy(dst.dentry, src.dentry, SIZE_OF_DIR_ENTRY * src.max);
 408         memcpy(dst.filename, src.filename, src.max * F2FS_SLOT_LEN);
 409 
 410         if (!PageUptodate(page))
 411                 SetPageUptodate(page);
 412         set_page_dirty(page);
 413 
 414         /* clear inline dir and flag after data writeback */
 415         f2fs_truncate_inline_inode(dir, ipage, 0);
 416 
 417         stat_dec_inline_dir(dir);
 418         clear_inode_flag(dir, FI_INLINE_DENTRY);
 419 
  420         /*
  421          * We should reclaim the reserved space which was used to keep
  422          * the inline dentry structure for backward compatibility.
  423          */
 424         if (!f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(dir)) &&
 425                         !f2fs_has_inline_xattr(dir))
 426                 F2FS_I(dir)->i_inline_xattr_size = 0;
 427 
 428         f2fs_i_depth_write(dir, 1);
 429         if (i_size_read(dir) < PAGE_SIZE)
 430                 f2fs_i_size_write(dir, PAGE_SIZE);
 431 out:
 432         f2fs_put_page(page, 1);
 433         return err;
 434 }
 435 
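      /*
       * Re-insert every in-use entry from @inline_dentry into the directory
       * through the regular (block-based) add path.
       */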
 436 static int f2fs_add_inline_entries(struct inode *dir, void *inline_dentry)
 437 {
 438         struct f2fs_dentry_ptr d;
 439         unsigned long bit_pos = 0;
 440         int err = 0;
 441 
 442         make_dentry_ptr_inline(dir, &d, inline_dentry);
 443 
 444         while (bit_pos < d.max) {
 445                 struct f2fs_dir_entry *de;
 446                 struct qstr new_name;
 447                 nid_t ino;
 448                 umode_t fake_mode;
 449 
 450                 if (!test_bit_le(bit_pos, d.bitmap)) {
 451                         bit_pos++;
 452                         continue;
 453                 }
 454 
 455                 de = &d.dentry[bit_pos];
 456 
 457                 if (unlikely(!de->name_len)) {
 458                         bit_pos++;
 459                         continue;
 460                 }
 461 
 462                 new_name.name = d.filename[bit_pos];
 463                 new_name.len = le16_to_cpu(de->name_len);
 464 
 465                 ino = le32_to_cpu(de->ino);
 466                 fake_mode = f2fs_get_de_type(de) << S_SHIFT;
 467 
 468                 err = f2fs_add_regular_entry(dir, &new_name, NULL, NULL,
 469                                                         ino, fake_mode);
 470                 if (err)
 471                         goto punch_dentry_pages;
 472 
 473                 bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
 474         }
 475         return 0;
 476 punch_dentry_pages:
 477         truncate_inode_pages(&dir->i_data, 0);
 478         f2fs_truncate_blocks(dir, 0, false);
 479         f2fs_remove_dirty_inode(dir);
 480         return err;
 481 }
 482 
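      /*
       * Convert an inline directory that already uses a non-zero dir level:
       * back up the inline dentries, clear the inline area, then re-add the
       * entries so they are hashed into regular dentry blocks.
       */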
 483 static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
 484                                                         void *inline_dentry)
 485 {
 486         void *backup_dentry;
 487         int err;
 488 
 489         backup_dentry = f2fs_kmalloc(F2FS_I_SB(dir),
 490                                 MAX_INLINE_DATA(dir), GFP_F2FS_ZERO);
 491         if (!backup_dentry) {
 492                 f2fs_put_page(ipage, 1);
 493                 return -ENOMEM;
 494         }
 495 
 496         memcpy(backup_dentry, inline_dentry, MAX_INLINE_DATA(dir));
 497         f2fs_truncate_inline_inode(dir, ipage, 0);
 498 
 499         unlock_page(ipage);
 500 
 501         err = f2fs_add_inline_entries(dir, backup_dentry);
 502         if (err)
 503                 goto recover;
 504 
 505         lock_page(ipage);
 506 
 507         stat_dec_inline_dir(dir);
 508         clear_inode_flag(dir, FI_INLINE_DENTRY);
 509 
  510         /*
  511          * We should reclaim the reserved space which was used to keep
  512          * the inline dentry structure for backward compatibility.
  513          */
 514         if (!f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(dir)) &&
 515                         !f2fs_has_inline_xattr(dir))
 516                 F2FS_I(dir)->i_inline_xattr_size = 0;
 517 
 518         kvfree(backup_dentry);
 519         return 0;
 520 recover:
 521         lock_page(ipage);
 522         f2fs_wait_on_page_writeback(ipage, NODE, true, true);
 523         memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA(dir));
 524         f2fs_i_depth_write(dir, 0);
 525         f2fs_i_size_write(dir, MAX_INLINE_DATA(dir));
 526         set_page_dirty(ipage);
 527         f2fs_put_page(ipage, 1);
 528 
 529         kvfree(backup_dentry);
 530         return err;
 531 }
 532 
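      /* Convert an inline directory to the regular dentry block format. */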
 533 static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
 534                                                         void *inline_dentry)
 535 {
 536         if (!F2FS_I(dir)->i_dir_level)
 537                 return f2fs_move_inline_dirents(dir, ipage, inline_dentry);
 538         else
 539                 return f2fs_move_rehashed_dirents(dir, ipage, inline_dentry);
 540 }
 541 
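      /*
       * Add a dentry to an inline directory; if there is no room left, the
       * directory is converted first and -EAGAIN tells the caller to retry
       * through the regular path.
       */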
 542 int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
 543                                 const struct qstr *orig_name,
 544                                 struct inode *inode, nid_t ino, umode_t mode)
 545 {
 546         struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
 547         struct page *ipage;
 548         unsigned int bit_pos;
 549         f2fs_hash_t name_hash;
 550         void *inline_dentry = NULL;
 551         struct f2fs_dentry_ptr d;
 552         int slots = GET_DENTRY_SLOTS(new_name->len);
 553         struct page *page = NULL;
 554         int err = 0;
 555 
 556         ipage = f2fs_get_node_page(sbi, dir->i_ino);
 557         if (IS_ERR(ipage))
 558                 return PTR_ERR(ipage);
 559 
 560         inline_dentry = inline_data_addr(dir, ipage);
 561         make_dentry_ptr_inline(dir, &d, inline_dentry);
 562 
 563         bit_pos = f2fs_room_for_filename(d.bitmap, slots, d.max);
 564         if (bit_pos >= d.max) {
 565                 err = f2fs_convert_inline_dir(dir, ipage, inline_dentry);
 566                 if (err)
 567                         return err;
 568                 err = -EAGAIN;
 569                 goto out;
 570         }
 571 
 572         if (inode) {
 573                 down_write(&F2FS_I(inode)->i_sem);
 574                 page = f2fs_init_inode_metadata(inode, dir, new_name,
 575                                                 orig_name, ipage);
 576                 if (IS_ERR(page)) {
 577                         err = PTR_ERR(page);
 578                         goto fail;
 579                 }
 580         }
 581 
 582         f2fs_wait_on_page_writeback(ipage, NODE, true, true);
 583 
 584         name_hash = f2fs_dentry_hash(dir, new_name, NULL);
 585         f2fs_update_dentry(ino, mode, &d, new_name, name_hash, bit_pos);
 586 
 587         set_page_dirty(ipage);
 588 
 589         /* we don't need to mark_inode_dirty now */
 590         if (inode) {
 591                 f2fs_i_pino_write(inode, dir->i_ino);
 592 
 593                 /* synchronize inode page's data from inode cache */
 594                 if (is_inode_flag_set(inode, FI_NEW_INODE))
 595                         f2fs_update_inode(inode, page);
 596 
 597                 f2fs_put_page(page, 1);
 598         }
 599 
 600         f2fs_update_parent_metadata(dir, inode, 0);
 601 fail:
 602         if (inode)
 603                 up_write(&F2FS_I(inode)->i_sem);
 604 out:
 605         f2fs_put_page(ipage, 1);
 606         return err;
 607 }
 608 
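      /* Remove @dentry from an inline directory by clearing its bitmap slots. */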
 609 void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
 610                                         struct inode *dir, struct inode *inode)
 611 {
 612         struct f2fs_dentry_ptr d;
 613         void *inline_dentry;
 614         int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
 615         unsigned int bit_pos;
 616         int i;
 617 
 618         lock_page(page);
 619         f2fs_wait_on_page_writeback(page, NODE, true, true);
 620 
 621         inline_dentry = inline_data_addr(dir, page);
 622         make_dentry_ptr_inline(dir, &d, inline_dentry);
 623 
 624         bit_pos = dentry - d.dentry;
 625         for (i = 0; i < slots; i++)
 626                 __clear_bit_le(bit_pos + i, d.bitmap);
 627 
 628         set_page_dirty(page);
 629         f2fs_put_page(page, 1);
 630 
 631         dir->i_ctime = dir->i_mtime = current_time(dir);
 632         f2fs_mark_inode_dirty_sync(dir, false);
 633 
 634         if (inode)
 635                 f2fs_drop_nlink(dir, inode);
 636 }
 637 
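      /* Return true if the inline directory contains only "." and "..". */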
 638 bool f2fs_empty_inline_dir(struct inode *dir)
 639 {
 640         struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
 641         struct page *ipage;
 642         unsigned int bit_pos = 2;
 643         void *inline_dentry;
 644         struct f2fs_dentry_ptr d;
 645 
 646         ipage = f2fs_get_node_page(sbi, dir->i_ino);
 647         if (IS_ERR(ipage))
 648                 return false;
 649 
 650         inline_dentry = inline_data_addr(dir, ipage);
 651         make_dentry_ptr_inline(dir, &d, inline_dentry);
 652 
 653         bit_pos = find_next_bit_le(d.bitmap, d.max, bit_pos);
 654 
 655         f2fs_put_page(ipage, 1);
 656 
 657         if (bit_pos < d.max)
 658                 return false;
 659 
 660         return true;
 661 }
 662 
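      /* readdir() implementation for inline directories. */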
 663 int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
 664                                 struct fscrypt_str *fstr)
 665 {
 666         struct inode *inode = file_inode(file);
 667         struct page *ipage = NULL;
 668         struct f2fs_dentry_ptr d;
 669         void *inline_dentry = NULL;
 670         int err;
 671 
 672         make_dentry_ptr_inline(inode, &d, inline_dentry);
 673 
 674         if (ctx->pos == d.max)
 675                 return 0;
 676 
 677         ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
 678         if (IS_ERR(ipage))
 679                 return PTR_ERR(ipage);
 680 
  681         /*
  682          * f2fs_readdir is protected by the inode's i_rwsem, so it is
  683          * safe to access ipage without holding the page lock.
  684          */
 685         unlock_page(ipage);
 686 
 687         inline_dentry = inline_data_addr(inode, ipage);
 688 
 689         make_dentry_ptr_inline(inode, &d, inline_dentry);
 690 
 691         err = f2fs_fill_dentries(ctx, &d, 0, fstr);
 692         if (!err)
 693                 ctx->pos = d.max;
 694 
 695         f2fs_put_page(ipage, 0);
 696         return err < 0 ? err : 0;
 697 }
 698 
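      /*
       * fiemap for inline data: report a single DATA_INLINE extent that maps
       * the inline area inside the on-disk inode block.
       */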
 699 int f2fs_inline_data_fiemap(struct inode *inode,
 700                 struct fiemap_extent_info *fieinfo, __u64 start, __u64 len)
 701 {
 702         __u64 byteaddr, ilen;
 703         __u32 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED |
 704                 FIEMAP_EXTENT_LAST;
 705         struct node_info ni;
 706         struct page *ipage;
 707         int err = 0;
 708 
 709         ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
 710         if (IS_ERR(ipage))
 711                 return PTR_ERR(ipage);
 712 
 713         if ((S_ISREG(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
 714                                 !f2fs_has_inline_data(inode)) {
 715                 err = -EAGAIN;
 716                 goto out;
 717         }
 718 
 719         if (S_ISDIR(inode->i_mode) && !f2fs_has_inline_dentry(inode)) {
 720                 err = -EAGAIN;
 721                 goto out;
 722         }
 723 
 724         ilen = min_t(size_t, MAX_INLINE_DATA(inode), i_size_read(inode));
 725         if (start >= ilen)
 726                 goto out;
 727         if (start + len < ilen)
 728                 ilen = start + len;
 729         ilen -= start;
 730 
 731         err = f2fs_get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni);
 732         if (err)
 733                 goto out;
 734 
 735         byteaddr = (__u64)ni.blk_addr << inode->i_sb->s_blocksize_bits;
 736         byteaddr += (char *)inline_data_addr(inode, ipage) -
 737                                         (char *)F2FS_INODE(ipage);
 738         err = fiemap_fill_next_extent(fieinfo, start, byteaddr, ilen, flags);
 739 out:
 740         f2fs_put_page(ipage, 1);
 741         return err;
 742 }
