fs/erofs/data.c


DEFINITIONS

This source file includes the following definitions.
  1. erofs_readendio
  2. erofs_get_meta_page
  3. erofs_map_blocks_flatmode
  4. erofs_map_blocks
  5. erofs_read_raw_page
  6. erofs_raw_access_readpage
  7. erofs_raw_access_readpages
  8. erofs_get_block
  9. erofs_bmap

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "internal.h"
#include <linux/prefetch.h>

#include <trace/events/erofs.h>

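/*
 * Read completion callback for raw (uncompressed) bios: mark each page
 * uptodate (or error) according to the bio status, then unlock it so
 * waiting readers can proceed.
 */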
static void erofs_readendio(struct bio *bio)
{
        struct bio_vec *bvec;
        blk_status_t err = bio->bi_status;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bvec, bio, iter_all) {
                struct page *page = bvec->bv_page;

                /* page is already locked */
                DBG_BUGON(PageUptodate(page));

                if (err)
                        SetPageError(page);
                else
                        SetPageUptodate(page);

                unlock_page(page);
                /* page could be reclaimed now */
        }
        bio_put(bio);
}

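/*
 * Read one metadata block through the block device's page cache; the
 * page is returned locked, and should already be uptodate on success
 * since read_cache_page_gfp() waits for the read to finish.
 */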
struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr)
{
        struct address_space *const mapping = sb->s_bdev->bd_inode->i_mapping;
        struct page *page;

        page = read_cache_page_gfp(mapping, blkaddr,
                                   mapping_gfp_constraint(mapping, ~__GFP_FS));
        /* should already be PageUptodate */
        if (!IS_ERR(page))
                lock_page(page);
        return page;
}

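/*
 * Map a logical extent for the two non-compressed layouts: FLAT_PLAIN
 * inodes store all blocks contiguously from vi->raw_blkaddr, while
 * FLAT_INLINE inodes keep the last (partial) block inline, right after
 * the on-disk inode and its xattrs.
 *
 * Worked example (assuming the common 4KiB block size): a 10000-byte
 * FLAT_INLINE file has nblocks = 3 and lastblk = 2, so offsets below
 * 8192 map into the raw block area and the remaining 1808 tail bytes
 * are served from the inode's metadata block.
 */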
static int erofs_map_blocks_flatmode(struct inode *inode,
                                     struct erofs_map_blocks *map,
                                     int flags)
{
        int err = 0;
        erofs_blk_t nblocks, lastblk;
        u64 offset = map->m_la;
        struct erofs_inode *vi = EROFS_I(inode);
        bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

        trace_erofs_map_blocks_flatmode_enter(inode, map, flags);

        nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
        lastblk = nblocks - tailendpacking;

        if (offset >= inode->i_size) {
                /* leave out-of-bounds access unmapped */
                map->m_flags = 0;
                map->m_plen = 0;
                goto out;
        }

        /* there is no hole in flatmode */
        map->m_flags = EROFS_MAP_MAPPED;

        if (offset < blknr_to_addr(lastblk)) {
                map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
                map->m_plen = blknr_to_addr(lastblk) - offset;
        } else if (tailendpacking) {
                /* 2 - inode inline B: inode, [xattrs], inline last blk... */
                struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

                map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
                        vi->xattr_isize + erofs_blkoff(map->m_la);
                map->m_plen = inode->i_size - offset;

                /* inline data should be located in one meta block */
                if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
                        erofs_err(inode->i_sb,
                                  "inline data cross block boundary @ nid %llu",
                                  vi->nid);
                        DBG_BUGON(1);
                        err = -EFSCORRUPTED;
                        goto err_out;
                }

                map->m_flags |= EROFS_MAP_META;
        } else {
                erofs_err(inode->i_sb,
                          "internal error @ nid: %llu (size %llu), m_la 0x%llx",
                          vi->nid, inode->i_size, map->m_la);
                DBG_BUGON(1);
                err = -EIO;
                goto err_out;
        }

out:
        map->m_llen = map->m_plen;

err_out:
        trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
        return err;
}

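/*
 * Dispatch: compressed inodes go through the z_erofs iterator (dropping
 * any metapage it may have pinned), everything else uses flatmode above.
 */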
int erofs_map_blocks(struct inode *inode,
                     struct erofs_map_blocks *map, int flags)
{
        if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
                int err = z_erofs_map_blocks_iter(inode, map, flags);

                if (map->mpage) {
                        put_page(map->mpage);
                        map->mpage = NULL;
                }
                return err;
        }
        return erofs_map_blocks_flatmode(inode, map, flags);
}

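/*
 * Append one locked page to @bio, starting a new bio whenever the page
 * is not physically contiguous with the last one.  Returns the bio to
 * keep batching into, NULL if no bio is left pending (the page was
 * completed inline or the bio was already submitted), or ERR_PTR() on
 * failure.
 */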
static inline struct bio *erofs_read_raw_page(struct bio *bio,
                                              struct address_space *mapping,
                                              struct page *page,
                                              erofs_off_t *last_block,
                                              unsigned int nblocks,
                                              bool ra)
{
        struct inode *const inode = mapping->host;
        struct super_block *const sb = inode->i_sb;
        erofs_off_t current_block = (erofs_off_t)page->index;
        int err;

        DBG_BUGON(!nblocks);

        if (PageUptodate(page)) {
                err = 0;
                goto has_updated;
        }

        /* note that in the readpage case, bio is also NULL */
        if (bio &&
            /* not continuous */
            *last_block + 1 != current_block) {
submit_bio_retry:
                submit_bio(bio);
                bio = NULL;
        }

        if (!bio) {
                struct erofs_map_blocks map = {
                        .m_la = blknr_to_addr(current_block),
                };
                erofs_blk_t blknr;
                unsigned int blkoff;

                err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
                if (err)
                        goto err_out;

                /* zero out the page lying in a hole */
                if (!(map.m_flags & EROFS_MAP_MAPPED)) {
                        zero_user_segment(page, 0, PAGE_SIZE);
                        SetPageUptodate(page);

                        /* err = 0 is implied here, see erofs_map_blocks */
                        goto has_updated;
                }

                /* for RAW access mode, m_plen must be equal to m_llen */
                DBG_BUGON(map.m_plen != map.m_llen);

                blknr = erofs_blknr(map.m_pa);
                blkoff = erofs_blkoff(map.m_pa);

                /* deal with inline page */
                if (map.m_flags & EROFS_MAP_META) {
                        void *vsrc, *vto;
                        struct page *ipage;

                        DBG_BUGON(map.m_plen > PAGE_SIZE);

                        ipage = erofs_get_meta_page(inode->i_sb, blknr);

                        if (IS_ERR(ipage)) {
                                err = PTR_ERR(ipage);
                                goto err_out;
                        }

                        vsrc = kmap_atomic(ipage);
                        vto = kmap_atomic(page);
                        memcpy(vto, vsrc + blkoff, map.m_plen);
                        memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen);
                        kunmap_atomic(vto);
                        kunmap_atomic(vsrc);
                        flush_dcache_page(page);

                        SetPageUptodate(page);
                        /* TODO: could we unlock the page earlier? */
                        unlock_page(ipage);
                        put_page(ipage);

                        /* err = 0 is implied here, see erofs_map_blocks */
                        goto has_updated;
                }

                /* pa must be block-aligned for raw reading */
                DBG_BUGON(erofs_blkoff(map.m_pa));

                /* max # of continuous pages */
                if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
                        nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);
                if (nblocks > BIO_MAX_PAGES)
                        nblocks = BIO_MAX_PAGES;

                bio = bio_alloc(GFP_NOIO, nblocks);

                bio->bi_end_io = erofs_readendio;
                bio_set_dev(bio, sb->s_bdev);
                bio->bi_iter.bi_sector = (sector_t)blknr <<
                        LOG_SECTORS_PER_BLOCK;
                bio->bi_opf = REQ_OP_READ;
        }

        err = bio_add_page(bio, page, PAGE_SIZE, 0);
        /* out of the extent or bio is full */
        if (err < PAGE_SIZE)
                goto submit_bio_retry;

        *last_block = current_block;

        /* submit in advance in case it is followed by too many gaps */
        if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) {
                /* reset err to 0 so that NULL is returned after submitting */
                err = 0;
                goto submit_bio_out;
        }

        return bio;

err_out:
        /* for sync reading, set page error immediately */
        if (!ra) {
                SetPageError(page);
                ClearPageUptodate(page);
        }
has_updated:
        unlock_page(page);

        /* if updated manually, continuous pages have a gap */
        if (bio)
submit_bio_out:
                submit_bio(bio);
        return err ? ERR_PTR(err) : NULL;
}

/*
 * since we don't have write or truncate flows, no inode
 * locking needs to be held at the moment.
 */
static int erofs_raw_access_readpage(struct file *file, struct page *page)
{
        erofs_off_t last_block;
        struct bio *bio;

        trace_erofs_readpage(page, true);

        bio = erofs_read_raw_page(NULL, page->mapping,
                                  page, &last_block, 1, false);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        DBG_BUGON(bio); /* since we have only one bio -- must be NULL */
        return 0;
}

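/*
 * Readahead entry point: pull each page off the LRU list, add it to the
 * page cache and feed it into erofs_read_raw_page(), which merges
 * contiguous pages into as few bios as possible.
 */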
static int erofs_raw_access_readpages(struct file *filp,
                                      struct address_space *mapping,
                                      struct list_head *pages,
                                      unsigned int nr_pages)
{
        erofs_off_t last_block;
        struct bio *bio = NULL;
        gfp_t gfp = readahead_gfp_mask(mapping);
        struct page *page = list_last_entry(pages, struct page, lru);

        trace_erofs_readpages(mapping->host, page, nr_pages, true);

        for (; nr_pages; --nr_pages) {
                page = list_entry(pages->prev, struct page, lru);

                prefetchw(&page->flags);
                list_del(&page->lru);

                if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) {
                        bio = erofs_read_raw_page(bio, mapping, page,
                                                  &last_block, nr_pages, true);

                        /* all page errors are ignored during readahead */
                        if (IS_ERR(bio)) {
                                pr_err("%s, readahead error at page %lu of nid %llu\n",
                                       __func__, page->index,
                                       EROFS_I(mapping->host)->nid);

                                bio = NULL;
                        }
                }

                /* pages could still be locked */
                put_page(page);
        }
        DBG_BUGON(!list_empty(pages));

        /* the rare case (end in gaps) */
        if (bio)
                submit_bio(bio);
        return 0;
}

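/*
 * Minimal get_block_t used only by erofs_bmap() below: only b_blocknr
 * is filled in, and only for mapped extents.
 */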
static int erofs_get_block(struct inode *inode, sector_t iblock,
                           struct buffer_head *bh, int create)
{
        struct erofs_map_blocks map = {
                .m_la = iblock << 9,
        };
        int err;

        err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
        if (err)
                return err;

        if (map.m_flags & EROFS_MAP_MAPPED)
                bh->b_blocknr = erofs_blknr(map.m_pa);

        return err;
}

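/*
 * For FLAT_INLINE inodes the tail block lives inside the inode metadata
 * rather than the raw block area, so FIBMAP-style queries beyond the
 * last full block have no meaningful block address and return 0.
 */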
static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
        struct inode *inode = mapping->host;

        if (EROFS_I(inode)->datalayout == EROFS_INODE_FLAT_INLINE) {
                erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE;

                if (block >> LOG_SECTORS_PER_BLOCK >= blks)
                        return 0;
        }

        return generic_block_bmap(mapping, block, erofs_get_block);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
        .readpage = erofs_raw_access_readpage,
        .readpages = erofs_raw_access_readpages,
        .bmap = erofs_bmap,
};
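/*
 * Usage note (a sketch, not part of this file): non-compressed inodes
 * are expected to have these aops installed at iget time, e.g. in
 * erofs_fill_inode():
 *
 *	inode->i_mapping->a_ops = &erofs_raw_access_aops;
 */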
