This source file includes the following definitions:
- ext4_dax_read_iter
- ext4_file_read_iter
- ext4_release_file
- ext4_unwritten_wait
- ext4_unaligned_aio
- ext4_overwrite_io
- ext4_write_checks
- ext4_dax_write_iter
- ext4_file_write_iter
- ext4_dax_huge_fault
- ext4_dax_fault
- ext4_file_mmap
- ext4_sample_last_mounted
- ext4_file_open
- ext4_llseek
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *      (jj@sunsite.ms.mff.cuni.cz)
 */
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/mman.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#ifdef CONFIG_FS_DAX
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (!inode_trylock_shared(inode))
                        return -EAGAIN;
        } else {
                inode_lock_shared(inode);
        }
        /*
         * Recheck under inode lock - at this point we are sure it cannot
         * change anymore
         */
        if (!IS_DAX(inode)) {
                inode_unlock_shared(inode);
                /* Fallback to buffered IO in case we cannot support DAX */
                return generic_file_read_iter(iocb, to);
        }
        ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
        inode_unlock_shared(inode);

        file_accessed(iocb->ki_filp);
        return ret;
}
#endif

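/*
 * Top-level read iterator: fail fast on forced shutdown, return 0 for
 * zero-length reads, and route DAX inodes through ext4_dax_read_iter();
 * everything else goes through the generic page-cache read path.
 */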
static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        if (unlikely(ext4_forced_shutdown(EXT4_SB(file_inode(iocb->ki_filp)->i_sb))))
                return -EIO;

        if (!iov_iter_count(to))
                return 0;

#ifdef CONFIG_FS_DAX
        if (IS_DAX(file_inode(iocb->ki_filp)))
                return ext4_dax_read_iter(iocb, to);
#endif
        return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
        if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
                ext4_alloc_da_blocks(inode);
                ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
        }
        /* if we are the last writer on the inode, drop the block reservation */
        if ((filp->f_mode & FMODE_WRITE) &&
                        (atomic_read(&inode->i_writecount) == 1) &&
                        !EXT4_I(inode)->i_reserved_data_blocks) {
                down_write(&EXT4_I(inode)->i_data_sem);
                ext4_discard_preallocations(inode);
                up_write(&EXT4_I(inode)->i_data_sem);
        }
        if (is_dx(inode) && filp->private_data)
                ext4_htree_free_dir_info(filp->private_data);

        return 0;
}

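/*
 * Wait until all in-flight conversions of unwritten extents on this
 * inode have completed, i.e. EXT4_I(inode)->i_unwritten drops to zero.
 */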
static void ext4_unwritten_wait(struct inode *inode)
{
        wait_queue_head_t *wq = ext4_ioend_wq(inode);

        wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * Ext4 fills holes during direct IO with unwritten extents, which are
 * converted to written only once the IO completes. A direct AIO that is
 * not block-aligned and lands inside i_size can therefore race with the
 * partial-block zero-out done for a competing unaligned AIO. Flag such
 * IO so the caller can serialize it. Writes at or beyond the
 * block-aligned end of file do not need this treatment.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
        struct super_block *sb = inode->i_sb;
        int blockmask = sb->s_blocksize - 1;

        if (pos >= ALIGN(i_size_read(inode), sb->s_blocksize))
                return 0;

        if ((pos | iov_iter_alignment(from)) & blockmask)
                return 1;

        return 0;
}

/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
        struct ext4_map_blocks map;
        unsigned int blkbits = inode->i_blkbits;
        int err, blklen;

        if (pos + len > i_size_read(inode))
                return false;

        map.m_lblk = pos >> blkbits;
        map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
        blklen = map.m_len;

        err = ext4_map_blocks(NULL, inode, &map, 0);
        /*
         * 'err==len' means that all of the blocks have been preallocated,
         * regardless of whether they have been initialized or not. To exclude
         * unwritten extents, we need to check m_flags.
         */
        return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}

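/*
 * Common checks for the write paths: run generic_write_checks(), refuse
 * writes to immutable inodes, and for indirect-mapped (non-extent)
 * files clamp the iter so the write does not cross s_bitmap_maxbytes.
 * Returns the number of bytes that may be written, or <= 0 on error.
 */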
static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        ret = generic_write_checks(iocb, from);
        if (ret <= 0)
                return ret;

        if (unlikely(IS_IMMUTABLE(inode)))
                return -EPERM;

        /*
         * If we have encountered a bitmap-format file, the size limit
         * is smaller than s_maxbytes, which is for extent-mapped files.
         */
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
                struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

                if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
                        return -EFBIG;
                iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
        }
        return iov_iter_count(from);
}

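/*
 * DAX write path: takes the inode lock exclusively (or returns -EAGAIN
 * under IOCB_NOWAIT), runs the shared write checks, drops setuid/setgid
 * privileges, updates the timestamps, and then writes through the
 * iomap-based DAX code.
 */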
#ifdef CONFIG_FS_DAX
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (!inode_trylock(inode))
                        return -EAGAIN;
        } else {
                inode_lock(inode);
        }
        ret = ext4_write_checks(iocb, from);
        if (ret <= 0)
                goto out;
        ret = file_remove_privs(iocb->ki_filp);
        if (ret)
                goto out;
        ret = file_update_time(iocb->ki_filp);
        if (ret)
                goto out;

        ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
out:
        inode_unlock(inode);
        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
        return ret;
}
#endif

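/*
 * Main buffered/direct write path for non-DAX inodes. A direct IO that
 * only overwrites already-allocated, initialized blocks may set the
 * overwrite flag (passed to the DIO code via iocb->private) so that it
 * can take its lockless path when dioread_nolock is enabled; unaligned
 * direct AIO is serialized against other unwritten-extent IO.
 */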
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        int o_direct = iocb->ki_flags & IOCB_DIRECT;
        int unaligned_aio = 0;
        int overwrite = 0;
        ssize_t ret;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

#ifdef CONFIG_FS_DAX
        if (IS_DAX(inode))
                return ext4_dax_write_iter(iocb, from);
#endif

        if (!inode_trylock(inode)) {
                if (iocb->ki_flags & IOCB_NOWAIT)
                        return -EAGAIN;
                inode_lock(inode);
        }

        ret = ext4_write_checks(iocb, from);
        if (ret <= 0)
                goto out;

        /*
         * Unaligned direct AIO must be serialized among each other as
         * zeroing of partial blocks of two competing unaligned AIOs can
         * result in data corruption.
         */
        if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
            !is_sync_kiocb(iocb) &&
            ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
                unaligned_aio = 1;
                ext4_unwritten_wait(inode);
        }

        iocb->private = &overwrite;
        /* Check whether we do a DIO overwrite or not */
        if (o_direct && !unaligned_aio) {
                if (ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from))) {
                        if (ext4_should_dioread_nolock(inode))
                                overwrite = 1;
                } else if (iocb->ki_flags & IOCB_NOWAIT) {
                        ret = -EAGAIN;
                        goto out;
                }
        }

        ret = __generic_file_write_iter(iocb, from);
        /*
         * Unaligned direct AIO must be the only IO in flight. Otherwise
         * overlapping aligned IO after unaligned might result in data
         * corruption.
         */
        if (ret == -EIOCBQUEUED && unaligned_aio)
                ext4_unwritten_wait(inode);
        inode_unlock(inode);

        if (ret > 0)
                ret = generic_write_sync(iocb, ret);

        return ret;

out:
        inode_unlock(inode);
        return ret;
}

#ifdef CONFIG_FS_DAX
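/*
 * Page-fault handler for DAX mappings. A write fault starts a journal
 * handle before calling into the DAX/iomap fault code so that any block
 * allocation done during the fault is journalled, and retries on ENOSPC
 * when ext4_should_retry_alloc() says a retry may succeed.
 */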
static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf,
                enum page_entry_size pe_size)
{
        int error = 0;
        vm_fault_t result;
        int retries = 0;
        handle_t *handle = NULL;
        struct inode *inode = file_inode(vmf->vma->vm_file);
        struct super_block *sb = inode->i_sb;

        /*
         * We have to distinguish real writes from writes which will result in a
         * COW page; COW writes should *not* poke the journal (the file will not
         * be changed). Doing so would cause unintended failures when mounted
         * read-only.
         *
         * We check for VM_SHARED rather than vmf->cow_page since the latter is
         * unset for pe_size != PE_SIZE_PTE (i.e. only in do_cow_fault); for
         * other sizes, dax_iomap_fault will handle splitting / fallback so that
         * we eventually come back with a COW page.
         */
        bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
                (vmf->vma->vm_flags & VM_SHARED);
        pfn_t pfn;

        if (write) {
                sb_start_pagefault(sb);
                file_update_time(vmf->vma->vm_file);
                down_read(&EXT4_I(inode)->i_mmap_sem);
retry:
                handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
                                               EXT4_DATA_TRANS_BLOCKS(sb));
                if (IS_ERR(handle)) {
                        up_read(&EXT4_I(inode)->i_mmap_sem);
                        sb_end_pagefault(sb);
                        return VM_FAULT_SIGBUS;
                }
        } else {
                down_read(&EXT4_I(inode)->i_mmap_sem);
        }
        result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &ext4_iomap_ops);
        if (write) {
                ext4_journal_stop(handle);

                if ((result & VM_FAULT_ERROR) && error == -ENOSPC &&
                    ext4_should_retry_alloc(sb, &retries))
                        goto retry;
                /* Handling synchronous page fault? */
                if (result & VM_FAULT_NEEDDSYNC)
                        result = dax_finish_sync_fault(vmf, pe_size, pfn);
                up_read(&EXT4_I(inode)->i_mmap_sem);
                sb_end_pagefault(sb);
        } else {
                up_read(&EXT4_I(inode)->i_mmap_sem);
        }

        return result;
}

static vm_fault_t ext4_dax_fault(struct vm_fault *vmf)
{
        return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
        .fault          = ext4_dax_fault,
        .huge_fault     = ext4_dax_huge_fault,
        .page_mkwrite   = ext4_dax_fault,
        .pfn_mkwrite    = ext4_dax_fault,
};
#else
#define ext4_dax_vm_ops ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
        .fault          = ext4_filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = ext4_page_mkwrite,
};

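/*
 * mmap: DAX inodes use the DAX fault handlers (and get a VM_HUGEPAGE
 * hint); all other inodes use the regular page-cache fault handlers.
 * MAP_SYNC mappings are refused unless the backing dax_device supports
 * them.
 */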
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file->f_mapping->host;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct dax_device *dax_dev = sbi->s_daxdev;

        if (unlikely(ext4_forced_shutdown(sbi)))
                return -EIO;

        /*
         * We don't support synchronous mappings for non-DAX files. At
         * least until someone comes with a sensible use case.
         */
        if (!daxdev_mapping_supported(vma, dax_dev))
                return -EOPNOTSUPP;

        file_accessed(file);
        if (IS_DAX(file_inode(file))) {
                vma->vm_ops = &ext4_dax_vm_ops;
                vma->vm_flags |= VM_HUGEPAGE;
        } else {
                vma->vm_ops = &ext4_file_vm_ops;
        }
        return 0;
}

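/*
 * Once per mount, record the path at which this filesystem is mounted
 * in the superblock's s_last_mounted field, for the benefit of support
 * and debugging tools. Paths longer than the 64-byte buffer are
 * silently skipped.
 */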
static int ext4_sample_last_mounted(struct super_block *sb,
                                    struct vfsmount *mnt)
{
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct path path;
        char buf[64], *cp;
        handle_t *handle;
        int err;

        if (likely(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED))
                return 0;

        if (sb_rdonly(sb) || !sb_start_intwrite_trylock(sb))
                return 0;

        sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
        /*
         * Sample where the filesystem has been mounted and
         * store it in the superblock for sysadmin convenience
         * when trying to figure out what was mounted.
         */
        memset(buf, 0, sizeof(buf));
        path.mnt = mnt;
        path.dentry = mnt->mnt_root;
        cp = d_path(&path, buf, sizeof(buf));
        err = 0;
        if (IS_ERR(cp))
                goto out;

        handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
        err = PTR_ERR(handle);
        if (IS_ERR(handle))
                goto out;
        BUFFER_TRACE(sbi->s_sbh, "get_write_access");
        err = ext4_journal_get_write_access(handle, sbi->s_sbh);
        if (err)
                goto out_journal;
        strlcpy(sbi->s_es->s_last_mounted, cp,
                sizeof(sbi->s_es->s_last_mounted));
        ext4_handle_dirty_super(handle, sb);
out_journal:
        ext4_journal_stop(handle);
out:
        sb_end_intwrite(sb);
        return err;
}

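/*
 * Open a file: refuse on forced shutdown, sample the mount point, run
 * the fscrypt and fsverity open hooks, attach the jbd2 inode for
 * writers, and finish with the quota-aware generic open.
 */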
static int ext4_file_open(struct inode *inode, struct file *filp)
{
        int ret;

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

        ret = ext4_sample_last_mounted(inode->i_sb, filp->f_path.mnt);
        if (ret)
                return ret;

        ret = fscrypt_file_open(inode, filp);
        if (ret)
                return ret;

        ret = fsverity_file_open(inode, filp);
        if (ret)
                return ret;

        /*
         * Set up the jbd2_inode if we are opening the inode for
         * writing and the journal is present
         */
        if (filp->f_mode & FMODE_WRITE) {
                ret = ext4_inode_attach_jinode(inode);
                if (ret < 0)
                        return ret;
        }

        filp->f_mode |= FMODE_NOWAIT;
        return dquot_file_open(inode, filp);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        loff_t maxbytes;

        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
        else
                maxbytes = inode->i_sb->s_maxbytes;

        switch (whence) {
        default:
                return generic_file_llseek_size(file, offset, whence,
                                                maxbytes, i_size_read(inode));
        case SEEK_HOLE:
                inode_lock_shared(inode);
                offset = iomap_seek_hole(inode, offset, &ext4_iomap_ops);
                inode_unlock_shared(inode);
                break;
        case SEEK_DATA:
                inode_lock_shared(inode);
                offset = iomap_seek_data(inode, offset, &ext4_iomap_ops);
                inode_unlock_shared(inode);
                break;
        }

        if (offset < 0)
                return offset;
        return vfs_setpos(file, offset, maxbytes);
}

const struct file_operations ext4_file_operations = {
        .llseek         = ext4_llseek,
        .read_iter      = ext4_file_read_iter,
        .write_iter     = ext4_file_write_iter,
        .unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ext4_compat_ioctl,
#endif
        .mmap           = ext4_file_mmap,
        .mmap_supported_flags = MAP_SYNC,
        .open           = ext4_file_open,
        .release        = ext4_release_file,
        .fsync          = ext4_sync_file,
        .get_unmapped_area = thp_get_unmapped_area,
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .fallocate      = ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
        .setattr        = ext4_setattr,
        .getattr        = ext4_file_getattr,
        .listxattr      = ext4_listxattr,
        .get_acl        = ext4_get_acl,
        .set_acl        = ext4_set_acl,
        .fiemap         = ext4_fiemap,
};
 541