root/include/linux/iomap.h


DEFINITIONS

This source file includes the following definitions.
  1. iomap_sector
  2. to_iomap_page

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_IOMAP_H
#define LINUX_IOMAP_H 1

#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/mm_types.h>
#include <linux/blkdev.h>

struct address_space;
struct fiemap_extent_info;
struct inode;
struct iov_iter;
struct kiocb;
struct page;
struct vm_area_struct;
struct vm_fault;

/*
 * Types of block ranges for iomap mappings:
 */
#define IOMAP_HOLE      0x01    /* no blocks allocated, need allocation */
#define IOMAP_DELALLOC  0x02    /* delayed allocation blocks */
#define IOMAP_MAPPED    0x03    /* blocks allocated at @addr */
#define IOMAP_UNWRITTEN 0x04    /* blocks allocated at @addr in unwritten state */
#define IOMAP_INLINE    0x05    /* data inline in the inode */

/*
 * Flags for all iomap mappings:
 *
 * IOMAP_F_DIRTY indicates the inode has uncommitted metadata needed to access
 * written data and requires fdatasync to commit them to persistent storage.
 */
#define IOMAP_F_NEW             0x01    /* blocks have been newly allocated */
#define IOMAP_F_DIRTY           0x02    /* uncommitted metadata */
#define IOMAP_F_BUFFER_HEAD     0x04    /* file system requires buffer heads */
#define IOMAP_F_SIZE_CHANGED    0x08    /* file size has changed */

/*
 * Flags that only need to be reported for IOMAP_REPORT requests:
 */
#define IOMAP_F_MERGED          0x10    /* contains multiple blocks/extents */
#define IOMAP_F_SHARED          0x20    /* block shared with another file */

/*
 * Flags from 0x1000 up are for file system specific usage:
 */
#define IOMAP_F_PRIVATE         0x1000


/*
 * Magic value for addr:
 */
#define IOMAP_NULL_ADDR -1ULL   /* addr is not valid */

struct iomap_page_ops;

struct iomap {
        u64                     addr; /* disk offset of mapping, bytes */
        loff_t                  offset; /* file offset of mapping, bytes */
        u64                     length; /* length of mapping, bytes */
        u16                     type;   /* type of mapping */
        u16                     flags;  /* flags for mapping */
        struct block_device     *bdev;  /* block device for I/O */
        struct dax_device       *dax_dev; /* dax_dev for dax operations */
        void                    *inline_data;
        void                    *private; /* filesystem private */
        const struct iomap_page_ops *page_ops;
};

static inline sector_t
iomap_sector(struct iomap *iomap, loff_t pos)
{
        return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
}
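
/*
 * Worked example (not part of this header): for a mapping with addr = 1 MiB
 * (1048576) and offset = 4 KiB (4096), a file position of 8 KiB (8192)
 * resolves to disk byte 1048576 + 8192 - 4096 = 1052672, i.e. 512-byte
 * sector 2056, since SECTOR_SHIFT is 9.
 */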

/*
 * When a filesystem sets page_ops in an iomap mapping it returns, page_prepare
 * and page_done will be called for each page written to.  This only applies to
 * buffered writes as unbuffered writes will not typically have pages
 * associated with them.
 *
 * When page_prepare succeeds, page_done will always be called to do any
 * cleanup work necessary.  In that page_done call, @page will be NULL if the
 * associated page could not be obtained.
 */
struct iomap_page_ops {
        int (*page_prepare)(struct inode *inode, loff_t pos, unsigned len,
                        struct iomap *iomap);
        void (*page_done)(struct inode *inode, loff_t pos, unsigned copied,
                        struct page *page, struct iomap *iomap);
};
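
/*
 * Example (not part of this header): a minimal sketch of how a filesystem
 * might use iomap_page_ops.  myfs_reserve_write_metadata() and
 * myfs_release_write_metadata() are hypothetical helpers; the point is the
 * pairing described above: once ->page_prepare succeeds, ->page_done is
 * always called, possibly with a NULL @page.
 */
static int myfs_page_prepare(struct inode *inode, loff_t pos, unsigned len,
                struct iomap *iomap)
{
        /* Reserve whatever per-write metadata this range will need. */
        return myfs_reserve_write_metadata(inode, pos, len);
}

static void myfs_page_done(struct inode *inode, loff_t pos, unsigned copied,
                struct page *page, struct iomap *iomap)
{
        /* @page may be NULL if the page cache page could not be obtained. */
        myfs_release_write_metadata(inode, pos, copied);
}

static const struct iomap_page_ops myfs_page_ops = {
        .page_prepare   = myfs_page_prepare,
        .page_done      = myfs_page_done,
};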

/*
 * Flags for iomap_begin / iomap_end.  No flag implies a read.
 */
#define IOMAP_WRITE             (1 << 0) /* writing, must allocate blocks */
#define IOMAP_ZERO              (1 << 1) /* zeroing operation, may skip holes */
#define IOMAP_REPORT            (1 << 2) /* report extent status, e.g. FIEMAP */
#define IOMAP_FAULT             (1 << 3) /* mapping for page fault */
#define IOMAP_DIRECT            (1 << 4) /* direct I/O */
#define IOMAP_NOWAIT            (1 << 5) /* do not block */

struct iomap_ops {
        /*
         * Return the existing mapping at pos, or reserve space starting at
         * pos for up to length, as long as we can do it as a single mapping.
         * The actual length is returned in iomap->length.
         */
        int (*iomap_begin)(struct inode *inode, loff_t pos, loff_t length,
                        unsigned flags, struct iomap *iomap);

        /*
         * Commit and/or unreserve space previously allocated using
         * iomap_begin.  Written indicates the length of the successful write
         * operation which needs to be committed, while the rest needs to be
         * unreserved.  Written might be zero if no data was written.
         */
        int (*iomap_end)(struct inode *inode, loff_t pos, loff_t length,
                        ssize_t written, unsigned flags, struct iomap *iomap);
};
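
/*
 * Example (not part of this header): a hypothetical filesystem's iomap_ops.
 * myfs_block_lookup() is made up and is assumed to return the on-disk byte
 * address of the extent covering @pos, or false for a hole; the sketch also
 * assumes the whole requested range is covered by one extent.  Only the
 * iomap fields filled in below are consumed by the iomap core.
 */
static int myfs_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
                unsigned flags, struct iomap *iomap)
{
        u64 disk_addr;

        iomap->offset = pos;
        iomap->length = length;
        iomap->bdev = inode->i_sb->s_bdev;

        if (myfs_block_lookup(inode, pos, length, &disk_addr)) {
                iomap->type = IOMAP_MAPPED;
                iomap->addr = disk_addr;
        } else {
                iomap->type = IOMAP_HOLE;
                iomap->addr = IOMAP_NULL_ADDR;
        }
        return 0;
}

static int myfs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
                ssize_t written, unsigned flags, struct iomap *iomap)
{
        /* Nothing was reserved in this sketch, so there is nothing to undo. */
        return 0;
}

static const struct iomap_ops myfs_iomap_ops = {
        .iomap_begin    = myfs_iomap_begin,
        .iomap_end      = myfs_iomap_end,
};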

/*
 * Main iomap iterator function.
 */
typedef loff_t (*iomap_actor_t)(struct inode *inode, loff_t pos, loff_t len,
                void *data, struct iomap *iomap);

loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length,
                unsigned flags, const struct iomap_ops *ops, void *data,
                iomap_actor_t actor);

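/*
 * Example (not part of this header): the shape of an iomap actor.
 * iomap_apply() maps one range via ->iomap_begin, hands it to the actor,
 * then calls ->iomap_end; the actor returns the number of bytes it
 * processed, or a negative errno, and callers loop until the whole range is
 * covered.  This hypothetical actor just counts how many bytes of the range
 * are backed by real or unwritten blocks.
 */
static loff_t myfs_count_mapped_actor(struct inode *inode, loff_t pos,
                loff_t length, void *data, struct iomap *iomap)
{
        loff_t *mapped_bytes = data;

        if (iomap->type == IOMAP_MAPPED || iomap->type == IOMAP_UNWRITTEN)
                *mapped_bytes += length;
        return length;          /* the whole mapping was processed */
}
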
/*
 * Structure allocated for each page when block size < PAGE_SIZE to track
 * sub-page uptodate status and I/O completions.
 */
struct iomap_page {
        atomic_t                read_count;
        atomic_t                write_count;
        DECLARE_BITMAP(uptodate, PAGE_SIZE / 512);
};

static inline struct iomap_page *to_iomap_page(struct page *page)
{
        if (page_has_private(page))
                return (struct iomap_page *)page_private(page);
        return NULL;
}

ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
                const struct iomap_ops *ops);
int iomap_readpage(struct page *page, const struct iomap_ops *ops);
int iomap_readpages(struct address_space *mapping, struct list_head *pages,
                unsigned nr_pages, const struct iomap_ops *ops);
int iomap_set_page_dirty(struct page *page);
int iomap_is_partially_uptodate(struct page *page, unsigned long from,
                unsigned long count);
int iomap_releasepage(struct page *page, gfp_t gfp_mask);
void iomap_invalidatepage(struct page *page, unsigned int offset,
                unsigned int len);
#ifdef CONFIG_MIGRATION
int iomap_migrate_page(struct address_space *mapping, struct page *newpage,
                struct page *page, enum migrate_mode mode);
#else
#define iomap_migrate_page NULL
#endif
int iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
                const struct iomap_ops *ops);
int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
                bool *did_zero, const struct iomap_ops *ops);
int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
                const struct iomap_ops *ops);
vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf,
                        const struct iomap_ops *ops);
int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                loff_t start, loff_t len, const struct iomap_ops *ops);
loff_t iomap_seek_hole(struct inode *inode, loff_t offset,
                const struct iomap_ops *ops);
loff_t iomap_seek_data(struct inode *inode, loff_t offset,
                const struct iomap_ops *ops);
sector_t iomap_bmap(struct address_space *mapping, sector_t bno,
                const struct iomap_ops *ops);
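
/*
 * Example (not part of this header): a hypothetical ->write_iter that routes
 * buffered writes through iomap_file_buffered_write(), using the
 * myfs_iomap_ops sketched above.  Real filesystems also handle O_DIRECT,
 * non-blocking locking and richer error paths here; this only shows where
 * the iomap call sits.
 */
static ssize_t myfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        inode_lock(inode);
        ret = generic_write_checks(iocb, from);
        if (ret > 0)
                ret = iomap_file_buffered_write(iocb, from, &myfs_iomap_ops);
        inode_unlock(inode);

        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
        return ret;
}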

/*
 * Flags for direct I/O ->end_io:
 */
#define IOMAP_DIO_UNWRITTEN     (1 << 0)        /* covers unwritten extent(s) */
#define IOMAP_DIO_COW           (1 << 1)        /* covers COW extent(s) */

struct iomap_dio_ops {
        int (*end_io)(struct kiocb *iocb, ssize_t size, int error,
                      unsigned flags);
};
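
/*
 * Example (not part of this header): a hypothetical direct I/O completion
 * handler.  myfs_convert_unwritten() is made up; the point is that ->end_io
 * runs after the I/O completes and sees the IOMAP_DIO_* flags describing
 * what the written range covered, so the filesystem can convert unwritten
 * extents before the write is reported as done.  A ->read_iter/->write_iter
 * would pass &myfs_dio_ops, together with the filesystem's iomap_ops, to
 * iomap_dio_rw().
 */
static int myfs_dio_end_io(struct kiocb *iocb, ssize_t size, int error,
                unsigned flags)
{
        struct inode *inode = file_inode(iocb->ki_filp);

        if (error)
                return error;
        if (size > 0 && (flags & IOMAP_DIO_UNWRITTEN))
                return myfs_convert_unwritten(inode, iocb->ki_pos, size);
        return 0;
}

static const struct iomap_dio_ops myfs_dio_ops = {
        .end_io         = myfs_dio_end_io,
};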

ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                const struct iomap_ops *ops, const struct iomap_dio_ops *dops);
int iomap_dio_iopoll(struct kiocb *kiocb, bool spin);

#ifdef CONFIG_SWAP
struct file;
struct swap_info_struct;

int iomap_swapfile_activate(struct swap_info_struct *sis,
                struct file *swap_file, sector_t *pagespan,
                const struct iomap_ops *ops);
#else
# define iomap_swapfile_activate(sis, swapfile, pagespan, ops)  (-EIO)
#endif /* CONFIG_SWAP */

#endif /* LINUX_IOMAP_H */
