include/linux/hmm.h

DEFINITIONS

This source file includes the following definitions.
  1. hmm_range_wait_until_valid
  2. hmm_range_valid
  3. hmm_device_entry_to_page
  4. hmm_device_entry_to_pfn
  5. hmm_device_entry_from_page
  6. hmm_device_entry_from_pfn

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Heterogeneous Memory Management (HMM)
 *
 * See Documentation/vm/hmm.rst for an overview of what HMM is and what it is
 * for. Here we focus on the HMM API, with some explanation of the underlying
 * implementation.
 *
 * Short description: HMM provides a set of helpers to share a virtual address
 * space between CPU and a device, so that the device can access any valid
 * address of the process (while still obeying memory protection). HMM also
 * provides helpers to migrate process memory to device memory, and back. Each
 * set of functionality (address space mirroring, and migration to and from
 * device memory) can be used independently of the other.
 *
 *
 * HMM address space mirroring API:
 *
 * Use HMM address space mirroring if you want to mirror a range of the CPU
 * page tables of a process into a device page table. Here, "mirror" means
 * "keep synchronized". Prerequisites: the device must provide the ability to
 * write-protect its page tables (at PAGE_SIZE granularity), and must be able
 * to recover from the resulting potential page faults.
 *
 * HMM guarantees that at any point in time, a given virtual address points to
 * either the same memory in both CPU and device page tables (that is: CPU and
 * device page tables each point to the same pages), or that one page table
 * (CPU or device) points to no entry, while the other still points to the old
 * page for the address. The latter case happens when the CPU page table update
 * happens first, and then the update is mirrored over to the device page
 * table. This does not cause any issue, because the CPU page table cannot
 * start pointing to a new page until the device page table is invalidated.
 *
 * HMM uses mmu_notifiers to monitor the CPU page tables, and forwards any
 * updates to each device driver that has registered a mirror. It also provides
 * some API calls to help with taking a snapshot of the CPU page table, and to
 * synchronize with any updates that might happen concurrently.
 *
 *
 * HMM migration to and from device memory:
 *
 * HMM provides a set of helpers to hotplug device memory as ZONE_DEVICE, with
 * a new MEMORY_DEVICE_PRIVATE type. This provides a struct page for each page
 * of the device memory, and allows the device driver to manage its memory
 * using those struct pages. Having struct pages for device memory makes
 * migration easier. Because that memory is not addressable by the CPU, it must
 * never be pinned to the device; in other words, any CPU page fault can always
 * cause the device memory to be migrated (copied/moved) back to regular
 * memory.
 *
 * A new migrate helper (migrate_vma()) has been added (see mm/migrate.c) that
 * allows use of a device DMA engine to perform the copy operation between
 * regular system memory and device memory.
 */
#ifndef LINUX_HMM_H
#define LINUX_HMM_H

#include <linux/kconfig.h>
#include <asm/pgtable.h>

#ifdef CONFIG_HMM_MIRROR

#include <linux/device.h>
#include <linux/migrate.h>
#include <linux/memremap.h>
#include <linux/completion.h>
#include <linux/mmu_notifier.h>


/*
 * struct hmm - HMM per mm struct
 *
 * @mmu_notifier: mmu notifier to track updates to the CPU page table
 * @ranges_lock: lock protecting the ranges list
 * @ranges: list of ranges being snapshotted
 * @mirrors: list of mirrors for this mm
 * @mirrors_sem: read/write semaphore protecting the mirrors list
 * @wq: wait queue for users waiting on a range invalidation
 * @notifiers: count of active mmu notifiers
 */
struct hmm {
        struct mmu_notifier     mmu_notifier;
        spinlock_t              ranges_lock;
        struct list_head        ranges;
        struct list_head        mirrors;
        struct rw_semaphore     mirrors_sem;
        wait_queue_head_t       wq;
        long                    notifiers;
};

/*
 * hmm_pfn_flag_e - HMM flag enums
 *
 * Flags:
 * HMM_PFN_VALID: pfn is valid. It has, at least, read permission.
 * HMM_PFN_WRITE: CPU page table has write permission set
 * HMM_PFN_DEVICE_PRIVATE: private device memory (ZONE_DEVICE)
 *
 * The driver provides a flags array for mapping page protections to device
 * PTE bits. If the driver valid bit for an entry is bit 3,
 * i.e., (entry & (1 << 3)), then the driver must provide
 * an array in hmm_range.flags with hmm_range.flags[HMM_PFN_VALID] == 1 << 3.
 * The same logic applies to all flags. This is the same idea as vm_page_prot
 * in the vma, except that this is per device driver rather than per
 * architecture.
 */
enum hmm_pfn_flag_e {
        HMM_PFN_VALID = 0,
        HMM_PFN_WRITE,
        HMM_PFN_DEVICE_PRIVATE,
        HMM_PFN_FLAG_MAX
};

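/*
 * For example, a driver whose device PTE format uses bit 3 for valid, bit 2
 * for write and bit 1 for device-private memory could describe that layout
 * to HMM with the following array (a sketch; the bit assignments are
 * illustrative, not taken from any real device):
 *
 *      static const uint64_t example_hmm_flags[HMM_PFN_FLAG_MAX] = {
 *              [HMM_PFN_VALID]          = 1ULL << 3,
 *              [HMM_PFN_WRITE]          = 1ULL << 2,
 *              [HMM_PFN_DEVICE_PRIVATE] = 1ULL << 1,
 *      };
 */
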
/*
 * hmm_pfn_value_e - HMM pfn special value
 *
 * Flags:
 * HMM_PFN_ERROR: corresponding CPU page table entry points to poisoned memory
 * HMM_PFN_NONE: corresponding CPU page table entry is pte_none()
 * HMM_PFN_SPECIAL: corresponding CPU page table entry is special; i.e., the
 *      result of vmf_insert_pfn() or vm_insert_page(). Therefore, it should
 *      not be mirrored by a device, because the entry will never have
 *      HMM_PFN_VALID set and the pfn value is undefined.
 *
 * The driver provides values for the none entry, error entry, and special
 * entry. The driver can alias (i.e., use the same value for) error and
 * special, but it should not alias none with error or special.
 *
 * The HMM pfn value returned by hmm_range_fault() will be:
 * hmm_range.values[HMM_PFN_ERROR] if the CPU page table entry is poisoned,
 * hmm_range.values[HMM_PFN_NONE] if there is no CPU page table entry,
 * hmm_range.values[HMM_PFN_SPECIAL] if the CPU page table entry is special.
 */
enum hmm_pfn_value_e {
        HMM_PFN_ERROR,
        HMM_PFN_NONE,
        HMM_PFN_SPECIAL,
        HMM_PFN_VALUE_MAX
};

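/*
 * Continuing the sketch above, the driver could pick three distinct encodings
 * that can never collide with a valid entry, because none of them has the
 * example valid bit (bit 3) set (again illustrative values, not from a real
 * driver):
 *
 *      static const uint64_t example_hmm_values[HMM_PFN_VALUE_MAX] = {
 *              [HMM_PFN_ERROR]   = 0x1,
 *              [HMM_PFN_NONE]    = 0x0,
 *              [HMM_PFN_SPECIAL] = 0x2,
 *      };
 */
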
/*
 * struct hmm_range - track invalidation lock on virtual address range
 *
 * @hmm: the core HMM structure this range is active against
 * @list: all ranges are kept on a list (hmm->ranges)
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @pfns: array of pfns (big enough for the range)
 * @flags: pfn flags to match device driver page table
 * @values: pfn value for some special case (none, special, error, ...)
 * @default_flags: default flags for the range (write, read, ... see hmm doc)
 * @pfn_flags_mask: allows masking pfn flags so that only default_flags matter
 * @pfn_shift: pfn shift value (should be <= PAGE_SHIFT)
 * @valid: pfns array did not change since it was filled by an HMM function
 */
struct hmm_range {
        struct hmm              *hmm;
        struct list_head        list;
        unsigned long           start;
        unsigned long           end;
        uint64_t                *pfns;
        const uint64_t          *flags;
        const uint64_t          *values;
        uint64_t                default_flags;
        uint64_t                pfn_flags_mask;
        uint8_t                 pfn_shift;
        bool                    valid;
};

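/*
 * Putting the two example arrays together, a driver would initialize a range
 * roughly like this before handing it to the range API (a sketch under the
 * illustrative bit layout above; a pfn_shift of 4 leaves room for the three
 * flag bits):
 *
 *      struct hmm_range range = {
 *              .start          = addr,
 *              .end            = addr + (npages << PAGE_SHIFT),
 *              .pfns           = pfns,        // uint64_t array, npages long
 *              .flags          = example_hmm_flags,
 *              .values         = example_hmm_values,
 *              .pfn_shift      = 4,
 *              // Ask HMM to fault every page with at least read permission:
 *              .default_flags  = example_hmm_flags[HMM_PFN_VALID],
 *              .pfn_flags_mask = 0,
 *      };
 */
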
/*
 * hmm_range_wait_until_valid() - wait for range to be valid
 * @range: range affected by invalidation to wait on
 * @timeout: timeout for the wait, in ms (i.e., abort the wait after that
 *      period of time)
 * Return: true if the range is valid, false otherwise.
 */
static inline bool hmm_range_wait_until_valid(struct hmm_range *range,
                                              unsigned long timeout)
{
        return wait_event_timeout(range->hmm->wq, range->valid,
                                  msecs_to_jiffies(timeout)) != 0;
}

/*
 * hmm_range_valid() - test if a range is valid or not
 * @range: range
 * Return: true if the range is valid, false otherwise.
 */
static inline bool hmm_range_valid(struct hmm_range *range)
{
        return range->valid;
}

/*
 * hmm_device_entry_to_page() - return struct page pointed to by a device entry
 * @range: range used to decode the device entry value
 * @entry: device entry value to get the corresponding struct page from
 * Return: struct page pointer if the entry is valid, NULL otherwise
 *
 * If the device entry is valid (i.e., the valid flag is set) then return the
 * struct page matching the entry value. Otherwise return NULL.
 */
static inline struct page *hmm_device_entry_to_page(const struct hmm_range *range,
                                                    uint64_t entry)
{
        if (entry == range->values[HMM_PFN_NONE])
                return NULL;
        if (entry == range->values[HMM_PFN_ERROR])
                return NULL;
        if (entry == range->values[HMM_PFN_SPECIAL])
                return NULL;
        if (!(entry & range->flags[HMM_PFN_VALID]))
                return NULL;
        return pfn_to_page(entry >> range->pfn_shift);
}

/*
 * hmm_device_entry_to_pfn() - return the pfn value stored in a device entry
 * @range: range used to decode the device entry value
 * @entry: device entry to extract the pfn from
 * Return: pfn value if the device entry is valid, -1UL otherwise
 */
static inline unsigned long
hmm_device_entry_to_pfn(const struct hmm_range *range, uint64_t entry)
{
        if (entry == range->values[HMM_PFN_NONE])
                return -1UL;
        if (entry == range->values[HMM_PFN_ERROR])
                return -1UL;
        if (entry == range->values[HMM_PFN_SPECIAL])
                return -1UL;
        if (!(entry & range->flags[HMM_PFN_VALID]))
                return -1UL;
        return (entry >> range->pfn_shift);
}

/*
 * hmm_device_entry_from_page() - create a valid device entry for a page
 * @range: range used to encode the HMM pfn value
 * @page: page for which to create the device entry
 * Return: valid device entry for the page
 */
static inline uint64_t hmm_device_entry_from_page(const struct hmm_range *range,
                                                  struct page *page)
{
        return (page_to_pfn(page) << range->pfn_shift) |
                range->flags[HMM_PFN_VALID];
}

/*
 * hmm_device_entry_from_pfn() - create a valid device entry value from a pfn
 * @range: range used to encode the HMM pfn value
 * @pfn: pfn value for which to create the device entry
 * Return: valid device entry for the pfn
 */
static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range,
                                                 unsigned long pfn)
{
        return (pfn << range->pfn_shift) |
                range->flags[HMM_PFN_VALID];
}

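/*
 * A quick illustration of how these helpers compose (a sketch; "range" is
 * assumed to have been filled by hmm_range_fault() with the example layout
 * above). Encoding a page and decoding it again gives back the same pfn for
 * valid entries:
 *
 *      unsigned long i, npages = (range->end - range->start) >> PAGE_SHIFT;
 *
 *      for (i = 0; i < npages; i++) {
 *              struct page *page;
 *
 *              page = hmm_device_entry_to_page(range, range->pfns[i]);
 *              if (!page)
 *                      continue;       // none, error, or special entry
 *              // hmm_device_entry_from_page(range, page) == range->pfns[i],
 *              // modulo extra flag bits such as HMM_PFN_WRITE.
 *              ...
 *      }
 */
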
/*
 * Mirroring: how to synchronize device page table with CPU page table.
 *
 * A device driver that is participating in HMM mirroring must always
 * synchronize with CPU page table updates. For this, device drivers can either
 * directly use mmu_notifier APIs or they can use the hmm_mirror API. Device
 * drivers can decide to register one mirror per device per process, or just
 * one mirror per process for a group of devices. The pattern is:
 *
 *      int device_bind_address_space(..., struct mm_struct *mm, ...)
 *      {
 *          struct device_address_space *das;
 *
 *          // Device driver specific initialization, and allocation of das
 *          // which contains an hmm_mirror struct as one of its fields.
 *          ...
 *
 *          das->mirror.ops = &device_mirror_ops;
 *          ret = hmm_mirror_register(&das->mirror, mm);
 *          if (ret) {
 *              // Cleanup on error
 *              return ret;
 *          }
 *
 *          // Other device driver specific initialization
 *          ...
 *      }
 *
 * Once an hmm_mirror is registered for an address space, the device driver
 * will get callbacks through the sync_cpu_device_pagetables() operation (see
 * the hmm_mirror_ops struct).
 *
 * The device driver must not free the struct containing the hmm_mirror struct
 * before calling hmm_mirror_unregister(). The expected usage is to do that
 * when the device driver is unbinding from an address space.
 *
 *      void device_unbind_address_space(struct device_address_space *das)
 *      {
 *          // Device driver specific cleanup
 *          ...
 *
 *          hmm_mirror_unregister(&das->mirror);
 *
 *          // Other device driver specific cleanup, and now das can be freed
 *          ...
 *      }
 */

struct hmm_mirror;

/*
 * struct hmm_mirror_ops - HMM mirror device operations callbacks
 *
 * @release: callback called when the mm_struct is being released
 * @sync_cpu_device_pagetables: callback to update a range on a device
 */
struct hmm_mirror_ops {
        /* release() - release hmm_mirror
         *
         * @mirror: pointer to struct hmm_mirror
         *
         * This is called when the mm_struct is being released.  The callback
         * must ensure that all access to any pages obtained from this mirror
         * is halted before the callback returns. All future access should
         * fault.
         */
        void (*release)(struct hmm_mirror *mirror);

        /* sync_cpu_device_pagetables() - synchronize page tables
         *
         * @mirror: pointer to struct hmm_mirror
         * @update: update information (see struct mmu_notifier_range)
         * Return: -EAGAIN if mmu_notifier_range_blockable(update) is false
         * and the callback needs to block, 0 otherwise.
         *
         * This callback ultimately originates from mmu_notifiers when the CPU
         * page table is updated. The device driver must update its page table
         * in response to this callback. The update argument tells what action
         * to perform.
         *
         * The device driver must not return from this callback until the
         * device page tables are completely updated (TLBs flushed, etc); this
         * is a synchronous call.
         */
        int (*sync_cpu_device_pagetables)(
                struct hmm_mirror *mirror,
                const struct mmu_notifier_range *update);
};

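/*
 * A minimal sketch of what a driver's callbacks could look like;
 * device_release(), device_invalidate_would_block() and
 * device_invalidate_range() are hypothetical driver helpers, not HMM API:
 *
 *      static int device_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *                      const struct mmu_notifier_range *update)
 *      {
 *              struct device_address_space *das;
 *
 *              das = container_of(mirror, struct device_address_space, mirror);
 *              if (!mmu_notifier_range_blockable(update) &&
 *                  device_invalidate_would_block(das))
 *                      return -EAGAIN;
 *              // Must not return before the device page table entries for
 *              // [update->start, update->end) are gone and TLBs are flushed.
 *              device_invalidate_range(das, update->start, update->end);
 *              return 0;
 *      }
 *
 *      static const struct hmm_mirror_ops device_mirror_ops = {
 *              .release = device_release,
 *              .sync_cpu_device_pagetables = device_sync_cpu_device_pagetables,
 *      };
 */
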
/*
 * struct hmm_mirror - mirror struct for a device driver
 *
 * @hmm: pointer to struct hmm (which is unique per mm_struct)
 * @ops: device driver callback for HMM mirror operations
 * @list: entry in the list of mirrors of a given mm
 *
 * Each address space (mm_struct) being mirrored by a device must register one
 * instance of an hmm_mirror struct with HMM. HMM will track the list of all
 * mirrors for each mm_struct.
 */
struct hmm_mirror {
        struct hmm                      *hmm;
        const struct hmm_mirror_ops     *ops;
        struct list_head                list;
};

int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm);
void hmm_mirror_unregister(struct hmm_mirror *mirror);

/*
 * Please see Documentation/vm/hmm.rst for how to use the range API.
 */
int hmm_range_register(struct hmm_range *range, struct hmm_mirror *mirror);
void hmm_range_unregister(struct hmm_range *range);

/*
 * If the fault cannot be handled without blocking, drop mmap_sem and return
 * -EAGAIN so that the caller can retry.
 */
#define HMM_FAULT_ALLOW_RETRY           (1 << 0)

/* Don't fault in missing PTEs, just snapshot the current state. */
#define HMM_FAULT_SNAPSHOT              (1 << 1)

long hmm_range_fault(struct hmm_range *range, unsigned int flags);

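/*
 * The expected calling pattern, condensed from Documentation/vm/hmm.rst (a
 * sketch; take_driver_page_table_lock() and device_update_page_table() are
 * placeholders for driver-specific code):
 *
 *      hmm_range_register(&range, &mirror);
 *  again:
 *      // Not strictly needed (hmm_range_fault() reports -EBUSY anyway), but
 *      // waiting avoids spinning on a range that is under invalidation.
 *      hmm_range_wait_until_valid(&range, HMM_RANGE_DEFAULT_TIMEOUT);
 *
 *      down_read(&mm->mmap_sem);
 *      ret = hmm_range_fault(&range, 0);
 *      if (ret < 0) {
 *              up_read(&mm->mmap_sem);
 *              if (ret == -EBUSY)
 *                      goto again;     // range was invalidated, retry
 *              goto out;               // real error
 *      }
 *
 *      take_driver_page_table_lock();
 *      if (!hmm_range_valid(&range)) {
 *              // A CPU page table update raced with the snapshot; the pfns
 *              // array can no longer be trusted, so start over.
 *              release_driver_page_table_lock();
 *              up_read(&mm->mmap_sem);
 *              goto again;
 *      }
 *      device_update_page_table(..., &range);
 *      release_driver_page_table_lock();
 *      up_read(&mm->mmap_sem);
 *  out:
 *      hmm_range_unregister(&range);
 */
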
long hmm_range_dma_map(struct hmm_range *range,
                       struct device *device,
                       dma_addr_t *daddrs,
                       unsigned int flags);
long hmm_range_dma_unmap(struct hmm_range *range,
                         struct device *device,
                         dma_addr_t *daddrs,
                         bool dirty);

/*
 * HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
 *
 * When waiting for mmu notifiers we need some kind of timeout, otherwise we
 * could potentially wait forever; 1000ms (i.e., 1s) is already a long time to
 * wait.
 */
#define HMM_RANGE_DEFAULT_TIMEOUT 1000

#endif /* CONFIG_HMM_MIRROR */

#endif /* LINUX_HMM_H */
