drivers/pci/mmap.c


DEFINITIONS

This source file includes the following definitions.
  1. pci_mmap_page_range
  2. pci_mmap_resource_range (generic implementation)
  3. pci_mmap_resource_range (legacy wrapper around pci_mmap_page_range)

// SPDX-License-Identifier: GPL-2.0
/*
 * Generic PCI resource mmap helper
 *
 * Copyright © 2017 Amazon.com, Inc. or its affiliates.
 *
 * Author: David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>

#ifdef ARCH_GENERIC_PCI_MMAP_RESOURCE

/*
 * Modern setup: generic pci_mmap_resource_range(), and implement the legacy
 * pci_mmap_page_range() (if needed) as a wrapper round it.
 */
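
/*
 * Illustrative sketch (not part of this file): an architecture opts in to
 * the generic implementation below by defining ARCH_GENERIC_PCI_MMAP_RESOURCE
 * before this file is compiled, typically from its <asm/pci.h>, roughly:
 *
 *	#define ARCH_GENERIC_PCI_MMAP_RESOURCE	1
 *	#define HAVE_PCI_MMAP			1
 *
 * Whether a given architecture also defines HAVE_PCI_MMAP (to get the legacy
 * pci_mmap_page_range() wrapper below) is an assumption here and varies by
 * architecture and kernel version.
 */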

#ifdef HAVE_PCI_MMAP
int pci_mmap_page_range(struct pci_dev *pdev, int bar,
                        struct vm_area_struct *vma,
                        enum pci_mmap_state mmap_state, int write_combine)
{
        resource_size_t start, end;

        pci_resource_to_user(pdev, bar, &pdev->resource[bar], &start, &end);

        /* Adjust vm_pgoff to be the offset within the resource */
        vma->vm_pgoff -= start >> PAGE_SHIFT;
        return pci_mmap_resource_range(pdev, bar, vma, mmap_state,
                                       write_combine);
}
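
/*
 * Worked example (hypothetical numbers, assuming 4 KiB pages): if
 * pci_resource_to_user() reports a user-visible BAR start of 0xe0000000 and
 * userspace mmap()s the corresponding /proc/bus/pci/ entry at file offset
 * 0xe0001000, the incoming vm_pgoff is 0xe0001; the subtraction above leaves
 * vm_pgoff == 1, i.e. page 1 within the BAR, which is the form that
 * pci_mmap_resource_range() expects.
 */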
#endif

static const struct vm_operations_struct pci_phys_vm_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
        .access = generic_access_phys,
#endif
};

int pci_mmap_resource_range(struct pci_dev *pdev, int bar,
                            struct vm_area_struct *vma,
                            enum pci_mmap_state mmap_state, int write_combine)
{
        unsigned long size;
        int ret;

        size = ((pci_resource_len(pdev, bar) - 1) >> PAGE_SHIFT) + 1;
        if (vma->vm_pgoff + vma_pages(vma) > size)
                return -EINVAL;

        if (write_combine)
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        else
                vma->vm_page_prot = pgprot_device(vma->vm_page_prot);

        if (mmap_state == pci_mmap_io) {
                ret = pci_iobar_pfn(pdev, bar, vma);
                if (ret)
                        return ret;
        } else
                vma->vm_pgoff += (pci_resource_start(pdev, bar) >> PAGE_SHIFT);

        vma->vm_ops = &pci_phys_vm_ops;

        return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                  vma->vm_end - vma->vm_start,
                                  vma->vm_page_prot);
}

#elif defined(HAVE_PCI_MMAP) /* && !ARCH_GENERIC_PCI_MMAP_RESOURCE */

/*
 * Legacy setup: Implement pci_mmap_resource_range() as a wrapper around
 * the architecture's pci_mmap_page_range(), converting to "user visible"
 * addresses as necessary.
 */

int pci_mmap_resource_range(struct pci_dev *pdev, int bar,
                            struct vm_area_struct *vma,
                            enum pci_mmap_state mmap_state, int write_combine)
{
        resource_size_t start, end;

        /*
         * pci_mmap_page_range() expects the same kind of entry as coming
         * from /proc/bus/pci/ which is a "user visible" value. If this is
         * different from the resource itself, arch will do necessary fixup.
         */
        pci_resource_to_user(pdev, bar, &pdev->resource[bar], &start, &end);
        vma->vm_pgoff += start >> PAGE_SHIFT;
        return pci_mmap_page_range(pdev, bar, vma, mmap_state, write_combine);
}
#endif
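
/*
 * Illustrative sketch of a caller (an assumption, not code from this file):
 * the sysfs "resourceN" mmap path in drivers/pci/pci-sysfs.c passes a
 * BAR-relative vm_pgoff straight through, roughly:
 *
 *	static int pci_mmap_resource(struct pci_dev *pdev, int bar,
 *				     struct vm_area_struct *vma,
 *				     int write_combine)
 *	{
 *		if (!pci_mmap_fits(pdev, bar, vma, PCI_MMAP_SYSFS))
 *			return -EINVAL;
 *		return pci_mmap_resource_range(pdev, bar, vma, pci_mmap_mem,
 *					       write_combine);
 *	}
 *
 * whereas the /proc/bus/pci/ path passes "user visible" offsets and therefore
 * goes through pci_mmap_page_range() instead. The surrounding details of the
 * real sysfs caller (signature, extra checks) are simplified here.
 */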
