root/drivers/iommu/intel-pasid.c


DEFINITIONS

This source file includes the following definitions:
  1. intel_pasid_alloc_id
  2. intel_pasid_free_id
  3. intel_pasid_lookup_id
  4. device_attach_pasid_table
  5. device_detach_pasid_table
  6. search_pasid_table
  7. get_alias_pasid_table
  8. intel_pasid_alloc_table
  9. intel_pasid_free_table
  10. intel_pasid_get_table
  11. intel_pasid_get_dev_max_id
  12. intel_pasid_get_entry
  13. pasid_clear_entry
  14. intel_pasid_clear_entry
  15. pasid_set_bits
  16. pasid_set_domain_id
  17. pasid_get_domain_id
  18. pasid_set_slptr
  19. pasid_set_address_width
  20. pasid_set_translation_type
  21. pasid_set_fault_enable
  22. pasid_set_sre
  23. pasid_set_present
  24. pasid_set_page_snoop
  25. pasid_set_flptr
  26. pasid_set_flpm
  27. pasid_cache_invalidation_with_pasid
  28. iotlb_invalidation_with_pasid
  29. devtlb_invalidation_with_pasid
  30. intel_pasid_tear_down_entry
  31. intel_pasid_setup_first_level
  32. intel_pasid_setup_second_level
  33. intel_pasid_setup_pass_through

// SPDX-License-Identifier: GPL-2.0
/**
 * intel-pasid.c - PASID idr, table and entry manipulation
 *
 * Copyright (C) 2018 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#define pr_fmt(fmt)     "DMAR: " fmt

#include <linux/bitops.h>
#include <linux/cpufeature.h>
#include <linux/dmar.h>
#include <linux/intel-iommu.h>
#include <linux/iommu.h>
#include <linux/memory.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/spinlock.h>

#include "intel-pasid.h"

/*
 * Intel IOMMU system-wide PASID name space:
 */
static DEFINE_SPINLOCK(pasid_lock);
u32 intel_pasid_max_id = PASID_MAX;
static DEFINE_IDR(pasid_idr);

int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp)
{
        int ret, min, max;

        min = max_t(int, start, PASID_MIN);
        max = min_t(int, end, intel_pasid_max_id);

        WARN_ON(in_interrupt());
        idr_preload(gfp);
        spin_lock(&pasid_lock);
        ret = idr_alloc(&pasid_idr, ptr, min, max, GFP_ATOMIC);
        spin_unlock(&pasid_lock);
        idr_preload_end();

        return ret;
}

void intel_pasid_free_id(int pasid)
{
        spin_lock(&pasid_lock);
        idr_remove(&pasid_idr, pasid);
        spin_unlock(&pasid_lock);
}

void *intel_pasid_lookup_id(int pasid)
{
        void *p;

        spin_lock(&pasid_lock);
        p = idr_find(&pasid_idr, pasid);
        spin_unlock(&pasid_lock);

        return p;
}
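
/*
 * Illustrative only (not part of the original file): a minimal sketch
 * of how a caller could drive the idr wrappers above. "struct my_state"
 * is a hypothetical placeholder for whatever state the caller wants the
 * PASID to resolve to.
 */
#if 0
        struct my_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
        int pasid;

        pasid = intel_pasid_alloc_id(state, PASID_MIN,
                                     intel_pasid_max_id, GFP_KERNEL);
        if (pasid < 0)
                return pasid;           /* idr_alloc() error code */

        /* The id now resolves back to the caller's state... */
        WARN_ON(intel_pasid_lookup_id(pasid) != state);

        /* ...and must be returned to the name space when done: */
        intel_pasid_free_id(pasid);
#endif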

/*
 * Per-device pasid table management:
 */
static inline void
device_attach_pasid_table(struct device_domain_info *info,
                          struct pasid_table *pasid_table)
{
        info->pasid_table = pasid_table;
        list_add(&info->table, &pasid_table->dev);
}

static inline void
device_detach_pasid_table(struct device_domain_info *info,
                          struct pasid_table *pasid_table)
{
        info->pasid_table = NULL;
        list_del(&info->table);
}

struct pasid_table_opaque {
        struct pasid_table      **pasid_table;
        int                     segment;
        int                     bus;
        int                     devfn;
};

static int search_pasid_table(struct device_domain_info *info, void *opaque)
{
        struct pasid_table_opaque *data = opaque;

        if (info->iommu->segment == data->segment &&
            info->bus == data->bus &&
            info->devfn == data->devfn &&
            info->pasid_table) {
                *data->pasid_table = info->pasid_table;
                return 1;
        }

        return 0;
}

static int get_alias_pasid_table(struct pci_dev *pdev, u16 alias, void *opaque)
{
        struct pasid_table_opaque *data = opaque;

        data->segment = pci_domain_nr(pdev->bus);
        data->bus = PCI_BUS_NUM(alias);
        data->devfn = alias & 0xff;

        return for_each_device_domain(&search_pasid_table, data);
}

/*
 * Allocate a pasid table for @dev. It should be called in a
 * single-thread context.
 */
int intel_pasid_alloc_table(struct device *dev)
{
        struct device_domain_info *info;
        struct pasid_table *pasid_table;
        struct pasid_table_opaque data;
        struct page *pages;
        int max_pasid = 0;
        int ret, order;
        int size;

        might_sleep();
        info = dev->archdata.iommu;
        if (WARN_ON(!info || !dev_is_pci(dev) || info->pasid_table))
                return -EINVAL;

        /* DMA alias device already has a pasid table, use it: */
        data.pasid_table = &pasid_table;
        ret = pci_for_each_dma_alias(to_pci_dev(dev),
                                     &get_alias_pasid_table, &data);
        if (ret)
                goto attach_out;

        pasid_table = kzalloc(sizeof(*pasid_table), GFP_KERNEL);
        if (!pasid_table)
                return -ENOMEM;
        INIT_LIST_HEAD(&pasid_table->dev);

        if (info->pasid_supported)
                max_pasid = min_t(int, pci_max_pasids(to_pci_dev(dev)),
                                  intel_pasid_max_id);

        size = max_pasid >> (PASID_PDE_SHIFT - 3);
        order = size ? get_order(size) : 0;
        pages = alloc_pages_node(info->iommu->node,
                                 GFP_KERNEL | __GFP_ZERO, order);
        if (!pages) {
                kfree(pasid_table);
                return -ENOMEM;
        }

        pasid_table->table = page_address(pages);
        pasid_table->order = order;
        pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3);

attach_out:
        device_attach_pasid_table(info, pasid_table);

        return 0;
}
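
/*
 * Worked example for the sizing above, assuming PASID_PDE_SHIFT == 6
 * (each 8-byte directory entry covers one 4KiB page of 64 PASID
 * entries, 64 bytes each):
 *
 *      max_pasid        = 1 << 20 (a full 20-bit PASID space)
 *      size             = max_pasid >> (6 - 3) = 128KiB of directory
 *      order            = get_order(128KiB) = 5
 *      stored max_pasid = 1 << (5 + PAGE_SHIFT + 3) = 1 << 20
 *
 * The stored max_pasid is recomputed from the allocation order so it
 * reflects what the rounded-up directory can actually cover.
 */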

void intel_pasid_free_table(struct device *dev)
{
        struct device_domain_info *info;
        struct pasid_table *pasid_table;
        struct pasid_dir_entry *dir;
        struct pasid_entry *table;
        int i, max_pde;

        info = dev->archdata.iommu;
        if (!info || !dev_is_pci(dev) || !info->pasid_table)
                return;

        pasid_table = info->pasid_table;
        device_detach_pasid_table(info, pasid_table);

        if (!list_empty(&pasid_table->dev))
                return;

        /* Free scalable mode PASID directory tables: */
        dir = pasid_table->table;
        max_pde = pasid_table->max_pasid >> PASID_PDE_SHIFT;
        for (i = 0; i < max_pde; i++) {
                table = get_pasid_table_from_pde(&dir[i]);
                free_pgtable_page(table);
        }

        free_pages((unsigned long)pasid_table->table, pasid_table->order);
        kfree(pasid_table);
}

struct pasid_table *intel_pasid_get_table(struct device *dev)
{
        struct device_domain_info *info;

        info = dev->archdata.iommu;
        if (!info)
                return NULL;

        return info->pasid_table;
}

int intel_pasid_get_dev_max_id(struct device *dev)
{
        struct device_domain_info *info;

        info = dev->archdata.iommu;
        if (!info || !info->pasid_table)
                return 0;

        return info->pasid_table->max_pasid;
}

struct pasid_entry *intel_pasid_get_entry(struct device *dev, int pasid)
{
        struct device_domain_info *info;
        struct pasid_table *pasid_table;
        struct pasid_dir_entry *dir;
        struct pasid_entry *entries;
        int dir_index, index;

        pasid_table = intel_pasid_get_table(dev);
        if (WARN_ON(!pasid_table || pasid < 0 ||
                    pasid >= intel_pasid_get_dev_max_id(dev)))
                return NULL;

        dir = pasid_table->table;
        info = dev->archdata.iommu;
        dir_index = pasid >> PASID_PDE_SHIFT;
        index = pasid & PASID_PTE_MASK;

        spin_lock(&pasid_lock);
        entries = get_pasid_table_from_pde(&dir[dir_index]);
        if (!entries) {
                entries = alloc_pgtable_page(info->iommu->node);
                if (!entries) {
                        spin_unlock(&pasid_lock);
                        return NULL;
                }

                WRITE_ONCE(dir[dir_index].val,
                           (u64)virt_to_phys(entries) | PASID_PTE_PRESENT);
        }
        spin_unlock(&pasid_lock);

        return &entries[index];
}
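
/*
 * Worked example of the two-level walk above, again assuming
 * PASID_PDE_SHIFT == 6 and PASID_PTE_MASK == 0x3f:
 *
 *      pasid = 0x1234  ->  dir_index = 0x1234 >> 6   = 0x48
 *                          index     = 0x1234 & 0x3f = 0x34
 *
 * Directory slot 0x48 supplies (via its physical address bits) a page
 * of 64 PASID entries, of which entry 0x34 is returned.
 */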

/*
 * Interfaces for PASID table entry manipulation:
 */
static inline void pasid_clear_entry(struct pasid_entry *pe)
{
        WRITE_ONCE(pe->val[0], 0);
        WRITE_ONCE(pe->val[1], 0);
        WRITE_ONCE(pe->val[2], 0);
        WRITE_ONCE(pe->val[3], 0);
        WRITE_ONCE(pe->val[4], 0);
        WRITE_ONCE(pe->val[5], 0);
        WRITE_ONCE(pe->val[6], 0);
        WRITE_ONCE(pe->val[7], 0);
}

static void intel_pasid_clear_entry(struct device *dev, int pasid)
{
        struct pasid_entry *pe;

        pe = intel_pasid_get_entry(dev, pasid);
        if (WARN_ON(!pe))
                return;

        pasid_clear_entry(pe);
}

static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits)
{
        u64 old;

        old = READ_ONCE(*ptr);
        WRITE_ONCE(*ptr, (old & ~mask) | bits);
}
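
/*
 * Example of the read-modify-write above (values illustrative only):
 * with *ptr == 0xffff0000, mask == GENMASK_ULL(15, 0) and bits == 0x2a,
 * the result is (0xffff0000 & ~0xffff) | 0x2a == 0xffff002a; only the
 * masked field changes, the rest of the 64-bit word is preserved.
 */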

/*
 * Set up the DID(Domain Identifier) field (Bit 64~79) of scalable mode
 * PASID entry.
 */
static inline void
pasid_set_domain_id(struct pasid_entry *pe, u64 value)
{
        pasid_set_bits(&pe->val[1], GENMASK_ULL(15, 0), value);
}

/*
 * Get domain ID value of a scalable mode PASID entry.
 */
static inline u16
pasid_get_domain_id(struct pasid_entry *pe)
{
        return (u16)(READ_ONCE(pe->val[1]) & GENMASK_ULL(15, 0));
}

/*
 * Set up the SLPTPTR(Second Level Page Table Pointer) field (Bit 12~63)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_slptr(struct pasid_entry *pe, u64 value)
{
        pasid_set_bits(&pe->val[0], VTD_PAGE_MASK, value);
}

/*
 * Set up the AW(Address Width) field (Bit 2~4) of a scalable mode PASID
 * entry.
 */
static inline void
pasid_set_address_width(struct pasid_entry *pe, u64 value)
{
        pasid_set_bits(&pe->val[0], GENMASK_ULL(4, 2), value << 2);
}

/*
 * Set up the PGTT(PASID Granular Translation Type) field (Bit 6~8)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_translation_type(struct pasid_entry *pe, u64 value)
{
        pasid_set_bits(&pe->val[0], GENMASK_ULL(8, 6), value << 6);
}

/*
 * Enable fault processing by clearing the FPD(Fault Processing
 * Disable) field (Bit 1) of a scalable mode PASID entry.
 */
static inline void pasid_set_fault_enable(struct pasid_entry *pe)
{
        pasid_set_bits(&pe->val[0], 1 << 1, 0);
}

/*
 * Set up the SRE(Supervisor Request Enable) field (Bit 128) of a
 * scalable mode PASID entry.
 */
static inline void pasid_set_sre(struct pasid_entry *pe)
{
        pasid_set_bits(&pe->val[2], 1 << 0, 1);
}

/*
 * Set up the P(Present) field (Bit 0) of a scalable mode PASID
 * entry.
 */
static inline void pasid_set_present(struct pasid_entry *pe)
{
        pasid_set_bits(&pe->val[0], 1 << 0, 1);
}

/*
 * Set up the Page Walk Snoop bit (Bit 87) of a scalable mode PASID
 * entry.
 */
static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
{
        pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
}

/*
 * Set up the First Level Page table Pointer field (Bit 140~191)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_flptr(struct pasid_entry *pe, u64 value)
{
        pasid_set_bits(&pe->val[2], VTD_PAGE_MASK, value);
}

/*
 * Set up the First Level Paging Mode field (Bit 130~131) of a
 * scalable mode PASID entry.
 */
static inline void
pasid_set_flpm(struct pasid_entry *pe, u64 value)
{
        pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2);
}
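
/*
 * Note on the "Bit N" numbers used in the comments above: they index
 * the full 512-bit scalable mode PASID entry, and val[i] holds bits
 * 64*i .. 64*i+63. The Page Walk Snoop bit 87, for instance, lives in
 * val[1] at offset 87 - 64 = 23, matching the 1 << 23 used in
 * pasid_set_page_snoop().
 */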

static void
pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
                                    u16 did, int pasid)
{
        struct qi_desc desc;

        desc.qw0 = QI_PC_DID(did) | QI_PC_PASID_SEL | QI_PC_PASID(pasid);
        desc.qw1 = 0;
        desc.qw2 = 0;
        desc.qw3 = 0;

        qi_submit_sync(&desc, iommu);
}

static void
iotlb_invalidation_with_pasid(struct intel_iommu *iommu, u16 did, u32 pasid)
{
        struct qi_desc desc;

        desc.qw0 = QI_EIOTLB_PASID(pasid) | QI_EIOTLB_DID(did) |
                        QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | QI_EIOTLB_TYPE;
        desc.qw1 = 0;
        desc.qw2 = 0;
        desc.qw3 = 0;

        qi_submit_sync(&desc, iommu);
}

static void
devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
                               struct device *dev, int pasid)
{
        struct device_domain_info *info;
        u16 sid, qdep, pfsid;

        info = dev->archdata.iommu;
        if (!info || !info->ats_enabled)
                return;

        sid = info->bus << 8 | info->devfn;
        qdep = info->ats_qdep;
        pfsid = info->pfsid;

        qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
}
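
/*
 * Example of the source-id encoding above: a device at bus 0x3a,
 * devfn 0x10 (i.e. 3a:02.0) yields sid = 0x3a << 8 | 0x10 = 0x3a10.
 * The (0, 64 - VTD_PAGE_SHIFT) address/mask pair requests a flush of
 * the device's entire address space rather than a single page.
 */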

void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
                                 struct device *dev, int pasid)
{
        struct pasid_entry *pte;
        u16 did;

        pte = intel_pasid_get_entry(dev, pasid);
        if (WARN_ON(!pte))
                return;

        did = pasid_get_domain_id(pte);
        intel_pasid_clear_entry(dev, pasid);

        if (!ecap_coherent(iommu->ecap))
                clflush_cache_range(pte, sizeof(*pte));

        pasid_cache_invalidation_with_pasid(iommu, did, pasid);
        iotlb_invalidation_with_pasid(iommu, did, pasid);

        /* Device IOTLB doesn't need to be flushed in caching mode. */
        if (!cap_caching_mode(iommu->cap))
                devtlb_invalidation_with_pasid(iommu, dev, pasid);
}
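
/*
 * Illustrative only (an assumption about caller structure, not code
 * from this file): an unbind path would typically tear down the entry
 * and then return the PASID to the global name space:
 */
#if 0
        intel_pasid_tear_down_entry(iommu, dev, pasid);
        intel_pasid_free_id(pasid);
#endif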

/*
 * Set up the scalable mode pasid table entry for first-level-only
 * translation type.
 */
int intel_pasid_setup_first_level(struct intel_iommu *iommu,
                                  struct device *dev, pgd_t *pgd,
                                  int pasid, u16 did, int flags)
{
        struct pasid_entry *pte;

        if (!ecap_flts(iommu->ecap)) {
                pr_err("No first level translation support on %s\n",
                       iommu->name);
                return -EINVAL;
        }

        pte = intel_pasid_get_entry(dev, pasid);
        if (WARN_ON(!pte))
                return -EINVAL;

        pasid_clear_entry(pte);

        /* Set up the first level page table pointer: */
        pasid_set_flptr(pte, (u64)__pa(pgd));
        if (flags & PASID_FLAG_SUPERVISOR_MODE) {
                if (!ecap_srs(iommu->ecap)) {
                        pr_err("No supervisor request support on %s\n",
                               iommu->name);
                        return -EINVAL;
                }
                pasid_set_sre(pte);
        }

#ifdef CONFIG_X86
        /* Both CPU and IOMMU paging modes need to match */
        if (cpu_feature_enabled(X86_FEATURE_LA57)) {
                if (cap_5lp_support(iommu->cap)) {
                        pasid_set_flpm(pte, 1);
                } else {
                        pr_err("VT-d has no 5-level paging support for CPU\n");
                        pasid_clear_entry(pte);
                        return -EINVAL;
                }
        }
#endif /* CONFIG_X86 */

        pasid_set_domain_id(pte, did);
        pasid_set_address_width(pte, iommu->agaw);
        pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

        /* Set up Present and PASID Granular Translation Type: */
        pasid_set_translation_type(pte, 1);
        pasid_set_present(pte);

        if (!ecap_coherent(iommu->ecap))
                clflush_cache_range(pte, sizeof(*pte));

        if (cap_caching_mode(iommu->cap)) {
                pasid_cache_invalidation_with_pasid(iommu, did, pasid);
                iotlb_invalidation_with_pasid(iommu, did, pasid);
        } else {
                iommu_flush_write_buffer(iommu);
        }

        return 0;
}
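
/*
 * Illustrative sketch (an assumption, not this file's code): a native
 * SVM bind is expected to hand a process page table root to the helper
 * above roughly like this, where "mm" is the task's mm_struct and a
 * NULL mm means a supervisor (kernel) PASID:
 */
#if 0
        if (mm)
                ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd,
                                                    pasid, did, 0);
        else
                ret = intel_pasid_setup_first_level(iommu, dev, init_mm.pgd,
                                                    pasid, did,
                                                    PASID_FLAG_SUPERVISOR_MODE);
#endif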

/*
 * Set up the scalable mode pasid entry for second-level-only
 * translation type.
 */
int intel_pasid_setup_second_level(struct intel_iommu *iommu,
                                   struct dmar_domain *domain,
                                   struct device *dev, int pasid)
{
        struct pasid_entry *pte;
        struct dma_pte *pgd;
        u64 pgd_val;
        int agaw;
        u16 did;

        /*
         * If hardware advertises no support for second level
         * translation, return directly.
         */
        if (!ecap_slts(iommu->ecap)) {
                pr_err("No second level translation support on %s\n",
                       iommu->name);
                return -EINVAL;
        }

        /*
         * Skip top levels of page tables for an iommu which has a
         * smaller agaw than the domain's default. Unnecessary for
         * PT mode.
         */
        pgd = domain->pgd;
        for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
                pgd = phys_to_virt(dma_pte_addr(pgd));
                if (!dma_pte_present(pgd)) {
                        dev_err(dev, "Invalid domain page table\n");
                        return -EINVAL;
                }
        }

        pgd_val = virt_to_phys(pgd);
        did = domain->iommu_did[iommu->seq_id];

        pte = intel_pasid_get_entry(dev, pasid);
        if (!pte) {
                dev_err(dev, "Failed to get pasid entry of PASID %d\n", pasid);
                return -ENODEV;
        }

        pasid_clear_entry(pte);
        pasid_set_domain_id(pte, did);
        pasid_set_slptr(pte, pgd_val);
        pasid_set_address_width(pte, agaw);
        pasid_set_translation_type(pte, 2);
        pasid_set_fault_enable(pte);
        pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

        /*
         * Since it is a second-level-only translation setup, we should
         * set the SRE bit as well (addresses are expected to be GPAs).
         */
        pasid_set_sre(pte);
        pasid_set_present(pte);

        if (!ecap_coherent(iommu->ecap))
                clflush_cache_range(pte, sizeof(*pte));

        if (cap_caching_mode(iommu->cap)) {
                pasid_cache_invalidation_with_pasid(iommu, did, pasid);
                iotlb_invalidation_with_pasid(iommu, did, pasid);
        } else {
                iommu_flush_write_buffer(iommu);
        }

        return 0;
}
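
/*
 * Worked example for the agaw adjustment above, assuming the driver's
 * usual encoding where agaw N corresponds to an (N + 2)-level table:
 * a domain built with agaw 2 (4-level, 48-bit) on an iommu whose agaw
 * is 1 (3-level, 39-bit) walks down one level, so the SLPTPTR written
 * above points at a hierarchy the hardware can actually traverse.
 */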

/*
 * Set up the scalable mode pasid entry for passthrough translation type.
 */
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
                                   struct dmar_domain *domain,
                                   struct device *dev, int pasid)
{
        u16 did = FLPT_DEFAULT_DID;
        struct pasid_entry *pte;

        pte = intel_pasid_get_entry(dev, pasid);
        if (!pte) {
                dev_err(dev, "Failed to get pasid entry of PASID %d\n", pasid);
                return -ENODEV;
        }

        pasid_clear_entry(pte);
        pasid_set_domain_id(pte, did);
        pasid_set_address_width(pte, iommu->agaw);
        pasid_set_translation_type(pte, 4);
        pasid_set_fault_enable(pte);
        pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

        /*
         * We should set the SRE bit as well since the addresses are
         * expected to be GPAs.
         */
        pasid_set_sre(pte);
        pasid_set_present(pte);

        if (!ecap_coherent(iommu->ecap))
                clflush_cache_range(pte, sizeof(*pte));

        if (cap_caching_mode(iommu->cap)) {
                pasid_cache_invalidation_with_pasid(iommu, did, pasid);
                iotlb_invalidation_with_pasid(iommu, did, pasid);
        } else {
                iommu_flush_write_buffer(iommu);
        }

        return 0;
}
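
/*
 * For reference, the PGTT values programmed by the three setup helpers
 * above follow the VT-d PASID Granular Translation Type encoding:
 *
 *      1 (001b) - first level only   (intel_pasid_setup_first_level)
 *      2 (010b) - second level only  (intel_pasid_setup_second_level)
 *      4 (100b) - pass-through       (intel_pasid_setup_pass_through)
 */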
