Lines matching refs: entry (uses of the identifier entry in drivers/pci/msi.c)
110 struct msi_desc *entry; in arch_setup_msi_irqs() local
122 for_each_pci_msi_entry(entry, dev) { in arch_setup_msi_irqs()
123 ret = arch_setup_msi_irq(dev, entry); in arch_setup_msi_irqs()
140 struct msi_desc *entry; in default_teardown_msi_irqs() local
142 for_each_pci_msi_entry(entry, dev) in default_teardown_msi_irqs()
143 if (entry->irq) in default_teardown_msi_irqs()
144 for (i = 0; i < entry->nvec_used; i++) in default_teardown_msi_irqs()
145 arch_teardown_msi_irq(entry->irq + i); in default_teardown_msi_irqs()
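
The hits at lines 142-145 are the whole teardown walk: a descriptor that was granted multi-message MSI owns nvec_used consecutive Linux IRQ numbers starting at entry->irq, so every one of them must be released, not just the first. A minimal reconstruction of the loop as complete code, filled in from the matched lines:

    void default_teardown_msi_irqs(struct pci_dev *dev)
    {
        struct msi_desc *entry;
        int i;

        /* one descriptor may back several consecutive IRQs */
        for_each_pci_msi_entry(entry, dev)
            if (entry->irq)
                for (i = 0; i < entry->nvec_used; i++)
                    arch_teardown_msi_irq(entry->irq + i);
    }
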
155 struct msi_desc *entry; in default_restore_msi_irq() local
157 entry = NULL; in default_restore_msi_irq()
159 for_each_pci_msi_entry(entry, dev) { in default_restore_msi_irq()
160 if (irq == entry->irq) in default_restore_msi_irq()
164 entry = irq_get_msi_desc(irq); in default_restore_msi_irq()
167 if (entry) in default_restore_msi_irq()
168 __pci_write_msi_msg(entry, &entry->msg); in default_restore_msi_irq()
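
Lines 155-168 assemble into a lookup-then-replay pattern: find the msi_desc that backs the IRQ, then rewrite the device's message registers from the cached entry->msg. A sketch of the full function; the msix_enabled/msi_enabled branch conditions are not in the matched lines and are filled in here as assumptions about the surrounding source:

    static void default_restore_msi_irq(struct pci_dev *dev, int irq)
    {
        struct msi_desc *entry = NULL;

        if (dev->msix_enabled) {
            /* MSI-X: one descriptor per vector, search the list */
            for_each_pci_msi_entry(entry, dev) {
                if (irq == entry->irq)
                    break;
            }
        } else if (dev->msi_enabled) {
            /* MSI: the irq descriptor already points at the msi_desc */
            entry = irq_get_msi_desc(irq);
        }

        if (entry)
            __pci_write_msi_msg(entry, &entry->msg);
    }
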
272 struct msi_desc *entry; in default_restore_msi_irqs() local
274 for_each_pci_msi_entry(entry, dev) in default_restore_msi_irqs()
275 default_restore_msi_irq(dev, entry->irq); in default_restore_msi_irqs()
278 void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) in __pci_read_msi_msg() argument
280 struct pci_dev *dev = msi_desc_to_pci_dev(entry); in __pci_read_msi_msg()
284 if (entry->msi_attrib.is_msix) { in __pci_read_msi_msg()
285 void __iomem *base = entry->mask_base + in __pci_read_msi_msg()
286 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; in __pci_read_msi_msg()
297 if (entry->msi_attrib.is_64) { in __pci_read_msi_msg()
309 void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) in __pci_write_msi_msg() argument
311 struct pci_dev *dev = msi_desc_to_pci_dev(entry); in __pci_write_msi_msg()
315 } else if (entry->msi_attrib.is_msix) { in __pci_write_msi_msg()
317 base = entry->mask_base + in __pci_write_msi_msg()
318 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; in __pci_write_msi_msg()
329 msgctl |= entry->msi_attrib.multiple << 4; in __pci_write_msi_msg()
334 if (entry->msi_attrib.is_64) { in __pci_write_msi_msg()
344 entry->msg = *msg; in __pci_write_msi_msg()
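
The address arithmetic repeated at lines 285-286 and 317-318 locates one slot in the MSI-X vector table. A small sketch of just that computation, using the PCI_MSIX_ENTRY_* offsets this file already relies on (the helper name is hypothetical):

    /* Each MSI-X table entry is 16 bytes: lower address, upper address,
     * message data, vector control.  entry->mask_base maps the start of
     * the table, so slot N begins at mask_base + N * PCI_MSIX_ENTRY_SIZE. */
    static void __iomem *msix_table_slot(struct msi_desc *entry)
    {
        return entry->mask_base +
               entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
    }

For plain MSI (the is_64 checks at lines 297 and 334), the message instead goes through config space, and the 64-bit flag decides whether an upper-address dword sits between the lower address and the data word.
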
349 struct msi_desc *entry = irq_get_msi_desc(irq); in pci_write_msi_msg() local
351 __pci_write_msi_msg(entry, msg); in pci_write_msi_msg()
358 struct msi_desc *entry, *tmp; in free_msi_irqs() local
363 for_each_pci_msi_entry(entry, dev) in free_msi_irqs()
364 if (entry->irq) in free_msi_irqs()
365 for (i = 0; i < entry->nvec_used; i++) in free_msi_irqs()
366 BUG_ON(irq_has_action(entry->irq + i)); in free_msi_irqs()
370 list_for_each_entry_safe(entry, tmp, msi_list, list) { in free_msi_irqs()
371 if (entry->msi_attrib.is_msix) { in free_msi_irqs()
372 if (list_is_last(&entry->list, msi_list)) in free_msi_irqs()
373 iounmap(entry->mask_base); in free_msi_irqs()
376 list_del(&entry->list); in free_msi_irqs()
377 kfree(entry); in free_msi_irqs()
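
Lines 370-377 are the canonical safe-iteration teardown: list_for_each_entry_safe() keeps a lookahead pointer so that list_del() plus kfree() cannot trip over the node just freed, and since all MSI-X descriptors share one ioremap() of the vector table, iounmap() runs only when the last entry on the list is reached. Condensed, assuming msi_list = dev_to_msi_list(&dev->dev) as in the surrounding function:

    struct msi_desc *entry, *tmp;

    list_for_each_entry_safe(entry, tmp, msi_list, list) {
        if (entry->msi_attrib.is_msix) {
            /* one shared table mapping: unmap with the last entry */
            if (list_is_last(&entry->list, msi_list))
                iounmap(entry->mask_base);
        }
        list_del(&entry->list);
        kfree(entry);
    }
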
406 struct msi_desc *entry; in __pci_restore_msi_state() local
411 entry = irq_get_msi_desc(dev->irq); in __pci_restore_msi_state()
418 msi_mask_irq(entry, msi_mask(entry->msi_attrib.multi_cap), in __pci_restore_msi_state()
419 entry->masked); in __pci_restore_msi_state()
421 control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE; in __pci_restore_msi_state()
427 struct msi_desc *entry; in __pci_restore_msix_state() local
439 for_each_pci_msi_entry(entry, dev) in __pci_restore_msix_state()
440 msix_mask_irq(entry, entry->masked); in __pci_restore_msix_state()
455 struct msi_desc *entry; in msi_mode_show() local
463 entry = irq_get_msi_desc(irq); in msi_mode_show()
464 if (entry) in msi_mode_show()
466 entry->msi_attrib.is_msix ? "msix" : "msi"); in msi_mode_show()
478 struct msi_desc *entry; in populate_msi_sysfs() local
485 for_each_pci_msi_entry(entry, pdev) in populate_msi_sysfs()
486 num_msi += entry->nvec_used; in populate_msi_sysfs()
494 for_each_pci_msi_entry(entry, pdev) { in populate_msi_sysfs()
495 for (i = 0; i < entry->nvec_used; i++) { in populate_msi_sysfs()
503 entry->irq + i); in populate_msi_sysfs()
551 struct msi_desc *entry; in msi_setup_entry() local
554 entry = alloc_msi_entry(&dev->dev); in msi_setup_entry()
555 if (!entry) in msi_setup_entry()
560 entry->msi_attrib.is_msix = 0; in msi_setup_entry()
561 entry->msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT); in msi_setup_entry()
562 entry->msi_attrib.entry_nr = 0; in msi_setup_entry()
563 entry->msi_attrib.maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT); in msi_setup_entry()
564 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ in msi_setup_entry()
565 entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1; in msi_setup_entry()
566 entry->msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec)); in msi_setup_entry()
567 entry->nvec_used = nvec; in msi_setup_entry()
570 entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64; in msi_setup_entry()
572 entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_32; in msi_setup_entry()
575 if (entry->msi_attrib.maskbit) in msi_setup_entry()
576 pci_read_config_dword(dev, entry->mask_pos, &entry->masked); in msi_setup_entry()
578 return entry; in msi_setup_entry()
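
Everything msi_setup_entry() stores at lines 560-576 is decoded from the MSI capability's Message Control word. The one nontrivial field is multiple: the granted vector count is recorded as a log2, because the hardware expects it in that form (see line 329, where it is shifted into bits 6:4 of the control word). For example, nvec = 3 rounds up to 4 and is stored as multiple = 2. A sketch of the decoding, assuming control was read from dev->msi_cap + PCI_MSI_FLAGS:

    entry->msi_attrib.is_64     = !!(control & PCI_MSI_FLAGS_64BIT);
    entry->msi_attrib.maskbit   = !!(control & PCI_MSI_FLAGS_MASKBIT);
    /* bits 3:1 of Message Control: log2 of the vectors the device asks for */
    entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1;
    /* log2 of the vectors actually granted, rounded up to a power of two */
    entry->msi_attrib.multiple  = ilog2(__roundup_pow_of_two(nvec));

    /* The mask register's config-space offset depends on whether the
     * function uses the 64-bit capability layout (lines 570/572). */
    entry->mask_pos = dev->msi_cap + (entry->msi_attrib.is_64 ?
                                      PCI_MSI_MASK_64 : PCI_MSI_MASK_32);
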
583 struct msi_desc *entry; in msi_verify_entries() local
585 for_each_pci_msi_entry(entry, dev) { in msi_verify_entries()
586 if (!dev->no_64bit_msi || !entry->msg.address_hi) in msi_verify_entries()
608 struct msi_desc *entry; in msi_capability_init() local
614 entry = msi_setup_entry(dev, nvec); in msi_capability_init()
615 if (!entry) in msi_capability_init()
619 mask = msi_mask(entry->msi_attrib.multi_cap); in msi_capability_init()
620 msi_mask_irq(entry, mask, mask); in msi_capability_init()
622 list_add_tail(&entry->list, dev_to_msi_list(&dev->dev)); in msi_capability_init()
627 msi_mask_irq(entry, mask, ~mask); in msi_capability_init()
634 msi_mask_irq(entry, mask, ~mask); in msi_capability_init()
641 msi_mask_irq(entry, mask, ~mask); in msi_capability_init()
652 dev->irq = entry->irq; in msi_capability_init()
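
The msi_mask()/msi_mask_irq() pairing at lines 619-641 follows one convention: msi_mask(multi_cap) builds a bitmask wide enough to cover every vector the capability can carry, the first msi_mask_irq(entry, mask, mask) call masks them all before the device is switched to MSI, and the ~mask calls on the error paths put the bits back. A reconstruction of the mask helper, from memory of this file rather than from the matched lines:

    /* Bitmask covering 2^x vectors; x == 5 means 32 vectors, where
     * 1 << (1 << x) would shift past the width of a u32. */
    static inline __attribute_const__ u32 msi_mask(unsigned x)
    {
        if (x >= 5)
            return 0xffffffff;
        return (1 << (1 << x)) - 1;
    }
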
679 struct msi_desc *entry; in msix_setup_entries() local
683 entry = alloc_msi_entry(&dev->dev); in msix_setup_entries()
684 if (!entry) { in msix_setup_entries()
693 entry->msi_attrib.is_msix = 1; in msix_setup_entries()
694 entry->msi_attrib.is_64 = 1; in msix_setup_entries()
695 entry->msi_attrib.entry_nr = entries[i].entry; in msix_setup_entries()
696 entry->msi_attrib.default_irq = dev->irq; in msix_setup_entries()
697 entry->mask_base = base; in msix_setup_entries()
698 entry->nvec_used = 1; in msix_setup_entries()
700 list_add_tail(&entry->list, dev_to_msi_list(&dev->dev)); in msix_setup_entries()
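
msix_setup_entries() (lines 679-700) allocates one msi_desc per requested table slot; unlike MSI, every MSI-X descriptor covers exactly one vector (nvec_used = 1), and all of them point at the same mapped table through mask_base. A reconstruction built around the matched lines; the error-path details are assumptions:

    static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
                                  struct msix_entry *entries, int nvec)
    {
        struct msi_desc *entry;
        int i;

        for (i = 0; i < nvec; i++) {
            entry = alloc_msi_entry(&dev->dev);
            if (!entry) {
                if (!i)
                    iounmap(base);      /* nothing allocated yet */
                else
                    free_msi_irqs(dev); /* undo earlier slots */
                return -ENOMEM;
            }

            entry->msi_attrib.is_msix     = 1;
            entry->msi_attrib.is_64       = 1;  /* MSI-X is always 64-bit */
            entry->msi_attrib.entry_nr    = entries[i].entry;
            entry->msi_attrib.default_irq = dev->irq;
            entry->mask_base              = base;
            entry->nvec_used              = 1;

            list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
        }
        return 0;
    }
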
709 struct msi_desc *entry; in msix_program_entries() local
712 for_each_pci_msi_entry(entry, dev) { in msix_program_entries()
713 int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE + in msix_program_entries()
716 entries[i].vector = entry->irq; in msix_program_entries()
717 entry->masked = readl(entry->mask_base + offset); in msix_program_entries()
718 msix_mask_irq(entry, 1); in msix_program_entries()
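
Lines 712-718 walk the descriptor list in step with the caller's entries[] array: each slot gets its allocated Linux IRQ reported back through entries[i].vector, the hardware Vector Control word is cached in entry->masked, and the vector stays masked until a handler is wired up. Reassembled from the matched lines (the i++ bookkeeping is implied):

    struct msi_desc *entry;
    int i = 0;

    for_each_pci_msi_entry(entry, dev) {
        int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE +
                     PCI_MSIX_ENTRY_VECTOR_CTRL;

        entries[i].vector = entry->irq;  /* hand the IRQ back to the driver */
        entry->masked = readl(entry->mask_base + offset);
        msix_mask_irq(entry, 1);         /* leave masked for now */
        i++;
    }
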
790 struct msi_desc *entry; in msix_capability_init() local
793 for_each_pci_msi_entry(entry, dev) { in msix_capability_init()
794 if (entry->irq != 0) in msix_capability_init()
963 if (entries[i].entry >= nr_entries) in pci_enable_msix()
966 if (entries[i].entry == entries[j].entry) in pci_enable_msix()
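
The two checks at lines 963-966 belong to pci_enable_msix()'s validation pass over the caller-supplied entries[] array: every requested index must fit inside the device's table, and no index may appear twice. A sketch of the surrounding loop, assuming nr_entries came from pci_msix_vec_count(dev):

    for (i = 0; i < nvec; i++) {
        if (entries[i].entry >= nr_entries)
            return -EINVAL;       /* index out of range */
        for (j = i + 1; j < nvec; j++)
            if (entries[i].entry == entries[j].entry)
                return -EINVAL;   /* duplicate index */
    }
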
983 struct msi_desc *entry; in pci_msix_shutdown() local
989 for_each_pci_msi_entry(entry, dev) { in pci_msix_shutdown()
991 __pci_msix_desc_mask_irq(entry, 1); in pci_msix_shutdown()
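
Finally, the shutdown walk at lines 989-991 mirrors msix_program_entries(): every vector is masked in hardware before MSI-X is disabled. It uses the bare __pci_msix_desc_mask_irq() variant, which, unlike msix_mask_irq(), does not update the cached entry->masked, so the pre-shutdown mask state survives for a later restore:

    for_each_pci_msi_entry(entry, dev)
        __pci_msix_desc_mask_irq(entry, 1);  /* keep cached state intact */
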