/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/dma-mapping.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

#define IO_BAR				2
#define IO_OFFSET			0x20000

#define MSIX_VECTORS			2

struct ccp_msix {
	u32 vector;
	char name[16];
};

struct ccp_pci {
	int msix_count;
	struct ccp_msix msix[MSIX_VECTORS];
};

static int ccp_get_msix_irqs(struct ccp_device *ccp)
{
	struct ccp_pci *ccp_pci = ccp->dev_specific;
	struct device *dev = ccp->dev;
	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
	struct msix_entry msix_entry[MSIX_VECTORS];
	unsigned int name_len = sizeof(ccp_pci->msix[0].name) - 1;
	int v, ret;

	for (v = 0; v < ARRAY_SIZE(msix_entry); v++)
		msix_entry[v].entry = v;

	ret = pci_enable_msix_range(pdev, msix_entry, 1, v);
	if (ret < 0)
		return ret;

	ccp_pci->msix_count = ret;
	for (v = 0; v < ccp_pci->msix_count; v++) {
		/* Set the interrupt names and request the irqs */
		snprintf(ccp_pci->msix[v].name, name_len, "ccp-%u", v);
		ccp_pci->msix[v].vector = msix_entry[v].vector;
		ret = request_irq(ccp_pci->msix[v].vector, ccp_irq_handler,
				  0, ccp_pci->msix[v].name, dev);
		if (ret) {
			dev_notice(dev, "unable to allocate MSI-X IRQ (%d)\n",
				   ret);
			goto e_irq;
		}
	}

	return 0;

e_irq:
	while (v--)
		free_irq(ccp_pci->msix[v].vector, dev);

	pci_disable_msix(pdev);

	ccp_pci->msix_count = 0;

	return ret;
}

static int ccp_get_msi_irq(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
	int ret;

	ret = pci_enable_msi(pdev);
	if (ret)
		return ret;

	ccp->irq = pdev->irq;
	ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev);
	if (ret) {
		dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
		goto e_msi;
	}

	return 0;

e_msi:
	pci_disable_msi(pdev);

	return ret;
}

static int ccp_get_irqs(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	int ret;

	ret = ccp_get_msix_irqs(ccp);
	if (!ret)
		return 0;

	/* Couldn't get MSI-X vectors, try MSI */
	dev_notice(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
	ret = ccp_get_msi_irq(ccp);
	if (!ret)
		return 0;

	/* Couldn't get MSI interrupt */
	dev_notice(dev, "could not enable MSI (%d)\n", ret);

	return ret;
}

static void ccp_free_irqs(struct ccp_device *ccp)
{
	struct ccp_pci *ccp_pci = ccp->dev_specific;
	struct device *dev = ccp->dev;
	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);

	if (ccp_pci->msix_count) {
		while (ccp_pci->msix_count--)
			free_irq(ccp_pci->msix[ccp_pci->msix_count].vector,
				 dev);
		pci_disable_msix(pdev);
	} else {
		free_irq(ccp->irq, dev);
		pci_disable_msi(pdev);
	}
}

static int ccp_find_mmio_area(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
	resource_size_t io_len;
	unsigned long io_flags;

	io_flags = pci_resource_flags(pdev, IO_BAR);
	io_len = pci_resource_len(pdev, IO_BAR);
	if ((io_flags & IORESOURCE_MEM) && (io_len >= (IO_OFFSET + 0x800)))
		return IO_BAR;

	return -EIO;
}

static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ccp_device *ccp;
	struct ccp_pci *ccp_pci;
	struct device *dev = &pdev->dev;
	unsigned int bar;
	int ret;

	ret = -ENOMEM;
	ccp = ccp_alloc_struct(dev);
	if (!ccp)
		goto e_err;

	ccp_pci = devm_kzalloc(dev, sizeof(*ccp_pci), GFP_KERNEL);
	if (!ccp_pci)
		goto e_err;

	ccp->dev_specific = ccp_pci;
	ccp->get_irq = ccp_get_irqs;
	ccp->free_irq = ccp_free_irqs;

	ret = pci_request_regions(pdev, "ccp");
	if (ret) {
		dev_err(dev, "pci_request_regions failed (%d)\n", ret);
		goto e_err;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(dev, "pci_enable_device failed (%d)\n", ret);
		goto e_regions;
	}

	pci_set_master(pdev);

	ret = ccp_find_mmio_area(ccp);
	if (ret < 0)
		goto e_device;
	bar = ret;

	ret = -EIO;
	ccp->io_map = pci_iomap(pdev, bar, 0);
	if (!ccp->io_map) {
		dev_err(dev, "pci_iomap failed\n");
		goto e_device;
	}
	ccp->io_regs = ccp->io_map + IO_OFFSET;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
				ret);
			goto e_iomap;
		}
	}

	dev_set_drvdata(dev, ccp);

	ret = ccp_init(ccp);
	if (ret)
		goto e_iomap;

	dev_notice(dev, "enabled\n");

	return 0;

e_iomap:
	pci_iounmap(pdev, ccp->io_map);

e_device:
	pci_disable_device(pdev);

e_regions:
	pci_release_regions(pdev);

e_err:
	dev_notice(dev, "initialization failed\n");
	return ret;
}

static void ccp_pci_remove(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ccp_device *ccp = dev_get_drvdata(dev);

	if (!ccp)
		return;

	ccp_destroy(ccp);

	pci_iounmap(pdev, ccp->io_map);

	pci_disable_device(pdev);

	pci_release_regions(pdev);

	dev_notice(dev, "disabled\n");
}

#ifdef CONFIG_PM
static int ccp_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct device *dev = &pdev->dev;
	struct ccp_device *ccp = dev_get_drvdata(dev);
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 1;

	/* Wake all the queue kthreads to prepare for suspend */
	for (i = 0; i < ccp->cmd_q_count; i++)
		wake_up_process(ccp->cmd_q[i].kthread);

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* Wait for all queue kthreads to say they're done */
	while (!ccp_queues_suspended(ccp))
		wait_event_interruptible(ccp->suspend_queue,
					 ccp_queues_suspended(ccp));

	return 0;
}

static int ccp_pci_resume(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ccp_device *ccp = dev_get_drvdata(dev);
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 0;

	/* Wake up all the kthreads */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		ccp->cmd_q[i].suspended = 0;
		wake_up_process(ccp->cmd_q[i].kthread);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	return 0;
}
#endif

static const struct pci_device_id ccp_pci_table[] = {
	{ PCI_VDEVICE(AMD, 0x1537), },
	/* Last entry must be zero */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ccp_pci_table);

static struct pci_driver ccp_pci_driver = {
	.name = "ccp",
	.id_table = ccp_pci_table,
	.probe = ccp_pci_probe,
	.remove = ccp_pci_remove,
#ifdef CONFIG_PM
	.suspend = ccp_pci_suspend,
	.resume = ccp_pci_resume,
#endif
};

int ccp_pci_init(void)
{
	return pci_register_driver(&ccp_pci_driver);
}

void ccp_pci_exit(void)
{
	pci_unregister_driver(&ccp_pci_driver);
}
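
/*
 * Usage sketch (illustrative only, not part of this file): ccp_pci_init()
 * and ccp_pci_exit() are exported via ccp-dev.h and are expected to be
 * called from the driver's module init/exit path in ccp-dev.c, roughly
 * along these lines. The ccp_mod_init()/ccp_mod_exit() names below are
 * assumptions for the sake of the example.
 *
 *	static int __init ccp_mod_init(void)
 *	{
 *		// Register the PCI driver; probe runs for matching devices.
 *		return ccp_pci_init();
 *	}
 *
 *	static void __exit ccp_mod_exit(void)
 *	{
 *		// Unregister the PCI driver; remove runs for bound devices.
 *		ccp_pci_exit();
 *	}
 *
 *	module_init(ccp_mod_init);
 *	module_exit(ccp_mod_exit);
 */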