/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
47 #include <linux/kernel.h>
48 #include <linux/init.h>
49 #include <linux/types.h>
50 #include <linux/pci.h>
51 #include <linux/slab.h>
52 #include <linux/errno.h>
53 #include <linux/interrupt.h>
54 #include <adf_accel_devices.h>
55 #include <adf_common_drv.h>
56 #include <adf_cfg.h>
57 #include <adf_cfg_strings.h>
58 #include <adf_cfg_common.h>
59 #include <adf_transport_access_macros.h>
60 #include <adf_transport_internal.h>
61 #include "adf_drv.h"
62 
adf_enable_msix(struct adf_accel_dev * accel_dev)63 static int adf_enable_msix(struct adf_accel_dev *accel_dev)
64 {
65 	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
66 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
67 	uint32_t msix_num_entries = hw_data->num_banks + 1;
68 	int i;
69 
70 	for (i = 0; i < msix_num_entries; i++)
71 		pci_dev_info->msix_entries.entries[i].entry = i;
72 
73 	if (pci_enable_msix_exact(pci_dev_info->pci_dev,
74 				  pci_dev_info->msix_entries.entries,
75 				  msix_num_entries)) {
76 		dev_err(&GET_DEV(accel_dev), "Failed to enable MSIX IRQ\n");
77 		return -EFAULT;
78 	}
79 	return 0;
80 }
81 
/* Release the MSI-X vectors previously obtained by adf_enable_msix(). */
static void adf_disable_msix(struct adf_accel_pci *pci_dev_info)
{
	pci_disable_msix(pci_dev_info->pci_dev);
}
86 
/*
 * Top half for a ring-bank vector: write 0 to the bank's interrupt
 * flag-and-colour CSR (quiesces further interrupts from this bank -
 * presumably re-armed by the response handler; confirm in the
 * transport code), then defer response processing to the bank tasklet.
 */
static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
{
	struct adf_etr_bank_data *bank = bank_ptr;

	WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, 0);
	tasklet_hi_schedule(&bank->resp_handler);
	return IRQ_HANDLED;
}
95 
/*
 * Top half for the AE cluster vector.  No AE interrupt is expected in
 * normal operation (the log message calls it spurious), so just log
 * the event and acknowledge the IRQ.
 */
static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
{
	struct adf_accel_dev *accel_dev = dev_ptr;

	dev_info(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
		 accel_dev->accel_id);
	return IRQ_HANDLED;
}
104 
adf_request_irqs(struct adf_accel_dev * accel_dev)105 static int adf_request_irqs(struct adf_accel_dev *accel_dev)
106 {
107 	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
108 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
109 	struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
110 	struct adf_etr_data *etr_data = accel_dev->transport;
111 	int ret, i;
112 	char *name;
113 
114 	/* Request msix irq for all banks */
115 	for (i = 0; i < hw_data->num_banks; i++) {
116 		struct adf_etr_bank_data *bank = &etr_data->banks[i];
117 		unsigned int cpu, cpus = num_online_cpus();
118 
119 		name = *(pci_dev_info->msix_entries.names + i);
120 		snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
121 			 "qat%d-bundle%d", accel_dev->accel_id, i);
122 		ret = request_irq(msixe[i].vector,
123 				  adf_msix_isr_bundle, 0, name, bank);
124 		if (ret) {
125 			dev_err(&GET_DEV(accel_dev),
126 				"failed to enable irq %d for %s\n",
127 				msixe[i].vector, name);
128 			return ret;
129 		}
130 
131 		cpu = ((accel_dev->accel_id * hw_data->num_banks) + i) % cpus;
132 		irq_set_affinity_hint(msixe[i].vector, get_cpu_mask(cpu));
133 	}
134 
135 	/* Request msix irq for AE */
136 	name = *(pci_dev_info->msix_entries.names + i);
137 	snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
138 		 "qat%d-ae-cluster", accel_dev->accel_id);
139 	ret = request_irq(msixe[i].vector, adf_msix_isr_ae, 0, name, accel_dev);
140 	if (ret) {
141 		dev_err(&GET_DEV(accel_dev),
142 			"failed to enable irq %d, for %s\n",
143 			msixe[i].vector, name);
144 		return ret;
145 	}
146 	return ret;
147 }
148 
adf_free_irqs(struct adf_accel_dev * accel_dev)149 static void adf_free_irqs(struct adf_accel_dev *accel_dev)
150 {
151 	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
152 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
153 	struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
154 	struct adf_etr_data *etr_data = accel_dev->transport;
155 	int i;
156 
157 	for (i = 0; i < hw_data->num_banks; i++) {
158 		irq_set_affinity_hint(msixe[i].vector, NULL);
159 		free_irq(msixe[i].vector, &etr_data->banks[i]);
160 	}
161 	irq_set_affinity_hint(msixe[i].vector, NULL);
162 	free_irq(msixe[i].vector, accel_dev);
163 }
164 
adf_isr_alloc_msix_entry_table(struct adf_accel_dev * accel_dev)165 static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
166 {
167 	int i;
168 	char **names;
169 	struct msix_entry *entries;
170 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
171 	uint32_t msix_num_entries = hw_data->num_banks + 1;
172 
173 	entries = kzalloc_node(msix_num_entries * sizeof(*entries),
174 			       GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
175 	if (!entries)
176 		return -ENOMEM;
177 
178 	names = kcalloc(msix_num_entries, sizeof(char *), GFP_KERNEL);
179 	if (!names) {
180 		kfree(entries);
181 		return -ENOMEM;
182 	}
183 	for (i = 0; i < msix_num_entries; i++) {
184 		*(names + i) = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
185 		if (!(*(names + i)))
186 			goto err;
187 	}
188 	accel_dev->accel_pci_dev.msix_entries.entries = entries;
189 	accel_dev->accel_pci_dev.msix_entries.names = names;
190 	return 0;
191 err:
192 	for (i = 0; i < msix_num_entries; i++)
193 		kfree(*(names + i));
194 	kfree(entries);
195 	kfree(names);
196 	return -ENOMEM;
197 }
198 
adf_isr_free_msix_entry_table(struct adf_accel_dev * accel_dev)199 static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
200 {
201 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
202 	uint32_t msix_num_entries = hw_data->num_banks + 1;
203 	char **names = accel_dev->accel_pci_dev.msix_entries.names;
204 	int i;
205 
206 	kfree(accel_dev->accel_pci_dev.msix_entries.entries);
207 	for (i = 0; i < msix_num_entries; i++)
208 		kfree(*(names + i));
209 	kfree(names);
210 }
211 
adf_setup_bh(struct adf_accel_dev * accel_dev)212 static int adf_setup_bh(struct adf_accel_dev *accel_dev)
213 {
214 	struct adf_etr_data *priv_data = accel_dev->transport;
215 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
216 	int i;
217 
218 	for (i = 0; i < hw_data->num_banks; i++)
219 		tasklet_init(&priv_data->banks[i].resp_handler,
220 			     adf_response_handler,
221 			     (unsigned long)&priv_data->banks[i]);
222 	return 0;
223 }
224 
adf_cleanup_bh(struct adf_accel_dev * accel_dev)225 static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
226 {
227 	struct adf_etr_data *priv_data = accel_dev->transport;
228 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
229 	int i;
230 
231 	for (i = 0; i < hw_data->num_banks; i++) {
232 		tasklet_disable(&priv_data->banks[i].resp_handler);
233 		tasklet_kill(&priv_data->banks[i].resp_handler);
234 	}
235 }
236 
/*
 * Tear down everything adf_isr_resource_alloc() set up, in reverse
 * order of acquisition: IRQ handlers first, then the tasklets, then
 * the MSI-X vectors, and finally the entry/name tables.
 */
void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
{
	adf_free_irqs(accel_dev);
	adf_cleanup_bh(accel_dev);
	adf_disable_msix(&accel_dev->accel_pci_dev);
	adf_isr_free_msix_entry_table(accel_dev);
}
244 
/*
 * Set up interrupt handling for the device: allocate the MSI-X
 * entry/name tables, enable MSI-X, initialise the bottom-half tasklets
 * and request the IRQ handlers.
 *
 * Return: 0 on success, or the negative error code of the failing
 * stage (previously collapsed to -EFAULT, losing the real cause).
 *
 * NOTE(review): the error path unwinds via adf_isr_resource_free(),
 * which also releases resources the failing stage never acquired
 * (e.g. free_irq() on vectors that were never requested) - confirm
 * whether a staged unwind is needed here.
 */
int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
{
	int ret;

	ret = adf_isr_alloc_msix_entry_table(accel_dev);
	if (ret)
		return ret;

	ret = adf_enable_msix(accel_dev);
	if (ret)
		goto err_out;

	ret = adf_setup_bh(accel_dev);
	if (ret)
		goto err_out;

	ret = adf_request_irqs(accel_dev);
	if (ret)
		goto err_out;

	return 0;
err_out:
	adf_isr_resource_free(accel_dev);
	return ret;
}
266