/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_transport.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "qat_crypto.h"
#include "icp_qat_fw.h"

#define SEC ADF_KERNEL_SEC

static struct service_hndl qat_crypto;

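/**
 * qat_crypto_put_instance() - Drop a reference to a crypto instance
 * @inst: Crypto instance to release.
 *
 * Decrements the instance reference counter and drops the reference held
 * on the owning accelerator device.
 */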
void qat_crypto_put_instance(struct qat_crypto_instance *inst)
{
	atomic_dec(&inst->refctr);
	adf_dev_put(inst->accel_dev);
}

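/**
 * qat_crypto_free_instances() - Tear down all crypto instances of a device
 * @accel_dev: Accelerator device whose crypto instance list is freed.
 *
 * Drops any outstanding instance references, removes the symmetric and
 * asymmetric transport rings and frees every instance on the device's
 * crypto list.
 */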
static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
{
	struct qat_crypto_instance *inst;
	struct list_head *list_ptr, *tmp;
	int i;

	list_for_each_safe(list_ptr, tmp, &accel_dev->crypto_list) {
		inst = list_entry(list_ptr, struct qat_crypto_instance, list);

		for (i = 0; i < atomic_read(&inst->refctr); i++)
			qat_crypto_put_instance(inst);

		if (inst->sym_tx)
			adf_remove_ring(inst->sym_tx);

		if (inst->sym_rx)
			adf_remove_ring(inst->sym_rx);

		if (inst->pke_tx)
			adf_remove_ring(inst->pke_tx);

		if (inst->pke_rx)
			adf_remove_ring(inst->pke_rx);

		list_del(list_ptr);
		kfree(inst);
	}
	return 0;
}

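/**
 * qat_crypto_get_instance_node() - Find a crypto instance close to a NUMA node
 * @node: NUMA node the caller would like the instance to be local to.
 *
 * Picks the least referenced started accelerator on the requested node,
 * falling back to any started accelerator with crypto instances, then picks
 * the least referenced crypto instance on that accelerator and takes a
 * reference on both the instance and the device.
 *
 * Return: pointer to a crypto instance, or NULL if none is available.
 */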
struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
{
	struct adf_accel_dev *accel_dev = NULL;
	struct qat_crypto_instance *inst = NULL;
	struct list_head *itr;
	unsigned long best = ~0;

	list_for_each(itr, adf_devmgr_get_head()) {
		struct adf_accel_dev *tmp_dev;
		unsigned long ctr;

		tmp_dev = list_entry(itr, struct adf_accel_dev, list);

		if ((node == dev_to_node(&GET_DEV(tmp_dev)) ||
		     dev_to_node(&GET_DEV(tmp_dev)) < 0) &&
		    adf_dev_started(tmp_dev) &&
		    !list_empty(&tmp_dev->crypto_list)) {
			ctr = atomic_read(&tmp_dev->ref_count);
			if (best > ctr) {
				accel_dev = tmp_dev;
				best = ctr;
			}
		}
	}
	if (!accel_dev) {
		pr_info("QAT: Could not find a device on node %d\n", node);

		/* Get any started device */
		list_for_each(itr, adf_devmgr_get_head()) {
			struct adf_accel_dev *tmp_dev;

			tmp_dev = list_entry(itr, struct adf_accel_dev, list);

			if (adf_dev_started(tmp_dev) &&
			    !list_empty(&tmp_dev->crypto_list)) {
				accel_dev = tmp_dev;
				break;
			}
		}
	}

	if (!accel_dev)
		return NULL;

	best = ~0;
	list_for_each(itr, &accel_dev->crypto_list) {
		struct qat_crypto_instance *tmp_inst;
		unsigned long ctr;

		tmp_inst = list_entry(itr, struct qat_crypto_instance, list);
		ctr = atomic_read(&tmp_inst->refctr);
		if (best > ctr) {
			inst = tmp_inst;
			best = ctr;
		}
	}
	if (inst) {
		if (adf_dev_get(accel_dev)) {
			dev_err(&GET_DEV(accel_dev), "Could not increment dev refctr\n");
			return NULL;
		}
		atomic_inc(&inst->refctr);
	}
	return inst;
}

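/**
 * qat_crypto_create_instances() - Create crypto instances for a device
 * @accel_dev: Accelerator device to create the instances on.
 *
 * Reads the number of crypto instances and, for each instance, the ring
 * bank and ring sizes from the device configuration, then creates the
 * symmetric and asymmetric request (TX) and response (RX) rings.
 *
 * Return: 0 on success, -EFAULT on a configuration error, -ENOMEM on
 * allocation or ring creation failure.
 */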
static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
{
	int i;
	unsigned long bank;
	unsigned long num_inst, num_msg_sym, num_msg_asym;
	int msg_size;
	struct qat_crypto_instance *inst;
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

	INIT_LIST_HEAD(&accel_dev->crypto_list);
	strlcpy(key, ADF_NUM_CY, sizeof(key));
	if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
		return -EFAULT;

	if (kstrtoul(val, 0, &num_inst))
		return -EFAULT;

	for (i = 0; i < num_inst; i++) {
		inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
				    dev_to_node(&GET_DEV(accel_dev)));
		if (!inst)
			goto err;

		list_add_tail(&inst->list, &accel_dev->crypto_list);
		inst->id = i;
		atomic_set(&inst->refctr, 0);
		inst->accel_dev = accel_dev;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
		if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
			goto err;

		if (kstrtoul(val, 10, &bank))
			goto err;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
		if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
			goto err;

		if (kstrtoul(val, 10, &num_msg_sym))
			goto err;

		num_msg_sym = num_msg_sym >> 1;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
		if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
			goto err;

		if (kstrtoul(val, 10, &num_msg_asym))
			goto err;
		num_msg_asym = num_msg_asym >> 1;

		msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
		if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
				    msg_size, key, NULL, 0, &inst->sym_tx))
			goto err;

		msg_size = msg_size >> 1;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
		if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
				    msg_size, key, NULL, 0, &inst->pke_tx))
			goto err;

		msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
		if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
				    msg_size, key, qat_alg_callback, 0,
				    &inst->sym_rx))
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
		if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
				    msg_size, key, qat_alg_asym_callback, 0,
				    &inst->pke_rx))
			goto err;
	}
	return 0;
err:
	qat_crypto_free_instances(accel_dev);
	return -ENOMEM;
}

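/**
 * qat_crypto_init() - Set up the crypto service when a device is initialized
 * @accel_dev: Accelerator device being initialized.
 *
 * Return: 0 on success, -EFAULT if the crypto instances could not be created.
 */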
static int qat_crypto_init(struct adf_accel_dev *accel_dev)
{
	if (qat_crypto_create_instances(accel_dev))
		return -EFAULT;

	return 0;
}

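/**
 * qat_crypto_shutdown() - Tear down the crypto service on device shutdown
 * @accel_dev: Accelerator device being shut down.
 *
 * Return: 0 on success.
 */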
static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev)
{
	return qat_crypto_free_instances(accel_dev);
}

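/**
 * qat_crypto_event_handler() - Dispatch accelerator device lifecycle events
 * @accel_dev: Accelerator device the event refers to.
 * @event: Device lifecycle event.
 *
 * Creates the crypto instances on ADF_EVENT_INIT and frees them on
 * ADF_EVENT_SHUTDOWN; all other events are ignored.
 *
 * Return: 0 on success, a negative value if init or shutdown failed.
 */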
static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev,
				    enum adf_event event)
{
	int ret;

	switch (event) {
	case ADF_EVENT_INIT:
		ret = qat_crypto_init(accel_dev);
		break;
	case ADF_EVENT_SHUTDOWN:
		ret = qat_crypto_shutdown(accel_dev);
		break;
	case ADF_EVENT_RESTARTING:
	case ADF_EVENT_RESTARTED:
	case ADF_EVENT_START:
	case ADF_EVENT_STOP:
	default:
		ret = 0;
	}
	return ret;
}

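/**
 * qat_crypto_register() - Register the crypto service handler with ADF
 *
 * Return: 0 on success, error code otherwise.
 */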
int qat_crypto_register(void)
{
	memset(&qat_crypto, 0, sizeof(qat_crypto));
	qat_crypto.event_hld = qat_crypto_event_handler;
	qat_crypto.name = "qat_crypto";
	return adf_service_register(&qat_crypto);
}

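/**
 * qat_crypto_unregister() - Unregister the crypto service handler from ADF
 *
 * Return: 0 on success, error code otherwise.
 */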
int qat_crypto_unregister(void)
{
	return adf_service_unregister(&qat_crypto);
}