This source file includes the following definitions:
- adf_modulo
- adf_check_ring_alignment
- adf_verify_ring_size
- adf_reserve_ring
- adf_unreserve_ring
- adf_enable_ring_irq
- adf_disable_ring_irq
- adf_send_message
- adf_handle_response
- adf_configure_tx_ring
- adf_configure_rx_ring
- adf_init_ring
- adf_cleanup_ring
- adf_create_ring
- adf_remove_ring
- adf_ring_response_handler
- adf_response_handler
- adf_get_cfg_int
- adf_get_coalesc_timer
- adf_init_bank
- adf_init_etr_data
- cleanup_bank
- adf_cleanup_etr_handles
- adf_cleanup_etr_data
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_transport_internal.h"
#include "adf_transport_access_macros.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"

static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
{
        uint32_t div = data >> shift;
        uint32_t mult = div << shift;

        return data - mult;
}

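/*
 * Illustrative sketch (not part of the driver): since ring sizes are powers
 * of two, adf_modulo(data, shift) is just "data mod 2^shift", equivalent to
 * masking with (1 << shift) - 1.  For example, adf_modulo(1027, 10) yields
 * 1027 - ((1027 >> 10) << 10) = 1027 - 1024 = 3, the same as 1027 & 0x3FF.
 */
#if 0
static void example_adf_modulo(void)
{
        /* Both expressions wrap a byte offset into a 1024-byte ring. */
        WARN_ON(adf_modulo(1027, 10) != (1027 & ((1u << 10) - 1)));
}
#endif
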
static inline int adf_check_ring_alignment(uint64_t addr, uint64_t size)
{
        if (((size - 1) & addr) != 0)
                return -EFAULT;
        return 0;
}

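/*
 * Illustrative sketch (assumes the ring size is a power of two, as the size
 * macros produce): a ring must be naturally aligned to its own size, i.e.
 * every address bit covered by (size - 1) must be zero.
 */
#if 0
static void example_ring_alignment(void)
{
        /* 16 KB ring: 0x100000 is 16K-aligned, 0x100040 is not. */
        WARN_ON(adf_check_ring_alignment(0x100000ULL, 0x4000ULL) != 0);
        WARN_ON(adf_check_ring_alignment(0x100040ULL, 0x4000ULL) != -EFAULT);
}
#endif
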
static int adf_verify_ring_size(uint32_t msg_size, uint32_t msg_num)
{
        int i = ADF_MIN_RING_SIZE;

        for (; i <= ADF_MAX_RING_SIZE; i++)
                if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i))
                        return i;

        return ADF_DEFAULT_RING_SIZE;
}

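/*
 * Illustrative note (a sketch; the byte capacity behind each size encoding
 * is defined by ADF_SIZE_TO_RING_SIZE_IN_BYTES() and assumed here, not
 * quoted): adf_verify_ring_size() walks the supported encodings from
 * ADF_MIN_RING_SIZE to ADF_MAX_RING_SIZE and returns the one whose byte
 * capacity exactly equals msg_size * msg_num, falling back to
 * ADF_DEFAULT_RING_SIZE when the requested geometry matches none of them.
 */
#if 0
static void example_ring_size_lookup(void)
{
        /* Hypothetical geometry: 256 messages of 64 bytes = 16 KB. */
        int size_enum = adf_verify_ring_size(64, 256);

        pr_info("QAT ring size encoding for 64B x 256: %d\n", size_enum);
}
#endif
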
static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
{
        spin_lock(&bank->lock);
        if (bank->ring_mask & (1 << ring)) {
                spin_unlock(&bank->lock);
                return -EFAULT;
        }
        bank->ring_mask |= (1 << ring);
        spin_unlock(&bank->lock);
        return 0;
}

static void adf_unreserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
{
        spin_lock(&bank->lock);
        bank->ring_mask &= ~(1 << ring);
        spin_unlock(&bank->lock);
}

static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
{
        spin_lock_bh(&bank->lock);
        bank->irq_mask |= (1 << ring);
        spin_unlock_bh(&bank->lock);
        WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
        WRITE_CSR_INT_COL_CTL(bank->csr_addr, bank->bank_number,
                              bank->irq_coalesc_timer);
}

static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
{
        spin_lock_bh(&bank->lock);
        bank->irq_mask &= ~(1 << ring);
        spin_unlock_bh(&bank->lock);
        WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
}

int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg)
{
        if (atomic_add_return(1, ring->inflights) >
            ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) {
                atomic_dec(ring->inflights);
                return -EAGAIN;
        }
        spin_lock_bh(&ring->lock);
        memcpy((void *)((uintptr_t)ring->base_addr + ring->tail), msg,
               ADF_MSG_SIZE_TO_BYTES(ring->msg_size));

        ring->tail = adf_modulo(ring->tail +
                                ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
                                ADF_RING_SIZE_MODULO(ring->ring_size));
        WRITE_CSR_RING_TAIL(ring->bank->csr_addr, ring->bank->bank_number,
                            ring->ring_number, ring->tail);
        spin_unlock_bh(&ring->lock);
        return 0;
}

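/*
 * Illustrative sketch (not part of the driver): adf_send_message() returns
 * -EAGAIN when the in-flight counter would exceed the ring's capacity, so
 * callers are expected to back off and retry.  The retry loop and the
 * msleep() interval below (available via the linux/delay.h include above)
 * are assumptions for illustration only.
 */
#if 0
static int example_send_with_retry(struct adf_etr_ring_data *ring,
                                   uint32_t *msg)
{
        int ret;

        do {
                ret = adf_send_message(ring, msg);
                if (ret == -EAGAIN)
                        msleep(1); /* ring full: let responses drain */
        } while (ret == -EAGAIN);
        return ret;
}
#endif
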
static int adf_handle_response(struct adf_etr_ring_data *ring)
{
        uint32_t msg_counter = 0;
        uint32_t *msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head);

        while (*msg != ADF_RING_EMPTY_SIG) {
                ring->callback((uint32_t *)msg);
                atomic_dec(ring->inflights);
                *msg = ADF_RING_EMPTY_SIG;
                ring->head = adf_modulo(ring->head +
                                        ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
                                        ADF_RING_SIZE_MODULO(ring->ring_size));
                msg_counter++;
                msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head);
        }
        if (msg_counter > 0)
                WRITE_CSR_RING_HEAD(ring->bank->csr_addr,
                                    ring->bank->bank_number,
                                    ring->ring_number, ring->head);
        return 0;
}

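/*
 * Illustrative note: emptiness of a response ring is tracked by signature,
 * not by a separate "new message" pointer.  A slot whose first 32-bit word
 * still holds ADF_RING_EMPTY_SIG (the 0x7F fill pattern written in
 * adf_init_ring()) is empty; anything else is a firmware response, so the
 * loop above runs the callback, restores the signature, and advances the
 * head before writing it back to the CSR in one batch.
 */
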
static void adf_configure_tx_ring(struct adf_etr_ring_data *ring)
{
        uint32_t ring_config = BUILD_RING_CONFIG(ring->ring_size);

        WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
                              ring->ring_number, ring_config);
}

static void adf_configure_rx_ring(struct adf_etr_ring_data *ring)
{
        uint32_t ring_config =
                        BUILD_RESP_RING_CONFIG(ring->ring_size,
                                               ADF_RING_NEAR_WATERMARK_512,
                                               ADF_RING_NEAR_WATERMARK_0);

        WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
                              ring->ring_number, ring_config);
}

static int adf_init_ring(struct adf_etr_ring_data *ring)
{
        struct adf_etr_bank_data *bank = ring->bank;
        struct adf_accel_dev *accel_dev = bank->accel_dev;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        uint64_t ring_base;
        uint32_t ring_size_bytes =
                        ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);

        ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
        ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
                                             ring_size_bytes, &ring->dma_addr,
                                             GFP_KERNEL);
        if (!ring->base_addr)
                return -ENOMEM;

        /* Fill the ring with the empty-message signature pattern */
        memset(ring->base_addr, 0x7F, ring_size_bytes);

        if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) {
                dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n");
                dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
                                  ring->base_addr, ring->dma_addr);
                return -EFAULT;
        }

        if (hw_data->tx_rings_mask & (1 << ring->ring_number))
                adf_configure_tx_ring(ring);
        else
                adf_configure_rx_ring(ring);

        ring_base = BUILD_RING_BASE_ADDR(ring->dma_addr, ring->ring_size);
        WRITE_CSR_RING_BASE(ring->bank->csr_addr, ring->bank->bank_number,
                            ring->ring_number, ring_base);
        spin_lock_init(&ring->lock);
        return 0;
}

static void adf_cleanup_ring(struct adf_etr_ring_data *ring)
{
        uint32_t ring_size_bytes =
                        ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);

        ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);

        if (ring->base_addr) {
                memset(ring->base_addr, 0x7F, ring_size_bytes);
                dma_free_coherent(&GET_DEV(ring->bank->accel_dev),
                                  ring_size_bytes, ring->base_addr,
                                  ring->dma_addr);
        }
}

int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
                    uint32_t bank_num, uint32_t num_msgs,
                    uint32_t msg_size, const char *ring_name,
                    adf_callback_fn callback, int poll_mode,
                    struct adf_etr_ring_data **ring_ptr)
{
        struct adf_etr_data *transport_data = accel_dev->transport;
        struct adf_etr_bank_data *bank;
        struct adf_etr_ring_data *ring;
        char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
        uint32_t ring_num;
        int ret;

        if (bank_num >= GET_MAX_BANKS(accel_dev)) {
                dev_err(&GET_DEV(accel_dev), "Invalid bank number\n");
                return -EFAULT;
        }
        if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
                dev_err(&GET_DEV(accel_dev), "Invalid msg size\n");
                return -EFAULT;
        }
        if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs),
                              ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) {
                dev_err(&GET_DEV(accel_dev),
                        "Invalid ring size for given msg size\n");
                return -EFAULT;
        }
        if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) {
                dev_err(&GET_DEV(accel_dev), "Section %s, no such entry: %s\n",
                        section, ring_name);
                return -EFAULT;
        }
        if (kstrtouint(val, 10, &ring_num)) {
                dev_err(&GET_DEV(accel_dev), "Can't get ring number\n");
                return -EFAULT;
        }
        if (ring_num >= ADF_ETR_MAX_RINGS_PER_BANK) {
                dev_err(&GET_DEV(accel_dev), "Invalid ring number\n");
                return -EFAULT;
        }

        bank = &transport_data->banks[bank_num];
        if (adf_reserve_ring(bank, ring_num)) {
                dev_err(&GET_DEV(accel_dev), "Ring %d, %s already exists.\n",
                        ring_num, ring_name);
                return -EFAULT;
        }
        ring = &bank->rings[ring_num];
        ring->ring_number = ring_num;
        ring->bank = bank;
        ring->callback = callback;
        ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size);
        ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
        ring->head = 0;
        ring->tail = 0;
        atomic_set(ring->inflights, 0);
        ret = adf_init_ring(ring);
        if (ret)
                goto err;

        /* Enable HW arbitration for the given ring */
        adf_update_ring_arb(ring);

        if (adf_ring_debugfs_add(ring, ring_name)) {
                dev_err(&GET_DEV(accel_dev),
                        "Couldn't add ring debugfs entry\n");
                ret = -EFAULT;
                goto err;
        }

        /* Enable interrupts if needed */
        if (callback && (!poll_mode))
                adf_enable_ring_irq(bank, ring->ring_number);
        *ring_ptr = ring;
        return 0;
err:
        adf_cleanup_ring(ring);
        adf_unreserve_ring(bank, ring_num);
        adf_update_ring_arb(ring);
        return ret;
}

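/*
 * Illustrative sketch (not part of the driver): creating a request/response
 * ring pair.  The section name "SSL", key names "RingSymTx"/"RingSymRx",
 * message counts/sizes, and the callback below are hypothetical; real
 * values come from the device configuration.
 */
#if 0
static void example_resp_callback(uint32_t *msg)
{
        /* Process one firmware response message here. */
}

static int example_create_ring_pair(struct adf_accel_dev *accel_dev,
                                    struct adf_etr_ring_data **tx,
                                    struct adf_etr_ring_data **rx)
{
        int ret;

        /* Tx ring: no callback, responses arrive on the paired rx ring. */
        ret = adf_create_ring(accel_dev, "SSL", 0, 128, 64,
                              "RingSymTx", NULL, 0, tx);
        if (ret)
                return ret;

        /* Rx ring: callback-driven; poll_mode = 0 enables its IRQ. */
        ret = adf_create_ring(accel_dev, "SSL", 0, 128, 64,
                              "RingSymRx", example_resp_callback, 0, rx);
        if (ret)
                adf_remove_ring(*tx);
        return ret;
}
#endif
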
void adf_remove_ring(struct adf_etr_ring_data *ring)
{
        struct adf_etr_bank_data *bank = ring->bank;

        /* Disable interrupts for the given ring */
        adf_disable_ring_irq(bank, ring->ring_number);

        /* Clear the ring's CSR configuration */
        WRITE_CSR_RING_CONFIG(bank->csr_addr, bank->bank_number,
                              ring->ring_number, 0);
        WRITE_CSR_RING_BASE(bank->csr_addr, bank->bank_number,
                            ring->ring_number, 0);
        adf_ring_debugfs_rm(ring);
        adf_unreserve_ring(bank, ring->ring_number);
        /* Disable HW arbitration for the given ring */
        adf_update_ring_arb(ring);
        adf_cleanup_ring(ring);
}

static void adf_ring_response_handler(struct adf_etr_bank_data *bank)
{
        uint32_t empty_rings, i;

        empty_rings = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number);
        empty_rings = ~empty_rings & bank->irq_mask;

        for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; ++i) {
                if (empty_rings & (1 << i))
                        adf_handle_response(&bank->rings[i]);
        }
}

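/*
 * Illustrative note: READ_CSR_E_STAT() returns a bitmask with a 1 for each
 * *empty* ring, so "~empty_rings & bank->irq_mask" selects rings that are
 * both non-empty and IRQ-enabled.  E.g. with e_stat 0b1101 (ring 1 has
 * data) and irq_mask 0b0110, only bit 1 survives and only ring 1 is polled.
 */
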
void adf_response_handler(uintptr_t bank_addr)
{
        struct adf_etr_bank_data *bank = (void *)bank_addr;

        /* Handle all the responses and re-enable IRQs */
        adf_ring_response_handler(bank);
        WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
                                   bank->irq_mask);
}

static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
                                  const char *section, const char *format,
                                  uint32_t key, uint32_t *value)
{
        char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
        char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

        snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key);

        if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf))
                return -EFAULT;

        if (kstrtouint(val_buf, 10, value))
                return -EFAULT;
        return 0;
}

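/*
 * Illustrative sketch (the "Bank%dTimer" key format below is hypothetical;
 * the real formats such as ADF_ETRMGR_COALESCE_TIMER_FORMAT live in the
 * config headers): adf_get_cfg_int() expands a printf-style format plus an
 * index into a key, looks the key up in the given section, and parses the
 * value as a base-10 unsigned integer.
 */
#if 0
static void example_cfg_lookup(struct adf_accel_dev *accel_dev)
{
        uint32_t timer = 0;

        if (!adf_get_cfg_int(accel_dev, "Accelerator0", "Bank%dTimer",
                             0, &timer))
                dev_info(&GET_DEV(accel_dev), "bank 0 timer: %u\n", timer);
}
#endif
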
static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank,
                                  const char *section,
                                  uint32_t bank_num_in_accel)
{
        if (adf_get_cfg_int(bank->accel_dev, section,
                            ADF_ETRMGR_COALESCE_TIMER_FORMAT,
                            bank_num_in_accel, &bank->irq_coalesc_timer))
                bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;

        if (ADF_COALESCING_MAX_TIME < bank->irq_coalesc_timer ||
            ADF_COALESCING_MIN_TIME > bank->irq_coalesc_timer)
                bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
}

static int adf_init_bank(struct adf_accel_dev *accel_dev,
                         struct adf_etr_bank_data *bank,
                         uint32_t bank_num, void __iomem *csr_addr)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct adf_etr_ring_data *ring;
        struct adf_etr_ring_data *tx_ring;
        uint32_t i, coalesc_enabled = 0;

        memset(bank, 0, sizeof(*bank));
        bank->bank_number = bank_num;
        bank->csr_addr = csr_addr;
        bank->accel_dev = accel_dev;
        spin_lock_init(&bank->lock);

        /* If IRQ coalescing is enabled in the config, read the timer value;
         * otherwise fall back to the minimum coalescing time.
         */
        if ((adf_get_cfg_int(accel_dev, "Accelerator0",
                             ADF_ETRMGR_COALESCING_ENABLED_FORMAT, bank_num,
                             &coalesc_enabled) == 0) && coalesc_enabled)
                adf_get_coalesc_timer(bank, "Accelerator0", bank_num);
        else
                bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;

        for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
                WRITE_CSR_RING_CONFIG(csr_addr, bank_num, i, 0);
                WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0);
                ring = &bank->rings[i];
                if (hw_data->tx_rings_mask & (1 << i)) {
                        ring->inflights =
                                kzalloc_node(sizeof(atomic_t),
                                             GFP_KERNEL,
                                             dev_to_node(&GET_DEV(accel_dev)));
                        if (!ring->inflights)
                                goto err;
                } else {
                        if (i < hw_data->tx_rx_gap) {
                                dev_err(&GET_DEV(accel_dev),
                                        "Invalid tx rings mask config\n");
                                goto err;
                        }
                        /* An rx ring shares its in-flight counter with the
                         * tx ring it is paired with, tx_rx_gap slots below.
                         */
                        tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
                        ring->inflights = tx_ring->inflights;
                }
        }
        if (adf_bank_debugfs_add(bank)) {
                dev_err(&GET_DEV(accel_dev),
                        "Failed to add bank debugfs entry\n");
                goto err;
        }

        WRITE_CSR_INT_FLAG(csr_addr, bank_num, ADF_BANK_INT_FLAG_CLEAR_MASK);
        WRITE_CSR_INT_SRCSEL(csr_addr, bank_num);
        return 0;
err:
        for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
                ring = &bank->rings[i];
                if (hw_data->tx_rings_mask & (1 << i))
                        kfree(ring->inflights);
        }
        return -ENOMEM;
}

/**
 * adf_init_etr_data() - Initialize transport rings for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function initializes the communication channels (rings) to the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_init_etr_data(struct adf_accel_dev *accel_dev)
{
        struct adf_etr_data *etr_data;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        void __iomem *csr_addr;
        uint32_t size;
        uint32_t num_banks = 0;
        int i, ret;

        etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
                                dev_to_node(&GET_DEV(accel_dev)));
        if (!etr_data)
                return -ENOMEM;

        num_banks = GET_MAX_BANKS(accel_dev);
        size = num_banks * sizeof(struct adf_etr_bank_data);
        etr_data->banks = kzalloc_node(size, GFP_KERNEL,
                                       dev_to_node(&GET_DEV(accel_dev)));
        if (!etr_data->banks) {
                ret = -ENOMEM;
                goto err_bank;
        }

        accel_dev->transport = etr_data;
        i = hw_data->get_etr_bar_id(hw_data);
        csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;

        /* accel_dev->debugfs_dir should always be non-NULL here */
        etr_data->debug = debugfs_create_dir("transport",
                                             accel_dev->debugfs_dir);

        for (i = 0; i < num_banks; i++) {
                ret = adf_init_bank(accel_dev, &etr_data->banks[i], i,
                                    csr_addr);
                if (ret)
                        goto err_bank_all;
        }

        return 0;

err_bank_all:
        debugfs_remove(etr_data->debug);
        kfree(etr_data->banks);
err_bank:
        kfree(etr_data);
        accel_dev->transport = NULL;
        return ret;
}
EXPORT_SYMBOL_GPL(adf_init_etr_data);

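/*
 * Illustrative sketch (not part of the driver): a device-specific driver
 * pairing adf_init_etr_data() with adf_cleanup_etr_data().  The surrounding
 * start/stop structure is an assumption for illustration.
 */
#if 0
static int example_dev_start(struct adf_accel_dev *accel_dev)
{
        int ret = adf_init_etr_data(accel_dev);

        if (ret)
                return ret;
        /* ... create rings with adf_create_ring(), start services ... */
        return 0;
}

static void example_dev_stop(struct adf_accel_dev *accel_dev)
{
        /* Rings must be removed before tearing the transport down. */
        adf_cleanup_etr_data(accel_dev);
}
#endif
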
static void cleanup_bank(struct adf_etr_bank_data *bank)
{
        uint32_t i;

        for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
                struct adf_accel_dev *accel_dev = bank->accel_dev;
                struct adf_hw_device_data *hw_data = accel_dev->hw_device;
                struct adf_etr_ring_data *ring = &bank->rings[i];

                if (bank->ring_mask & (1 << i))
                        adf_cleanup_ring(ring);

                if (hw_data->tx_rings_mask & (1 << i))
                        kfree(ring->inflights);
        }
        adf_bank_debugfs_rm(bank);
        memset(bank, 0, sizeof(*bank));
}

static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
{
        struct adf_etr_data *etr_data = accel_dev->transport;
        uint32_t i, num_banks = GET_MAX_BANKS(accel_dev);

        for (i = 0; i < num_banks; i++)
                cleanup_bank(&etr_data->banks[i]);
}

/**
 * adf_cleanup_etr_data() - Clear transport rings for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function clears the communication channels (rings) of the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev)
{
        struct adf_etr_data *etr_data = accel_dev->transport;

        if (etr_data) {
                adf_cleanup_etr_handles(accel_dev);
                debugfs_remove(etr_data->debug);
                kfree(etr_data->banks);
                kfree(etr_data);
                accel_dev->transport = NULL;
        }
}
EXPORT_SYMBOL_GPL(adf_cleanup_etr_data);