This source file includes the following definitions:
- qed_chain_get_prod_idx
- qed_chain_get_cons_idx
- qed_chain_get_cons_idx_u32
- qed_chain_get_elem_left
- qed_chain_get_elem_left_u32
- qed_chain_get_usable_per_page
- qed_chain_get_unusable_per_page
- qed_chain_get_page_cnt
- qed_chain_get_pbl_phys
- qed_chain_advance_page
- qed_chain_return_produced
- qed_chain_produce
- qed_chain_get_capacity
- qed_chain_recycle_consumed
- qed_chain_consume
- qed_chain_reset
- qed_chain_init_params
- qed_chain_init_mem
- qed_chain_init_pbl_mem
- qed_chain_init_next_ptr_elem
- qed_chain_get_last_elem
- qed_chain_set_prod
- qed_chain_pbl_zero_mem
#ifndef _QED_CHAIN_H
#define _QED_CHAIN_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>

enum qed_chain_mode {
        /* Each page contains a next pointer at its end */
        QED_CHAIN_MODE_NEXT_PTR,

        /* Chain is a single page; a next pointer is not required */
        QED_CHAIN_MODE_SINGLE,

        /* Page pointers are located in a side list */
        QED_CHAIN_MODE_PBL,
};

enum qed_chain_use_mode {
        QED_CHAIN_USE_TO_PRODUCE,               /* Chain starts empty */
        QED_CHAIN_USE_TO_CONSUME,               /* Chain starts full */
        QED_CHAIN_USE_TO_CONSUME_PRODUCE,       /* Chain starts empty */
};

enum qed_chain_cnt_type {
        /* The chain's size/prod/cons are kept in 16-bit variables */
        QED_CHAIN_CNT_TYPE_U16,

        /* The chain's size/prod/cons are kept in 32-bit variables */
        QED_CHAIN_CNT_TYPE_U32,
};

struct qed_chain_next {
        struct regpair next_phys;
        void *next_virt;
};

struct qed_chain_pbl_u16 {
        u16 prod_page_idx;
        u16 cons_page_idx;
};

struct qed_chain_pbl_u32 {
        u32 prod_page_idx;
        u32 cons_page_idx;
};

struct qed_chain_ext_pbl {
        dma_addr_t p_pbl_phys;
        void *p_pbl_virt;
};

struct qed_chain_u16 {
        /* Cyclic index of next element to produce/consume */
        u16 prod_idx;
        u16 cons_idx;
};

struct qed_chain_u32 {
        /* Cyclic index of next element to produce/consume */
        u32 prod_idx;
        u32 cons_idx;
};

struct addr_tbl_entry {
        void *virt_addr;
        dma_addr_t dma_map;
};

struct qed_chain {
        /* Fastpath portion of the chain - required for commands such
         * as produce / consume.
         */

        /* Point to next element to produce/consume */
        void *p_prod_elem;
        void *p_cons_elem;

        /* Fastpath portions of the PBL [if exists] */
        struct {
                /* Table for keeping the virtual and physical addresses of
                 * the chain pages, respectively to the physical addresses
                 * in the pbl table.
                 */
                struct addr_tbl_entry *pp_addr_tbl;

                union {
                        struct qed_chain_pbl_u16 u16;
                        struct qed_chain_pbl_u32 u32;
                } c;
        } pbl;

        union {
                struct qed_chain_u16 chain16;
                struct qed_chain_u32 chain32;
        } u;

        /* Capacity counts only usable elements */
        u32 capacity;
        u32 page_cnt;

        enum qed_chain_mode mode;

        /* Elements information for fast calculations */
        u16 elem_per_page;
        u16 elem_per_page_mask;
        u16 elem_size;
        u16 next_page_mask;
        u16 usable_per_page;
        u8 elem_unusable;

        u8 cnt_type;

        /* Slowpath of the chain - required for initialization and
         * destruction, but isn't involved in regular functionality.
         */

        /* Base address of a pre-allocated buffer for pbl */
        struct {
                dma_addr_t p_phys_table;
                void *p_virt_table;
        } pbl_sp;

        /* Address of first page of the chain - the address is required
         * for fastpath operation [consume/produce] but only for the SINGLE
         * flavor which isn't considered fastpath.
         */
        void *p_virt_addr;
        dma_addr_t p_phys_addr;

        /* Total number of elements [for entire chain] */
        u32 size;

        u8 intended_use;

        bool b_external_pbl;
};

#define QED_CHAIN_PBL_ENTRY_SIZE        (8)
#define QED_CHAIN_PAGE_SIZE             (0x1000)
#define ELEMS_PER_PAGE(elem_size)       (QED_CHAIN_PAGE_SIZE / (elem_size))

#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)         \
        (((mode) == QED_CHAIN_MODE_NEXT_PTR) ?           \
         (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / \
                   (elem_size))) : 0)

#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
        ((u32)(ELEMS_PER_PAGE(elem_size) -     \
               UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))

#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
        DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))

#define is_chain_u16(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
#define is_chain_u32(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)

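/* Worked example (illustrative, not part of the original header): with
 * 16-byte elements on a 0x1000-byte page, ELEMS_PER_PAGE(16) == 256.
 * In NEXT_PTR mode the trailing struct qed_chain_next (a regpair plus a
 * pointer, 16 bytes on 64-bit) costs UNUSABLE_ELEMS_PER_PAGE(16, mode) ==
 * 1 + (16 - 1) / 16 == 1 element, so USABLE_ELEMS_PER_PAGE(16, mode) == 255,
 * and a 1020-element chain needs QED_CHAIN_PAGE_CNT(1020, 16, mode) ==
 * DIV_ROUND_UP(1020, 255) == 4 pages. In SINGLE/PBL modes no elements are
 * lost per page, and the same 1020 elements also fit in 4 pages of 256.
 */
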
/* Accessors */

static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
{
        return p_chain->u.chain16.prod_idx;
}

static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
{
        return p_chain->u.chain16.cons_idx;
}

static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain)
{
        return p_chain->u.chain32.cons_idx;
}

/* Returns the number of free elements in a u16 chain; the 0x10000 bias
 * keeps the subtraction positive when the producer index has wrapped.
 */
static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
{
        u16 used;

        used = (u16)(((u32)0x10000 +
                      (u32)p_chain->u.chain16.prod_idx) -
                     (u32)p_chain->u.chain16.cons_idx);
        if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
                used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
                        p_chain->u.chain16.cons_idx / p_chain->elem_per_page;

        return (u16)(p_chain->capacity - used);
}

static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
{
        u32 used;

        used = (u32)(((u64)0x100000000ULL +
                      (u64)p_chain->u.chain32.prod_idx) -
                     (u64)p_chain->u.chain32.cons_idx);
        if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
                used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
                        p_chain->u.chain32.cons_idx / p_chain->elem_per_page;

        return p_chain->capacity - used;
}

static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain)
{
        return p_chain->usable_per_page;
}

static inline u8 qed_chain_get_unusable_per_page(struct qed_chain *p_chain)
{
        return p_chain->elem_unusable;
}

static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain)
{
        return p_chain->page_cnt;
}

static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
{
        return p_chain->pbl_sp.p_phys_table;
}

/**
 * @brief qed_chain_advance_page -
 *
 * Advance the next-element pointer across a page boundary
 *
 * @param p_chain
 * @param p_next_elem
 * @param idx_to_inc
 * @param page_to_inc
 */
static inline void
qed_chain_advance_page(struct qed_chain *p_chain,
                       void **p_next_elem, void *idx_to_inc, void *page_to_inc)
{
        struct qed_chain_next *p_next = NULL;
        u32 page_index = 0;

        switch (p_chain->mode) {
        case QED_CHAIN_MODE_NEXT_PTR:
                p_next = *p_next_elem;
                *p_next_elem = p_next->next_virt;
                if (is_chain_u16(p_chain))
                        *(u16 *)idx_to_inc += p_chain->elem_unusable;
                else
                        *(u32 *)idx_to_inc += p_chain->elem_unusable;
                break;
        case QED_CHAIN_MODE_SINGLE:
                *p_next_elem = p_chain->p_virt_addr;
                break;
        case QED_CHAIN_MODE_PBL:
                if (is_chain_u16(p_chain)) {
                        if (++(*(u16 *)page_to_inc) == p_chain->page_cnt)
                                *(u16 *)page_to_inc = 0;
                        page_index = *(u16 *)page_to_inc;
                } else {
                        if (++(*(u32 *)page_to_inc) == p_chain->page_cnt)
                                *(u32 *)page_to_inc = 0;
                        page_index = *(u32 *)page_to_inc;
                }
                *p_next_elem = p_chain->pbl.pp_addr_tbl[page_index].virt_addr;
        }
}

#define is_unusable_idx(p, idx) \
        (((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define is_unusable_idx_u32(p, idx) \
        (((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define is_unusable_next_idx(p, idx)                              \
        ((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \
         (p)->usable_per_page)

#define is_unusable_next_idx_u32(p, idx)                         \
        ((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) == \
         (p)->usable_per_page)

#define test_and_skip(p, idx)                                             \
        do {                                                              \
                if (is_chain_u16(p)) {                                    \
                        if (is_unusable_idx(p, idx))                      \
                                (p)->u.chain16.idx += (p)->elem_unusable; \
                } else {                                                  \
                        if (is_unusable_idx_u32(p, idx))                  \
                                (p)->u.chain32.idx += (p)->elem_unusable; \
                }                                                         \
        } while (0)
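
/* Worked example (illustrative): in NEXT_PTR mode with 255 usable and 1
 * unusable element per 256-element page, an index whose in-page offset
 * (idx & elem_per_page_mask) has reached usable_per_page == 255 points at
 * the page's next-pointer element; test_and_skip() then bumps it by
 * elem_unusable == 1, wrapping the offset to the start of the next page.
 */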

/**
 * @brief qed_chain_return_produced -
 *
 * A chain in which the driver "Produces" elements should use this API
 * to indicate that previously produced elements are now consumed.
 *
 * @param p_chain
 */
static inline void qed_chain_return_produced(struct qed_chain *p_chain)
{
        if (is_chain_u16(p_chain))
                p_chain->u.chain16.cons_idx++;
        else
                p_chain->u.chain32.cons_idx++;
        test_and_skip(p_chain, cons_idx);
}

/**
 * @brief qed_chain_produce -
 *
 * A chain in which the driver "Produces" elements should use this to get
 * a pointer to the next element which can be "Produced". It's the driver's
 * responsibility to validate that the chain has room for a new element.
 *
 * @param p_chain
 *
 * @return void*, a pointer to the next element
 */
static inline void *qed_chain_produce(struct qed_chain *p_chain)
{
        void *p_ret = NULL, *p_prod_idx, *p_prod_page_idx;

        if (is_chain_u16(p_chain)) {
                if ((p_chain->u.chain16.prod_idx &
                     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
                        p_prod_idx = &p_chain->u.chain16.prod_idx;
                        p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx;
                        qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
                                               p_prod_idx, p_prod_page_idx);
                }
                p_chain->u.chain16.prod_idx++;
        } else {
                if ((p_chain->u.chain32.prod_idx &
                     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
                        p_prod_idx = &p_chain->u.chain32.prod_idx;
                        p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx;
                        qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
                                               p_prod_idx, p_prod_page_idx);
                }
                p_chain->u.chain32.prod_idx++;
        }

        p_ret = p_chain->p_prod_elem;
        p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
                                        p_chain->elem_size);

        return p_ret;
}
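
/* Usage sketch (illustrative only, not part of the qed API): posting
 * descriptors on a producer chain. 'struct my_bd' and example_post_bds()
 * are hypothetical; only qed_chain_get_elem_left() and qed_chain_produce()
 * come from this header. Guarded with #if 0 so the header still compiles.
 */
#if 0
static u16 example_post_bds(struct qed_chain *chain,
                            const struct my_bd *bds, u16 count)
{
        u16 posted = 0;

        /* Never produce more elements than the chain has room for */
        count = min_t(u16, count, qed_chain_get_elem_left(chain));

        while (posted < count) {
                struct my_bd *slot = qed_chain_produce(chain);

                *slot = bds[posted++];  /* fill the claimed slot */
        }

        return posted;
}
#endif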

/**
 * @brief qed_chain_get_capacity -
 *
 * Get the maximum number of BDs in the chain
 *
 * @param p_chain
 *
 * @return the chain's capacity in usable elements
 */
static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
{
        return p_chain->capacity;
}

/**
 * @brief qed_chain_recycle_consumed -
 *
 * Returns an element which was previously consumed;
 * increments producers so they can be written to the FW.
 *
 * @param p_chain
 */
static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
{
        test_and_skip(p_chain, prod_idx);
        if (is_chain_u16(p_chain))
                p_chain->u.chain16.prod_idx++;
        else
                p_chain->u.chain32.prod_idx++;
}

/**
 * @brief qed_chain_consume -
 *
 * A chain in which the driver utilizes data written by a different source
 * (i.e., the FW) should use this to access passed buffers.
 *
 * @param p_chain
 *
 * @return void*, a pointer to the next buffer written
 */
static inline void *qed_chain_consume(struct qed_chain *p_chain)
{
        void *p_ret = NULL, *p_cons_idx, *p_cons_page_idx;

        if (is_chain_u16(p_chain)) {
                if ((p_chain->u.chain16.cons_idx &
                     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
                        p_cons_idx = &p_chain->u.chain16.cons_idx;
                        p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx;
                        qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
                                               p_cons_idx, p_cons_page_idx);
                }
                p_chain->u.chain16.cons_idx++;
        } else {
                if ((p_chain->u.chain32.cons_idx &
                     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
                        p_cons_idx = &p_chain->u.chain32.cons_idx;
                        p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx;
                        qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
                                               p_cons_idx, p_cons_page_idx);
                }
                p_chain->u.chain32.cons_idx++;
        }

        p_ret = p_chain->p_cons_elem;
        p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
                                        p_chain->elem_size);

        return p_ret;
}
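
/* Usage sketch (illustrative only, not part of the qed API): draining a
 * completion chain and re-posting the buffers. example_poll_cqes() and
 * handle_cqe() are hypothetical; qed_chain_get_cons_idx(),
 * qed_chain_consume() and qed_chain_recycle_consumed() come from this
 * header. Guarded with #if 0.
 */
#if 0
static void example_poll_cqes(struct qed_chain *chain, u16 hw_cons_idx)
{
        while (qed_chain_get_cons_idx(chain) != hw_cons_idx) {
                void *cqe = qed_chain_consume(chain);

                handle_cqe(cqe);                    /* process the element */
                qed_chain_recycle_consumed(chain);  /* hand it back to FW */
        }
}
#endif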

/**
 * @brief qed_chain_reset - Resets the chain to its start state
 *
 * @param p_chain pointer to a previously allocated chain
 */
static inline void qed_chain_reset(struct qed_chain *p_chain)
{
        u32 i;

        if (is_chain_u16(p_chain)) {
                p_chain->u.chain16.prod_idx = 0;
                p_chain->u.chain16.cons_idx = 0;
        } else {
                p_chain->u.chain32.prod_idx = 0;
                p_chain->u.chain32.cons_idx = 0;
        }
        p_chain->p_cons_elem = p_chain->p_virt_addr;
        p_chain->p_prod_elem = p_chain->p_virt_addr;

        if (p_chain->mode == QED_CHAIN_MODE_PBL) {
                /* Use "page_cnt - 1" as a reset value for the prod/cons
                 * page's indices, to avoid unnecessary page advancing on
                 * the first call to qed_chain_produce/consume. Instead,
                 * the indices will be advanced to page_cnt and then will
                 * be wrapped to 0.
                 */
                u32 reset_val = p_chain->page_cnt - 1;

                if (is_chain_u16(p_chain)) {
                        p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val;
                        p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val;
                } else {
                        p_chain->pbl.c.u32.prod_page_idx = reset_val;
                        p_chain->pbl.c.u32.cons_page_idx = reset_val;
                }
        }

        switch (p_chain->intended_use) {
        case QED_CHAIN_USE_TO_CONSUME:
                /* produce empty elements */
                for (i = 0; i < p_chain->capacity; i++)
                        qed_chain_recycle_consumed(p_chain);
                break;

        case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
        case QED_CHAIN_USE_TO_PRODUCE:
        default:
                /* Do nothing */
                break;
        }
}

/**
 * @brief qed_chain_init_params - Initializes a basic chain struct
 *
 * @param p_chain
 * @param page_cnt      number of pages in the allocated buffer
 * @param elem_size     size of each element in the chain
 * @param intended_use
 * @param mode
 * @param cnt_type
 */
static inline void qed_chain_init_params(struct qed_chain *p_chain,
                                         u32 page_cnt,
                                         u8 elem_size,
                                         enum qed_chain_use_mode intended_use,
                                         enum qed_chain_mode mode,
                                         enum qed_chain_cnt_type cnt_type)
{
        /* chain fixed parameters */
        p_chain->p_virt_addr = NULL;
        p_chain->p_phys_addr = 0;
        p_chain->elem_size = elem_size;
        p_chain->intended_use = (u8)intended_use;
        p_chain->mode = mode;
        p_chain->cnt_type = (u8)cnt_type;

        p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
        p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
        p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
        p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
        p_chain->next_page_mask = (p_chain->usable_per_page &
                                   p_chain->elem_per_page_mask);

        p_chain->page_cnt = page_cnt;
        p_chain->capacity = p_chain->usable_per_page * page_cnt;
        p_chain->size = p_chain->elem_per_page * page_cnt;

        p_chain->pbl_sp.p_phys_table = 0;
        p_chain->pbl_sp.p_virt_table = NULL;
        p_chain->pbl.pp_addr_tbl = NULL;
}
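
/* Usage sketch (illustrative only): sizing and initializing a chain before
 * attaching memory. The element count (4096) and size (32) are made-up
 * values; the DMA allocation of the pages and PBL table is normally done
 * by the qed core and is omitted here. Guarded with #if 0.
 */
#if 0
static void example_init_chain(struct qed_chain *chain)
{
        u32 page_cnt = QED_CHAIN_PAGE_CNT(4096, 32, QED_CHAIN_MODE_PBL);

        qed_chain_init_params(chain, page_cnt, 32,
                              QED_CHAIN_USE_TO_CONSUME_PRODUCE,
                              QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U16);

        /* Memory must still be attached via qed_chain_init_mem() and
         * qed_chain_init_pbl_mem(), and qed_chain_reset() called, before
         * the chain can be used.
         */
}
#endif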

/**
 * @brief qed_chain_init_mem -
 *
 * Initializes a basic chain struct with its chain memory
 *
 * @param p_chain
 * @param p_virt_addr   virtual address of allocated buffer's beginning
 * @param p_phys_addr   physical address of allocated buffer's beginning
 */
static inline void qed_chain_init_mem(struct qed_chain *p_chain,
                                      void *p_virt_addr, dma_addr_t p_phys_addr)
{
        p_chain->p_virt_addr = p_virt_addr;
        p_chain->p_phys_addr = p_phys_addr;
}

/**
 * @brief qed_chain_init_pbl_mem -
 *
 * Initializes a basic chain struct with its pbl memory
 *
 * @param p_chain
 * @param p_virt_pbl    virtual address of the pre-allocated side table
 *                      which holds the physical page addresses
 * @param p_phys_pbl    physical address of that side table
 * @param pp_addr_tbl   pre-allocated side table which holds the virtual
 *                      and DMA addresses of the chain pages
 */
static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
                                          void *p_virt_pbl,
                                          dma_addr_t p_phys_pbl,
                                          struct addr_tbl_entry *pp_addr_tbl)
{
        p_chain->pbl_sp.p_phys_table = p_phys_pbl;
        p_chain->pbl_sp.p_virt_table = p_virt_pbl;
        p_chain->pbl.pp_addr_tbl = pp_addr_tbl;
}

/**
 * @brief qed_chain_init_next_ptr_elem -
 *
 * Initializes a next-pointer element
 *
 * @param p_chain
 * @param p_virt_curr   virtual address of the chain page whose next-pointer
 *                      element is being initialized
 * @param p_virt_next   virtual address of the next chain page
 * @param p_phys_next   physical address of the next chain page
 */
static inline void
qed_chain_init_next_ptr_elem(struct qed_chain *p_chain,
                             void *p_virt_curr,
                             void *p_virt_next, dma_addr_t p_phys_next)
{
        struct qed_chain_next *p_next;
        u32 size;

        size = p_chain->elem_size * p_chain->usable_per_page;
        p_next = (struct qed_chain_next *)((u8 *)p_virt_curr + size);

        DMA_REGPAIR_LE(p_next->next_phys, p_phys_next);

        p_next->next_virt = p_virt_next;
}
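
/* Usage sketch (illustrative only): linking the pages of a two-page
 * NEXT_PTR chain into a ring. example_link_pages() is hypothetical, and
 * the page addresses would come from the caller's DMA allocation.
 * Guarded with #if 0.
 */
#if 0
static void example_link_pages(struct qed_chain *chain,
                               void *page0, dma_addr_t page0_phys,
                               void *page1, dma_addr_t page1_phys)
{
        /* Each page's trailing qed_chain_next points at the other page,
         * closing the ring.
         */
        qed_chain_init_next_ptr_elem(chain, page0, page1, page1_phys);
        qed_chain_init_next_ptr_elem(chain, page1, page0, page0_phys);
}
#endif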

/**
 * @brief qed_chain_get_last_elem -
 *
 * Returns a pointer to the last element of the chain
 *
 * @param p_chain
 *
 * @return void*
 */
static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
{
        struct qed_chain_next *p_next = NULL;
        void *p_virt_addr = NULL;
        u32 size, last_page_idx;

        if (!p_chain->p_virt_addr)
                goto out;

        switch (p_chain->mode) {
        case QED_CHAIN_MODE_NEXT_PTR:
                size = p_chain->elem_size * p_chain->usable_per_page;
                p_virt_addr = p_chain->p_virt_addr;
                p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + size);
                while (p_next->next_virt != p_chain->p_virt_addr) {
                        p_virt_addr = p_next->next_virt;
                        p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
                                                           size);
                }
                break;
        case QED_CHAIN_MODE_SINGLE:
                p_virt_addr = p_chain->p_virt_addr;
                break;
        case QED_CHAIN_MODE_PBL:
                last_page_idx = p_chain->page_cnt - 1;
                p_virt_addr = p_chain->pbl.pp_addr_tbl[last_page_idx].virt_addr;
                break;
        }

        size = p_chain->elem_size * (p_chain->usable_per_page - 1);
        p_virt_addr = (u8 *)p_virt_addr + size;
out:
        return p_virt_addr;
}

/**
 * @brief qed_chain_set_prod - sets the prod to the given value
 *
 * @param p_chain
 * @param prod_idx
 * @param p_prod_elem
 */
static inline void qed_chain_set_prod(struct qed_chain *p_chain,
                                      u32 prod_idx, void *p_prod_elem)
{
        if (p_chain->mode == QED_CHAIN_MODE_PBL) {
                u32 cur_prod, page_mask, page_cnt, page_diff;

                cur_prod = is_chain_u16(p_chain) ?
                           p_chain->u.chain16.prod_idx :
                           p_chain->u.chain32.prod_idx;

                /* Assume that number of elements in a page is power of 2 */
                page_mask = ~p_chain->elem_per_page_mask;

                /* Use "cur_prod - 1" and "prod_idx - 1" since the producer
                 * index reaches the first element of the next page before
                 * the page index is incremented. See qed_chain_produce().
                 * Index wrap around is not a problem because the difference
                 * between current and given producer indices is always
                 * positive and lower than the chain's capacity.
                 */
                page_diff = (((cur_prod - 1) & page_mask) -
                             ((prod_idx - 1) & page_mask)) /
                            p_chain->elem_per_page;

                page_cnt = qed_chain_get_page_cnt(p_chain);
                if (is_chain_u16(p_chain))
                        p_chain->pbl.c.u16.prod_page_idx =
                                (p_chain->pbl.c.u16.prod_page_idx -
                                 page_diff + page_cnt) % page_cnt;
                else
                        p_chain->pbl.c.u32.prod_page_idx =
                                (p_chain->pbl.c.u32.prod_page_idx -
                                 page_diff + page_cnt) % page_cnt;
        }

        if (is_chain_u16(p_chain))
                p_chain->u.chain16.prod_idx = (u16)prod_idx;
        else
                p_chain->u.chain32.prod_idx = prod_idx;
        p_chain->p_prod_elem = p_prod_elem;
}

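/* Worked example (illustrative) of the page_diff computation above: with
 * 256 elements per page, cur_prod == 700 and prod_idx == 300 give
 * page_diff = ((699 & ~255) - (299 & ~255)) / 256 = (512 - 256) / 256 = 1,
 * so the producer page index is moved back by one page (modulo page_cnt),
 * matching the single page boundary between indices 300 and 700.
 */
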
/**
 * @brief qed_chain_pbl_zero_mem - set chain memory to 0
 *
 * @param p_chain
 */
static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
{
        u32 i, page_cnt;

        if (p_chain->mode != QED_CHAIN_MODE_PBL)
                return;

        page_cnt = qed_chain_get_page_cnt(p_chain);

        for (i = 0; i < page_cnt; i++)
                memset(p_chain->pbl.pp_addr_tbl[i].virt_addr, 0,
                       QED_CHAIN_PAGE_SIZE);
}

#endif