/*
 * This source file includes the following definitions:
- shadow_sr_wr_ind_addr
- shadow_dst_wr_ind_addr
- ath10k_set_ring_byte
- ath10k_get_ring_byte
- ath10k_ce_read32
- ath10k_ce_write32
- ath10k_ce_dest_ring_write_index_set
- ath10k_ce_dest_ring_write_index_get
- ath10k_ce_src_ring_write_index_set
- ath10k_ce_src_ring_write_index_get
- ath10k_ce_src_ring_read_index_from_ddr
- ath10k_ce_src_ring_read_index_get
- ath10k_ce_shadow_src_ring_write_index_set
- ath10k_ce_shadow_dest_ring_write_index_set
- ath10k_ce_src_ring_base_addr_set
- ath10k_ce_set_src_ring_base_addr_hi
- ath10k_ce_src_ring_size_set
- ath10k_ce_src_ring_dmax_set
- ath10k_ce_src_ring_byte_swap_set
- ath10k_ce_dest_ring_byte_swap_set
- ath10k_ce_dest_ring_read_index_from_ddr
- ath10k_ce_dest_ring_read_index_get
- ath10k_ce_dest_ring_base_addr_set
- ath10k_ce_set_dest_ring_base_addr_hi
- ath10k_ce_dest_ring_size_set
- ath10k_ce_src_ring_highmark_set
- ath10k_ce_src_ring_lowmark_set
- ath10k_ce_dest_ring_highmark_set
- ath10k_ce_dest_ring_lowmark_set
- ath10k_ce_copy_complete_inter_enable
- ath10k_ce_copy_complete_intr_disable
- ath10k_ce_watermark_intr_disable
- ath10k_ce_error_intr_enable
- ath10k_ce_error_intr_disable
- ath10k_ce_engine_int_status_clear
- _ath10k_ce_send_nolock
- _ath10k_ce_send_nolock_64
- ath10k_ce_send_nolock
- __ath10k_ce_send_revert
- ath10k_ce_send
- ath10k_ce_num_free_src_entries
- __ath10k_ce_rx_num_free_bufs
- __ath10k_ce_rx_post_buf
- __ath10k_ce_rx_post_buf_64
- ath10k_ce_rx_update_write_idx
- ath10k_ce_rx_post_buf
- _ath10k_ce_completed_recv_next_nolock
- _ath10k_ce_completed_recv_next_nolock_64
- ath10k_ce_completed_recv_next_nolock
- ath10k_ce_completed_recv_next
- _ath10k_ce_revoke_recv_next
- _ath10k_ce_revoke_recv_next_64
- ath10k_ce_revoke_recv_next
- _ath10k_ce_completed_send_next_nolock
- _ath10k_ce_completed_send_next_nolock_64
- ath10k_ce_completed_send_next_nolock
- ath10k_ce_extract_desc_data
- ath10k_ce_extract_desc_data_64
- ath10k_ce_cancel_send_next
- ath10k_ce_completed_send_next
- ath10k_ce_per_engine_service
- ath10k_ce_per_engine_service_any
- ath10k_ce_per_engine_handler_adjust
- ath10k_ce_disable_interrupts
- ath10k_ce_enable_interrupts
- ath10k_ce_init_src_ring
- ath10k_ce_init_dest_ring
- ath10k_ce_alloc_shadow_base
- ath10k_ce_alloc_src_ring
- ath10k_ce_alloc_src_ring_64
- ath10k_ce_alloc_dest_ring
- ath10k_ce_alloc_dest_ring_64
- ath10k_ce_init_pipe
- ath10k_ce_deinit_src_ring
- ath10k_ce_deinit_dest_ring
- ath10k_ce_deinit_pipe
- _ath10k_ce_free_pipe
- _ath10k_ce_free_pipe_64
- ath10k_ce_free_pipe
- ath10k_ce_dump_registers
- ath10k_ce_set_ops
- ath10k_ce_alloc_pipe
- ath10k_ce_alloc_rri
 - ath10k_ce_free_rri
 */
#include "hif.h"
#include "ce.h"
#include "debug.h"

static inline u32 shadow_sr_wr_ind_addr(struct ath10k *ar,
					struct ath10k_ce_pipe *ce_state)
{
	u32 ce_id = ce_state->id;
	u32 addr = 0;

	switch (ce_id) {
	case 0:
		addr = 0x00032000;
		break;
	case 3:
		addr = 0x0003200C;
		break;
	case 4:
		addr = 0x00032010;
		break;
	case 5:
		addr = 0x00032014;
		break;
	case 7:
		addr = 0x0003201C;
		break;
	default:
		ath10k_warn(ar, "invalid CE id: %d", ce_id);
		break;
	}
	return addr;
}

static inline u32 shadow_dst_wr_ind_addr(struct ath10k *ar,
					 struct ath10k_ce_pipe *ce_state)
{
	u32 ce_id = ce_state->id;
	u32 addr = 0;

	switch (ce_id) {
	case 1:
		addr = 0x00032034;
		break;
	case 2:
		addr = 0x00032038;
		break;
	case 5:
		addr = 0x00032044;
		break;
	case 7:
		addr = 0x0003204C;
		break;
	case 8:
		addr = 0x00032050;
		break;
	case 9:
		addr = 0x00032054;
		break;
	case 10:
		addr = 0x00032058;
		break;
	case 11:
		addr = 0x0003205C;
		break;
	default:
		ath10k_warn(ar, "invalid CE id: %d", ce_id);
		break;
	}

	return addr;
}

static inline unsigned int
ath10k_set_ring_byte(unsigned int offset,
		     struct ath10k_hw_ce_regs_addr_map *addr_map)
{
	return ((offset << addr_map->lsb) & addr_map->mask);
}

static inline unsigned int
ath10k_get_ring_byte(unsigned int offset,
		     struct ath10k_hw_ce_regs_addr_map *addr_map)
{
	return ((offset & addr_map->mask) >> (addr_map->lsb));
}

static inline u32 ath10k_ce_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	return ce->bus_ops->read32(ar, offset);
}

static inline void ath10k_ce_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	ce->bus_ops->write32(ar, offset, value);
}

static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
						       u32 ce_ctrl_addr,
						       unsigned int n)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->dst_wr_index_addr, n);
}

static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
						      u32 ce_ctrl_addr)
{
	return ath10k_ce_read32(ar, ce_ctrl_addr +
				ar->hw_ce_regs->dst_wr_index_addr);
}

static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
						      u32 ce_ctrl_addr,
						      unsigned int n)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->sr_wr_index_addr, n);
}

static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_ce_read32(ar, ce_ctrl_addr +
				ar->hw_ce_regs->sr_wr_index_addr);
}

static inline u32 ath10k_ce_src_ring_read_index_from_ddr(struct ath10k *ar,
							 u32 ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	return ce->vaddr_rri[ce_id] & CE_DDR_RRI_MASK;
}

static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	u32 index;

	if (ar->hw_params.rri_on_ddr &&
	    (ce_state->attr_flags & CE_ATTR_DIS_INTR))
		index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_id);
	else
		index = ath10k_ce_read32(ar, ce_ctrl_addr +
					 ar->hw_ce_regs->current_srri_addr);

	return index;
}

static inline void
ath10k_ce_shadow_src_ring_write_index_set(struct ath10k *ar,
					  struct ath10k_ce_pipe *ce_state,
					  unsigned int value)
{
	ath10k_ce_write32(ar, shadow_sr_wr_ind_addr(ar, ce_state), value);
}

static inline void
ath10k_ce_shadow_dest_ring_write_index_set(struct ath10k *ar,
					   struct ath10k_ce_pipe *ce_state,
					   unsigned int value)
{
	ath10k_ce_write32(ar, shadow_dst_wr_ind_addr(ar, ce_state), value);
}

static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
						    u32 ce_id,
						    u64 addr)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id);
	u32 addr_lo = lower_32_bits(addr);

	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->sr_base_addr_lo, addr_lo);

	if (ce_state->ops->ce_set_src_ring_base_addr_hi) {
		ce_state->ops->ce_set_src_ring_base_addr_hi(ar, ce_ctrl_addr,
							    addr);
	}
}

static void ath10k_ce_set_src_ring_base_addr_hi(struct ath10k *ar,
						u32 ce_ctrl_addr,
						u64 addr)
{
	u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK;

	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->sr_base_addr_hi, addr_hi);
}

static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->sr_size_addr, n);
}

static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;

	u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					  ctrl_regs->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
			  (ctrl1_addr & ~(ctrl_regs->dmax->mask)) |
			  ath10k_set_ring_byte(n, ctrl_regs->dmax));
}

static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;

	u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					  ctrl_regs->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
			  (ctrl1_addr & ~(ctrl_regs->src_ring->mask)) |
			  ath10k_set_ring_byte(n, ctrl_regs->src_ring));
}

static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int n)
{
	struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;

	u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					  ctrl_regs->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
			  (ctrl1_addr & ~(ctrl_regs->dst_ring->mask)) |
			  ath10k_set_ring_byte(n, ctrl_regs->dst_ring));
}

static inline
u32 ath10k_ce_dest_ring_read_index_from_ddr(struct ath10k *ar, u32 ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	return (ce->vaddr_rri[ce_id] >> CE_DDR_DRRI_SHIFT) &
		CE_DDR_RRI_MASK;
}

static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	u32 index;

	if (ar->hw_params.rri_on_ddr &&
	    (ce_state->attr_flags & CE_ATTR_DIS_INTR))
		index = ath10k_ce_dest_ring_read_index_from_ddr(ar, ce_id);
	else
		index = ath10k_ce_read32(ar, ce_ctrl_addr +
					 ar->hw_ce_regs->current_drri_addr);

	return index;
}

static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
						     u32 ce_id,
						     u64 addr)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id);
	u32 addr_lo = lower_32_bits(addr);

	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->dr_base_addr_lo, addr_lo);

	if (ce_state->ops->ce_set_dest_ring_base_addr_hi) {
		ce_state->ops->ce_set_dest_ring_base_addr_hi(ar, ce_ctrl_addr,
							     addr);
	}
}

static void ath10k_ce_set_dest_ring_base_addr_hi(struct ath10k *ar,
						 u32 ce_ctrl_addr,
						 u64 addr)
{
	u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK;
	u32 reg_value;

	reg_value = ath10k_ce_read32(ar, ce_ctrl_addr +
				     ar->hw_ce_regs->dr_base_addr_hi);
	reg_value &= ~CE_DESC_ADDR_HI_MASK;
	reg_value |= addr_hi;
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->dr_base_addr_hi, reg_value);
}

static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
						u32 ce_ctrl_addr,
						unsigned int n)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->dr_size_addr, n);
}

static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
	u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
			  (addr & ~(srcr_wm->wm_high->mask)) |
			  (ath10k_set_ring_byte(n, srcr_wm->wm_high)));
}

static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
						  u32 ce_ctrl_addr,
						  unsigned int n)
{
	struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
	u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
			  (addr & ~(srcr_wm->wm_low->mask)) |
			  (ath10k_set_ring_byte(n, srcr_wm->wm_low)));
}

static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
	u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
			  (addr & ~(dstr_wm->wm_high->mask)) |
			  (ath10k_set_ring_byte(n, dstr_wm->wm_high)));
}

static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
	u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
			  (addr & ~(dstr_wm->wm_low->mask)) |
			  (ath10k_set_ring_byte(n, dstr_wm->wm_low)));
}

static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
							u32 ce_ctrl_addr)
{
	struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;

	u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					    ar->hw_ce_regs->host_ie_addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
			  host_ie_addr | host_ie->copy_complete->mask);
}

static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
							u32 ce_ctrl_addr)
{
	struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;

	u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					    ar->hw_ce_regs->host_ie_addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
			  host_ie_addr & ~(host_ie->copy_complete->mask));
}

static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;

	u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					    ar->hw_ce_regs->host_ie_addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
			  host_ie_addr & ~(wm_regs->wm_mask));
}

static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
					       u32 ce_ctrl_addr)
{
	struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;

	u32 misc_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					    ar->hw_ce_regs->misc_ie_addr);

	ath10k_ce_write32(ar,
			  ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
			  misc_ie_addr | misc_regs->err_mask);
}

static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
						u32 ce_ctrl_addr)
{
	struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;

	u32 misc_ie_addr = ath10k_ce_read32(ar,
			ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr);

	ath10k_ce_write32(ar,
			  ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
			  misc_ie_addr & ~(misc_regs->err_mask));
}

static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int mask)
{
	struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;

	ath10k_ce_write32(ar, ce_ctrl_addr + wm_regs->addr, mask);
}
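
/*
 * Guts of the CE send path: enqueue one source-ring descriptor. The caller
 * is responsible for locking; a sketch of a typical locked call, which is
 * what ath10k_ce_send() below actually does:
 *
 *	spin_lock_bh(&ce->ce_lock);
 *	ret = ath10k_ce_send_nolock(pipe, ctx, paddr, nbytes, id, flags);
 *	spin_unlock_bh(&ce->ce_lock);
 */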
static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
				  void *per_transfer_context,
				  dma_addr_t buffer,
				  unsigned int nbytes,
				  unsigned int transfer_id,
				  unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	struct ce_desc *desc, sdesc;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	u32 ctrl_addr = ce_state->ctrl_addr;
	u32 desc_flags = 0;
	int ret = 0;

	if (nbytes > ce_state->src_sz_max)
		ath10k_warn(ar, "%s: sending more than we can (nbytes: %d, max: %d)\n",
			    __func__, nbytes, ce_state->src_sz_max);

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		ret = -ENOSR;
		goto exit;
	}

	desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
				   write_index);

	desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);

	if (flags & CE_SEND_FLAG_GATHER)
		desc_flags |= CE_DESC_FLAGS_GATHER;
	if (flags & CE_SEND_FLAG_BYTE_SWAP)
		desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;

	sdesc.addr = __cpu_to_le32(buffer);
	sdesc.nbytes = __cpu_to_le16(nbytes);
	sdesc.flags = __cpu_to_le16(desc_flags);

	*desc = sdesc;

	src_ring->per_transfer_context[write_index] = per_transfer_context;

	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

	if (!(flags & CE_SEND_FLAG_GATHER))
		ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);

	src_ring->write_index = write_index;
exit:
	return ret;
}

static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state,
				     void *per_transfer_context,
				     dma_addr_t buffer,
				     unsigned int nbytes,
				     unsigned int transfer_id,
				     unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	struct ce_desc_64 *desc, sdesc;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int write_index = src_ring->write_index;
	u32 ctrl_addr = ce_state->ctrl_addr;
	__le32 *addr;
	u32 desc_flags = 0;
	int ret = 0;

	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
		return -ESHUTDOWN;

	if (nbytes > ce_state->src_sz_max)
		ath10k_warn(ar, "%s: sending more than we can (nbytes: %d, max: %d)\n",
			    __func__, nbytes, ce_state->src_sz_max);

	if (ar->hw_params.rri_on_ddr)
		sw_index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_state->id);
	else
		sw_index = src_ring->sw_index;

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		ret = -ENOSR;
		goto exit;
	}

	desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space,
				      write_index);

	desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);

	if (flags & CE_SEND_FLAG_GATHER)
		desc_flags |= CE_DESC_FLAGS_GATHER;

	if (flags & CE_SEND_FLAG_BYTE_SWAP)
		desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;

	addr = (__le32 *)&sdesc.addr;
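
	/*
	 * Pack the upper bits of the 64-bit DMA address (masked by
	 * CE_DESC_ADDR_HI_MASK) into the second descriptor dword alongside
	 * the send flags.
	 */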
	flags |= upper_32_bits(buffer) & CE_DESC_ADDR_HI_MASK;
	addr[0] = __cpu_to_le32(buffer);
	addr[1] = __cpu_to_le32(flags);
	if (flags & CE_SEND_FLAG_GATHER)
		addr[1] |= __cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER);
	else
		addr[1] &= ~(__cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER));

	sdesc.nbytes = __cpu_to_le16(nbytes);
	sdesc.flags = __cpu_to_le16(desc_flags);

	*desc = sdesc;

	src_ring->per_transfer_context[write_index] = per_transfer_context;

	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

	if (!(flags & CE_SEND_FLAG_GATHER)) {
		if (ar->hw_params.shadow_reg_support)
			ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state,
								  write_index);
		else
			ath10k_ce_src_ring_write_index_set(ar, ctrl_addr,
							   write_index);
	}

	src_ring->write_index = write_index;
exit:
	return ret;
}

int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
			  void *per_transfer_context,
			  dma_addr_t buffer,
			  unsigned int nbytes,
			  unsigned int transfer_id,
			  unsigned int flags)
{
	return ce_state->ops->ce_send_nolock(ce_state, per_transfer_context,
					     buffer, nbytes, transfer_id, flags);
}
EXPORT_SYMBOL(ath10k_ce_send_nolock);

void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_ring *src_ring = pipe->src_ring;
	u32 ctrl_addr = pipe->ctrl_addr;

	lockdep_assert_held(&ce->ce_lock);
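
	/*
	 * Roll back the most recent enqueue: only legal when at least one
	 * entry is outstanding (write_index != sw_index) and the new write
	 * index has not yet been pushed to the hardware, which the two
	 * WARNs below verify.
	 */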
	if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index))
		return;

	if (WARN_ON_ONCE(src_ring->write_index ==
			 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr)))
		return;

	src_ring->write_index--;
	src_ring->write_index &= src_ring->nentries_mask;

	src_ring->per_transfer_context[src_ring->write_index] = NULL;
}
EXPORT_SYMBOL(__ath10k_ce_send_revert);

int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
		   void *per_transfer_context,
		   dma_addr_t buffer,
		   unsigned int nbytes,
		   unsigned int transfer_id,
		   unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret;

	spin_lock_bh(&ce->ce_lock);
	ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
				    buffer, nbytes, transfer_id, flags);
	spin_unlock_bh(&ce->ce_lock);

	return ret;
}
EXPORT_SYMBOL(ath10k_ce_send);

int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int delta;

	spin_lock_bh(&ce->ce_lock);
	delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
			      pipe->src_ring->write_index,
			      pipe->src_ring->sw_index - 1);
	spin_unlock_bh(&ce->ce_lock);

	return delta;
}
EXPORT_SYMBOL(ath10k_ce_num_free_src_entries);

int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	unsigned int sw_index = dest_ring->sw_index;

	lockdep_assert_held(&ce->ce_lock);

	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}
EXPORT_SYMBOL(__ath10k_ce_rx_num_free_bufs);

static int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
				   dma_addr_t paddr)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	unsigned int sw_index = dest_ring->sw_index;
	struct ce_desc *base = dest_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
	u32 ctrl_addr = pipe->ctrl_addr;

	lockdep_assert_held(&ce->ce_lock);

	if ((pipe->id != 5) &&
	    CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
		return -ENOSPC;

	desc->addr = __cpu_to_le32(paddr);
	desc->nbytes = 0;

	dest_ring->per_transfer_context[write_index] = ctx;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
	dest_ring->write_index = write_index;

	return 0;
}

static int __ath10k_ce_rx_post_buf_64(struct ath10k_ce_pipe *pipe,
				      void *ctx,
				      dma_addr_t paddr)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	unsigned int sw_index = dest_ring->sw_index;
	struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
	struct ce_desc_64 *desc =
			CE_DEST_RING_TO_DESC_64(base, write_index);
	u32 ctrl_addr = pipe->ctrl_addr;

	lockdep_assert_held(&ce->ce_lock);

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
		return -ENOSPC;

	desc->addr = __cpu_to_le64(paddr);
	desc->addr &= __cpu_to_le64(CE_DESC_ADDR_MASK);

	desc->nbytes = 0;

	dest_ring->per_transfer_context[write_index] = ctx;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
	dest_ring->write_index = write_index;

	return 0;
}

void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	u32 ctrl_addr = pipe->ctrl_addr;
	u32 cur_write_idx = ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
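
	/*
	 * Never let the write index catch up with the read index: keep one
	 * slot empty so a completely full ring cannot be confused with an
	 * empty one.
	 */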
	if (((cur_write_idx + nentries) & nentries_mask) == dest_ring->sw_index)
		nentries -= 1;

	write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
	dest_ring->write_index = write_index;
}
EXPORT_SYMBOL(ath10k_ce_rx_update_write_idx);

int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
			  dma_addr_t paddr)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret;

	spin_lock_bh(&ce->ce_lock);
	ret = pipe->ops->ce_rx_post_buf(pipe, ctx, paddr);
	spin_unlock_bh(&ce->ce_lock);

	return ret;
}
EXPORT_SYMBOL(ath10k_ce_rx_post_buf);
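
/*
 * Guts of ath10k_ce_completed_recv_next: reap one completed destination-ring
 * entry. The caller is responsible for locking; see
 * ath10k_ce_completed_recv_next() for the locked wrapper.
 */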
static int
_ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
				      void **per_transfer_contextp,
				      unsigned int *nbytesp)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;

	struct ce_desc *base = dest_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
	struct ce_desc sdesc;
	u16 nbytes;
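
	/* Copy the descriptor out in one read before inspecting it. */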
	sdesc = *desc;

	nbytes = __le16_to_cpu(sdesc.nbytes);
	if (nbytes == 0) {
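		/*
		 * This descriptor has not been filled by the target yet
		 * (or the update is not visible to the host), so there is
		 * nothing to reap here.
		 */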
		return -EIO;
	}

	desc->nbytes = 0;

	*nbytesp = nbytes;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];
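
	/*
	 * Copy engine 5 appears to recycle its receive contexts in place
	 * (note the pipe->id == 5 special case in __ath10k_ce_rx_post_buf),
	 * so only clear the per-transfer context for the other CEs.
	 */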
	if (ce_state->id != 5)
		dest_ring->per_transfer_context[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;

	return 0;
}

static int
_ath10k_ce_completed_recv_next_nolock_64(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_contextp,
					 unsigned int *nbytesp)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;
	struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
	struct ce_desc_64 *desc =
		CE_DEST_RING_TO_DESC_64(base, sw_index);
	struct ce_desc_64 sdesc;
	u16 nbytes;

	sdesc = *desc;

	nbytes = __le16_to_cpu(sdesc.nbytes);
	if (nbytes == 0) {
		return -EIO;
	}

	desc->nbytes = 0;

	*nbytesp = nbytes;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	if (ce_state->id != 5)
		dest_ring->per_transfer_context[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;

	return 0;
}

int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_ctx,
					 unsigned int *nbytesp)
{
	return ce_state->ops->ce_completed_recv_next_nolock(ce_state,
							    per_transfer_ctx,
							    nbytesp);
}
EXPORT_SYMBOL(ath10k_ce_completed_recv_next_nolock);

int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  unsigned int *nbytesp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret;

	spin_lock_bh(&ce->ce_lock);
	ret = ce_state->ops->ce_completed_recv_next_nolock(ce_state,
							   per_transfer_contextp,
							   nbytesp);

	spin_unlock_bh(&ce->ce_lock);

	return ret;
}
EXPORT_SYMBOL(ath10k_ce_completed_recv_next);

static int _ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
				       void **per_transfer_contextp,
				       dma_addr_t *bufferp)
{
	struct ath10k_ce_ring *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_ce *ce;

	dest_ring = ce_state->dest_ring;

	if (!dest_ring)
		return -EIO;

	ar = ce_state->ar;
	ce = ath10k_ce_priv(ar);

	spin_lock_bh(&ce->ce_lock);

	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct ce_desc *base = dest_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);

		*bufferp = __le32_to_cpu(desc->addr);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				dest_ring->per_transfer_context[sw_index];

		dest_ring->per_transfer_context[sw_index] = NULL;
		desc->nbytes = 0;

		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ce->ce_lock);

	return ret;
}

static int _ath10k_ce_revoke_recv_next_64(struct ath10k_ce_pipe *ce_state,
					  void **per_transfer_contextp,
					  dma_addr_t *bufferp)
{
	struct ath10k_ce_ring *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_ce *ce;

	dest_ring = ce_state->dest_ring;

	if (!dest_ring)
		return -EIO;

	ar = ce_state->ar;
	ce = ath10k_ce_priv(ar);

	spin_lock_bh(&ce->ce_lock);

	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
		struct ce_desc_64 *desc =
			CE_DEST_RING_TO_DESC_64(base, sw_index);

		*bufferp = __le64_to_cpu(desc->addr);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				dest_ring->per_transfer_context[sw_index];

		dest_ring->per_transfer_context[sw_index] = NULL;
		desc->nbytes = 0;

		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ce->ce_lock);

	return ret;
}

int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       dma_addr_t *bufferp)
{
	return ce_state->ops->ce_revoke_recv_next(ce_state,
						  per_transfer_contextp,
						  bufferp);
}
EXPORT_SYMBOL(ath10k_ce_revoke_recv_next);
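
/*
 * Guts of ath10k_ce_completed_send_next: reap one completed source-ring
 * entry. The caller is responsible for locking; see
 * ath10k_ce_completed_send_next() for the locked wrapper.
 */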
static int _ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
						 void **per_transfer_contextp)
{
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int read_index;
	struct ce_desc *desc;

	if (src_ring->hw_index == sw_index) {
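		/*
		 * The SW completion index has caught up with the cached
		 * HW read index; re-read the index from the hardware to
		 * see whether SW has really caught up or the cached value
		 * was merely stale.
		 */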
		read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
		if (read_index == 0xffffffff)
			return -ENODEV;

		read_index &= nentries_mask;
		src_ring->hw_index = read_index;
	}

	if (ar->hw_params.rri_on_ddr)
		read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	else
		read_index = src_ring->hw_index;

	if (read_index == sw_index)
		return -EIO;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			src_ring->per_transfer_context[sw_index];

	src_ring->per_transfer_context[sw_index] = NULL;
	desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
				   sw_index);
	desc->nbytes = 0;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	src_ring->sw_index = sw_index;

	return 0;
}

static int _ath10k_ce_completed_send_next_nolock_64(struct ath10k_ce_pipe *ce_state,
						    void **per_transfer_contextp)
{
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int read_index;
	struct ce_desc_64 *desc;

	if (src_ring->hw_index == sw_index) {
		read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
		if (read_index == 0xffffffff)
			return -ENODEV;

		read_index &= nentries_mask;
		src_ring->hw_index = read_index;
	}

	if (ar->hw_params.rri_on_ddr)
		read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	else
		read_index = src_ring->hw_index;

	if (read_index == sw_index)
		return -EIO;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			src_ring->per_transfer_context[sw_index];

	src_ring->per_transfer_context[sw_index] = NULL;
	desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space,
				      sw_index);
	desc->nbytes = 0;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	src_ring->sw_index = sw_index;

	return 0;
}

int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_contextp)
{
	return ce_state->ops->ce_completed_send_next_nolock(ce_state,
							    per_transfer_contextp);
}
EXPORT_SYMBOL(ath10k_ce_completed_send_next_nolock);

static void ath10k_ce_extract_desc_data(struct ath10k *ar,
					struct ath10k_ce_ring *src_ring,
					u32 sw_index,
					dma_addr_t *bufferp,
					u32 *nbytesp,
					u32 *transfer_idp)
{
	struct ce_desc *base = src_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);

	*bufferp = __le32_to_cpu(desc->addr);
	*nbytesp = __le16_to_cpu(desc->nbytes);
	*transfer_idp = MS(__le16_to_cpu(desc->flags),
			   CE_DESC_FLAGS_META_DATA);
}

static void ath10k_ce_extract_desc_data_64(struct ath10k *ar,
					   struct ath10k_ce_ring *src_ring,
					   u32 sw_index,
					   dma_addr_t *bufferp,
					   u32 *nbytesp,
					   u32 *transfer_idp)
{
	struct ce_desc_64 *base = src_ring->base_addr_owner_space;
	struct ce_desc_64 *desc =
		CE_SRC_RING_TO_DESC_64(base, sw_index);

	*bufferp = __le64_to_cpu(desc->addr);
	*nbytesp = __le16_to_cpu(desc->nbytes);
	*transfer_idp = MS(__le16_to_cpu(desc->flags),
			   CE_DESC_FLAGS_META_DATA);
}

int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       dma_addr_t *bufferp,
			       unsigned int *nbytesp,
			       unsigned int *transfer_idp)
{
	struct ath10k_ce_ring *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_ce *ce;

	src_ring = ce_state->src_ring;

	if (!src_ring)
		return -EIO;

	ar = ce_state->ar;
	ce = ath10k_ce_priv(ar);

	spin_lock_bh(&ce->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (write_index != sw_index) {
		ce_state->ops->ce_extract_desc_data(ar, src_ring, sw_index,
						    bufferp, nbytesp,
						    transfer_idp);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		src_ring->per_transfer_context[sw_index] = NULL;

		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ce->ce_lock);

	return ret;
}
EXPORT_SYMBOL(ath10k_ce_cancel_send_next);

int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret;

	spin_lock_bh(&ce->ce_lock);
	ret = ath10k_ce_completed_send_next_nolock(ce_state,
						   per_transfer_contextp);
	spin_unlock_bh(&ce->ce_lock);

	return ret;
}
EXPORT_SYMBOL(ath10k_ce_completed_send_next);
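
/*
 * Guts of the per-CE interrupt handler: ack the copy-complete interrupt,
 * invoke the send/recv callbacks outside the lock, then clear any spurious
 * watermark events (watermark interrupts are kept disabled, see
 * ath10k_ce_per_engine_handler_adjust() below).
 */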
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
	u32 ctrl_addr = ce_state->ctrl_addr;

	spin_lock_bh(&ce->ce_lock);

	ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
					  wm_regs->cc_mask);

	spin_unlock_bh(&ce->ce_lock);

	if (ce_state->recv_cb)
		ce_state->recv_cb(ce_state);

	if (ce_state->send_cb)
		ce_state->send_cb(ce_state);

	spin_lock_bh(&ce->ce_lock);

	ath10k_ce_engine_int_status_clear(ar, ctrl_addr, wm_regs->wm_mask);

	spin_unlock_bh(&ce->ce_lock);
}
EXPORT_SYMBOL(ath10k_ce_per_engine_service);
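
/*
 * Service every CE flagged in the interrupt summary; useful when several
 * copy engines share one interrupt line.
 */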
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
	int ce_id;
	u32 intr_summary;

	intr_summary = ath10k_ce_interrupt_summary(ar);

	for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
		if (intr_summary & (1 << ce_id))
			intr_summary &= ~(1 << ce_id);
		else
			continue;

		ath10k_ce_per_engine_service(ar, ce_id);
	}
}
EXPORT_SYMBOL(ath10k_ce_per_engine_service_any);
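
/*
 * Enable the copy-complete interrupt only for pipes that have a send or
 * receive callback installed and do not opt out via CE_ATTR_DIS_INTR;
 * watermark interrupts always stay disabled.
 */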
static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state)
{
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	bool disable_copy_compl_intr = ce_state->attr_flags & CE_ATTR_DIS_INTR;

	if ((!disable_copy_compl_intr) &&
	    (ce_state->send_cb || ce_state->recv_cb))
		ath10k_ce_copy_complete_inter_enable(ar, ctrl_addr);
	else
		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);

	ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
}

int ath10k_ce_disable_interrupts(struct ath10k *ar)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state;
	u32 ctrl_addr;
	int ce_id;

	for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
		ce_state = &ce->ce_states[ce_id];
		if (ce_state->attr_flags & CE_ATTR_POLL)
			continue;

		ctrl_addr = ath10k_ce_base_address(ar, ce_id);

		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
		ath10k_ce_error_intr_disable(ar, ctrl_addr);
		ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
	}

	return 0;
}
EXPORT_SYMBOL(ath10k_ce_disable_interrupts);

void ath10k_ce_enable_interrupts(struct ath10k *ar)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ce_id;
	struct ath10k_ce_pipe *ce_state;

	for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
		ce_state = &ce->ce_states[ce_id];
		if (ce_state->attr_flags & CE_ATTR_POLL)
			continue;

		ath10k_ce_per_engine_handler_adjust(ce_state);
	}
}
EXPORT_SYMBOL(ath10k_ce_enable_interrupts);

static int ath10k_ce_init_src_ring(struct ath10k *ar,
				   unsigned int ce_id,
				   const struct ce_attr *attr)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	nentries = roundup_pow_of_two(attr->src_nentries);

	if (ar->hw_params.target_64bit)
		memset(src_ring->base_addr_owner_space, 0,
		       nentries * sizeof(struct ce_desc_64));
	else
		memset(src_ring->base_addr_owner_space, 0,
		       nentries * sizeof(struct ce_desc));

	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	src_ring->sw_index &= src_ring->nentries_mask;
	src_ring->hw_index = src_ring->sw_index;

	src_ring->write_index =
		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
	src_ring->write_index &= src_ring->nentries_mask;

	ath10k_ce_src_ring_base_addr_set(ar, ce_id,
					 src_ring->base_addr_ce_space);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot init ce src ring id %d entries %d base_addr %pK\n",
		   ce_id, nentries, src_ring->base_addr_owner_space);

	return 0;
}

static int ath10k_ce_init_dest_ring(struct ath10k *ar,
				    unsigned int ce_id,
				    const struct ce_attr *attr)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	nentries = roundup_pow_of_two(attr->dest_nentries);

	if (ar->hw_params.target_64bit)
		memset(dest_ring->base_addr_owner_space, 0,
		       nentries * sizeof(struct ce_desc_64));
	else
		memset(dest_ring->base_addr_owner_space, 0,
		       nentries * sizeof(struct ce_desc));

	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
	dest_ring->sw_index &= dest_ring->nentries_mask;
	dest_ring->write_index =
		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
	dest_ring->write_index &= dest_ring->nentries_mask;

	ath10k_ce_dest_ring_base_addr_set(ar, ce_id,
					  dest_ring->base_addr_ce_space);
	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot ce dest ring id %d entries %d base_addr %pK\n",
		   ce_id, nentries, dest_ring->base_addr_owner_space);

	return 0;
}

static int ath10k_ce_alloc_shadow_base(struct ath10k *ar,
				       struct ath10k_ce_ring *src_ring,
				       u32 nentries)
{
	src_ring->shadow_base_unaligned = kcalloc(nentries,
						  sizeof(struct ce_desc_64),
						  GFP_KERNEL);
	if (!src_ring->shadow_base_unaligned)
		return -ENOMEM;

	src_ring->shadow_base = (struct ce_desc_64 *)
			PTR_ALIGN(src_ring->shadow_base_unaligned,
				  CE_DESC_RING_ALIGN);
	return 0;
}

static struct ath10k_ce_ring *
ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
			 const struct ce_attr *attr)
{
	struct ath10k_ce_ring *src_ring;
	u32 nentries = attr->src_nentries;
	dma_addr_t base_addr;
	int ret;

	nentries = roundup_pow_of_two(nentries);

	src_ring = kzalloc(struct_size(src_ring, per_transfer_context,
				       nentries), GFP_KERNEL);
	if (src_ring == NULL)
		return ERR_PTR(-ENOMEM);

	src_ring->nentries = nentries;
	src_ring->nentries_mask = nentries - 1;
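
	/*
	 * Allocate CE_DESC_RING_ALIGN extra bytes so that both the CPU and
	 * CE views of the descriptor ring can be aligned below.
	 */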
	src_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ar->dev,
				   (nentries * sizeof(struct ce_desc) +
				    CE_DESC_RING_ALIGN),
				   &base_addr, GFP_KERNEL);
	if (!src_ring->base_addr_owner_space_unaligned) {
		kfree(src_ring);
		return ERR_PTR(-ENOMEM);
	}

	src_ring->base_addr_ce_space_unaligned = base_addr;

	src_ring->base_addr_owner_space =
		PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
			  CE_DESC_RING_ALIGN);
	src_ring->base_addr_ce_space =
		ALIGN(src_ring->base_addr_ce_space_unaligned,
		      CE_DESC_RING_ALIGN);

	if (ar->hw_params.shadow_reg_support) {
		ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
		if (ret) {
			dma_free_coherent(ar->dev,
					  (nentries * sizeof(struct ce_desc) +
					   CE_DESC_RING_ALIGN),
					  src_ring->base_addr_owner_space_unaligned,
					  base_addr);
			kfree(src_ring);
			return ERR_PTR(ret);
		}
	}

	return src_ring;
}

static struct ath10k_ce_ring *
ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id,
			    const struct ce_attr *attr)
{
	struct ath10k_ce_ring *src_ring;
	u32 nentries = attr->src_nentries;
	dma_addr_t base_addr;
	int ret;

	nentries = roundup_pow_of_two(nentries);

	src_ring = kzalloc(struct_size(src_ring, per_transfer_context,
				       nentries), GFP_KERNEL);
	if (!src_ring)
		return ERR_PTR(-ENOMEM);

	src_ring->nentries = nentries;
	src_ring->nentries_mask = nentries - 1;

	src_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ar->dev,
				   (nentries * sizeof(struct ce_desc_64) +
				    CE_DESC_RING_ALIGN),
				   &base_addr, GFP_KERNEL);
	if (!src_ring->base_addr_owner_space_unaligned) {
		kfree(src_ring);
		return ERR_PTR(-ENOMEM);
	}

	src_ring->base_addr_ce_space_unaligned = base_addr;

	src_ring->base_addr_owner_space =
		PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
			  CE_DESC_RING_ALIGN);
	src_ring->base_addr_ce_space =
		ALIGN(src_ring->base_addr_ce_space_unaligned,
		      CE_DESC_RING_ALIGN);

	if (ar->hw_params.shadow_reg_support) {
		ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
		if (ret) {
			dma_free_coherent(ar->dev,
					  (nentries * sizeof(struct ce_desc_64) +
					   CE_DESC_RING_ALIGN),
					  src_ring->base_addr_owner_space_unaligned,
					  base_addr);
			kfree(src_ring);
			return ERR_PTR(ret);
		}
	}

	return src_ring;
}

static struct ath10k_ce_ring *
ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
			  const struct ce_attr *attr)
{
	struct ath10k_ce_ring *dest_ring;
	u32 nentries;
	dma_addr_t base_addr;

	nentries = roundup_pow_of_two(attr->dest_nentries);

	dest_ring = kzalloc(struct_size(dest_ring, per_transfer_context,
					nentries), GFP_KERNEL);
	if (dest_ring == NULL)
		return ERR_PTR(-ENOMEM);

	dest_ring->nentries = nentries;
	dest_ring->nentries_mask = nentries - 1;

	dest_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ar->dev,
				   (nentries * sizeof(struct ce_desc) +
				    CE_DESC_RING_ALIGN),
				   &base_addr, GFP_KERNEL);
	if (!dest_ring->base_addr_owner_space_unaligned) {
		kfree(dest_ring);
		return ERR_PTR(-ENOMEM);
	}

	dest_ring->base_addr_ce_space_unaligned = base_addr;

	dest_ring->base_addr_owner_space =
		PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
			  CE_DESC_RING_ALIGN);
	dest_ring->base_addr_ce_space =
		ALIGN(dest_ring->base_addr_ce_space_unaligned,
		      CE_DESC_RING_ALIGN);

	return dest_ring;
}

static struct ath10k_ce_ring *
ath10k_ce_alloc_dest_ring_64(struct ath10k *ar, unsigned int ce_id,
			     const struct ce_attr *attr)
{
	struct ath10k_ce_ring *dest_ring;
	u32 nentries;
	dma_addr_t base_addr;

	nentries = roundup_pow_of_two(attr->dest_nentries);

	dest_ring = kzalloc(struct_size(dest_ring, per_transfer_context,
					nentries), GFP_KERNEL);
	if (!dest_ring)
		return ERR_PTR(-ENOMEM);

	dest_ring->nentries = nentries;
	dest_ring->nentries_mask = nentries - 1;

	dest_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ar->dev,
				   (nentries * sizeof(struct ce_desc_64) +
				    CE_DESC_RING_ALIGN),
				   &base_addr, GFP_KERNEL);
	if (!dest_ring->base_addr_owner_space_unaligned) {
		kfree(dest_ring);
		return ERR_PTR(-ENOMEM);
	}

	dest_ring->base_addr_ce_space_unaligned = base_addr;
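
	/*
	 * Zero the whole ring up front so the target never sees stale
	 * descriptor contents.
	 */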
	memset(dest_ring->base_addr_owner_space_unaligned, 0,
	       nentries * sizeof(struct ce_desc_64) + CE_DESC_RING_ALIGN);

	dest_ring->base_addr_owner_space =
		PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
			  CE_DESC_RING_ALIGN);
	dest_ring->base_addr_ce_space =
		ALIGN(dest_ring->base_addr_ce_space_unaligned,
		      CE_DESC_RING_ALIGN);

	return dest_ring;
}
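
/*
 * Initialize a previously allocated copy engine: reset the software indices
 * and program the ring base, size and watermarks. Safe for pipes that have
 * only a source or only a destination ring, since each ring is set up only
 * when the corresponding nentries attribute is non-zero.
 */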
int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
			const struct ce_attr *attr)
{
	int ret;

	if (attr->src_nentries) {
		ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
		if (ret) {
			ath10k_err(ar, "Failed to initialize CE src ring for ID: %d (%d)\n",
				   ce_id, ret);
			return ret;
		}
	}

	if (attr->dest_nentries) {
		ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
		if (ret) {
			ath10k_err(ar, "Failed to initialize CE dest ring for ID: %d (%d)\n",
				   ce_id, ret);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(ath10k_ce_init_pipe);

static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
{
	u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	ath10k_ce_src_ring_base_addr_set(ar, ce_id, 0);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
}

static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
{
	u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	ath10k_ce_dest_ring_base_addr_set(ar, ce_id, 0);
	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
}

void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
{
	ath10k_ce_deinit_src_ring(ar, ce_id);
	ath10k_ce_deinit_dest_ring(ar, ce_id);
}
EXPORT_SYMBOL(ath10k_ce_deinit_pipe);

static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];

	if (ce_state->src_ring) {
		if (ar->hw_params.shadow_reg_support)
			kfree(ce_state->src_ring->shadow_base_unaligned);
		dma_free_coherent(ar->dev,
				  (ce_state->src_ring->nentries *
				   sizeof(struct ce_desc) +
				   CE_DESC_RING_ALIGN),
				  ce_state->src_ring->base_addr_owner_space,
				  ce_state->src_ring->base_addr_ce_space);
		kfree(ce_state->src_ring);
	}

	if (ce_state->dest_ring) {
		dma_free_coherent(ar->dev,
				  (ce_state->dest_ring->nentries *
				   sizeof(struct ce_desc) +
				   CE_DESC_RING_ALIGN),
				  ce_state->dest_ring->base_addr_owner_space,
				  ce_state->dest_ring->base_addr_ce_space);
		kfree(ce_state->dest_ring);
	}

	ce_state->src_ring = NULL;
	ce_state->dest_ring = NULL;
}

static void _ath10k_ce_free_pipe_64(struct ath10k *ar, int ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];

	if (ce_state->src_ring) {
		if (ar->hw_params.shadow_reg_support)
			kfree(ce_state->src_ring->shadow_base_unaligned);
		dma_free_coherent(ar->dev,
				  (ce_state->src_ring->nentries *
				   sizeof(struct ce_desc_64) +
				   CE_DESC_RING_ALIGN),
				  ce_state->src_ring->base_addr_owner_space,
				  ce_state->src_ring->base_addr_ce_space);
		kfree(ce_state->src_ring);
	}

	if (ce_state->dest_ring) {
		dma_free_coherent(ar->dev,
				  (ce_state->dest_ring->nentries *
				   sizeof(struct ce_desc_64) +
				   CE_DESC_RING_ALIGN),
				  ce_state->dest_ring->base_addr_owner_space,
				  ce_state->dest_ring->base_addr_ce_space);
		kfree(ce_state->dest_ring);
	}

	ce_state->src_ring = NULL;
	ce_state->dest_ring = NULL;
}

void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];

	ce_state->ops->ce_free_pipe(ar, ce_id);
}
EXPORT_SYMBOL(ath10k_ce_free_pipe);

void ath10k_ce_dump_registers(struct ath10k *ar,
			      struct ath10k_fw_crash_data *crash_data)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_crash_data ce_data;
	u32 addr, id;

	lockdep_assert_held(&ar->dump_mutex);

	ath10k_err(ar, "Copy Engine register dump:\n");

	spin_lock_bh(&ce->ce_lock);
	for (id = 0; id < CE_COUNT; id++) {
		addr = ath10k_ce_base_address(ar, id);
		ce_data.base_addr = cpu_to_le32(addr);

		ce_data.src_wr_idx =
			cpu_to_le32(ath10k_ce_src_ring_write_index_get(ar, addr));
		ce_data.src_r_idx =
			cpu_to_le32(ath10k_ce_src_ring_read_index_get(ar, addr));
		ce_data.dst_wr_idx =
			cpu_to_le32(ath10k_ce_dest_ring_write_index_get(ar, addr));
		ce_data.dst_r_idx =
			cpu_to_le32(ath10k_ce_dest_ring_read_index_get(ar, addr));

		if (crash_data)
			crash_data->ce_crash_data[id] = ce_data;

		ath10k_err(ar, "[%02d]: 0x%08x %3u %3u %3u %3u", id,
			   le32_to_cpu(ce_data.base_addr),
			   le32_to_cpu(ce_data.src_wr_idx),
			   le32_to_cpu(ce_data.src_r_idx),
			   le32_to_cpu(ce_data.dst_wr_idx),
			   le32_to_cpu(ce_data.dst_r_idx));
	}

	spin_unlock_bh(&ce->ce_lock);
}
EXPORT_SYMBOL(ath10k_ce_dump_registers);

static const struct ath10k_ce_ops ce_ops = {
	.ce_alloc_src_ring = ath10k_ce_alloc_src_ring,
	.ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring,
	.ce_rx_post_buf = __ath10k_ce_rx_post_buf,
	.ce_completed_recv_next_nolock = _ath10k_ce_completed_recv_next_nolock,
	.ce_revoke_recv_next = _ath10k_ce_revoke_recv_next,
	.ce_extract_desc_data = ath10k_ce_extract_desc_data,
	.ce_free_pipe = _ath10k_ce_free_pipe,
	.ce_send_nolock = _ath10k_ce_send_nolock,
	.ce_set_src_ring_base_addr_hi = NULL,
	.ce_set_dest_ring_base_addr_hi = NULL,
	.ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock,
};

static const struct ath10k_ce_ops ce_64_ops = {
	.ce_alloc_src_ring = ath10k_ce_alloc_src_ring_64,
	.ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring_64,
	.ce_rx_post_buf = __ath10k_ce_rx_post_buf_64,
	.ce_completed_recv_next_nolock =
		_ath10k_ce_completed_recv_next_nolock_64,
	.ce_revoke_recv_next = _ath10k_ce_revoke_recv_next_64,
	.ce_extract_desc_data = ath10k_ce_extract_desc_data_64,
	.ce_free_pipe = _ath10k_ce_free_pipe_64,
	.ce_send_nolock = _ath10k_ce_send_nolock_64,
	.ce_set_src_ring_base_addr_hi = ath10k_ce_set_src_ring_base_addr_hi,
	.ce_set_dest_ring_base_addr_hi = ath10k_ce_set_dest_ring_base_addr_hi,
	.ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock_64,
};

static void ath10k_ce_set_ops(struct ath10k *ar,
			      struct ath10k_ce_pipe *ce_state)
{
	switch (ar->hw_rev) {
	case ATH10K_HW_WCN3990:
		ce_state->ops = &ce_64_ops;
		break;
	default:
		ce_state->ops = &ce_ops;
		break;
	}
}

int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
			 const struct ce_attr *attr)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	int ret;

	ath10k_ce_set_ops(ar, ce_state);
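
	/*
	 * Make sure there are enough CE ring buffer entries for HTT TX to
	 * avoid overflowing the source ring (note the factor of two per
	 * MSDU descriptor in the checks below).
	 */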
	BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
	BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
	BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));

	ce_state->ar = ar;
	ce_state->id = ce_id;
	ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
	ce_state->attr_flags = attr->flags;
	ce_state->src_sz_max = attr->src_sz_max;

	if (attr->src_nentries)
		ce_state->send_cb = attr->send_cb;

	if (attr->dest_nentries)
		ce_state->recv_cb = attr->recv_cb;

	if (attr->src_nentries) {
		ce_state->src_ring =
			ce_state->ops->ce_alloc_src_ring(ar, ce_id, attr);
		if (IS_ERR(ce_state->src_ring)) {
			ret = PTR_ERR(ce_state->src_ring);
			ath10k_err(ar, "failed to alloc CE src ring %d: %d\n",
				   ce_id, ret);
			ce_state->src_ring = NULL;
			return ret;
		}
	}

	if (attr->dest_nentries) {
		ce_state->dest_ring = ce_state->ops->ce_alloc_dst_ring(ar,
								       ce_id,
								       attr);
		if (IS_ERR(ce_state->dest_ring)) {
			ret = PTR_ERR(ce_state->dest_ring);
			ath10k_err(ar, "failed to alloc CE dest ring %d: %d\n",
				   ce_id, ret);
			ce_state->dest_ring = NULL;
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(ath10k_ce_alloc_pipe);

void ath10k_ce_alloc_rri(struct ath10k *ar)
{
	int i;
	u32 value;
	u32 ctrl1_regs;
	u32 ce_base_addr;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	ce->vaddr_rri = dma_alloc_coherent(ar->dev,
					   (CE_COUNT * sizeof(u32)),
					   &ce->paddr_rri, GFP_KERNEL);

	if (!ce->vaddr_rri)
		return;

	ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_low,
			  lower_32_bits(ce->paddr_rri));
	ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_high,
			  (upper_32_bits(ce->paddr_rri) &
			   CE_DESC_ADDR_HI_MASK));

	for (i = 0; i < CE_COUNT; i++) {
		ctrl1_regs = ar->hw_ce_regs->ctrl1_regs->addr;
		ce_base_addr = ath10k_ce_base_address(ar, i);
		value = ath10k_ce_read32(ar, ce_base_addr + ctrl1_regs);
		value |= ar->hw_ce_regs->upd->mask;
		ath10k_ce_write32(ar, ce_base_addr + ctrl1_regs, value);
	}

	memset(ce->vaddr_rri, 0, CE_COUNT * sizeof(u32));
}
EXPORT_SYMBOL(ath10k_ce_alloc_rri);

void ath10k_ce_free_rri(struct ath10k *ar)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	dma_free_coherent(ar->dev, (CE_COUNT * sizeof(u32)),
			  ce->vaddr_rri,
			  ce->paddr_rri);
}
EXPORT_SYMBOL(ath10k_ce_free_rri);