Lines matching refs:wl in drivers/net/wireless/ti/wl1251/tx.c (Linux kernel). Each entry gives the line number within tx.c, the matching source line, and the containing function, with a note on whether wl is an argument or a local variable at that point.
33 static bool wl1251_tx_double_buffer_busy(struct wl1251 *wl, u32 data_out_count) in wl1251_tx_double_buffer_busy() argument
37 data_in_count = wl->data_in_count; in wl1251_tx_double_buffer_busy()
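These two fragments only show the counter snapshot; the point of the function is a wraparound-aware fullness check between the host's "data in" counter and the firmware's "data out" counter. A plausible reconstruction, assuming the mask and chunk-count constants from the wl1251 headers (TX_STATUS_DATA_OUT_COUNT_MASK, DP_TX_PACKET_RING_CHUNK_NUM):

    static bool wl1251_tx_double_buffer_busy(struct wl1251 *wl, u32 data_out_count)
    {
            int used, data_in_count;

            data_in_count = wl->data_in_count;

            if (data_in_count < data_out_count)
                    /* the host-side counter wrapped around the mask */
                    data_in_count += TX_STATUS_DATA_OUT_COUNT_MASK + 1;

            /* chunks handed to the firmware but not yet drained */
            used = data_in_count - data_out_count;

            /* both halves of the double buffer in flight: TX must wait */
            return used >= DP_TX_PACKET_RING_CHUNK_NUM;
    }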
54 static int wl1251_tx_path_status(struct wl1251 *wl) in wl1251_tx_path_status() argument
59 addr = wl->data_path->tx_control_addr; in wl1251_tx_path_status()
60 status = wl1251_mem_read32(wl, addr); in wl1251_tx_path_status()
62 busy = wl1251_tx_double_buffer_busy(wl, data_out_count); in wl1251_tx_path_status()
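wl1251_tx_path_status() reads the TX control word from chip memory and turns the firmware's progress counter into a go/no-go answer for the TX path. A sketch consistent with fragments 59-62 (the mask name is an assumption from the same headers):

    static int wl1251_tx_path_status(struct wl1251 *wl)
    {
            u32 status, addr, data_out_count;

            addr = wl->data_path->tx_control_addr;
            status = wl1251_mem_read32(wl, addr);

            /* the low bits of the control word carry the firmware's
             * "data out" count */
            data_out_count = status & TX_STATUS_DATA_OUT_COUNT_MASK;

            if (wl1251_tx_double_buffer_busy(wl, data_out_count))
                    return -EBUSY;

            return 0;
    }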
70 static int wl1251_tx_id(struct wl1251 *wl, struct sk_buff *skb) in wl1251_tx_id() argument
75 if (wl->tx_frames[i] == NULL) { in wl1251_tx_id()
76 wl->tx_frames[i] = skb; in wl1251_tx_id()
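The id allocator is a simple linear scan over the pending-frame table: the first free slot is claimed and its index becomes the frame id that the completion path later uses to find the skb. A sketch, assuming the table size constant FW_TX_CMPLT_BLOCK_SIZE:

    static int wl1251_tx_id(struct wl1251 *wl, struct sk_buff *skb)
    {
            int i;

            for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
                    if (wl->tx_frames[i] == NULL) {
                            wl->tx_frames[i] = skb;
                            return i;
                    }

            return -EBUSY;
    }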
148 static int wl1251_tx_fill_hdr(struct wl1251 *wl, struct sk_buff *skb, in wl1251_tx_fill_hdr() argument
159 id = wl1251_tx_id(wl, skb); in wl1251_tx_fill_hdr()
168 rate = ieee80211_get_tx_rate(wl->hw, control); in wl1251_tx_fill_hdr()
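wl1251_tx_fill_hdr() allocates an id and prepends the firmware's TX descriptor to the frame, filling in length, rate, and bookkeeping fields. A condensed sketch; the descriptor type is taken to be the driver's struct tx_double_buffer_desc, and several fields (expiry, fragmentation threshold, security handling) are omitted:

    static int wl1251_tx_fill_hdr(struct wl1251 *wl, struct sk_buff *skb,
                                  struct ieee80211_tx_info *control)
    {
            struct tx_double_buffer_desc *tx_hdr;
            struct ieee80211_rate *rate;
            int id;

            id = wl1251_tx_id(wl, skb);
            if (id < 0)
                    return id;      /* no free slot in wl->tx_frames */

            /* prepend the firmware descriptor to the 802.11 frame */
            tx_hdr = skb_push(skb, sizeof(*tx_hdr));

            tx_hdr->length = cpu_to_le16(skb->len - sizeof(*tx_hdr));
            rate = ieee80211_get_tx_rate(wl->hw, control);
            tx_hdr->rate = cpu_to_le16(rate->hw_value);
            tx_hdr->id = id;

            return 0;
    }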
182 static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb, in wl1251_tx_send_packet() argument
233 wl->tx_frames[tx_hdr->id] = skb = newskb; in wl1251_tx_send_packet()
251 if (wl->data_in_count & 0x1) in wl1251_tx_send_packet()
252 addr = wl->data_path->tx_packet_ring_addr + in wl1251_tx_send_packet()
253 wl->data_path->tx_packet_ring_chunk_size; in wl1251_tx_send_packet()
255 addr = wl->data_path->tx_packet_ring_addr; in wl1251_tx_send_packet()
257 wl1251_mem_write(wl, addr, skb->data, len); in wl1251_tx_send_packet()
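Fragments 251-255 show how the target address is picked: the parity of the host's data_in_count selects which half of the on-chip packet ring receives the frame. A hypothetical helper (not in the driver) that isolates the address computation:

    static u32 wl1251_tx_chunk_addr(struct wl1251 *wl)
    {
            /* even count: first half of the ring; odd count: second half */
            u32 addr = wl->data_path->tx_packet_ring_addr;

            if (wl->data_in_count & 0x1)
                    addr += wl->data_path->tx_packet_ring_chunk_size;

            return addr;
    }

The frame is then copied into chip memory with wl1251_mem_write(wl, addr, skb->data, len). Fragment 233 shows the clone handling on the way there: when the skb has to be copied (for example because it is cloned), the fresh copy replaces the original both in the local skb variable and in wl->tx_frames[tx_hdr->id], so the completion path later releases the right buffer.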
266 static void wl1251_tx_trigger(struct wl1251 *wl) in wl1251_tx_trigger() argument
270 if (wl->data_in_count & 0x1) { in wl1251_tx_trigger()
278 wl1251_reg_write32(wl, addr, data); in wl1251_tx_trigger()
281 wl->data_in_count = (wl->data_in_count + 1) & in wl1251_tx_trigger()
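The trigger tells the firmware which half of the double buffer just received a frame, then advances the host-side counter; fragment 281 is cut off mid-expression, and the natural completion is a wrap at the same mask used by the busy check. A sketch, with the register and bit names (ACX_REG_INTERRUPT_TRIG*, INTR_TRIG_TX_PROC*) assumed from the wl1251 register definitions:

    static void wl1251_tx_trigger(struct wl1251 *wl)
    {
            u32 data, addr;

            /* each half of the double buffer has its own trigger */
            if (wl->data_in_count & 0x1) {
                    addr = ACX_REG_INTERRUPT_TRIG_H;
                    data = INTR_TRIG_TX_PROC1;
            } else {
                    addr = ACX_REG_INTERRUPT_TRIG;
                    data = INTR_TRIG_TX_PROC0;
            }

            wl1251_reg_write32(wl, addr, data);

            /* bump "data in", wrapping at the counter mask */
            wl->data_in_count = (wl->data_in_count + 1) &
                    TX_STATUS_DATA_OUT_COUNT_MASK;
    }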
285 static void enable_tx_for_packet_injection(struct wl1251 *wl) in enable_tx_for_packet_injection() argument
289 ret = wl1251_cmd_join(wl, BSS_TYPE_STA_BSS, wl->channel, in enable_tx_for_packet_injection()
290 wl->beacon_int, wl->dtim_period); in enable_tx_for_packet_injection()
296 ret = wl1251_event_wait(wl, JOIN_EVENT_COMPLETE_ID, 100); in enable_tx_for_packet_injection()
302 wl->joined = true; in enable_tx_for_packet_injection()
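For packet injection without an interface, the driver performs a fake join so the firmware opens the TX path, then waits for the join-complete event before recording the joined state. A sketch following fragments 289-302 (the warning messages are illustrative):

    static void enable_tx_for_packet_injection(struct wl1251 *wl)
    {
            int ret;

            ret = wl1251_cmd_join(wl, BSS_TYPE_STA_BSS, wl->channel,
                                  wl->beacon_int, wl->dtim_period);
            if (ret < 0) {
                    wl1251_warning("join failed");
                    return;
            }

            /* give the firmware up to 100 ms to report completion */
            ret = wl1251_event_wait(wl, JOIN_EVENT_COMPLETE_ID, 100);
            if (ret < 0) {
                    wl1251_warning("join timeout");
                    return;
            }

            wl->joined = true;
    }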
306 static int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb) in wl1251_tx_frame() argument
315 if (unlikely(wl->monitor_present)) in wl1251_tx_frame()
319 if (unlikely(wl->default_key != idx)) { in wl1251_tx_frame()
320 ret = wl1251_acx_default_key(wl, idx); in wl1251_tx_frame()
327 if ((wl->vif == NULL) && !wl->joined) in wl1251_tx_frame()
328 enable_tx_for_packet_injection(wl); in wl1251_tx_frame()
330 ret = wl1251_tx_path_status(wl); in wl1251_tx_frame()
334 ret = wl1251_tx_fill_hdr(wl, skb, info); in wl1251_tx_frame()
338 ret = wl1251_tx_send_packet(wl, skb, info); in wl1251_tx_frame()
342 wl1251_tx_trigger(wl); in wl1251_tx_frame()
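wl1251_tx_frame() strings the pieces together: sync the default hardware key if needed, open the TX path for injection when no interface exists, check the double buffer, build the descriptor, copy the frame, and trigger the firmware. A condensed sketch of that order, with the hw_key handling reconstructed around fragments 315-320:

    static int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb)
    {
            struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
            int ret;

            if (info->control.hw_key) {
                    u8 idx = info->control.hw_key->hw_key_idx;

                    if (unlikely(wl->monitor_present))
                            return -EINVAL;

                    /* keep the firmware's default key in sync */
                    if (unlikely(wl->default_key != idx)) {
                            ret = wl1251_acx_default_key(wl, idx);
                            if (ret < 0)
                                    return ret;
                    }
            }

            /* monitor-mode injection needs the fake join first */
            if ((wl->vif == NULL) && !wl->joined)
                    enable_tx_for_packet_injection(wl);

            ret = wl1251_tx_path_status(wl);        /* -EBUSY if full */
            if (ret < 0)
                    return ret;

            ret = wl1251_tx_fill_hdr(wl, skb, info);
            if (ret < 0)
                    return ret;

            ret = wl1251_tx_send_packet(wl, skb, info);
            if (ret < 0)
                    return ret;

            wl1251_tx_trigger(wl);
            return 0;
    }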
349 struct wl1251 *wl = container_of(work, struct wl1251, tx_work); in wl1251_tx_work() local
354 mutex_lock(&wl->mutex); in wl1251_tx_work()
356 if (unlikely(wl->state == WL1251_STATE_OFF)) in wl1251_tx_work()
359 while ((skb = skb_dequeue(&wl->tx_queue))) { in wl1251_tx_work()
361 ret = wl1251_ps_elp_wakeup(wl); in wl1251_tx_work()
367 ret = wl1251_tx_frame(wl, skb); in wl1251_tx_work()
369 skb_queue_head(&wl->tx_queue, skb); in wl1251_tx_work()
379 wl1251_ps_elp_sleep(wl); in wl1251_tx_work()
381 mutex_unlock(&wl->mutex); in wl1251_tx_work()
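The work item drains the host queue under wl->mutex, waking the chip out of ELP power save once on first use and putting it back to sleep when done; on -EBUSY the frame is requeued at the head so ordering is preserved. A sketch consistent with fragments 349-381 (the woken_up flag is an assumption about how the single wakeup is tracked):

    void wl1251_tx_work(struct work_struct *work)
    {
            struct wl1251 *wl = container_of(work, struct wl1251, tx_work);
            struct sk_buff *skb;
            bool woken_up = false;
            int ret;

            mutex_lock(&wl->mutex);

            if (unlikely(wl->state == WL1251_STATE_OFF))
                    goto out;

            while ((skb = skb_dequeue(&wl->tx_queue))) {
                    if (!woken_up) {
                            /* leave ELP power save before chip access */
                            ret = wl1251_ps_elp_wakeup(wl);
                            if (ret < 0)
                                    goto out;
                            woken_up = true;
                    }

                    ret = wl1251_tx_frame(wl, skb);
                    if (ret == -EBUSY) {
                            /* double buffer full: put it back, retry later */
                            skb_queue_head(&wl->tx_queue, skb);
                            goto out;
                    } else if (ret < 0) {
                            dev_kfree_skb(skb);
                            goto out;
                    }
            }

    out:
            if (woken_up)
                    wl1251_ps_elp_sleep(wl);

            mutex_unlock(&wl->mutex);
    }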
412 static void wl1251_tx_packet_cb(struct wl1251 *wl, in wl1251_tx_packet_cb() argument
420 skb = wl->tx_frames[result->id]; in wl1251_tx_packet_cb()
434 wl->stats.retry_count += result->ack_failures; in wl1251_tx_packet_cb()
454 ieee80211_tx_status(wl->hw, skb); in wl1251_tx_packet_cb()
456 wl->tx_frames[result->id] = NULL; in wl1251_tx_packet_cb()
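Per completed frame, the callback looks the skb up by the firmware-reported id, converts the result into mac80211 status flags, accounts retries, strips the descriptor that fill_hdr prepended, and hands the frame back. A condensed sketch (TX_SUCCESS and struct tx_result are assumed from the driver's headers; the TKIP IV fixup in the real function is omitted):

    static void wl1251_tx_packet_cb(struct wl1251 *wl, struct tx_result *result)
    {
            struct ieee80211_tx_info *info;
            struct sk_buff *skb;

            skb = wl->tx_frames[result->id];
            if (skb == NULL)
                    return;

            info = IEEE80211_SKB_CB(skb);

            if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
                result->status == TX_SUCCESS)
                    info->flags |= IEEE80211_TX_STAT_ACK;

            info->status.rates[0].count = result->ack_failures + 1;
            wl->stats.retry_count += result->ack_failures;

            /* drop the private TX descriptor before returning the
             * frame to mac80211 */
            skb_pull(skb, sizeof(struct tx_double_buffer_desc));

            ieee80211_tx_status(wl->hw, skb);
            wl->tx_frames[result->id] = NULL;
    }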
460 void wl1251_tx_complete(struct wl1251 *wl) in wl1251_tx_complete() argument
466 if (unlikely(wl->state != WL1251_STATE_ON)) in wl1251_tx_complete()
470 wl1251_mem_read(wl, wl->data_path->tx_complete_addr, in wl1251_tx_complete()
473 result_index = wl->next_tx_complete; in wl1251_tx_complete()
480 wl1251_tx_packet_cb(wl, result_ptr); in wl1251_tx_complete()
493 queue_len = skb_queue_len(&wl->tx_queue); in wl1251_tx_complete()
498 ieee80211_queue_work(wl->hw, &wl->tx_work); in wl1251_tx_complete()
501 if (wl->tx_queue_stopped && in wl1251_tx_complete()
505 spin_lock_irqsave(&wl->wl_lock, flags); in wl1251_tx_complete()
506 ieee80211_wake_queues(wl->hw); in wl1251_tx_complete()
507 wl->tx_queue_stopped = false; in wl1251_tx_complete()
508 spin_unlock_irqrestore(&wl->wl_lock, flags); in wl1251_tx_complete()
517 if (result_index > wl->next_tx_complete) { in wl1251_tx_complete()
519 wl1251_mem_write(wl, in wl1251_tx_complete()
520 wl->data_path->tx_complete_addr + in wl1251_tx_complete()
521 (wl->next_tx_complete * in wl1251_tx_complete()
523 &result[wl->next_tx_complete], in wl1251_tx_complete()
528 } else if (result_index < wl->next_tx_complete) { in wl1251_tx_complete()
530 wl1251_mem_write(wl, in wl1251_tx_complete()
531 wl->data_path->tx_complete_addr + in wl1251_tx_complete()
532 (wl->next_tx_complete * in wl1251_tx_complete()
534 &result[wl->next_tx_complete], in wl1251_tx_complete()
536 wl->next_tx_complete) * in wl1251_tx_complete()
539 wl1251_mem_write(wl, in wl1251_tx_complete()
540 wl->data_path->tx_complete_addr, in wl1251_tx_complete()
544 wl->next_tx_complete) * in wl1251_tx_complete()
549 wl1251_mem_write(wl, in wl1251_tx_complete()
550 wl->data_path->tx_complete_addr, in wl1251_tx_complete()
558 wl->next_tx_complete = result_index; in wl1251_tx_complete()
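The completion handler reads the whole result block from chip memory, walks it circularly from next_tx_complete while both done flags are set, and afterwards writes the cleared flags back. The three mem_write branches at 517-550 cover the cases where the processed window does not wrap, wraps past the end of the block, or spans the entire block. A condensed sketch of the loop; the write-back is summarized in a comment rather than reproduced:

    void wl1251_tx_complete(struct wl1251 *wl)
    {
            struct tx_result result[FW_TX_CMPLT_BLOCK_SIZE], *result_ptr;
            int i, result_index;

            if (unlikely(wl->state != WL1251_STATE_ON))
                    return;

            /* snapshot the completion block from chip memory */
            wl1251_mem_read(wl, wl->data_path->tx_complete_addr,
                            result, sizeof(result));

            result_index = wl->next_tx_complete;

            for (i = 0; i < ARRAY_SIZE(result); i++) {
                    result_ptr = &result[result_index];

                    if (result_ptr->done_1 != 1 || result_ptr->done_2 != 1)
                            break;

                    wl1251_tx_packet_cb(wl, result_ptr);

                    /* clear locally; written back to the chip below */
                    result_ptr->done_1 = 0;
                    result_ptr->done_2 = 0;

                    result_index = (result_index + 1) %
                            FW_TX_CMPLT_BLOCK_SIZE;
            }

            /*
             * Write-back of the cleared flags (elided here): one write
             * when the window [next_tx_complete, result_index) is
             * contiguous, two writes when it wrapped past the end of
             * the block, one full-block write when every entry was
             * consumed.
             */

            wl->next_tx_complete = result_index;
    }

Fragments 493-508 add the flow-control side: once the queue length has dropped enough, the TX work is requeued and, under wl->wl_lock, the stopped mac80211 queues are woken and tx_queue_stopped is cleared.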
562 void wl1251_tx_flush(struct wl1251 *wl) in wl1251_tx_flush() argument
571 while ((skb = skb_dequeue(&wl->tx_queue))) { in wl1251_tx_flush()
579 ieee80211_tx_status(wl->hw, skb); in wl1251_tx_flush()
583 if (wl->tx_frames[i] != NULL) { in wl1251_tx_flush()
584 skb = wl->tx_frames[i]; in wl1251_tx_flush()
590 ieee80211_tx_status(wl->hw, skb); in wl1251_tx_flush()
591 wl->tx_frames[i] = NULL; in wl1251_tx_flush()
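The flush path returns everything still owned by the driver to mac80211: first the frames waiting in the host queue, then any entries left in wl->tx_frames for frames already handed to the firmware. A condensed sketch; status-reporting filters present in the real function are omitted, and frames are simply handed back without ACK status:

    void wl1251_tx_flush(struct wl1251 *wl)
    {
            struct sk_buff *skb;
            int i;

            /* frames still queued on the host side */
            while ((skb = skb_dequeue(&wl->tx_queue)))
                    ieee80211_tx_status(wl->hw, skb);

            /* frames already pushed to the firmware but not completed */
            for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
                    if (wl->tx_frames[i] != NULL) {
                            skb = wl->tx_frames[i];
                            wl->tx_frames[i] = NULL;
                            ieee80211_tx_status(wl->hw, skb);
                    }
    }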