drivers/net/wireless/ralink/rt2x00/rt2x00queue.c


DEFINITIONS

This source file includes the following definitions:
  1. rt2x00queue_alloc_rxskb
  2. rt2x00queue_map_txskb
  3. rt2x00queue_unmap_skb
  4. rt2x00queue_free_skb
  5. rt2x00queue_align_frame
  6. rt2x00queue_insert_l2pad
  7. rt2x00queue_remove_l2pad
  8. rt2x00queue_create_tx_descriptor_seq
  9. rt2x00queue_create_tx_descriptor_plcp
  10. rt2x00queue_create_tx_descriptor_ht
  11. rt2x00queue_create_tx_descriptor
  12. rt2x00queue_write_tx_data
  13. rt2x00queue_write_tx_descriptor
  14. rt2x00queue_kick_tx_queue
  15. rt2x00queue_bar_check
  16. rt2x00queue_write_tx_frame
  17. rt2x00queue_clear_beacon
  18. rt2x00queue_update_beacon
  19. rt2x00queue_for_each_entry
  20. rt2x00queue_get_entry
  21. rt2x00queue_index_inc
  22. rt2x00queue_pause_queue_nocheck
  23. rt2x00queue_pause_queue
  24. rt2x00queue_unpause_queue
  25. rt2x00queue_start_queue
  26. rt2x00queue_stop_queue
  27. rt2x00queue_flush_queue
  28. rt2x00queue_start_queues
  29. rt2x00queue_stop_queues
  30. rt2x00queue_flush_queues
  31. rt2x00queue_reset
  32. rt2x00queue_init_queues
  33. rt2x00queue_alloc_entries
  34. rt2x00queue_free_skbs
  35. rt2x00queue_alloc_rxskbs
  36. rt2x00queue_initialize
  37. rt2x00queue_uninitialize
  38. rt2x00queue_init
  39. rt2x00queue_allocate
  40. rt2x00queue_free

   1 // SPDX-License-Identifier: GPL-2.0-or-later
   2 /*
   3         Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
   4         Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
   5         Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
   6         <http://rt2x00.serialmonkey.com>
   7 
   8  */
   9 
  10 /*
  11         Module: rt2x00lib
  12         Abstract: rt2x00 queue specific routines.
  13  */
  14 
  15 #include <linux/slab.h>
  16 #include <linux/kernel.h>
  17 #include <linux/module.h>
  18 #include <linux/dma-mapping.h>
  19 
  20 #include "rt2x00.h"
  21 #include "rt2x00lib.h"
  22 
  23 struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
  24 {
  25         struct data_queue *queue = entry->queue;
  26         struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
  27         struct sk_buff *skb;
  28         struct skb_frame_desc *skbdesc;
  29         unsigned int frame_size;
  30         unsigned int head_size = 0;
  31         unsigned int tail_size = 0;
  32 
  33         /*
  34          * The frame size includes descriptor size, because the
   35          * hardware receives the frame directly into the skbuffer.
  36          */
  37         frame_size = queue->data_size + queue->desc_size + queue->winfo_size;
  38 
  39         /*
  40          * The payload should be aligned to a 4-byte boundary,
  41          * this means we need at least 3 bytes for moving the frame
  42          * into the correct offset.
  43          */
  44         head_size = 4;
  45 
  46         /*
   47          * For IV/EIV/ICV assembly we must make sure there are
   48          * at least 8 bytes available in the headroom for IV/EIV
   49          * and 8 bytes for ICV data as tailroom.
  50          */
  51         if (rt2x00_has_cap_hw_crypto(rt2x00dev)) {
  52                 head_size += 8;
  53                 tail_size += 8;
  54         }
  55 
  56         /*
  57          * Allocate skbuffer.
  58          */
  59         skb = __dev_alloc_skb(frame_size + head_size + tail_size, gfp);
  60         if (!skb)
  61                 return NULL;
  62 
  63         /*
   64          * Reserve the requested headroom and claim the frame size;
   65          * the space left at the end serves as tailroom.
  66          */
  67         skb_reserve(skb, head_size);
  68         skb_put(skb, frame_size);
  69 
  70         /*
  71          * Populate skbdesc.
  72          */
  73         skbdesc = get_skb_frame_desc(skb);
  74         memset(skbdesc, 0, sizeof(*skbdesc));
  75 
  76         if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA)) {
  77                 dma_addr_t skb_dma;
  78 
  79                 skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
  80                                          DMA_FROM_DEVICE);
  81                 if (unlikely(dma_mapping_error(rt2x00dev->dev, skb_dma))) {
  82                         dev_kfree_skb_any(skb);
  83                         return NULL;
  84                 }
  85 
  86                 skbdesc->skb_dma = skb_dma;
  87                 skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
  88         }
  89 
  90         return skb;
  91 }
  92 
  93 int rt2x00queue_map_txskb(struct queue_entry *entry)
  94 {
  95         struct device *dev = entry->queue->rt2x00dev->dev;
  96         struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
  97 
  98         skbdesc->skb_dma =
  99             dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);
 100 
 101         if (unlikely(dma_mapping_error(dev, skbdesc->skb_dma)))
 102                 return -ENOMEM;
 103 
 104         skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
 105         rt2x00lib_dmadone(entry);
 106         return 0;
 107 }
 108 EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);
 109 
 110 void rt2x00queue_unmap_skb(struct queue_entry *entry)
 111 {
 112         struct device *dev = entry->queue->rt2x00dev->dev;
 113         struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
 114 
 115         if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
 116                 dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
 117                                  DMA_FROM_DEVICE);
 118                 skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
 119         } else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
 120                 dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
 121                                  DMA_TO_DEVICE);
 122                 skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
 123         }
 124 }
 125 EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);
 126 
 127 void rt2x00queue_free_skb(struct queue_entry *entry)
 128 {
 129         if (!entry->skb)
 130                 return;
 131 
 132         rt2x00queue_unmap_skb(entry);
 133         dev_kfree_skb_any(entry->skb);
 134         entry->skb = NULL;
 135 }
 136 
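      /*
       * Align the frame to a 4-byte boundary in place. Assuming ALIGN_SIZE()
       * returns the offset of skb->data from a 4-byte boundary, skb_push()
       * moves the data pointer back onto an aligned address, memmove() slides
       * the frame into the new position and skb_trim() restores the original
       * length, so no extra buffer is needed.
       */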
 137 void rt2x00queue_align_frame(struct sk_buff *skb)
 138 {
 139         unsigned int frame_length = skb->len;
 140         unsigned int align = ALIGN_SIZE(skb, 0);
 141 
 142         if (!align)
 143                 return;
 144 
 145         skb_push(skb, align);
 146         memmove(skb->data, skb->data + align, frame_length);
 147         skb_trim(skb, frame_length);
 148 }
 149 
 150 /*
  151  * H/W needs L2 padding between the header and the payload if the header
  152  * size is not 4-byte aligned.
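  *
  * For example (assuming L2PAD_SIZE() returns the 0-3 bytes needed to round
  * the header length up to a multiple of 4): a 26-byte QoS data header needs
  * 2 bytes of L2 padding, while a 24-byte non-QoS header needs none. The pad
  * is created by pushing the skb and moving only the header down, leaving a
  * gap between header and payload.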
 153  */
 154 void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int hdr_len)
 155 {
 156         unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;
 157 
 158         if (!l2pad)
 159                 return;
 160 
 161         skb_push(skb, l2pad);
 162         memmove(skb->data, skb->data + l2pad, hdr_len);
 163 }
 164 
 165 void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int hdr_len)
 166 {
 167         unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;
 168 
 169         if (!l2pad)
 170                 return;
 171 
 172         memmove(skb->data + l2pad, skb->data, hdr_len);
 173         skb_pull(skb, l2pad);
 174 }
 175 
 176 static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
 177                                                  struct sk_buff *skb,
 178                                                  struct txentry_desc *txdesc)
 179 {
 180         struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 181         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 182         struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
 183         u16 seqno;
 184 
 185         if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
 186                 return;
 187 
 188         __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
 189 
 190         if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
 191                 /*
  192                  * rt2800 has a H/W (or F/W) bug: the device incorrectly
  193                  * increases the seqno on retransmitted data (non-QoS) and
  194                  * management frames. To work around the problem, generate
  195                  * the seqno in software, except for beacons, which are
  196                  * transmitted periodically by H/W and hence get a H/W seqno.
 197                  */
 198                 if (ieee80211_is_beacon(hdr->frame_control)) {
 199                         __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
 200                         /* H/W will generate sequence number */
 201                         return;
 202                 }
 203 
 204                 __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
 205         }
 206 
 207         /*
 208          * The hardware is not able to insert a sequence number. Assign a
 209          * software generated one here.
 210          *
 211          * This is wrong because beacons are not getting sequence
 212          * numbers assigned properly.
 213          *
 214          * A secondary problem exists for drivers that cannot toggle
 215          * sequence counting per-frame, since those will override the
 216          * sequence counter given by mac80211.
 217          */
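              /*
               * intf->seqno keeps the counter already shifted into position:
               * the sequence number occupies the upper 12 bits of seq_ctrl
               * (IEEE80211_SCTL_SEQ) and the fragment number the lower 4
               * (IEEE80211_SCTL_FRAG), so adding 0x10 advances the sequence
               * number by one while non-first fragments reuse the current
               * value. Only the fragment bits of the header are kept below and
               * the generated sequence number is OR'ed in.
               */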
 218         if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
 219                 seqno = atomic_add_return(0x10, &intf->seqno);
 220         else
 221                 seqno = atomic_read(&intf->seqno);
 222 
 223         hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
 224         hdr->seq_ctrl |= cpu_to_le16(seqno);
 225 }
 226 
 227 static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
 228                                                   struct sk_buff *skb,
 229                                                   struct txentry_desc *txdesc,
 230                                                   const struct rt2x00_rate *hwrate)
 231 {
 232         struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 233         struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
 234         unsigned int data_length;
 235         unsigned int duration;
 236         unsigned int residual;
 237 
 238         /*
  239          * Determine with what IFS priority this frame should be sent.
  240          * Set ifs to IFS_SIFS when this is not the first fragment,
 241          * or this fragment came after RTS/CTS.
 242          */
 243         if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
 244                 txdesc->u.plcp.ifs = IFS_BACKOFF;
 245         else
 246                 txdesc->u.plcp.ifs = IFS_SIFS;
 247 
 248         /* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
 249         data_length = skb->len + 4;
 250         data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);
 251 
 252         /*
 253          * PLCP setup
 254          * Length calculation depends on OFDM/CCK rate.
 255          */
 256         txdesc->u.plcp.signal = hwrate->plcp;
 257         txdesc->u.plcp.service = 0x04;
 258 
 259         if (hwrate->flags & DEV_RATE_OFDM) {
 260                 txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
 261                 txdesc->u.plcp.length_low = data_length & 0x3f;
 262         } else {
 263                 /*
 264                  * Convert length to microseconds.
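                   * hwrate->bitrate is in units of 100 kbit/s (110 == 11 Mbit/s);
                   * GET_DURATION() presumably computes size * 8 * 10 / bitrate,
                   * i.e. the transmit time in microseconds, and GET_DURATION_RES()
                   * the remainder, which is used for rounding up and for the CCK
                   * length-extension check below.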
 265                  */
 266                 residual = GET_DURATION_RES(data_length, hwrate->bitrate);
 267                 duration = GET_DURATION(data_length, hwrate->bitrate);
 268 
 269                 if (residual != 0) {
 270                         duration++;
 271 
 272                         /*
 273                          * Check if we need to set the Length Extension
 274                          */
 275                         if (hwrate->bitrate == 110 && residual <= 30)
 276                                 txdesc->u.plcp.service |= 0x80;
 277                 }
 278 
 279                 txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
 280                 txdesc->u.plcp.length_low = duration & 0xff;
 281 
 282                 /*
 283                  * When preamble is enabled we should set the
 284                  * preamble bit for the signal.
 285                  */
 286                 if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
 287                         txdesc->u.plcp.signal |= 0x08;
 288         }
 289 }
 290 
 291 static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
 292                                                 struct sk_buff *skb,
 293                                                 struct txentry_desc *txdesc,
 294                                                 struct ieee80211_sta *sta,
 295                                                 const struct rt2x00_rate *hwrate)
 296 {
 297         struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 298         struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
 299         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 300         struct rt2x00_sta *sta_priv = NULL;
 301         u8 density = 0;
 302 
 303         if (sta) {
 304                 sta_priv = sta_to_rt2x00_sta(sta);
 305                 txdesc->u.ht.wcid = sta_priv->wcid;
 306                 density = sta->ht_cap.ampdu_density;
 307         }
 308 
 309         /*
  310          * If IEEE80211_TX_RC_MCS is set, txrate->idx just contains the
  311          * MCS rate to be used.
 312          */
 313         if (txrate->flags & IEEE80211_TX_RC_MCS) {
 314                 txdesc->u.ht.mcs = txrate->idx;
 315 
 316                 /*
  317                  * MIMO PS should be set to 1 for STAs using dynamic SM PS
  318                  * when more than one TX stream is used (> MCS7).
 319                  */
 320                 if (sta && txdesc->u.ht.mcs > 7 &&
 321                     sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
 322                         __set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
 323         } else {
 324                 txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
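                /*
                 * For legacy (CCK/OFDM) rates the MCS field carries the h/w
                 * rate index; setting bit 3 below appears to select the
                 * short-preamble variant of the CCK rate (an assumption based
                 * on how rt2800 encodes legacy rates).
                 */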
 325                 if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
 326                         txdesc->u.ht.mcs |= 0x08;
 327         }
 328 
 329         if (test_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags)) {
 330                 if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
 331                         txdesc->u.ht.txop = TXOP_SIFS;
 332                 else
 333                         txdesc->u.ht.txop = TXOP_BACKOFF;
 334 
  335                 /* Leave all other settings at zero. */
 336                 return;
 337         }
 338 
 339         /*
 340          * Only one STBC stream is supported for now.
 341          */
 342         if (tx_info->flags & IEEE80211_TX_CTL_STBC)
 343                 txdesc->u.ht.stbc = 1;
 344 
 345         /*
  346          * This frame is eligible for an A-MPDU; however, don't aggregate
  347          * frames that are intended to probe a specific TX rate.
 348          */
 349         if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
 350             !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
 351                 __set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
 352                 txdesc->u.ht.mpdu_density = density;
 353                 txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */
 354         }
 355 
 356         /*
  357          * Set 40 MHz mode if necessary (for legacy rates this will
 358          * duplicate the frame to both channels).
 359          */
 360         if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
 361             txrate->flags & IEEE80211_TX_RC_DUP_DATA)
 362                 __set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
 363         if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
 364                 __set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);
 365 
 366         /*
 367          * Determine IFS values
 368          * - Use TXOP_BACKOFF for management frames except beacons
 369          * - Use TXOP_SIFS for fragment bursts
 370          * - Use TXOP_HTTXOP for everything else
 371          *
 372          * Note: rt2800 devices won't use CTS protection (if used)
 373          * for frames not transmitted with TXOP_HTTXOP
 374          */
 375         if (ieee80211_is_mgmt(hdr->frame_control) &&
 376             !ieee80211_is_beacon(hdr->frame_control))
 377                 txdesc->u.ht.txop = TXOP_BACKOFF;
 378         else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
 379                 txdesc->u.ht.txop = TXOP_SIFS;
 380         else
 381                 txdesc->u.ht.txop = TXOP_HTTXOP;
 382 }
 383 
 384 static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
 385                                              struct sk_buff *skb,
 386                                              struct txentry_desc *txdesc,
 387                                              struct ieee80211_sta *sta)
 388 {
 389         struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 390         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 391         struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
 392         struct ieee80211_rate *rate;
 393         const struct rt2x00_rate *hwrate = NULL;
 394 
 395         memset(txdesc, 0, sizeof(*txdesc));
 396 
 397         /*
 398          * Header and frame information.
 399          */
 400         txdesc->length = skb->len;
 401         txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb);
 402 
 403         /*
 404          * Check whether this frame is to be acked.
 405          */
 406         if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
 407                 __set_bit(ENTRY_TXD_ACK, &txdesc->flags);
 408 
 409         /*
 410          * Check if this is a RTS/CTS frame
 411          */
 412         if (ieee80211_is_rts(hdr->frame_control) ||
 413             ieee80211_is_cts(hdr->frame_control)) {
 414                 __set_bit(ENTRY_TXD_BURST, &txdesc->flags);
 415                 if (ieee80211_is_rts(hdr->frame_control))
 416                         __set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
 417                 else
 418                         __set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
 419                 if (tx_info->control.rts_cts_rate_idx >= 0)
 420                         rate =
 421                             ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
 422         }
 423 
 424         /*
 425          * Determine retry information.
 426          */
 427         txdesc->retry_limit = tx_info->control.rates[0].count - 1;
 428         if (txdesc->retry_limit >= rt2x00dev->long_retry)
 429                 __set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);
 430 
 431         /*
 432          * Check if more fragments are pending
 433          */
 434         if (ieee80211_has_morefrags(hdr->frame_control)) {
 435                 __set_bit(ENTRY_TXD_BURST, &txdesc->flags);
 436                 __set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
 437         }
 438 
 439         /*
 440          * Check if more frames (!= fragments) are pending
 441          */
 442         if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
 443                 __set_bit(ENTRY_TXD_BURST, &txdesc->flags);
 444 
 445         /*
 446          * Beacons and probe responses require the tsf timestamp
 447          * to be inserted into the frame.
 448          */
 449         if (ieee80211_is_beacon(hdr->frame_control) ||
 450             ieee80211_is_probe_resp(hdr->frame_control))
 451                 __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);
 452 
 453         if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
 454             !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
 455                 __set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
 456 
 457         /*
 458          * Determine rate modulation.
 459          */
 460         if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
 461                 txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
 462         else if (txrate->flags & IEEE80211_TX_RC_MCS)
 463                 txdesc->rate_mode = RATE_MODE_HT_MIX;
 464         else {
 465                 rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
 466                 hwrate = rt2x00_get_rate(rate->hw_value);
 467                 if (hwrate->flags & DEV_RATE_OFDM)
 468                         txdesc->rate_mode = RATE_MODE_OFDM;
 469                 else
 470                         txdesc->rate_mode = RATE_MODE_CCK;
 471         }
 472 
 473         /*
 474          * Apply TX descriptor handling by components
 475          */
 476         rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
 477         rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);
 478 
 479         if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_HT_TX_DESC))
 480                 rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
 481                                                    sta, hwrate);
 482         else
 483                 rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
 484                                                       hwrate);
 485 }
 486 
 487 static int rt2x00queue_write_tx_data(struct queue_entry *entry,
 488                                      struct txentry_desc *txdesc)
 489 {
 490         struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
 491 
 492         /*
  493          * This should not happen: we already checked that the entry
  494          * was ours. If the hardware disagrees, the queue has been
  495          * corrupted!
 496          */
 497         if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
 498                      rt2x00dev->ops->lib->get_entry_state(entry))) {
 499                 rt2x00_err(rt2x00dev,
 500                            "Corrupt queue %d, accessing entry which is not ours\n"
 501                            "Please file bug report to %s\n",
 502                            entry->queue->qid, DRV_PROJECT);
 503                 return -EINVAL;
 504         }
 505 
 506         /*
 507          * Add the requested extra tx headroom in front of the skb.
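               * This zeroed area is reserved for the driver's device-specific
               * TX descriptor data (an assumption based on the write_tx_data()
               * call below; the exact layout of the headroom is driver-defined).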
 508          */
 509         skb_push(entry->skb, rt2x00dev->extra_tx_headroom);
 510         memset(entry->skb->data, 0, rt2x00dev->extra_tx_headroom);
 511 
 512         /*
 513          * Call the driver's write_tx_data function, if it exists.
 514          */
 515         if (rt2x00dev->ops->lib->write_tx_data)
 516                 rt2x00dev->ops->lib->write_tx_data(entry, txdesc);
 517 
 518         /*
 519          * Map the skb to DMA.
 520          */
 521         if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA) &&
 522             rt2x00queue_map_txskb(entry))
 523                 return -ENOMEM;
 524 
 525         return 0;
 526 }
 527 
 528 static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
 529                                             struct txentry_desc *txdesc)
 530 {
 531         struct data_queue *queue = entry->queue;
 532 
 533         queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);
 534 
 535         /*
 536          * All processing on the frame has been completed, this means
 537          * it is now ready to be dumped to userspace through debugfs.
 538          */
 539         rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry);
 540 }
 541 
 542 static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
 543                                       struct txentry_desc *txdesc)
 544 {
 545         /*
  546          * Check if we need to kick the queue; there are however a few rules:
  547          *      1) Don't kick unless this is the last frame in a burst.
  548          *         When the burst flag is set, this frame is always followed
  549          *         by another frame which is related to it in some way.
  550          *         This is true for fragments, RTS or CTS-to-self frames.
  551          *      2) Rule 1 can be broken when the available entries
  552          *         in the queue drop below a certain threshold.
 553          */
 554         if (rt2x00queue_threshold(queue) ||
 555             !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
 556                 queue->rt2x00dev->ops->lib->kick_queue(queue);
 557 }
 558 
 559 static void rt2x00queue_bar_check(struct queue_entry *entry)
 560 {
 561         struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
 562         struct ieee80211_bar *bar = (void *) (entry->skb->data +
 563                                     rt2x00dev->extra_tx_headroom);
 564         struct rt2x00_bar_list_entry *bar_entry;
 565 
 566         if (likely(!ieee80211_is_back_req(bar->frame_control)))
 567                 return;
 568 
 569         bar_entry = kmalloc(sizeof(*bar_entry), GFP_ATOMIC);
 570 
 571         /*
  572          * If the allocation fails we still send the BAR out, but we don't
  573          * track it in our BAR list, and as a result we will report it back
  574          * to mac80211 as failed.
 575          */
 576         if (!bar_entry)
 577                 return;
 578 
 579         bar_entry->entry = entry;
 580         bar_entry->block_acked = 0;
 581 
 582         /*
  583          * Copy the relevant parts of the 802.11 BAR into our check list
  584          * so that we can use RCU for lower overhead in the RX path, since
  585          * sending BARs and processing the corresponding BlockAck should be
  586          * the exception.
 587          */
 588         memcpy(bar_entry->ra, bar->ra, sizeof(bar->ra));
 589         memcpy(bar_entry->ta, bar->ta, sizeof(bar->ta));
 590         bar_entry->control = bar->control;
 591         bar_entry->start_seq_num = bar->start_seq_num;
 592 
 593         /*
 594          * Insert BAR into our BAR check list.
 595          */
 596         spin_lock_bh(&rt2x00dev->bar_list_lock);
 597         list_add_tail_rcu(&bar_entry->list, &rt2x00dev->bar_list);
 598         spin_unlock_bh(&rt2x00dev->bar_list_lock);
 599 }
 600 
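      /*
       * rt2x00queue_write_tx_frame() is the common TX submission path: it
       * builds the TX descriptor, strips or copies IV data for hardware
       * crypto, applies L2 padding or DMA alignment, claims a queue entry
       * under queue->tx_lock, hands the frame to the driver and finally
       * kicks the queue, pausing it when the threshold is reached.
       */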
 601 int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
 602                                struct ieee80211_sta *sta, bool local)
 603 {
 604         struct ieee80211_tx_info *tx_info;
 605         struct queue_entry *entry;
 606         struct txentry_desc txdesc;
 607         struct skb_frame_desc *skbdesc;
 608         u8 rate_idx, rate_flags;
 609         int ret = 0;
 610 
 611         /*
 612          * Copy all TX descriptor information into txdesc,
 613          * after that we are free to use the skb->cb array
 614          * for our information.
 615          */
 616         rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta);
 617 
 618         /*
 619          * All information is retrieved from the skb->cb array,
 620          * now we should claim ownership of the driver part of that
 621          * array, preserving the bitrate index and flags.
 622          */
 623         tx_info = IEEE80211_SKB_CB(skb);
 624         rate_idx = tx_info->control.rates[0].idx;
 625         rate_flags = tx_info->control.rates[0].flags;
 626         skbdesc = get_skb_frame_desc(skb);
 627         memset(skbdesc, 0, sizeof(*skbdesc));
 628         skbdesc->tx_rate_idx = rate_idx;
 629         skbdesc->tx_rate_flags = rate_flags;
 630 
 631         if (local)
 632                 skbdesc->flags |= SKBDESC_NOT_MAC80211;
 633 
 634         /*
 635          * When hardware encryption is supported, and this frame
 636          * is to be encrypted, we should strip the IV/EIV data from
 637          * the frame so we can provide it to the driver separately.
 638          */
 639         if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
 640             !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
 641                 if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_COPY_IV))
 642                         rt2x00crypto_tx_copy_iv(skb, &txdesc);
 643                 else
 644                         rt2x00crypto_tx_remove_iv(skb, &txdesc);
 645         }
 646 
 647         /*
 648          * When DMA allocation is required we should guarantee to the
 649          * driver that the DMA is aligned to a 4-byte boundary.
 650          * However some drivers require L2 padding to pad the payload
  651          * rather than the header. This could be a requirement for
  652          * PCI and USB devices, while header alignment is only valid
 653          * for PCI devices.
 654          */
 655         if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_L2PAD))
 656                 rt2x00queue_insert_l2pad(skb, txdesc.header_length);
 657         else if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_DMA))
 658                 rt2x00queue_align_frame(skb);
 659 
 660         /*
  661          * This function must be called with bottom halves disabled.
 662          */
 663         spin_lock(&queue->tx_lock);
 664 
 665         if (unlikely(rt2x00queue_full(queue))) {
 666                 rt2x00_dbg(queue->rt2x00dev, "Dropping frame due to full tx queue %d\n",
 667                            queue->qid);
 668                 ret = -ENOBUFS;
 669                 goto out;
 670         }
 671 
 672         entry = rt2x00queue_get_entry(queue, Q_INDEX);
 673 
 674         if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
 675                                       &entry->flags))) {
 676                 rt2x00_err(queue->rt2x00dev,
 677                            "Arrived at non-free entry in the non-full queue %d\n"
 678                            "Please file bug report to %s\n",
 679                            queue->qid, DRV_PROJECT);
 680                 ret = -EINVAL;
 681                 goto out;
 682         }
 683 
 684         entry->skb = skb;
 685 
 686         /*
 687          * It could be possible that the queue was corrupted and this
 688          * call failed. Since we always return NETDEV_TX_OK to mac80211,
 689          * this frame will simply be dropped.
 690          */
 691         if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
 692                 clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
 693                 entry->skb = NULL;
 694                 ret = -EIO;
 695                 goto out;
 696         }
 697 
 698         /*
 699          * Put BlockAckReqs into our check list for driver BA processing.
 700          */
 701         rt2x00queue_bar_check(entry);
 702 
 703         set_bit(ENTRY_DATA_PENDING, &entry->flags);
 704 
 705         rt2x00queue_index_inc(entry, Q_INDEX);
 706         rt2x00queue_write_tx_descriptor(entry, &txdesc);
 707         rt2x00queue_kick_tx_queue(queue, &txdesc);
 708 
 709 out:
 710         /*
  711          * Pausing the queue has to be serialized with rt2x00lib_txdone(), so
  712          * we do this under queue->tx_lock. Bottom halves were already disabled
  713          * before the ieee80211_xmit() call.
 714          */
 715         if (rt2x00queue_threshold(queue))
 716                 rt2x00queue_pause_queue(queue);
 717 
 718         spin_unlock(&queue->tx_lock);
 719         return ret;
 720 }
 721 
 722 int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
 723                              struct ieee80211_vif *vif)
 724 {
 725         struct rt2x00_intf *intf = vif_to_intf(vif);
 726 
 727         if (unlikely(!intf->beacon))
 728                 return -ENOBUFS;
 729 
 730         /*
 731          * Clean up the beacon skb.
 732          */
 733         rt2x00queue_free_skb(intf->beacon);
 734 
 735         /*
 736          * Clear beacon (single bssid devices don't need to clear the beacon
 737          * since the beacon queue will get stopped anyway).
 738          */
 739         if (rt2x00dev->ops->lib->clear_beacon)
 740                 rt2x00dev->ops->lib->clear_beacon(intf->beacon);
 741 
 742         return 0;
 743 }
 744 
 745 int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
 746                               struct ieee80211_vif *vif)
 747 {
 748         struct rt2x00_intf *intf = vif_to_intf(vif);
 749         struct skb_frame_desc *skbdesc;
 750         struct txentry_desc txdesc;
 751 
 752         if (unlikely(!intf->beacon))
 753                 return -ENOBUFS;
 754 
 755         /*
 756          * Clean up the beacon skb.
 757          */
 758         rt2x00queue_free_skb(intf->beacon);
 759 
 760         intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
 761         if (!intf->beacon->skb)
 762                 return -ENOMEM;
 763 
 764         /*
 765          * Copy all TX descriptor information into txdesc,
 766          * after that we are free to use the skb->cb array
 767          * for our information.
 768          */
 769         rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc, NULL);
 770 
 771         /*
 772          * Fill in skb descriptor
 773          */
 774         skbdesc = get_skb_frame_desc(intf->beacon->skb);
 775         memset(skbdesc, 0, sizeof(*skbdesc));
 776 
 777         /*
 778          * Send beacon to hardware.
 779          */
 780         rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);
 781 
 782         return 0;
 783 
 784 }
 785 
 786 bool rt2x00queue_for_each_entry(struct data_queue *queue,
 787                                 enum queue_index start,
 788                                 enum queue_index end,
 789                                 void *data,
 790                                 bool (*fn)(struct queue_entry *entry,
 791                                            void *data))
 792 {
 793         unsigned long irqflags;
 794         unsigned int index_start;
 795         unsigned int index_end;
 796         unsigned int i;
 797 
 798         if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
 799                 rt2x00_err(queue->rt2x00dev,
 800                            "Entry requested from invalid index range (%d - %d)\n",
 801                            start, end);
 802                 return true;
 803         }
 804 
 805         /*
 806          * Only protect the range we are going to loop over,
  807          * if during our loop an extra entry is set to pending
 808          * it should not be kicked during this run, since it
 809          * is part of another TX operation.
 810          */
 811         spin_lock_irqsave(&queue->index_lock, irqflags);
 812         index_start = queue->index[start];
 813         index_end = queue->index[end];
 814         spin_unlock_irqrestore(&queue->index_lock, irqflags);
 815 
 816         /*
  817          * Start from the TX done pointer; this guarantees that we will
 818          * send out all frames in the correct order.
 819          */
 820         if (index_start < index_end) {
 821                 for (i = index_start; i < index_end; i++) {
 822                         if (fn(&queue->entries[i], data))
 823                                 return true;
 824                 }
 825         } else {
 826                 for (i = index_start; i < queue->limit; i++) {
 827                         if (fn(&queue->entries[i], data))
 828                                 return true;
 829                 }
 830 
 831                 for (i = 0; i < index_end; i++) {
 832                         if (fn(&queue->entries[i], data))
 833                                 return true;
 834                 }
 835         }
 836 
 837         return false;
 838 }
 839 EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
 840 
 841 struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
 842                                           enum queue_index index)
 843 {
 844         struct queue_entry *entry;
 845         unsigned long irqflags;
 846 
 847         if (unlikely(index >= Q_INDEX_MAX)) {
 848                 rt2x00_err(queue->rt2x00dev, "Entry requested from invalid index type (%d)\n",
 849                            index);
 850                 return NULL;
 851         }
 852 
 853         spin_lock_irqsave(&queue->index_lock, irqflags);
 854 
 855         entry = &queue->entries[queue->index[index]];
 856 
 857         spin_unlock_irqrestore(&queue->index_lock, irqflags);
 858 
 859         return entry;
 860 }
 861 EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);
 862 
 863 void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
 864 {
 865         struct data_queue *queue = entry->queue;
 866         unsigned long irqflags;
 867 
 868         if (unlikely(index >= Q_INDEX_MAX)) {
 869                 rt2x00_err(queue->rt2x00dev,
 870                            "Index change on invalid index type (%d)\n", index);
 871                 return;
 872         }
 873 
 874         spin_lock_irqsave(&queue->index_lock, irqflags);
 875 
 876         queue->index[index]++;
 877         if (queue->index[index] >= queue->limit)
 878                 queue->index[index] = 0;
 879 
 880         entry->last_action = jiffies;
 881 
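              /*
               * Q_INDEX is advanced when an entry is handed to the hardware and
               * Q_INDEX_DONE when an entry completes, so "length" tracks the
               * number of entries currently owned by the device and "count" the
               * total number of completed entries.
               */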
 882         if (index == Q_INDEX) {
 883                 queue->length++;
 884         } else if (index == Q_INDEX_DONE) {
 885                 queue->length--;
 886                 queue->count++;
 887         }
 888 
 889         spin_unlock_irqrestore(&queue->index_lock, irqflags);
 890 }
 891 
 892 static void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
 893 {
 894         switch (queue->qid) {
 895         case QID_AC_VO:
 896         case QID_AC_VI:
 897         case QID_AC_BE:
 898         case QID_AC_BK:
 899                 /*
 900                  * For TX queues, we have to disable the queue
 901                  * inside mac80211.
 902                  */
 903                 ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
 904                 break;
 905         default:
 906                 break;
 907         }
 908 }
 909 void rt2x00queue_pause_queue(struct data_queue *queue)
 910 {
 911         if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
 912             !test_bit(QUEUE_STARTED, &queue->flags) ||
 913             test_and_set_bit(QUEUE_PAUSED, &queue->flags))
 914                 return;
 915 
 916         rt2x00queue_pause_queue_nocheck(queue);
 917 }
 918 EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);
 919 
 920 void rt2x00queue_unpause_queue(struct data_queue *queue)
 921 {
 922         if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
 923             !test_bit(QUEUE_STARTED, &queue->flags) ||
 924             !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
 925                 return;
 926 
 927         switch (queue->qid) {
 928         case QID_AC_VO:
 929         case QID_AC_VI:
 930         case QID_AC_BE:
 931         case QID_AC_BK:
 932                 /*
 933                  * For TX queues, we have to enable the queue
 934                  * inside mac80211.
 935                  */
 936                 ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
 937                 break;
 938         case QID_RX:
 939                 /*
 940                  * For RX we need to kick the queue now in order to
 941                  * receive frames.
 942                  */
 943                 queue->rt2x00dev->ops->lib->kick_queue(queue);
 944         default:
 945                 break;
 946         }
 947 }
 948 EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);
 949 
 950 void rt2x00queue_start_queue(struct data_queue *queue)
 951 {
 952         mutex_lock(&queue->status_lock);
 953 
 954         if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
 955             test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
 956                 mutex_unlock(&queue->status_lock);
 957                 return;
 958         }
 959 
 960         set_bit(QUEUE_PAUSED, &queue->flags);
 961 
 962         queue->rt2x00dev->ops->lib->start_queue(queue);
 963 
 964         rt2x00queue_unpause_queue(queue);
 965 
 966         mutex_unlock(&queue->status_lock);
 967 }
 968 EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);
 969 
 970 void rt2x00queue_stop_queue(struct data_queue *queue)
 971 {
 972         mutex_lock(&queue->status_lock);
 973 
 974         if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
 975                 mutex_unlock(&queue->status_lock);
 976                 return;
 977         }
 978 
 979         rt2x00queue_pause_queue_nocheck(queue);
 980 
 981         queue->rt2x00dev->ops->lib->stop_queue(queue);
 982 
 983         mutex_unlock(&queue->status_lock);
 984 }
 985 EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);
 986 
 987 void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
 988 {
 989         bool tx_queue =
 990                 (queue->qid == QID_AC_VO) ||
 991                 (queue->qid == QID_AC_VI) ||
 992                 (queue->qid == QID_AC_BE) ||
 993                 (queue->qid == QID_AC_BK);
 994 
 995         if (rt2x00queue_empty(queue))
 996                 return;
 997 
 998         /*
 999          * If we are not supposed to drop any pending
1000          * frames, this means we must force a start (=kick)
1001          * to the queue to make sure the hardware will
1002          * start transmitting.
1003          */
1004         if (!drop && tx_queue)
1005                 queue->rt2x00dev->ops->lib->kick_queue(queue);
1006 
1007         /*
 1008          * Check if the driver supports flushing; if that is the case we can
 1009          * defer the flushing to the driver. Otherwise we must use the
1010          * alternative which just waits for the queue to become empty.
1011          */
1012         if (likely(queue->rt2x00dev->ops->lib->flush_queue))
1013                 queue->rt2x00dev->ops->lib->flush_queue(queue, drop);
1014 
1015         /*
1016          * The queue flush has failed...
1017          */
1018         if (unlikely(!rt2x00queue_empty(queue)))
1019                 rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n",
1020                             queue->qid);
1021 }
1022 EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);
1023 
1024 void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
1025 {
1026         struct data_queue *queue;
1027 
1028         /*
1029          * rt2x00queue_start_queue will call ieee80211_wake_queue
 1030          * for each queue after it has been properly initialized.
1031          */
1032         tx_queue_for_each(rt2x00dev, queue)
1033                 rt2x00queue_start_queue(queue);
1034 
1035         rt2x00queue_start_queue(rt2x00dev->rx);
1036 }
1037 EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);
1038 
1039 void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
1040 {
1041         struct data_queue *queue;
1042 
1043         /*
1044          * rt2x00queue_stop_queue will call ieee80211_stop_queue
 1045          * as well, but we are completely shutting everything down
 1046          * now, so it is much safer to stop all TX queues at once
 1047          * and use rt2x00queue_stop_queue for cleaning up.
1048          */
1049         ieee80211_stop_queues(rt2x00dev->hw);
1050 
1051         tx_queue_for_each(rt2x00dev, queue)
1052                 rt2x00queue_stop_queue(queue);
1053 
1054         rt2x00queue_stop_queue(rt2x00dev->rx);
1055 }
1056 EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);
1057 
1058 void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
1059 {
1060         struct data_queue *queue;
1061 
1062         tx_queue_for_each(rt2x00dev, queue)
1063                 rt2x00queue_flush_queue(queue, drop);
1064 
1065         rt2x00queue_flush_queue(rt2x00dev->rx, drop);
1066 }
1067 EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);
1068 
1069 static void rt2x00queue_reset(struct data_queue *queue)
1070 {
1071         unsigned long irqflags;
1072         unsigned int i;
1073 
1074         spin_lock_irqsave(&queue->index_lock, irqflags);
1075 
1076         queue->count = 0;
1077         queue->length = 0;
1078 
1079         for (i = 0; i < Q_INDEX_MAX; i++)
1080                 queue->index[i] = 0;
1081 
1082         spin_unlock_irqrestore(&queue->index_lock, irqflags);
1083 }
1084 
1085 void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
1086 {
1087         struct data_queue *queue;
1088         unsigned int i;
1089 
1090         queue_for_each(rt2x00dev, queue) {
1091                 rt2x00queue_reset(queue);
1092 
1093                 for (i = 0; i < queue->limit; i++)
1094                         rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
1095         }
1096 }
1097 
1098 static int rt2x00queue_alloc_entries(struct data_queue *queue)
1099 {
1100         struct queue_entry *entries;
1101         unsigned int entry_size;
1102         unsigned int i;
1103 
1104         rt2x00queue_reset(queue);
1105 
1106         /*
1107          * Allocate all queue entries.
1108          */
1109         entry_size = sizeof(*entries) + queue->priv_size;
1110         entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
1111         if (!entries)
1112                 return -ENOMEM;
1113 
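      /*
       * The single allocation above holds an array of queue->limit
       * struct queue_entry objects followed by queue->limit per-entry
       * driver private areas of queue->priv_size bytes each; the helper
       * below computes the address of the i-th private area.
       */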
1114 #define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
1115         (((char *)(__base)) + ((__limit) * (__esize)) + \
1116             ((__index) * (__psize)))
1117 
1118         for (i = 0; i < queue->limit; i++) {
1119                 entries[i].flags = 0;
1120                 entries[i].queue = queue;
1121                 entries[i].skb = NULL;
1122                 entries[i].entry_idx = i;
1123                 entries[i].priv_data =
1124                     QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
1125                                             sizeof(*entries), queue->priv_size);
1126         }
1127 
1128 #undef QUEUE_ENTRY_PRIV_OFFSET
1129 
1130         queue->entries = entries;
1131 
1132         return 0;
1133 }
1134 
1135 static void rt2x00queue_free_skbs(struct data_queue *queue)
1136 {
1137         unsigned int i;
1138 
1139         if (!queue->entries)
1140                 return;
1141 
1142         for (i = 0; i < queue->limit; i++) {
1143                 rt2x00queue_free_skb(&queue->entries[i]);
1144         }
1145 }
1146 
1147 static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
1148 {
1149         unsigned int i;
1150         struct sk_buff *skb;
1151 
1152         for (i = 0; i < queue->limit; i++) {
1153                 skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL);
1154                 if (!skb)
1155                         return -ENOMEM;
1156                 queue->entries[i].skb = skb;
1157         }
1158 
1159         return 0;
1160 }
1161 
1162 int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
1163 {
1164         struct data_queue *queue;
1165         int status;
1166 
1167         status = rt2x00queue_alloc_entries(rt2x00dev->rx);
1168         if (status)
1169                 goto exit;
1170 
1171         tx_queue_for_each(rt2x00dev, queue) {
1172                 status = rt2x00queue_alloc_entries(queue);
1173                 if (status)
1174                         goto exit;
1175         }
1176 
1177         status = rt2x00queue_alloc_entries(rt2x00dev->bcn);
1178         if (status)
1179                 goto exit;
1180 
1181         if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE)) {
1182                 status = rt2x00queue_alloc_entries(rt2x00dev->atim);
1183                 if (status)
1184                         goto exit;
1185         }
1186 
1187         status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
1188         if (status)
1189                 goto exit;
1190 
1191         return 0;
1192 
1193 exit:
1194         rt2x00_err(rt2x00dev, "Queue entries allocation failed\n");
1195 
1196         rt2x00queue_uninitialize(rt2x00dev);
1197 
1198         return status;
1199 }
1200 
1201 void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
1202 {
1203         struct data_queue *queue;
1204 
1205         rt2x00queue_free_skbs(rt2x00dev->rx);
1206 
1207         queue_for_each(rt2x00dev, queue) {
1208                 kfree(queue->entries);
1209                 queue->entries = NULL;
1210         }
1211 }
1212 
1213 static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
1214                              struct data_queue *queue, enum data_queue_qid qid)
1215 {
1216         mutex_init(&queue->status_lock);
1217         spin_lock_init(&queue->tx_lock);
1218         spin_lock_init(&queue->index_lock);
1219 
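              /*
               * Default EDCA parameters; cw_min/cw_max appear to be stored as
               * exponents (2^5 and 2^10, matching the cw_min/cw_max comment in
               * rt2x00queue_allocate() below) rather than raw contention-window
               * values.
               */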
1220         queue->rt2x00dev = rt2x00dev;
1221         queue->qid = qid;
1222         queue->txop = 0;
1223         queue->aifs = 2;
1224         queue->cw_min = 5;
1225         queue->cw_max = 10;
1226 
1227         rt2x00dev->ops->queue_init(queue);
1228 
1229         queue->threshold = DIV_ROUND_UP(queue->limit, 10);
1230 }
1231 
1232 int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
1233 {
1234         struct data_queue *queue;
1235         enum data_queue_qid qid;
1236         unsigned int req_atim =
1237             rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE);
1238 
1239         /*
1240          * We need the following queues:
1241          * RX: 1
1242          * TX: ops->tx_queues
1243          * Beacon: 1
1244          * Atim: 1 (if required)
1245          */
1246         rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;
1247 
1248         queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
1249         if (!queue)
1250                 return -ENOMEM;
1251 
1252         /*
1253          * Initialize pointers
1254          */
1255         rt2x00dev->rx = queue;
1256         rt2x00dev->tx = &queue[1];
1257         rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
1258         rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;
1259 
1260         /*
1261          * Initialize queue parameters.
1262          * RX: qid = QID_RX
1263          * TX: qid = QID_AC_VO + index
1264          * TX: cw_min: 2^5 = 32.
1265          * TX: cw_max: 2^10 = 1024.
1266          * BCN: qid = QID_BEACON
1267          * ATIM: qid = QID_ATIM
1268          */
1269         rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);
1270 
1271         qid = QID_AC_VO;
1272         tx_queue_for_each(rt2x00dev, queue)
1273                 rt2x00queue_init(rt2x00dev, queue, qid++);
1274 
1275         rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
1276         if (req_atim)
1277                 rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);
1278 
1279         return 0;
1280 }
1281 
1282 void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
1283 {
1284         kfree(rt2x00dev->rx);
1285         rt2x00dev->rx = NULL;
1286         rt2x00dev->tx = NULL;
1287         rt2x00dev->bcn = NULL;
1288 }
