drivers/net/wireless/mediatek/mt76/agg-rx.c


DEFINITIONS

This source file includes the following definitions.
  1. mt76_aggr_release
  2. mt76_rx_aggr_release_frames
  3. mt76_rx_aggr_release_head
  4. mt76_rx_aggr_check_release
  5. mt76_rx_aggr_reorder_work
  6. mt76_rx_aggr_check_ctl
  7. mt76_rx_aggr_reorder
  8. mt76_rx_aggr_start
  9. mt76_rx_aggr_shutdown
  10. mt76_rx_aggr_stop

// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2018 Felix Fietkau <nbd@nbd.name>
 */
#include "mt76.h"

#define REORDER_TIMEOUT (HZ / 10)

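/*
 * Move the frame stored at reorder slot @idx (if any) to @frames and
 * advance the window head by one sequence number. The head advances
 * even when the slot is empty, so callers can use this to skip holes.
 */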
static void
mt76_aggr_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames, int idx)
{
	struct sk_buff *skb;

	tid->head = ieee80211_sn_inc(tid->head);

	skb = tid->reorder_buf[idx];
	if (!skb)
		return;

	tid->reorder_buf[idx] = NULL;
	tid->nframes--;
	__skb_queue_tail(frames, skb);
}

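/*
 * Flush every reorder slot with a sequence number below @head, moving
 * the window head forward until it reaches @head.
 */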
static void
mt76_rx_aggr_release_frames(struct mt76_rx_tid *tid,
			    struct sk_buff_head *frames,
			    u16 head)
{
	int idx;

	while (ieee80211_sn_less(tid->head, head)) {
		idx = tid->head % tid->size;
		mt76_aggr_release(tid, frames, idx);
	}
}

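/*
 * Release the longest run of consecutively received frames starting at
 * the current window head, stopping at the first missing slot.
 */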
static void
mt76_rx_aggr_release_head(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
{
	int idx = tid->head % tid->size;

	while (tid->reorder_buf[idx]) {
		mt76_aggr_release(tid, frames, idx);
		idx = tid->head % tid->size;
	}
}

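/*
 * Scan the reorder buffer for frames that have been held longer than
 * REORDER_TIMEOUT, force the window past each of them, then release any
 * run of in-order frames that became contiguous as a result.
 */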
static void
mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status;
	struct sk_buff *skb;
	int start, idx, nframes;

	if (!tid->nframes)
		return;

	mt76_rx_aggr_release_head(tid, frames);

	start = tid->head % tid->size;
	nframes = tid->nframes;

	for (idx = (tid->head + 1) % tid->size;
	     idx != start && nframes;
	     idx = (idx + 1) % tid->size) {
		skb = tid->reorder_buf[idx];
		if (!skb)
			continue;

		nframes--;
		status = (struct mt76_rx_status *)skb->cb;
		if (!time_after(jiffies,
				status->reorder_time + REORDER_TIMEOUT))
			continue;

		mt76_rx_aggr_release_frames(tid, frames, status->seqno);
	}

	mt76_rx_aggr_release_head(tid, frames);
}

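/*
 * Delayed work callback: flush timed-out frames up to the network stack
 * and re-arm the work if frames are still pending in the reorder buffer.
 */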
static void
mt76_rx_aggr_reorder_work(struct work_struct *work)
{
	struct mt76_rx_tid *tid = container_of(work, struct mt76_rx_tid,
					       reorder_work.work);
	struct mt76_dev *dev = tid->dev;
	struct sk_buff_head frames;
	int nframes;

	__skb_queue_head_init(&frames);

	local_bh_disable();
	rcu_read_lock();

	spin_lock(&tid->lock);
	mt76_rx_aggr_check_release(tid, &frames);
	nframes = tid->nframes;
	spin_unlock(&tid->lock);

	if (nframes)
		ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
					     REORDER_TIMEOUT);
	mt76_rx_complete(dev, &frames, NULL);

	rcu_read_unlock();
	local_bh_enable();
}

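/*
 * Handle BlockAck request (BAR) control frames: move the receive window
 * to the BAR's starting sequence number and release everything below it.
 */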
static void
mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
	struct mt76_wcid *wcid = status->wcid;
	struct mt76_rx_tid *tid;
	u16 seqno;

	if (!ieee80211_is_ctl(bar->frame_control))
		return;

	if (!ieee80211_is_back_req(bar->frame_control))
		return;

	status->tid = le16_to_cpu(bar->control) >> 12;
	seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
	tid = rcu_dereference(wcid->aggr[status->tid]);
	if (!tid)
		return;

	spin_lock_bh(&tid->lock);
	mt76_rx_aggr_release_frames(tid, frames, seqno);
	mt76_rx_aggr_release_head(tid, frames);
	spin_unlock_bh(&tid->lock);
}

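/*
 * Entry point for the shared mt76 RX path. Depending on where @skb falls
 * relative to the BA receive window, it is either passed through on
 * @frames immediately, dropped as a duplicate/out-of-window frame, or
 * parked in the reorder buffer until the window head catches up.
 */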
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_sta *sta;
	struct mt76_rx_tid *tid;
	bool sn_less;
	u16 seqno, head, size;
	u8 ackp, idx;

	__skb_queue_tail(frames, skb);

	sta = wcid_to_sta(wcid);
	if (!sta)
		return;

	if (!status->aggr) {
		mt76_rx_aggr_check_ctl(skb, frames);
		return;
	}

	/* not part of a BA session */
	ackp = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_ACK_POLICY_MASK;
	if (ackp != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
	    ackp != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
		return;

	tid = rcu_dereference(wcid->aggr[status->tid]);
	if (!tid)
		return;

	status->flag |= RX_FLAG_DUP_VALIDATED;
	spin_lock_bh(&tid->lock);

	if (tid->stopped)
		goto out;

	head = tid->head;
	seqno = status->seqno;
	size = tid->size;
	sn_less = ieee80211_sn_less(seqno, head);

	if (!tid->started) {
		if (sn_less)
			goto out;

		tid->started = true;
	}

	if (sn_less) {
		__skb_unlink(skb, frames);
		dev_kfree_skb(skb);
		goto out;
	}

	if (seqno == head) {
		tid->head = ieee80211_sn_inc(head);
		if (tid->nframes)
			mt76_rx_aggr_release_head(tid, frames);
		goto out;
	}

	__skb_unlink(skb, frames);

	/*
	 * Frame sequence number exceeds buffering window, free up some space
	 * by releasing previous frames
	 */
	if (!ieee80211_sn_less(seqno, head + size)) {
		head = ieee80211_sn_inc(ieee80211_sn_sub(seqno, size));
		mt76_rx_aggr_release_frames(tid, frames, head);
	}

	idx = seqno % size;

	/* Discard if the current slot is already in use */
	if (tid->reorder_buf[idx]) {
		dev_kfree_skb(skb);
		goto out;
	}

	status->reorder_time = jiffies;
	tid->reorder_buf[idx] = skb;
	tid->nframes++;
	mt76_rx_aggr_release_head(tid, frames);

	ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
				     REORDER_TIMEOUT);

out:
	spin_unlock_bh(&tid->lock);
}

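/*
 * Set up RX reordering for a newly established BlockAck session, with a
 * window of @size frames starting at sequence number @ssn. Any previous
 * session state for the same TID is torn down first.
 */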
int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno,
		       u16 ssn, u8 size)
{
	struct mt76_rx_tid *tid;

	mt76_rx_aggr_stop(dev, wcid, tidno);

	tid = kzalloc(struct_size(tid, reorder_buf, size), GFP_KERNEL);
	if (!tid)
		return -ENOMEM;

	tid->dev = dev;
	tid->head = ssn;
	tid->size = size;
	INIT_DELAYED_WORK(&tid->reorder_work, mt76_rx_aggr_reorder_work);
	spin_lock_init(&tid->lock);

	rcu_assign_pointer(wcid->aggr[tidno], tid);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_rx_aggr_start);

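/*
 * Mark the session as stopped so the RX path drops new frames, then free
 * any frames still parked in the reorder buffer.
 */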
static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
{
	u8 size = tid->size;
	int i;

	cancel_delayed_work(&tid->reorder_work);

	spin_lock_bh(&tid->lock);

	tid->stopped = true;
	for (i = 0; tid->nframes && i < size; i++) {
		struct sk_buff *skb = tid->reorder_buf[i];

		if (!skb)
			continue;

		tid->nframes--;
		dev_kfree_skb(skb);
	}

	spin_unlock_bh(&tid->lock);
}

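/*
 * Detach the RX reorder state for a TID from the station entry and free
 * it after an RCU grace period, so concurrent readers stay safe.
 */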
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno)
{
	struct mt76_rx_tid *tid;

	rcu_read_lock();

	tid = rcu_dereference(wcid->aggr[tidno]);
	if (tid) {
		rcu_assign_pointer(wcid->aggr[tidno], NULL);
		mt76_rx_aggr_shutdown(dev, tid);
		kfree_rcu(tid, rcu_head);
	}

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_rx_aggr_stop);
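
/*
 * Usage sketch (illustrative, not part of this file): a driver typically
 * wires mt76_rx_aggr_start()/mt76_rx_aggr_stop() into its ieee80211_ops
 * .ampdu_action callback. "my_dev" and "my_sta" below are hypothetical
 * placeholders for the driver-private structures that embed struct
 * mt76_dev and struct mt76_wcid; the mac80211 types and constants are
 * real.
 *
 *	static int
 *	my_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 *			struct ieee80211_ampdu_params *params)
 *	{
 *		struct my_dev *dev = hw->priv;
 *		struct my_sta *msta = (struct my_sta *)params->sta->drv_priv;
 *
 *		switch (params->action) {
 *		case IEEE80211_AMPDU_RX_START:
 *			// Open the reorder window at the starting sequence
 *			// number negotiated for the BlockAck session.
 *			mt76_rx_aggr_start(&dev->mt76, &msta->wcid,
 *					   params->tid, params->ssn,
 *					   params->buf_size);
 *			break;
 *		case IEEE80211_AMPDU_RX_STOP:
 *			mt76_rx_aggr_stop(&dev->mt76, &msta->wcid,
 *					  params->tid);
 *			break;
 *		default:
 *			break;
 *		}
 *
 *		return 0;
 *	}
 */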
